[v3,2/7] iommu: Allow .iotlb_sync_map to fail and handle s390's -ENOMEM return

Message ID 20230102115619.2088685-3-schnelle@linux.ibm.com
State New
Series [v3,1/7] s390/ism: Set DMA coherent mask

Commit Message

Niklas Schnelle Jan. 2, 2023, 11:56 a.m. UTC
  On s390 .iotlb_sync_map is used to sync mappings to an underlying
hypervisor by letting the hypervisor inspect the synced IOVA range and
updating its shadow table. This however means that it can fail as the
hypervisor may run out of resources. This can be due to the hypervisor
being unable to pin guest pages, due to a limit on concurrently mapped
addresses such as vfio_iommu_type1.dma_entry_limit or other resources.
Either way such a failure to sync a mapping should result in
a DMA_MAPPING_EROR.

Now especially when running with batched IOTLB flushes for unmap it may
be that some IOVAs have already been invalidated but not yet synced via
.iotlb_sync_map. Thus if the hypervisor indicates running out of
resources, first do a global flush allowing the hypervisor to free
resources associated with these mappings and only if that also fails
report this error to callers.

Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
---
 drivers/iommu/amd/iommu.c   |  5 +++--
 drivers/iommu/apple-dart.c  |  5 +++--
 drivers/iommu/intel/iommu.c |  5 +++--
 drivers/iommu/iommu.c       | 20 ++++++++++++++++----
 drivers/iommu/msm_iommu.c   |  5 +++--
 drivers/iommu/mtk_iommu.c   |  5 +++--
 drivers/iommu/s390-iommu.c  | 29 ++++++++++++++++++++++++-----
 drivers/iommu/sprd-iommu.c  |  5 +++--
 drivers/iommu/tegra-gart.c  |  5 +++--
 include/linux/iommu.h       |  4 ++--
 10 files changed, 63 insertions(+), 25 deletions(-)
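
For readers following how this surfaces outside the IOMMU layer: a device
driver never sees the -ENOMEM from .iotlb_sync_map directly; the failed
mapping is torn down and reported through the usual DMA API. A minimal
sketch of the driver-side check (standard DMA API usage, not part of this
patch; the function name is made up for illustration):

	#include <linux/dma-mapping.h>

	/* Sketch: how a PCI driver observes the failure described above. */
	static int example_map_buffer(struct device *dev, void *buf, size_t len,
				      dma_addr_t *handle)
	{
		dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		/*
		 * If the s390 IOMMU could not sync the new mapping (e.g. the
		 * hypervisor ran out of pinning resources), the mapping is
		 * undone and dma_map_single() returns DMA_MAPPING_ERROR.
		 */
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		*handle = dma;
		return 0;
	}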
  

Comments

Alexandra Winter Jan. 2, 2023, 6:25 p.m. UTC | #1
On 02.01.23 12:56, Niklas Schnelle wrote:
> On s390 .iotlb_sync_map is used to sync mappings to an underlying
> hypervisor by letting the hypervisor inspect the synced IOVA range and
> updating its shadow table. This however means that it can fail as the
> hypervisor may run out of resources. This can be due to the hypervisor
> being unable to pin guest pages, due to a limit on concurrently mapped
> addresses such as vfio_iommu_type1.dma_entry_limit or other resources.
> Either way such a failure to sync a mapping should result in
> a DMA_MAPPING_EROR.
> 
> Now especially when running with batched IOTLB flushes for unmap it may
> be that some IOVAs have already been invalidated but not yet synced via
> .iotlb_sync_map. Thus if the hypervisor indicates running out of
> resources, first do a global flush allowing the hypervisor to free
> resources associated with these mappings and only if that also fails
> report this error to callers.
> 
> Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
> ---
Just a small typo, I noticed
[...]
> diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
> index ed33c6cce083..6ba38b4f5b37 100644
> --- a/drivers/iommu/s390-iommu.c
> +++ b/drivers/iommu/s390-iommu.c
> @@ -210,6 +210,14 @@ static void s390_iommu_release_device(struct device *dev)
>  		__s390_iommu_detach_device(zdev);
>  }
>  
> +
> +static int zpci_refresh_all(struct zpci_dev *zdev)
> +{
> +	return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
> +				  zdev->end_dma - zdev->start_dma + 1);
> +
> +}
> +
>  static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
>  {
>  	struct s390_domain *s390_domain = to_s390_domain(domain);
> @@ -217,8 +225,7 @@ static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
>  
>  	rcu_read_lock();
>  	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
> -		zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
> -				   zdev->end_dma - zdev->start_dma + 1);
> +		zpci_refresh_all(zdev);
>  	}
>  	rcu_read_unlock();
>  }
> @@ -242,20 +249,32 @@ static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
>  	rcu_read_unlock();
>  }
>  
> -static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
> +static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
>  				      unsigned long iova, size_t size)
>  {
>  	struct s390_domain *s390_domain = to_s390_domain(domain);
>  	struct zpci_dev *zdev;
> +	int ret = 0;
>  
>  	rcu_read_lock();
>  	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
>  		if (!zdev->tlb_refresh)
>  			continue;
> -		zpci_refresh_trans((u64)zdev->fh << 32,
> -				   iova, size);
> +		ret = zpci_refresh_trans((u64)zdev->fh << 32,
> +					 iova, size);
> +		/*
> +		 * let the hypervisor disover invalidated entries
			typo: s/disover/discover/g
> +		 * allowing it to free IOVAs and unpin pages
> +		 */
> +		if (ret == -ENOMEM) {
> +			ret = zpci_refresh_all(zdev);
> +			if (ret)
> +				break;
> +		}
>  	}
>  	rcu_read_unlock();
> +
> +	return ret;
>  }
>  
>  static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
[...]
  
Niklas Schnelle Jan. 3, 2023, 8:16 a.m. UTC | #2
On Mon, 2023-01-02 at 19:25 +0100, Alexandra Winter wrote:
> 
> On 02.01.23 12:56, Niklas Schnelle wrote:
> > On s390 .iotlb_sync_map is used to sync mappings to an underlying
> > hypervisor by letting the hypervisor inspect the synced IOVA range and
> > updating its shadow table. This however means that it can fail as the
> > hypervisor may run out of resources. This can be due to the hypervisor
> > being unable to pin guest pages, due to a limit on concurrently mapped
> > addresses such as vfio_iommu_type1.dma_entry_limit or other resources.
> > Either way such a failure to sync a mapping should result in
> > a DMA_MAPPING_EROR.
> > 
> > Now especially when running with batched IOTLB flushes for unmap it may
> > be that some IOVAs have already been invalidated but not yet synced via
> > .iotlb_sync_map. Thus if the hypervisor indicates running out of
> > resources, first do a global flush allowing the hypervisor to free
> > resources associated with these mappings and only if that also fails
> > report this error to callers.
> > 
> > Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
> > ---
> Just a small typo, I noticed
> [...]

You mean the misspelled DMA_MAPPING_ERROR, right? Either way I did edit
the commit message for a bit more clarity on some of the details:

On s390 when using a paging hypervisor, .iotlb_sync_map is used to sync
mappings by letting the hypervisor inspect the synced IOVA range and
updating a shadow table. This however means that .iotlb_sync_map can
fail as the hypervisor may run out of resources while doing the sync.
This can be due to the hypervisor being unable to pin guest pages, due
to a limit on mapped addresses such as vfio_iommu_type1.dma_entry_limit
or lack of other resources. Either way such a failure to sync a mapping
should result in a DMA_MAPPING_ERROR.

Now especially when running with batched IOTLB flushes for unmap it may
be that some IOVAs have already been invalidated but not yet synced via
.iotlb_sync_map. Thus if the hypervisor indicates running out of
resources, first do a global flush allowing the hypervisor to free
resources associated with these mappings as well as retry creating the
new mappings, and only if that also fails report this error to callers.



> > diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
> > index ed33c6cce083..6ba38b4f5b37 100644
> > --- a/drivers/iommu/s390-iommu.c
> > +++ b/drivers/iommu/s390-iommu.c
> > @@ -210,6 +210,14 @@ static void s390_iommu_release_device(struct device *dev)
> >  		__s390_iommu_detach_device(zdev);
> >  }
> >  
> > +
> > +static int zpci_refresh_all(struct zpci_dev *zdev)
> > +{
> > +	return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
> > +				  zdev->end_dma - zdev->start_dma + 1);
> > +
> > +}
> > +
> >  static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
> >  {
> >  	struct s390_domain *s390_domain = to_s390_domain(domain);
> > @@ -217,8 +225,7 @@ static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
> >  
> >  	rcu_read_lock();
> >  	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
> > -		zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
> > -				   zdev->end_dma - zdev->start_dma + 1);
> > +		zpci_refresh_all(zdev);
> >  	}
> >  	rcu_read_unlock();
> >  }
> > @@ -242,20 +249,32 @@ static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
> >  	rcu_read_unlock();
> >  }
> >  
> > -static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
> > +static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
> >  				      unsigned long iova, size_t size)
> >  {
> >  	struct s390_domain *s390_domain = to_s390_domain(domain);
> >  	struct zpci_dev *zdev;
> > +	int ret = 0;
> >  
> >  	rcu_read_lock();
> >  	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
> >  		if (!zdev->tlb_refresh)
> >  			continue;
> > -		zpci_refresh_trans((u64)zdev->fh << 32,
> > -				   iova, size);
> > +		ret = zpci_refresh_trans((u64)zdev->fh << 32,
> > +					 iova, size);
> > +		/*
> > +		 * let the hypervisor disover invalidated entries
> 			typo: s/disover/discover/g
> > +		 * allowing it to free IOVAs and unpin pages
> > +		 */
> > +		if (ret == -ENOMEM) {
> > +			ret = zpci_refresh_all(zdev);
> > +			if (ret)
> > +				break;
> > +		}
> >  	}
> >  	rcu_read_unlock();
> > +
> > +	return ret;
> >  }
> >  
> >  static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
> [...]
  
Niklas Schnelle Jan. 3, 2023, 9:25 a.m. UTC | #3
On Tue, 2023-01-03 at 09:16 +0100, Niklas Schnelle wrote:
> On Mon, 2023-01-02 at 19:25 +0100, Alexandra Winter wrote:
> > 
> > On 02.01.23 12:56, Niklas Schnelle wrote:
> > > On s390 .iotlb_sync_map is used to sync mappings to an underlying
> > > hypervisor by letting the hypervisor inspect the synced IOVA range and
> > > updating its shadow table. This however means that it can fail as the
> > > hypervisor may run out of resources. This can be due to the hypervisor
> > > being unable to pin guest pages, due to a limit on concurrently mapped
> > > addresses such as vfio_iommu_type1.dma_entry_limit or other resources.
> > > Either way such a failure to sync a mapping should result in
> > > a DMA_MAPPING_EROR.
> > > 
> > > Now especially when running with batched IOTLB flushes for unmap it may
> > > be that some IOVAs have already been invalidated but not yet synced via
> > > .iotlb_sync_map. Thus if the hypervisor indicates running out of
> > > resources, first do a global flush allowing the hypervisor to free
> > > resources associated with these mappings and only if that also fails
> > > report this error to callers.
> > > 
> > > Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
> > > ---
> > Just a small typo, I noticed
> > [...]
> 
> You mean the misspelled DMA_MAPPING_ERROR, right? Either way I did edit
> the commit message for a bit more clarity on some of the details:
> 
> On s390 when using a paging hypervisor, .iotlb_sync_map is used to sync
> mappings by letting the hypervisor inspect the synced IOVA range and
> updating a shadow table. This however means that .iotlb_sync_map can
> fail as the hypervisor may run out of resources while doing the sync.
> This can be due to the hypervisor being unable to pin guest pages, due
> to a limit on mapped addresses such as vfio_iommu_type1.dma_entry_limit
> or lack of other resources. Either way such a failure to sync a mapping
> should result in a DMA_MAPPING_ERROR.
> 
> Now especially when running with batched IOTLB flushes for unmap it may
> be that some IOVAs have already been invalidated but not yet synced via
> .iotlb_sync_map. Thus if the hypervisor indicates running out of
> resources, first do a global flush allowing the hypervisor to free
> resources associated with these mappings as well as retry creating the
> new mappings, and only if that also fails report this error to callers.
> 
> 
> 
> > > diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
> > > index ed33c6cce083..6ba38b4f5b37 100644
> > > --- a/drivers/iommu/s390-iommu.c
> > > +++ b/drivers/iommu/s390-iommu.c
> > > @@ -210,6 +210,14 @@ static void s390_iommu_release_device(struct device *dev)
> > >  		__s390_iommu_detach_device(zdev);
> > >  }
> > >  
> > > +
> > > +static int zpci_refresh_all(struct zpci_dev *zdev)
> > > +{
> > > +	return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
> > > +				  zdev->end_dma - zdev->start_dma + 1);
> > > +
> > > +}
> > > +
> > >  static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
> > >  {
> > >  	struct s390_domain *s390_domain = to_s390_domain(domain);
> > > @@ -217,8 +225,7 @@ static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
> > >  
> > >  	rcu_read_lock();
> > >  	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
> > > -		zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
> > > -				   zdev->end_dma - zdev->start_dma + 1);
> > > +		zpci_refresh_all(zdev);
> > >  	}
> > >  	rcu_read_unlock();
> > >  }
> > > @@ -242,20 +249,32 @@ static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
> > >  	rcu_read_unlock();
> > >  }
> > >  
> > > -static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
> > > +static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
> > >  				      unsigned long iova, size_t size)
> > >  {
> > >  	struct s390_domain *s390_domain = to_s390_domain(domain);
> > >  	struct zpci_dev *zdev;
> > > +	int ret = 0;
> > >  
> > >  	rcu_read_lock();
> > >  	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
> > >  		if (!zdev->tlb_refresh)
> > >  			continue;
> > > -		zpci_refresh_trans((u64)zdev->fh << 32,
> > > -				   iova, size);
> > > +		ret = zpci_refresh_trans((u64)zdev->fh << 32,
> > > +					 iova, size);
> > > +		/*
> > > +		 * let the hypervisor disover invalidated entries
> > 			typo: s/disover/discover/g

I'm blind, missed this one. Added the change now.

> > > +		 * allowing it to free IOVAs and unpin pages
> > > +		 */
> > > +		if (ret == -ENOMEM) {
> > > +			ret = zpci_refresh_all(zdev);
> > > +			if (ret)
> > > +				break;
> > > +		}
> > >  	}
> > >  	rcu_read_unlock();
> > > +
> > > +	return ret;
> > >  }
> > >  
> > >  static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
> > [...]
>
  
Heiko Carstens Jan. 3, 2023, 9:26 a.m. UTC | #4
On Tue, Jan 03, 2023 at 09:16:22AM +0100, Niklas Schnelle wrote:
> On Mon, 2023-01-02 at 19:25 +0100, Alexandra Winter wrote:
> > 
> > On 02.01.23 12:56, Niklas Schnelle wrote:
> > > On s390 .iotlb_sync_map is used to sync mappings to an underlying
> > > hypervisor by letting the hypervisor inspect the synced IOVA range and
> > > updating its shadow table. This however means that it can fail as the
> > > hypervisor may run out of resources. This can be due to the hypervisor
> > > being unable to pin guest pages, due to a limit on concurrently mapped
> > > addresses such as vfio_iommu_type1.dma_entry_limit or other resources.
> > > Either way such a failure to sync a mapping should result in
> > > a DMA_MAPPING_EROR.
> > > 
> > > Now especially when running with batched IOTLB flushes for unmap it may
> > > be that some IOVAs have already been invalidated but not yet synced via
> > > .iotlb_sync_map. Thus if the hypervisor indicates running out of
> > > resources, first do a global flush allowing the hypervisor to free
> > > resources associated with these mappings and only if that also fails
> > > report this error to callers.
> > > 
> > > Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
> > > ---
> > Just a small typo, I noticed
> > [...]
> 
> You mean the misspelled DMA_MAPPING_ERROR, right? Either way I did edit

I think Alexandra meant the below:

> > > +		/*
> > > +		 * let the hypervisor disover invalidated entries
> > 			typo: s/disover/discover/g

Now you know why I always complain when people do full quotes and not
trim replies to relevant parts.
It is sometimes very hard to spot comments :)
  
Niklas Schnelle Jan. 3, 2023, 4:03 p.m. UTC | #5
On Mon, 2023-01-02 at 12:56 +0100, Niklas Schnelle wrote:
> On s390 .iotlb_sync_map is used to sync mappings to an underlying
> hypervisor by letting the hypervisor inspect the synced IOVA range and
> updating its shadow table. This however means that it can fail as the
> hypervisor may run out of resources. This can be due to the hypervisor
> being unable to pin guest pages, due to a limit on concurrently mapped
> addresses such as vfio_iommu_type1.dma_entry_limit or other resources.
> Either way such a failure to sync a mapping should result in
> a DMA_MAPPING_EROR.
> 
> Now especially when running with batched IOTLB flushes for unmap it may
> be that some IOVAs have already been invalidated but not yet synced via
> .iotlb_sync_map. Thus if the hypervisor indicates running out of
> resources, first do a global flush allowing the hypervisor to free
> resources associated with these mappings and only if that also fails
> report this error to callers.
> 
> Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
> ---
>  drivers/iommu/amd/iommu.c   |  5 +++--
>  drivers/iommu/apple-dart.c  |  5 +++--
>  drivers/iommu/intel/iommu.c |  5 +++--
>  drivers/iommu/iommu.c       | 20 ++++++++++++++++----
>  drivers/iommu/msm_iommu.c   |  5 +++--
>  drivers/iommu/mtk_iommu.c   |  5 +++--
>  drivers/iommu/s390-iommu.c  | 29 ++++++++++++++++++++++++-----
>  drivers/iommu/sprd-iommu.c  |  5 +++--
>  drivers/iommu/tegra-gart.c  |  5 +++--
>  include/linux/iommu.h       |  4 ++--
>  10 files changed, 63 insertions(+), 25 deletions(-)

Ok, the kernel test robot reported that I missed an implementation of
.iotlb_sync_map in drivers/iommu/sun50i-iommu.c during the rebase, as it
was only added in v6.2-rc1 by commit e563cc0c787c85 ("iommu/sun50i:
Implement .iotlb_sync_map"). I will add it and send a v4 including the
proposed commit message rewording too.
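
For reference, the missing hunk would be the same mechanical conversion
applied to the other drivers. A rough sketch, assuming the handler is named
sun50i_iommu_iotlb_sync_map with the (domain, iova, size) arguments
introduced by commit e563cc0c787c85; the existing function body is left
untouched and elided here:

-static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
-					unsigned long iova, size_t size)
+static int sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
+				       unsigned long iova, size_t size)
 {
 	/* ... existing body unchanged ... */
+	return 0;
 }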

> 
> diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
> index cbeaab55c0db..3df7d20e0e52 100644
> --- a/drivers/iommu/amd/iommu.c
> +++ b/drivers/iommu/amd/iommu.c
> @@ -2180,14 +2180,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
>  	return ret;
>  }
>  
> -static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
> -				     unsigned long iova, size_t size)
---8<---
  

Patch

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index cbeaab55c0db..3df7d20e0e52 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2180,14 +2180,15 @@  static int amd_iommu_attach_device(struct iommu_domain *dom,
 	return ret;
 }
 
-static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
-				     unsigned long iova, size_t size)
+static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
+				    unsigned long iova, size_t size)
 {
 	struct protection_domain *domain = to_pdomain(dom);
 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
 
 	if (ops->map_pages)
 		domain_flush_np_cache(domain, iova, size);
+	return 0;
 }
 
 static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 4f4a323be0d0..4a76f4d95459 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -344,10 +344,11 @@  static void apple_dart_iotlb_sync(struct iommu_domain *domain,
 	apple_dart_domain_flush_tlb(to_dart_domain(domain));
 }
 
-static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
-				      unsigned long iova, size_t size)
+static int apple_dart_iotlb_sync_map(struct iommu_domain *domain,
+				     unsigned long iova, size_t size)
 {
 	apple_dart_domain_flush_tlb(to_dart_domain(domain));
+	return 0;
 }
 
 static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 59df7e42fd53..3b36a544c8fa 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4725,8 +4725,8 @@  static bool risky_device(struct pci_dev *pdev)
 	return false;
 }
 
-static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
-				       unsigned long iova, size_t size)
+static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
+				      unsigned long iova, size_t size)
 {
 	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	unsigned long pages = aligned_nrpages(iova, size);
@@ -4736,6 +4736,7 @@  static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
 
 	xa_for_each(&dmar_domain->iommu_array, i, info)
 		__mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
+	return 0;
 }
 
 static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index de91dd88705b..8f97ed81b123 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2367,8 +2367,17 @@  static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
 	int ret;
 
 	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
-	if (ret == 0 && ops->iotlb_sync_map)
-		ops->iotlb_sync_map(domain, iova, size);
+	if (ret == 0 && ops->iotlb_sync_map) {
+		ret = ops->iotlb_sync_map(domain, iova, size);
+		if (ret)
+			goto out_err;
+	}
+
+	return ret;
+
+out_err:
+	/* undo mappings already done */
+	iommu_unmap(domain, iova, size);
 
 	return ret;
 }
@@ -2516,8 +2525,11 @@  static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			sg = sg_next(sg);
 	}
 
-	if (ops->iotlb_sync_map)
-		ops->iotlb_sync_map(domain, iova, mapped);
+	if (ops->iotlb_sync_map) {
+		ret = ops->iotlb_sync_map(domain, iova, mapped);
+		if (ret)
+			goto out_err;
+	}
 	return mapped;
 
 out_err:
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index c60624910872..62fc52765554 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -486,12 +486,13 @@  static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
 	return ret;
 }
 
-static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
-			       size_t size)
+static int msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+			      size_t size)
 {
 	struct msm_priv *priv = to_msm_priv(domain);
 
 	__flush_iotlb_range(iova, size, SZ_4K, false, priv);
+	return 0;
 }
 
 static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 2badd6acfb23..76d413aef1ef 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -758,12 +758,13 @@  static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
 	mtk_iommu_tlb_flush_range_sync(gather->start, length, dom->bank);
 }
 
-static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
-			       size_t size)
+static int mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+			      size_t size)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 
 	mtk_iommu_tlb_flush_range_sync(iova, size, dom->bank);
+	return 0;
 }
 
 static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index ed33c6cce083..6ba38b4f5b37 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -210,6 +210,14 @@  static void s390_iommu_release_device(struct device *dev)
 		__s390_iommu_detach_device(zdev);
 }
 
+
+static int zpci_refresh_all(struct zpci_dev *zdev)
+{
+	return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
+				  zdev->end_dma - zdev->start_dma + 1);
+
+}
+
 static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
 	struct s390_domain *s390_domain = to_s390_domain(domain);
@@ -217,8 +225,7 @@  static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
-		zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
-				   zdev->end_dma - zdev->start_dma + 1);
+		zpci_refresh_all(zdev);
 	}
 	rcu_read_unlock();
 }
@@ -242,20 +249,32 @@  static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
 	rcu_read_unlock();
 }
 
-static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
+static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
 				      unsigned long iova, size_t size)
 {
 	struct s390_domain *s390_domain = to_s390_domain(domain);
 	struct zpci_dev *zdev;
+	int ret = 0;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
 		if (!zdev->tlb_refresh)
 			continue;
-		zpci_refresh_trans((u64)zdev->fh << 32,
-				   iova, size);
+		ret = zpci_refresh_trans((u64)zdev->fh << 32,
+					 iova, size);
+		/*
+		 * let the hypervisor disover invalidated entries
+		 * allowing it to free IOVAs and unpin pages
+		 */
+		if (ret == -ENOMEM) {
+			ret = zpci_refresh_all(zdev);
+			if (ret)
+				break;
+		}
 	}
 	rcu_read_unlock();
+
+	return ret;
 }
 
 static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 219bfa11f7f4..9e590829992c 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -330,8 +330,8 @@  static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 	return size;
 }
 
-static void sprd_iommu_sync_map(struct iommu_domain *domain,
-				unsigned long iova, size_t size)
+static int sprd_iommu_sync_map(struct iommu_domain *domain,
+			       unsigned long iova, size_t size)
 {
 	struct sprd_iommu_domain *dom = to_sprd_domain(domain);
 	unsigned int reg;
@@ -343,6 +343,7 @@  static void sprd_iommu_sync_map(struct iommu_domain *domain,
 
 	/* clear IOMMU TLB buffer after page table updated */
 	sprd_iommu_write(dom->sdev, reg, 0xffffffff);
+	return 0;
 }
 
 static void sprd_iommu_sync(struct iommu_domain *domain,
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index ed53279d1106..a59966290e46 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -252,10 +252,11 @@  static int gart_iommu_of_xlate(struct device *dev,
 	return 0;
 }
 
-static void gart_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
-				size_t size)
+static int gart_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+			       size_t size)
 {
 	FLUSH_GART_REGS(gart_handle);
+	return 0;
 }
 
 static void gart_iommu_sync(struct iommu_domain *domain,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 46e1347bfa22..e7f76599f09e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -332,8 +332,8 @@  struct iommu_domain_ops {
 			      struct iommu_iotlb_gather *iotlb_gather);
 
 	void (*flush_iotlb_all)(struct iommu_domain *domain);
-	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
-			       size_t size);
+	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+			      size_t size);
 	void (*iotlb_sync)(struct iommu_domain *domain,
 			   struct iommu_iotlb_gather *iotlb_gather);