[5/5] iommu/sun50i: Invalidate iova in map and unmap callback
Commit Message
Mapped and unmapped iova addresses need to be invalidated immediately,
otherwise they may or may not work when used by a master or the CPU.
This was discovered when running a video decoder conformance test with
Cedrus. Some videos were occasionally decoded incorrectly and generated
page faults.
Fixes: 4100b8c229b3 ("iommu: Add Allwinner H6 IOMMU driver")
Signed-off-by: Jernej Skrabec <jernej.skrabec@gmail.com>
---
drivers/iommu/sun50i-iommu.c | 51 ++++++++++++++++++++++++++++++++++++
1 file changed, 51 insertions(+)
Comments
On 2022-10-13 19:12, Jernej Skrabec wrote:
> Mapped and unmapped iova addresses need to be invalidated immediately,
> otherwise they may or may not work when used by a master or the CPU.
>
> This was discovered when running a video decoder conformance test with
> Cedrus. Some videos were occasionally decoded incorrectly and generated
> page faults.
>
> Fixes: 4100b8c229b3 ("iommu: Add Allwinner H6 IOMMU driver")
> Signed-off-by: Jernej Skrabec <jernej.skrabec@gmail.com>
> ---
> drivers/iommu/sun50i-iommu.c | 51 ++++++++++++++++++++++++++++++++++++
> 1 file changed, 51 insertions(+)
>
> diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
> index 7c3b2ac552da..21e47ce6946a 100644
> --- a/drivers/iommu/sun50i-iommu.c
> +++ b/drivers/iommu/sun50i-iommu.c
> @@ -518,6 +518,53 @@ static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
> return page_table;
> }
>
> +static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu, unsigned long iova)
> +{
> + unsigned long flags;
> + u32 reg;
> + int ret;
> +
> + spin_lock_irqsave(&iommu->iommu_lock, flags);
> +
> + iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);
> +
> + iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
> + iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(11, 0));
> + iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG, IOMMU_TLB_IVLD_ENABLE_ENABLE);
> +
> + ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
> + reg, !reg, 1, 2000);
> + if (ret)
> + dev_warn(iommu->dev, "TLB invalidation timed out!\n");
> +
> + iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
> +
> + spin_unlock_irqrestore(&iommu->iommu_lock, flags);
> +}
> +
> +static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu, unsigned long iova)
> +{
> + unsigned long flags;
> + u32 reg;
> + int ret;
> +
> + spin_lock_irqsave(&iommu->iommu_lock, flags);
> +
> + iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);
> +
> + iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
> + iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG, IOMMU_PC_IVLD_ENABLE_ENABLE);
> +
> + ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
> + reg, !reg, 1, 2000);
> + if (ret)
> + dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
> +
> + iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
> +
> + spin_unlock_irqrestore(&iommu->iommu_lock, flags);
> +}
> +
> static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
> phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
> {
> @@ -546,6 +593,8 @@ static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
>
> *pte_addr = sun50i_mk_pte(paddr, prot);
> sun50i_table_flush(sun50i_domain, pte_addr, 1);
> + sun50i_iommu_zap_iova(iommu, iova);
> + sun50i_iommu_zap_ptw_cache(iommu, iova);
Consider hooking up .sync_map if you need that behaviour. I'd guess the
address/mask combination allows invalidating multiple pages at once,
which would be a heck of a lot more efficient.
In principle we probably shouldn't need walk cache maintenance for just
changing leaf entries, so that could perhaps be pushed further down into
sun50i_dte_get_page_table().
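For illustration, an untested sketch of the .iotlb_sync_map direction (the
hook lives in the domain ops on current kernels; until the manual confirms
that the address/mask registers can cover more than one page at a time,
this conservatively loops per 4K page):

static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
					unsigned long iova, size_t size)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	size_t offset;

	/* One callback per map operation: zap each 4K page of the new range */
	for (offset = 0; offset < size; offset += SZ_4K)
		sun50i_iommu_zap_iova(sun50i_domain->iommu, iova + offset);
}

plus a ".iotlb_sync_map = sun50i_iommu_iotlb_sync_map," entry next to the
existing callbacks, with the zap calls then dropped from .map itself.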
> out:
> return ret;
> @@ -571,6 +620,8 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova
>
> memset(pte_addr, 0, sizeof(*pte_addr));
> sun50i_table_flush(sun50i_domain, pte_addr, 1);
> + sun50i_iommu_zap_iova(sun50i_domain->iommu, iova);
> + sun50i_iommu_zap_ptw_cache(sun50i_domain->iommu, iova);
Hmm, we already have .iotlb_sync hooked up for this, so at best adding
more maintenance here is simply redundant, but at worst it would be
papering over some bug in sun50i_iommu_iotlb_sync() - if unmaps really
aren't working properly then that wants fixing instead. Of course it
could also be enhanced to use the gather mechanism to perform more
selective invalidations, but that's another patch in its own right.
Thanks,
Robin.
>
> return SZ_4K;
> }
On Friday, 14 October 2022 at 12:23:25 CEST, Robin Murphy wrote:
> On 2022-10-13 19:12, Jernej Skrabec wrote:
> > Mapped and unmapped iova addresses need to be invalidated immediately,
> > otherwise they may or may not work when used by a master or the CPU.
> >
> > This was discovered when running a video decoder conformance test with
> > Cedrus. Some videos were occasionally decoded incorrectly and generated
> > page faults.
> >
> > Fixes: 4100b8c229b3 ("iommu: Add Allwinner H6 IOMMU driver")
> > Signed-off-by: Jernej Skrabec <jernej.skrabec@gmail.com>
> > ---
> >
> > drivers/iommu/sun50i-iommu.c | 51 ++++++++++++++++++++++++++++++++++++
> > 1 file changed, 51 insertions(+)
> >
> > diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
> > index 7c3b2ac552da..21e47ce6946a 100644
> > --- a/drivers/iommu/sun50i-iommu.c
> > +++ b/drivers/iommu/sun50i-iommu.c
> > @@ -518,6 +518,53 @@ static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
> > return page_table;
> > }
> >
> > +static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu, unsigned long iova)
> > +{
> > + unsigned long flags;
> > + u32 reg;
> > + int ret;
> > +
> > + spin_lock_irqsave(&iommu->iommu_lock, flags);
> > +
> > + iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);
> > +
> > + iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
> > + iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(11, 0));
> > + iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG, IOMMU_TLB_IVLD_ENABLE_ENABLE);
> > +
> > + ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
> > + reg, !reg, 1, 2000);
> > + if (ret)
> > + dev_warn(iommu->dev, "TLB invalidation timed out!\n");
> > +
> > + iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
> > +
> > + spin_unlock_irqrestore(&iommu->iommu_lock, flags);
> > +}
> > +
> > +static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu, unsigned long iova)
> > +{
> > + unsigned long flags;
> > + u32 reg;
> > + int ret;
> > +
> > + spin_lock_irqsave(&iommu->iommu_lock, flags);
> > +
> > + iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);
> > +
> > + iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
> > + iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
> > + iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG, IOMMU_PC_IVLD_ENABLE_ENABLE);
> > +
> > + ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
> > + reg, !reg, 1, 2000);
> > + if (ret)
> > + dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
> > +
> > + iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
> > +
> > + spin_unlock_irqrestore(&iommu->iommu_lock, flags);
> > +}
> > +
> >
> > static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
> > phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
> > {
> >
> > @@ -546,6 +593,8 @@ static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
> > *pte_addr = sun50i_mk_pte(paddr, prot);
> > sun50i_table_flush(sun50i_domain, pte_addr, 1);
> >
> > + sun50i_iommu_zap_iova(iommu, iova);
> > + sun50i_iommu_zap_ptw_cache(iommu, iova);
>
> Consider hooking up .sync_map if you need that behaviour. I'd guess the
> address/mask combination allows invalidating multiple pages at once,
> which would be a heck of a lot more efficient.
>
> In principle we probably shouldn't need walk cache maintenance for just
> changing leaf entries, so that could perhaps be pushed further down into
> sun50i_dte_get_page_table().
Note that this is my first foray into IOMMU code, and the sun50i-iommu
documentation is confusing to say the least (it has English words in it, but
their combination often doesn't make sense).
I'll try that, thanks. Without this invalidation, handing a buffer between two
IOMMU-backed peripherals works, but CPU access often doesn't. PTW cache entries
can only be invalidated one at a time; it's the TLB invalidation that takes a mask.
>
> > out:
> > return ret;
> >
> > @@ -571,6 +620,8 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova
> > memset(pte_addr, 0, sizeof(*pte_addr));
> > sun50i_table_flush(sun50i_domain, pte_addr, 1);
> >
> > + sun50i_iommu_zap_iova(sun50i_domain->iommu, iova);
> > + sun50i_iommu_zap_ptw_cache(sun50i_domain->iommu, iova);
>
> Hmm, we already have .iotlb_sync hooked up for this, so at best adding
> more maintenance here is simply redundant, but at worst it would be
> papering over some bug in sun50i_iommu_iotlb_sync() - if unmaps really
> aren't working properly then that wants fixing instead. Of course it
> could also be enhanced to use the gather mechanism to perform more
> selective invalidations, but that's another patch in its own right.
.iotlb_sync assumes that the flush operation does the same thing as invalidating
each entry separately. It obviously doesn't, as my testing shows. I'll rewrite
.iotlb_sync to do invalidation instead of a flush and check if that works.
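Roughly along these lines, untested (assuming per-page invalidation, and .unmap
recording pages with iommu_iotlb_gather_add_page(domain, gather, iova, SZ_4K)
instead of zapping directly):

static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *gather)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	unsigned long iova;

	/* Nothing gathered (start stays at ULONG_MAX, end at 0) */
	if (gather->start > gather->end)
		return;

	/* gather->end is inclusive; invalidate one 4K page at a time */
	for (iova = gather->start; iova <= gather->end; iova += SZ_4K)
		sun50i_iommu_zap_iova(sun50i_domain->iommu, iova);
}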
I have two questions:
1. The documentation says it's mandatory to do TLB and PTW invalidation in the
interrupt handler when a page fault occurs. Do you see a reason for that?
2. The vendor driver and other IOMMU drivers hold spinlocks across the whole
.iova_to_phys, .map and .unmap functions. Should I add them here too?
Best regards,
Jernej
>
> Thanks,
> Robin.
>
> > return SZ_4K;
> >
> > }
@@ -518,6 +518,53 @@ static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
return page_table;
}
+static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu, unsigned long iova)
+{
+ unsigned long flags;
+ u32 reg;
+ int ret;
+
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
+
+ iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);
+
+ iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
+ iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(11, 0));
+ iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG, IOMMU_TLB_IVLD_ENABLE_ENABLE);
+
+ ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
+ reg, !reg, 1, 2000);
+ if (ret)
+ dev_warn(iommu->dev, "TLB invalidation timed out!\n");
+
+ iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
+
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+}
+
+static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu, unsigned long iova)
+{
+ unsigned long flags;
+ u32 reg;
+ int ret;
+
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
+
+ iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);
+
+ iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
+ iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG, IOMMU_PC_IVLD_ENABLE_ENABLE);
+
+ ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
+ reg, !reg, 1, 2000);
+ if (ret)
+ dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
+
+ iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
+
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+}
+
static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
@@ -546,6 +593,8 @@ static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
*pte_addr = sun50i_mk_pte(paddr, prot);
sun50i_table_flush(sun50i_domain, pte_addr, 1);
+ sun50i_iommu_zap_iova(iommu, iova);
+ sun50i_iommu_zap_ptw_cache(iommu, iova);
out:
return ret;
@@ -571,6 +620,8 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova
memset(pte_addr, 0, sizeof(*pte_addr));
sun50i_table_flush(sun50i_domain, pte_addr, 1);
+ sun50i_iommu_zap_iova(sun50i_domain->iommu, iova);
+ sun50i_iommu_zap_ptw_cache(sun50i_domain->iommu, iova);
return SZ_4K;
}