[PATCHv10,10/15] x86/mm, iommu/sva: Make LAM and SVM mutually exclusive

Message ID: 20221018113358.7833-11-kirill.shutemov@linux.intel.com
State: New
Series: Linear Address Masking enabling

Commit Message

Kirill A. Shutemov Oct. 18, 2022, 11:33 a.m. UTC
  IOMMU and SVM-capable devices know nothing about LAM and only expect
canonical addresses. An attempt to pass down a tagged pointer will lead
to an address translation failure.

By default, do not allow enabling LAM and using SVM in the same
process.

The new ARCH_FORCE_TAGGED_SVM arch_prctl() overrides the limitation.
By using the arch_prctl(), userspace takes responsibility to never pass
tagged addresses to the device.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 arch/x86/include/asm/mmu.h         |  6 ++++--
 arch/x86/include/asm/mmu_context.h |  2 ++
 arch/x86/include/uapi/asm/prctl.h  |  1 +
 arch/x86/kernel/process_64.c       | 13 +++++++++++++
 drivers/iommu/iommu-sva-lib.c      | 12 ++++++++++++
 include/linux/mmu_context.h        |  4 ++++
 6 files changed, 36 insertions(+), 2 deletions(-)
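
For illustration, the intended userspace flow might look like this (a
hypothetical sketch, not part of the series; the arch_prctl() numbers
are taken from the patch below, and nr_bits == 6 is assumed to select
LAM_U57 per the rest of the series):

	#include <sys/syscall.h>
	#include <unistd.h>

	#define ARCH_ENABLE_TAGGED_ADDR	0x4002
	#define ARCH_FORCE_TAGGED_SVM	0x4004

	int main(void)
	{
		/*
		 * Promise the kernel that no tagged address will ever be
		 * handed to an SVM-capable device. Without this, enabling
		 * LAM fails with -EBUSY once a PASID is bound, and
		 * binding a PASID fails once LAM is enabled.
		 */
		if (syscall(SYS_arch_prctl, ARCH_FORCE_TAGGED_SVM, 0))
			return 1;

		/* LAM can now be enabled even if SVM is (or will be) in use. */
		if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, 6))
			return 1;

		return 0;
	}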
  

Comments

Ashok Raj Oct. 18, 2022, 8:07 p.m. UTC | #1
On Tue, Oct 18, 2022 at 02:33:53PM +0300, Kirill A. Shutemov wrote:
> IOMMU and SVM-capable devices know nothing about LAM and only expect
> canonical addresses. Attempt to pass down tagged pointer will lead to
> address translation failure.
> 
> By default do not allow to enable both LAM and use SVM in the same
> process.
> 
> The new ARCH_FORCE_TAGGED_SVM arch_prctl() overrides the limitation.
> By using the arch_prctl() userspace takes responsibility to never pass
> tagged address to the device.

Reviewed-by: Ashok Raj <ashok.raj@intel.com>

> 
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> ---
>  arch/x86/include/asm/mmu.h         |  6 ++++--
>  arch/x86/include/asm/mmu_context.h |  2 ++
>  arch/x86/include/uapi/asm/prctl.h  |  1 +
>  arch/x86/kernel/process_64.c       | 13 +++++++++++++
>  drivers/iommu/iommu-sva-lib.c      | 12 ++++++++++++
>  include/linux/mmu_context.h        |  4 ++++
>  6 files changed, 36 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
> index 2fdb390040b5..cce9b32b0d6d 100644
> --- a/arch/x86/include/asm/mmu.h
> +++ b/arch/x86/include/asm/mmu.h
> @@ -9,9 +9,11 @@
>  #include <linux/bits.h>
>  
>  /* Uprobes on this MM assume 32-bit code */
> -#define MM_CONTEXT_UPROBE_IA32	BIT(0)
> +#define MM_CONTEXT_UPROBE_IA32		BIT(0)
>  /* vsyscall page is accessible on this MM */
> -#define MM_CONTEXT_HAS_VSYSCALL	BIT(1)
> +#define MM_CONTEXT_HAS_VSYSCALL		BIT(1)

Nit: Looks like the two above format changes got in here :-)

Cheers,
Ashok
  
Dave Hansen Oct. 18, 2022, 9 p.m. UTC | #2
On 10/18/22 04:33, Kirill A. Shutemov wrote:
> IOMMU and SVM-capable devices know nothing about LAM and only expect
> canonical addresses. Attempt to pass down tagged pointer will lead to

		      ^ An attempt...

> address translation failure.
> 
> By default do not allow to enable both LAM and use SVM in the same
> process.
> 
> The new ARCH_FORCE_TAGGED_SVM arch_prctl() overrides the limitation.
> By using the arch_prctl() userspace takes responsibility to never pass
> tagged address to the device.
> 
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> ---
>  arch/x86/include/asm/mmu.h         |  6 ++++--
>  arch/x86/include/asm/mmu_context.h |  2 ++
>  arch/x86/include/uapi/asm/prctl.h  |  1 +
>  arch/x86/kernel/process_64.c       | 13 +++++++++++++
>  drivers/iommu/iommu-sva-lib.c      | 12 ++++++++++++
>  include/linux/mmu_context.h        |  4 ++++
>  6 files changed, 36 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
> index 2fdb390040b5..cce9b32b0d6d 100644
> --- a/arch/x86/include/asm/mmu.h
> +++ b/arch/x86/include/asm/mmu.h
> @@ -9,9 +9,11 @@
>  #include <linux/bits.h>
>  
>  /* Uprobes on this MM assume 32-bit code */
> -#define MM_CONTEXT_UPROBE_IA32	BIT(0)
> +#define MM_CONTEXT_UPROBE_IA32		BIT(0)
>  /* vsyscall page is accessible on this MM */
> -#define MM_CONTEXT_HAS_VSYSCALL	BIT(1)
> +#define MM_CONTEXT_HAS_VSYSCALL		BIT(1)
> +/* Allow LAM and SVM coexisting */
> +#define MM_CONTEXT_FORCE_TAGGED_SVM	BIT(2)
>  
>  /*
>   * x86 has arch-specific MMU state beyond what lives in mm_struct.
> diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
> index b0e9ea23758b..6b9ac2c60cec 100644
> --- a/arch/x86/include/asm/mmu_context.h
> +++ b/arch/x86/include/asm/mmu_context.h
> @@ -113,6 +113,8 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
>  	mm->context.untag_mask = -1UL;
>  }
>  
> +#define arch_pgtable_dma_compat(mm)	\
> +	(!mm_lam_cr3_mask(mm) || (mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVM))
>  #else

This needs to be a 'static inline' unless there's a compelling and
documented reason that it can't be.
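
A minimal sketch of the static inline form being asked for (not
necessarily the final code):

	static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
	{
		/* LAM is off, or userspace promised untagged DMA addresses */
		return !mm_lam_cr3_mask(mm) ||
		       (mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVM);
	}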

>  static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
> diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
> index a31e27b95b19..7bd22defb558 100644
> --- a/arch/x86/include/uapi/asm/prctl.h
> +++ b/arch/x86/include/uapi/asm/prctl.h
> @@ -23,5 +23,6 @@
>  #define ARCH_GET_UNTAG_MASK		0x4001
>  #define ARCH_ENABLE_TAGGED_ADDR		0x4002
>  #define ARCH_GET_MAX_TAG_BITS		0x4003
> +#define ARCH_FORCE_TAGGED_SVM		0x4004
>  
>  #endif /* _ASM_X86_PRCTL_H */
> diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
> index 9952e9f517ec..8faa8774bb93 100644
> --- a/arch/x86/kernel/process_64.c
> +++ b/arch/x86/kernel/process_64.c
> @@ -783,6 +783,13 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
>  		goto out;
>  	}
>  
> +#ifdef CONFIG_IOMMU_SVA
> +	if (pasid_valid(mm->pasid) &&
> +	    !(mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVM)) {
> +		ret = -EBUSY;
> +		goto out;
> +	}
> +#endif

Is this #ifdef really necessary?  CONFIG_IOMMU_SVA selects IOASID,
without which pasid_valid() is just stubbed out to 0.


>  	if (!nr_bits) {
>  		ret = -EINVAL;
>  		goto out;
> @@ -893,6 +900,12 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
>  				(unsigned long __user *)arg2);
>  	case ARCH_ENABLE_TAGGED_ADDR:
>  		return prctl_enable_tagged_addr(task->mm, arg2);
> +	case ARCH_FORCE_TAGGED_SVM:
> +		if (mmap_write_lock_killable(task->mm))
> +			return -EINTR;
> +		task->mm->context.flags |= MM_CONTEXT_FORCE_TAGGED_SVM;
> +		mmap_write_unlock(task->mm);
> +		return 0;
>  	case ARCH_GET_MAX_TAG_BITS:
>  		if (!cpu_feature_enabled(X86_FEATURE_LAM))
>  			return put_user(0, (unsigned long __user *)arg2);
> diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c
> index 106506143896..593ae2472e2c 100644
> --- a/drivers/iommu/iommu-sva-lib.c
> +++ b/drivers/iommu/iommu-sva-lib.c
> @@ -2,6 +2,8 @@
>  /*
>   * Helpers for IOMMU drivers implementing SVA
>   */
> +#include <linux/mm.h>
> +#include <linux/mmu_context.h>
>  #include <linux/mutex.h>
>  #include <linux/sched/mm.h>
>  
> @@ -31,6 +33,15 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
>  	    min == 0 || max < min)
>  		return -EINVAL;
>  
> +	/* Serialize against address tagging enabling */
> +	if (mmap_write_lock_killable(mm))
> +		return -EINTR;
> +
> +	if (!arch_pgtable_dma_compat(mm)) {
> +		mmap_write_unlock(mm);
> +		return -EBUSY;
> +	}
> +
>  	mutex_lock(&iommu_sva_lock);
>  	/* Is a PASID already associated with this mm? */
>  	if (pasid_valid(mm->pasid)) {
> @@ -46,6 +57,7 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
>  		mm_pasid_set(mm, pasid);
>  out:
>  	mutex_unlock(&iommu_sva_lock);
> +	mmap_write_unlock(mm);
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
> diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
> index b9b970f7ab45..115e2b518079 100644
> --- a/include/linux/mmu_context.h
> +++ b/include/linux/mmu_context.h
> @@ -28,4 +28,8 @@ static inline void leave_mm(int cpu) { }
>  # define task_cpu_possible(cpu, p)	cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
>  #endif
>  
> +#ifndef arch_pgtable_dma_compat
> +#define arch_pgtable_dma_compat(mm)	true
> +#endif
> +
>  #endif
  
Kirill A. Shutemov Oct. 18, 2022, 10:33 p.m. UTC | #3
On Tue, Oct 18, 2022 at 02:00:38PM -0700, Dave Hansen wrote:
> > diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
> > index b0e9ea23758b..6b9ac2c60cec 100644
> > --- a/arch/x86/include/asm/mmu_context.h
> > +++ b/arch/x86/include/asm/mmu_context.h
> > @@ -113,6 +113,8 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
> >  	mm->context.untag_mask = -1UL;
> >  }
> >  
> > +#define arch_pgtable_dma_compat(mm)	\
> > +	(!mm_lam_cr3_mask(mm) || (mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVM))
> >  #else
> 
> This needs to be a 'static inline' unless there's a compelling and
> documented reason that it can't be.

Seems to work fine.

> >  static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
> > diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
> > index a31e27b95b19..7bd22defb558 100644
> > --- a/arch/x86/include/uapi/asm/prctl.h
> > +++ b/arch/x86/include/uapi/asm/prctl.h
> > @@ -23,5 +23,6 @@
> >  #define ARCH_GET_UNTAG_MASK		0x4001
> >  #define ARCH_ENABLE_TAGGED_ADDR		0x4002
> >  #define ARCH_GET_MAX_TAG_BITS		0x4003
> > +#define ARCH_FORCE_TAGGED_SVM		0x4004
> >  
> >  #endif /* _ASM_X86_PRCTL_H */
> > diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
> > index 9952e9f517ec..8faa8774bb93 100644
> > --- a/arch/x86/kernel/process_64.c
> > +++ b/arch/x86/kernel/process_64.c
> > @@ -783,6 +783,13 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
> >  		goto out;
> >  	}
> >  
> > +#ifdef CONFIG_IOMMU_SVA
> > +	if (pasid_valid(mm->pasid) &&
> > +	    !(mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVM)) {
> > +		ret = -EBUSY;
> > +		goto out;
> > +	}
> > +#endif
> 
> Is this #ifdef really necessary?  CONFIG_IOMMU_SVA selects IOASID,
> without which pasid_valid() is just stubbed out to 0.

mm->pasid is only defined for CONFIG_IOMMU_SVA=y.

Do you want me to add mm_has_valid_pasid()?
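
A rough sketch of what such a helper might look like (hypothetical
shape and placement; it would let the #ifdef in
prctl_enable_tagged_addr() go away):

	#ifdef CONFIG_IOMMU_SVA
	static inline bool mm_has_valid_pasid(struct mm_struct *mm)
	{
		return pasid_valid(mm->pasid);
	}
	#else
	static inline bool mm_has_valid_pasid(struct mm_struct *mm)
	{
		return false;
	}
	#endif

The caller then compiles away naturally when CONFIG_IOMMU_SVA is not
set:

	if (mm_has_valid_pasid(mm) &&
	    !(mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVM)) {
		ret = -EBUSY;
		goto out;
	}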
  
Kirill A. Shutemov Oct. 18, 2022, 10:35 p.m. UTC | #4
On Tue, Oct 18, 2022 at 01:07:29PM -0700, Ashok Raj wrote:
> On Tue, Oct 18, 2022 at 02:33:53PM +0300, Kirill A. Shutemov wrote:
> > IOMMU and SVM-capable devices know nothing about LAM and only expect
> > canonical addresses. Attempt to pass down tagged pointer will lead to
> > address translation failure.
> > 
> > By default do not allow to enable both LAM and use SVM in the same
> > process.
> > 
> > The new ARCH_FORCE_TAGGED_SVM arch_prctl() overrides the limitation.
> > By using the arch_prctl() userspace takes responsibility to never pass
> > tagged address to the device.
> 
> Reviewed-by: Ashok Raj <ashok.raj@intel.com>

Thanks!

> > Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> > ---
> >  arch/x86/include/asm/mmu.h         |  6 ++++--
> >  arch/x86/include/asm/mmu_context.h |  2 ++
> >  arch/x86/include/uapi/asm/prctl.h  |  1 +
> >  arch/x86/kernel/process_64.c       | 13 +++++++++++++
> >  drivers/iommu/iommu-sva-lib.c      | 12 ++++++++++++
> >  include/linux/mmu_context.h        |  4 ++++
> >  6 files changed, 36 insertions(+), 2 deletions(-)
> > 
> > diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
> > index 2fdb390040b5..cce9b32b0d6d 100644
> > --- a/arch/x86/include/asm/mmu.h
> > +++ b/arch/x86/include/asm/mmu.h
> > @@ -9,9 +9,11 @@
> >  #include <linux/bits.h>
> >  
> >  /* Uprobes on this MM assume 32-bit code */
> > -#define MM_CONTEXT_UPROBE_IA32	BIT(0)
> > +#define MM_CONTEXT_UPROBE_IA32		BIT(0)
> >  /* vsyscall page is accessible on this MM */
> > -#define MM_CONTEXT_HAS_VSYSCALL	BIT(1)
> > +#define MM_CONTEXT_HAS_VSYSCALL		BIT(1)
> 
> Nit: Looks like the two above format changes got in here :-)

That's a side effect of keeping the new, longer flag aligned with the
rest.

A separate patch looks like overkill, no?
  
Dave Hansen Oct. 18, 2022, 10:43 p.m. UTC | #5
On 10/18/22 15:33, Kirill A. Shutemov wrote:
>> Is this #ifdef really necessary?  CONFIG_IOMMU_SVA selects IOASID,
>> without which pasid_valid() is just stubbed out to 0.
> mm->pasid is only defined for CONFIG_IOMMU_SVA=y.
> 
> Do you want me to add mm_has_valid_pasid()?

A quick grep makes it look like pasid_valid() could be *replaced* with
mm_has_valid_pasid(), since all the places doing pasid_valid() do it
with mm->pasid.
  
Ashok Raj Oct. 18, 2022, 11:59 p.m. UTC | #6
On Wed, Oct 19, 2022 at 01:35:37AM +0300, Kirill A. Shutemov wrote:
> > >  
> > >  /* Uprobes on this MM assume 32-bit code */
> > > -#define MM_CONTEXT_UPROBE_IA32	BIT(0)
> > > +#define MM_CONTEXT_UPROBE_IA32		BIT(0)
> > >  /* vsyscall page is accessible on this MM */
> > > -#define MM_CONTEXT_HAS_VSYSCALL	BIT(1)
> > > +#define MM_CONTEXT_HAS_VSYSCALL		BIT(1)
> > 
> > Nit: Looks like the two above format changes got in here :-)
> 
> That's side effect of keeping the new longer flag aligned to the rest.
> 
> A separate patch looks like an overkill, no?

Agreed, just thought I'd flag it, but I don't think a new patch is
required.
  
Kirill A. Shutemov Oct. 19, 2022, 12:17 a.m. UTC | #7
On Tue, Oct 18, 2022 at 03:43:24PM -0700, Dave Hansen wrote:
> On 10/18/22 15:33, Kirill A. Shutemov wrote:
> >> Is this #ifdef really necessary?  CONFIG_IOMMU_SVA selects IOASID,
> >> without which pasid_valid() is just stubbed out to 0.
> > mm->pasid is only defined for CONFIG_IOMMU_SVA=y.
> > 
> > Do you want me to add mm_has_valid_pasid()?
> 
> A quick grep makes it look like pasid_valid() could be *replaced* with
> mm_has_pasid_valid() since all the places doing pasid_valid() do it with
> mm->pasid.

Almost: the second pasid_valid() in iommu_sva_alloc_pasid() is called
on a just-allocated pasid.

I guess we can just open-code this case.
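
Open-coding that second use might look like this (a sketch, assuming
ioasid_alloc() returns INVALID_IOASID on failure, as it does today):

	pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
	if (pasid == INVALID_IOASID)	/* was: !pasid_valid(pasid) */
		ret = -ENOMEM;
	else
		mm_pasid_set(mm, pasid);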
  
Vasant Hegde Oct. 19, 2022, 7:04 a.m. UTC | #8
Hi Kirill,

On 10/18/2022 5:03 PM, Kirill A. Shutemov wrote:
> IOMMU and SVM-capable devices know nothing about LAM and only expect
> canonical addresses. Attempt to pass down tagged pointer will lead to
> address translation failure.
> 
> By default do not allow to enable both LAM and use SVM in the same
> process.
> 
> The new ARCH_FORCE_TAGGED_SVM arch_prctl() overrides the limitation.

Better s/SVM/SVA/? So that it's consistent with IOMMU usage.

-Vasant
  

Patch

diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 2fdb390040b5..cce9b32b0d6d 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -9,9 +9,11 @@ 
 #include <linux/bits.h>
 
 /* Uprobes on this MM assume 32-bit code */
-#define MM_CONTEXT_UPROBE_IA32	BIT(0)
+#define MM_CONTEXT_UPROBE_IA32		BIT(0)
 /* vsyscall page is accessible on this MM */
-#define MM_CONTEXT_HAS_VSYSCALL	BIT(1)
+#define MM_CONTEXT_HAS_VSYSCALL		BIT(1)
+/* Allow LAM and SVM coexisting */
+#define MM_CONTEXT_FORCE_TAGGED_SVM	BIT(2)
 
 /*
  * x86 has arch-specific MMU state beyond what lives in mm_struct.
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index b0e9ea23758b..6b9ac2c60cec 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -113,6 +113,8 @@  static inline void mm_reset_untag_mask(struct mm_struct *mm)
 	mm->context.untag_mask = -1UL;
 }
 
+#define arch_pgtable_dma_compat(mm)	\
+	(!mm_lam_cr3_mask(mm) || (mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVM))
 #else
 
 static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
index a31e27b95b19..7bd22defb558 100644
--- a/arch/x86/include/uapi/asm/prctl.h
+++ b/arch/x86/include/uapi/asm/prctl.h
@@ -23,5 +23,6 @@ 
 #define ARCH_GET_UNTAG_MASK		0x4001
 #define ARCH_ENABLE_TAGGED_ADDR		0x4002
 #define ARCH_GET_MAX_TAG_BITS		0x4003
+#define ARCH_FORCE_TAGGED_SVM		0x4004
 
 #endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9952e9f517ec..8faa8774bb93 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -783,6 +783,13 @@  static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
 		goto out;
 	}
 
+#ifdef CONFIG_IOMMU_SVA
+	if (pasid_valid(mm->pasid) &&
+	    !(mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVM)) {
+		ret = -EBUSY;
+		goto out;
+	}
+#endif
 	if (!nr_bits) {
 		ret = -EINVAL;
 		goto out;
@@ -893,6 +900,12 @@  long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
 				(unsigned long __user *)arg2);
 	case ARCH_ENABLE_TAGGED_ADDR:
 		return prctl_enable_tagged_addr(task->mm, arg2);
+	case ARCH_FORCE_TAGGED_SVM:
+		if (mmap_write_lock_killable(task->mm))
+			return -EINTR;
+		task->mm->context.flags |= MM_CONTEXT_FORCE_TAGGED_SVM;
+		mmap_write_unlock(task->mm);
+		return 0;
 	case ARCH_GET_MAX_TAG_BITS:
 		if (!cpu_feature_enabled(X86_FEATURE_LAM))
 			return put_user(0, (unsigned long __user *)arg2);
diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c
index 106506143896..593ae2472e2c 100644
--- a/drivers/iommu/iommu-sva-lib.c
+++ b/drivers/iommu/iommu-sva-lib.c
@@ -2,6 +2,8 @@ 
 /*
  * Helpers for IOMMU drivers implementing SVA
  */
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
 #include <linux/mutex.h>
 #include <linux/sched/mm.h>
 
@@ -31,6 +33,15 @@  int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
 	    min == 0 || max < min)
 		return -EINVAL;
 
+	/* Serialize against address tagging enabling */
+	if (mmap_write_lock_killable(mm))
+		return -EINTR;
+
+	if (!arch_pgtable_dma_compat(mm)) {
+		mmap_write_unlock(mm);
+		return -EBUSY;
+	}
+
 	mutex_lock(&iommu_sva_lock);
 	/* Is a PASID already associated with this mm? */
 	if (pasid_valid(mm->pasid)) {
@@ -46,6 +57,7 @@  int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
 		mm_pasid_set(mm, pasid);
 out:
 	mutex_unlock(&iommu_sva_lock);
+	mmap_write_unlock(mm);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index b9b970f7ab45..115e2b518079 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -28,4 +28,8 @@  static inline void leave_mm(int cpu) { }
 # define task_cpu_possible(cpu, p)	cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
 #endif
 
+#ifndef arch_pgtable_dma_compat
+#define arch_pgtable_dma_compat(mm)	true
+#endif
+
 #endif