[06/13] KVM: Disallow hugepages for incompatible gmem bindings, but let 'em succeed

Message ID 20230921203331.3746712-7-seanjc@google.com
State New
Series KVM: guest_memfd fixes

Commit Message

Sean Christopherson Sept. 21, 2023, 8:33 p.m. UTC
Remove the restriction that a guest_memfd instance that supports hugepages
can *only* be bound by memslots that are 100% compatible with hugepage
mappings, and instead force KVM to use an order-0 mapping if the binding
isn't compatible with hugepages.

The intent of the draconian binding restriction was purely to simplify the
guest_memfd implementation, e.g. to avoid repeating the existing logic in
KVM x86 for precisely tracking which GFNs support hugepages.  But
checking that the binding's offset and size are compatible is just as easy
to do when KVM wants to create a mapping.

And on the other hand, completely rejecting bindings that are incompatible
with hugepages makes it practically impossible for userspace to use a
single guest_memfd instance for all guest memory, e.g. on x86 it would be
impossible to skip the legacy VGA hole while still allowing hugepage
mappings for the rest of guest memory.

Suggested-by: Michael Roth <michael.roth@amd.com>
Link: https://lore.kernel.org/all/20230918163647.m6bjgwusc7ww5tyu@amd.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 virt/kvm/guest_mem.c | 54 ++++++++++++++++++++++----------------------
 1 file changed, 27 insertions(+), 27 deletions(-)
  

Comments

Michael Roth Sept. 22, 2023, 10:42 p.m. UTC | #1
On Thu, Sep 21, 2023 at 01:33:23PM -0700, Sean Christopherson wrote:
> Remove the restriction that a guest_memfd instance that supports hugepages
> can *only* be bound by memslots that are 100% compatible with hugepage
> mappings, and instead force KVM to use an order-0 mapping if the binding
> isn't compatible with hugepages.
> 
> The intent of the draconian binding restriction was purely to simplify the
> guest_memfd implementation, e.g. to avoid repeating the existing logic in
> KVM x86 for precisely tracking which GFNs support hugepages.  But
> checking that the binding's offset and size are compatible is just as easy
> to do when KVM wants to create a mapping.
> 
> And on the other hand, completely rejecting bindings that are incompatible
> with hugepages makes it practically impossible for userspace to use a
> single guest_memfd instance for all guest memory, e.g. on x86 it would be
> impossible to skip the legacy VGA hole while still allowing hugepage
> mappings for the rest of guest memory.
> 
> Suggested-by: Michael Roth <michael.roth@amd.com>
> Link: https://lore.kernel.org/all/20230918163647.m6bjgwusc7ww5tyu@amd.com
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  virt/kvm/guest_mem.c | 54 ++++++++++++++++++++++----------------------
>  1 file changed, 27 insertions(+), 27 deletions(-)
> 
> diff --git a/virt/kvm/guest_mem.c b/virt/kvm/guest_mem.c
> index 68528e9cddd7..4f3a313f5532 100644
> --- a/virt/kvm/guest_mem.c
> +++ b/virt/kvm/guest_mem.c
> @@ -434,20 +434,6 @@ static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags,
>  	return err;
>  }
>  
> -static bool kvm_gmem_is_valid_size(loff_t size, u64 flags)
> -{
> -	if (size < 0 || !PAGE_ALIGNED(size))
> -		return false;
> -
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -	if ((flags & KVM_GUEST_MEMFD_ALLOW_HUGEPAGE) &&
> -	    !IS_ALIGNED(size, HPAGE_PMD_SIZE))
> -		return false;
> -#endif
> -
> -	return true;
> -}
> -
>  int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
>  {
>  	loff_t size = args->size;
> @@ -460,9 +446,15 @@ int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
>  	if (flags & ~valid_flags)
>  		return -EINVAL;
>  
> -	if (!kvm_gmem_is_valid_size(size, flags))
> +	if (size < 0 || !PAGE_ALIGNED(size))
>  		return -EINVAL;
>  
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +	if ((flags & KVM_GUEST_MEMFD_ALLOW_HUGEPAGE) &&
> +	    !IS_ALIGNED(size, HPAGE_PMD_SIZE))
> +		return -EINVAL;
> +#endif
> +
>  	return __kvm_gmem_create(kvm, size, flags, kvm_gmem_mnt);
>  }
>  
> @@ -470,7 +462,7 @@ int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
>  		  unsigned int fd, loff_t offset)
>  {
>  	loff_t size = slot->npages << PAGE_SHIFT;
> -	unsigned long start, end, flags;
> +	unsigned long start, end;
>  	struct kvm_gmem *gmem;
>  	struct inode *inode;
>  	struct file *file;
> @@ -489,16 +481,9 @@ int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
>  		goto err;
>  
>  	inode = file_inode(file);
> -	flags = (unsigned long)inode->i_private;
>  
> -	/*
> -	 * For simplicity, require the offset into the file and the size of the
> -	 * memslot to be aligned to the largest possible page size used to back
> -	 * the file (same as the size of the file itself).
> -	 */
> -	if (!kvm_gmem_is_valid_size(offset, flags) ||
> -	    !kvm_gmem_is_valid_size(size, flags))
> -		goto err;
> +	if (offset < 0 || !PAGE_ALIGNED(offset))
> +		return -EINVAL;
>  
>  	if (offset + size > i_size_read(inode))
>  		goto err;
> @@ -599,8 +584,23 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
>  	page = folio_file_page(folio, index);
>  
>  	*pfn = page_to_pfn(page);
> -	if (max_order)
> -		*max_order = compound_order(compound_head(page));
> +	if (!max_order)
> +		goto success;
> +
> +	*max_order = compound_order(compound_head(page));
> +	if (!*max_order)
> +		goto success;
> +
> +	/*
> +	 * For simplicity, allow mapping a hugepage if and only if the entire
> +	 * binding is compatible, i.e. don't bother supporting mapping interior
> +	 * sub-ranges with hugepages (unless userspace comes up with a *really*
> +	 * strong use case for needing hugepages within unaligned bindings).
> +	 */
> +	if (!IS_ALIGNED(slot->gmem.pgoff, 1ull << *max_order) ||
> +	    !IS_ALIGNED(slot->npages, 1ull << *max_order))
> +		*max_order = 0;

Thanks for working this in. Unfortunately on x86 the bulk of guest memory
ends up getting slotted directly above legacy regions at GFN 0x100, so the
associated slot still ends up failing these alignment checks if it tries to
match the gmemfd offsets up with the shared RAM/memfd offsets.

I tried to work around it in userspace by padding the gmemfd offset of
each slot to the next 2M boundary, but that also requires dynamically
growing the gmemfd inode to account for the padding of each new slot and
it gets ugly enough that I'm not sure it's any better than your
suggested alternative of using a unique gmemfd for each slot.

But what if we relax the check to simply make sure that any large folio
is fully contained by the range of the slot it is bound to? It *seems*
like that would still avoid stuff like mapping 2M pages in the NPT (or
setting up 2M RMP table entries) that aren't fully contained by a slot
while still allowing the bulk of guest memory to get mapped as 2M. Are
there other edge cases to consider?

The following seems to work for a basic 16GB SNP guest at least:

diff --git a/virt/kvm/guest_mem.c b/virt/kvm/guest_mem.c
index 9109bf5751ee..e73128d4ebc2 100644
--- a/virt/kvm/guest_mem.c
+++ b/virt/kvm/guest_mem.c
@@ -618,6 +618,7 @@ int __kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
                       gfn_t gfn, kvm_pfn_t *pfn, int *max_order, bool prep)
 {
        pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
+       pgoff_t huge_index;
        struct kvm_gmem *gmem;
        struct folio *folio;
        struct page *page;
@@ -662,9 +663,12 @@ int __kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
         * sub-ranges with hugepages (unless userspace comes up with a *really*
         * strong use case for needing hugepages within unaligned bindings).
         */
-       if (!IS_ALIGNED(slot->gmem.pgoff, 1ull << *max_order) ||
-           !IS_ALIGNED(slot->npages, 1ull << *max_order))
+       huge_index = round_down(index, 1ull << *max_order);
+       if (huge_index < ALIGN(slot->gmem.pgoff, 1ull << *max_order) ||
+           huge_index + (1ull << *max_order) > slot->gmem.pgoff + slot->npages) {
+               pr_debug("%s: GFN %llx failed alignment checks\n", __func__, gfn);
                *max_order = 0;
+       }
 success:
        r = 0;

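Side by side, the two policies boil down to the following predicates (purely
illustrative helpers, neither exists as-is; the real checks are open coded in
the diffs above):

/* Check in Sean's patch: the whole binding must be aligned to the order. */
static bool gmem_binding_is_aligned(struct kvm_memory_slot *slot, int order)
{
	return IS_ALIGNED(slot->gmem.pgoff, 1ull << order) &&
	       IS_ALIGNED(slot->npages, 1ull << order);
}

/*
 * Relaxed check above: only the folio backing this fault must lie entirely
 * inside the binding.
 */
static bool gmem_folio_is_contained(struct kvm_memory_slot *slot,
				    pgoff_t index, int order)
{
	pgoff_t huge_index = round_down(index, 1ull << order);

	return huge_index >= ALIGN(slot->gmem.pgoff, 1ull << order) &&
	       huge_index + (1ull << order) <= slot->gmem.pgoff + slot->npages;
}
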
-Mike

> +success:
>  	r = 0;
>  
>  out_unlock:
> -- 
> 2.42.0.515.g380fc7ccd1-goog
>
  
Sean Christopherson Sept. 28, 2023, 6:31 p.m. UTC | #2
On Fri, Sep 22, 2023, Michael Roth wrote:
> On Thu, Sep 21, 2023 at 01:33:23PM -0700, Sean Christopherson wrote:
> > +	/*
> > +	 * For simplicity, allow mapping a hugepage if and only if the entire
> > +	 * binding is compatible, i.e. don't bother supporting mapping interior
> > +	 * sub-ranges with hugepages (unless userspace comes up with a *really*
> > +	 * strong use case for needing hugepages within unaligned bindings).
> > +	 */
> > +	if (!IS_ALIGNED(slot->gmem.pgoff, 1ull << *max_order) ||
> > +	    !IS_ALIGNED(slot->npages, 1ull << *max_order))
> > +		*max_order = 0;
> 
> Thanks for working this in. Unfortunately on x86 the bulk of guest memory
> ends up getting slotted directly above legacy regions at GFN 0x100, 

Can you provide an example?  I'm struggling to understand what the layout actually
is.  I don't think it changes the story for the kernel, but it sounds like there
might be room for optimization in QEMU?  Or more likely, I just don't understand
what you're saying :-)

> so the associated slot still ends up failing these alignment checks if it tries
> to match the gmemfd offsets up with the shared RAM/memfd offsets.
> 
> I tried to work around it in userspace by padding the gmemfd offset of
> each slot to the next 2M boundary, but that also requires dynamically
> growing the gmemfd inode to account for the padding of each new slot and
> it gets ugly enough that I'm not sure it's any better than your
> suggested alternative of using a unique gmemfd for each slot.
> 
> But what if we relax the check to simply make sure that any large folio
> is fully contained by the range of the slot it is bound to? It *seems*
> like that would still avoid stuff like mapping 2M pages in the NPT (or
> setting up 2M RMP table entries) that aren't fully contained by a slot
> while still allowing the bulk of guest memory to get mapped as 2M. Are
> there other edge cases to consider?
> 
> The following seems to work for a basic 16GB SNP guest at least:
> 
> diff --git a/virt/kvm/guest_mem.c b/virt/kvm/guest_mem.c
> index 9109bf5751ee..e73128d4ebc2 100644
> --- a/virt/kvm/guest_mem.c
> +++ b/virt/kvm/guest_mem.c
> @@ -618,6 +618,7 @@ int __kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
>                        gfn_t gfn, kvm_pfn_t *pfn, int *max_order, bool prep)
>  {
>         pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
> +       pgoff_t huge_index;
>         struct kvm_gmem *gmem;
>         struct folio *folio;
>         struct page *page;
> @@ -662,9 +663,12 @@ int __kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
>          * sub-ranges with hugepages (unless userspace comes up with a *really*
>          * strong use case for needing hugepages within unaligned bindings).
>          */
> -       if (!IS_ALIGNED(slot->gmem.pgoff, 1ull << *max_order) ||
> -           !IS_ALIGNED(slot->npages, 1ull << *max_order))
> +       huge_index = round_down(index, 1ull << *max_order);

Why not use ALIGN() here?  The size is obviously a power-of-2.  Or is my math
even worse than I thought?

> +       if (huge_index < ALIGN(slot->gmem.pgoff, 1ull << *max_order) ||
> +           huge_index + (1ull << *max_order) > slot->gmem.pgoff + slot->npages) {

Argh, I keep forgetting that the MMU is responsible for handling misaligned gfns.
Yeah, this looks right.

Can you post this as a proper patch, on top of my fixes?  And without the pr_debug().
That'll be the easiest for me to apply+squash when the time comes.

Thanks much!
  
Michael Roth Oct. 2, 2023, 3:53 p.m. UTC | #3
On Thu, Sep 28, 2023 at 11:31:51AM -0700, Sean Christopherson wrote:
> On Fri, Sep 22, 2023, Michael Roth wrote:
> > On Thu, Sep 21, 2023 at 01:33:23PM -0700, Sean Christopherson wrote:
> > > +	/*
> > > +	 * For simplicity, allow mapping a hugepage if and only if the entire
> > > +	 * binding is compatible, i.e. don't bother supporting mapping interior
> > > +	 * sub-ranges with hugepages (unless userspace comes up with a *really*
> > > +	 * strong use case for needing hugepages within unaligned bindings).
> > > +	 */
> > > +	if (!IS_ALIGNED(slot->gmem.pgoff, 1ull << *max_order) ||
> > > +	    !IS_ALIGNED(slot->npages, 1ull << *max_order))
> > > +		*max_order = 0;
> > 
> > Thanks for working this in. Unfortunately on x86 the bulk of guest memory
> > ends up getting slotted directly above legacy regions at GFN 0x100, 
> 
> Can you provide an example?  I'm struggling to understand what the layout actually
> is.  I don't think it changes the story for the kernel, but it sounds like there
> might be room for optimization in QEMU?  Or more likely, I just don't understand
> what you're saying :-)

Here's one example, which seems to be fairly normal for an x86 boot:

  kvm_set_user_memory AddrSpace#0 Slot#0 flags=0x4 gpa=0x0 size=0x80000000 ua=0x7f24afc00000 ret=0 restricted_fd=19 restricted_offset=0x0
  ^ QEMU creates Slot 0 for all of main guest RAM
  kvm_set_user_memory AddrSpace#0 Slot#0 flags=0x0 gpa=0x0 size=0x0 ua=0x7f24afc00000 ret=0 restricted_fd=19 restricted_offset=0x0
  kvm_set_user_memory AddrSpace#0 Slot#0 flags=0x4 gpa=0x0 size=0xc0000 ua=0x7f24afc00000 ret=0 restricted_fd=19 restricted_offset=0x0
  kvm_set_user_memory AddrSpace#0 Slot#3 flags=0x6 gpa=0xc0000 size=0x20000 ua=0x7f2575000000 ret=0 restricted_fd=33 restricted_offset=0x0
  kvm_set_user_memory AddrSpace#0 Slot#4 flags=0x6 gpa=0xe0000 size=0x20000 ua=0x7f2575400000 ret=0 restricted_fd=31 restricted_offset=0x0
  ^ legacy regions are created and mapped on top of GPA ranges [0xc0000:0xe0000) and [0xe0000:0x100000)
  kvm_set_user_memory AddrSpace#0 Slot#5 flags=0x4 gpa=0x100000 size=0x7ff00000 ua=0x7f24afd00000 ret=0 restricted_fd=19 restricted_offset=0x100000
  ^ QEMU divides Slot 0 into Slot 0 at [0x0:0xc0000) and Slot 5 at [0x100000:0x80000000)
    Both Slots still share the same backing memory allocation, so same gmem
    fd 19 is used, but Slot 5 is assigned to offset 0x100000, which is not
    2M-aligned

I tried messing with QEMU handling to pad out guest_memfd offsets to 2MB
boundaries but then the inode size needs to be enlarged to account for it
and things get a bit messy. Not sure if there are alternative approaches
that can be taken from userspace, but with normal malloc()'d or mmap()'d
backing memory the kernel can still allocate a 2MB backing page for the
[0x0:0x200000) range and I think KVM still handles that when setting up
NPT of sub-ranges so there might not be much room for further optimization
there.

> 
> > so the associated slot still ends up failing these alignment checks if it tries
> > to match the gmemfd offsets up with the shared RAM/memfd offsets.
> > 
> > I tried to work around it in userspace by padding the gmemfd offset of
> > each slot to the next 2M boundary, but that also requires dynamically
> > growing the gmemfd inode to account for the padding of each new slot and
> > it gets ugly enough that I'm not sure it's any better than your
> > suggested alternative of using a unique gmemfd for each slot.
> > 
> > But what if we relax the check to simply make sure that any large folio
> > is fully contained by the range of the slot it is bound to? It *seems*
> > like that would still avoid stuff like mapping 2M pages in the NPT (or
> > setting up 2M RMP table entries) that aren't fully contained by a slot
> > while still allowing the bulk of guest memory to get mapped as 2M. Are
> > there other edge cases to consider?
> > 
> > The following seems to work for a basic 16GB SNP guest at least:
> > 
> > diff --git a/virt/kvm/guest_mem.c b/virt/kvm/guest_mem.c
> > index 9109bf5751ee..e73128d4ebc2 100644
> > --- a/virt/kvm/guest_mem.c
> > +++ b/virt/kvm/guest_mem.c
> > @@ -618,6 +618,7 @@ int __kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
> >                        gfn_t gfn, kvm_pfn_t *pfn, int *max_order, bool prep)
> >  {
> >         pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
> > +       pgoff_t huge_index;
> >         struct kvm_gmem *gmem;
> >         struct folio *folio;
> >         struct page *page;
> > @@ -662,9 +663,12 @@ int __kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
> >          * sub-ranges with hugepages (unless userspace comes up with a *really*
> >          * strong use case for needing hugepages within unaligned bindings).
> >          */
> > -       if (!IS_ALIGNED(slot->gmem.pgoff, 1ull << *max_order) ||
> > -           !IS_ALIGNED(slot->npages, 1ull << *max_order))
> > +       huge_index = round_down(index, 1ull << *max_order);
> 
> Why not use ALIGN() here?  The size is obviously a power-of-2.  Or is my math
> even worse than I thought?

I actually only originally used round_down() because kvm_gmem_get_huge_folio()
was taking that approach, but I still ended up using ALIGN() below so sorry
if the inconsistency caused any confusion. I switched to using ALIGN() above
and it works fine.

> 
> > +       if (huge_index < ALIGN(slot->gmem.pgoff, 1ull << *max_order) ||
> > +           huge_index + (1ull << *max_order) > slot->gmem.pgoff + slot->npages) {
> 
> Argh, I keep forgetting that the MMU is responsible for handling misaligned gfns.
> Yeah, this looks right.
> 
> Can you post this as a proper patch, on top of my fixes?  And without the pr_debug().
> That'll be the easiest for me to apply+squash when the time comes.

Sure, I submitted a revised patch on top of kvm-x86:

  https://lore.kernel.org/lkml/20231002133342.195882-1-michael.roth@amd.com/T/#u

I ran into a separate issue trying to test it and submitted a patch for that
here:

  https://lore.kernel.org/lkml/20231002133230.195738-1-michael.roth@amd.com/T/#u

-Mike

> 
> Thanks much!
  
Sean Christopherson Oct. 2, 2023, 4:49 p.m. UTC | #4
On Mon, Oct 02, 2023, Michael Roth wrote:
> On Thu, Sep 28, 2023 at 11:31:51AM -0700, Sean Christopherson wrote:
> > On Fri, Sep 22, 2023, Michael Roth wrote:
> > > On Thu, Sep 21, 2023 at 01:33:23PM -0700, Sean Christopherson wrote:
> > > > +	/*
> > > > +	 * For simplicity, allow mapping a hugepage if and only if the entire
> > > > +	 * binding is compatible, i.e. don't bother supporting mapping interior
> > > > +	 * sub-ranges with hugepages (unless userspace comes up with a *really*
> > > > +	 * strong use case for needing hugepages within unaligned bindings).
> > > > +	 */
> > > > +	if (!IS_ALIGNED(slot->gmem.pgoff, 1ull << *max_order) ||
> > > > +	    !IS_ALIGNED(slot->npages, 1ull << *max_order))
> > > > +		*max_order = 0;
> > > 
> > > Thanks for working this in. Unfortunately on x86 the bulk of guest memory
> > > ends up getting slotted directly above legacy regions at GFN 0x100, 
> > 
> > Can you provide an example?  I'm struggling to understand what the layout actually
> > is.  I don't think it changes the story for the kernel, but it sounds like there
> > might be room for optimization in QEMU?  Or more likely, I just don't understand
> > what you're saying :-)
> 
> Here's one example, which seems to be fairly normal for an x86 boot:
> 
>   kvm_set_user_memory AddrSpace#0 Slot#0 flags=0x4 gpa=0x0 size=0x80000000 ua=0x7f24afc00000 ret=0 restricted_fd=19 restricted_offset=0x0
>   ^ QEMU creates Slot 0 for all of main guest RAM
>   kvm_set_user_memory AddrSpace#0 Slot#0 flags=0x0 gpa=0x0 size=0x0 ua=0x7f24afc00000 ret=0 restricted_fd=19 restricted_offset=0x0
>   kvm_set_user_memory AddrSpace#0 Slot#0 flags=0x4 gpa=0x0 size=0xc0000 ua=0x7f24afc00000 ret=0 restricted_fd=19 restricted_offset=0x0
>   kvm_set_user_memory AddrSpace#0 Slot#3 flags=0x6 gpa=0xc0000 size=0x20000 ua=0x7f2575000000 ret=0 restricted_fd=33 restricted_offset=0x0
>   kvm_set_user_memory AddrSpace#0 Slot#4 flags=0x6 gpa=0xe0000 size=0x20000 ua=0x7f2575400000 ret=0 restricted_fd=31 restricted_offset=0x0
>   ^ legacy regions are created and mapped on top of GPA ranges [0xc0000:0xe0000) and [0xe0000:0x100000)
>   kvm_set_user_memory AddrSpace#0 Slot#5 flags=0x4 gpa=0x100000 size=0x7ff00000 ua=0x7f24afd00000 ret=0 restricted_fd=19 restricted_offset=0x100000
>   ^ QEMU divides Slot 0 into Slot 0 at [0x0:0xc0000) and Slot 5 at [0x100000:0x80000000)
>     Both Slots still share the same backing memory allocation, so same gmem
>     fd 19 is used, but Slot 5 is assigned to offset 0x100000, which is not
>     2M-aligned
> 
> I tried messing with QEMU handling to pad out guest_memfd offsets to 2MB
> boundaries but then the inode size needs to be enlarged to account for it
> and things get a bit messy. Not sure if there are alternative approaches
> that can be taken from userspace, but with normal malloc()'d or mmap()'d
> backing memory the kernel can still allocate a 2MB backing page for the
> [0x0:0x200000) range and I think KVM still handles that when setting up
> NPT of sub-ranges so there might not be much room for further optimization
> there.

Oooh, duh.  QEMU intentionally creates a gap for the VGA and/or BIOS holes, and
so the lower DRAM chunk that goes from the end of the system reserved chunk to
TOLUD starts at an unaligned offset, even though 99% of the slot is properly
aligned.

Yeah, KVM definitely needs to support that.  Requiring userspace to align based
on the hugepage size could work, e.g. QEMU could divide slot 5 into N slots, to
end up with a series of slots to get from 4KiB aligned => 2MiB aligned => 1GiB
aligned.  But pushing for that would be beyond stubborn.
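
For the record, the split being (rhetorically) described would look something
like this for Slot 5, purely hypothetically:

/*
 * Hypothetical split of Slot 5 [0x100000, 0x80000000) so that each piece's
 * guest_memfd offset and size line up with the largest page size it can use
 * (again, nobody is proposing QEMU actually do this):
 *
 *   [0x00100000, 0x00200000)  4KiB-aligned only  -> 4KiB mappings
 *   [0x00200000, 0x40000000)  2MiB-aligned       -> up to 2MiB mappings
 *   [0x40000000, 0x80000000)  1GiB-aligned       -> up to 1GiB mappings
 */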

Thanks for being patient :-)
  

Patch

diff --git a/virt/kvm/guest_mem.c b/virt/kvm/guest_mem.c
index 68528e9cddd7..4f3a313f5532 100644
--- a/virt/kvm/guest_mem.c
+++ b/virt/kvm/guest_mem.c
@@ -434,20 +434,6 @@  static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags,
 	return err;
 }
 
-static bool kvm_gmem_is_valid_size(loff_t size, u64 flags)
-{
-	if (size < 0 || !PAGE_ALIGNED(size))
-		return false;
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if ((flags & KVM_GUEST_MEMFD_ALLOW_HUGEPAGE) &&
-	    !IS_ALIGNED(size, HPAGE_PMD_SIZE))
-		return false;
-#endif
-
-	return true;
-}
-
 int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
 {
 	loff_t size = args->size;
@@ -460,9 +446,15 @@  int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
 	if (flags & ~valid_flags)
 		return -EINVAL;
 
-	if (!kvm_gmem_is_valid_size(size, flags))
+	if (size < 0 || !PAGE_ALIGNED(size))
 		return -EINVAL;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	if ((flags & KVM_GUEST_MEMFD_ALLOW_HUGEPAGE) &&
+	    !IS_ALIGNED(size, HPAGE_PMD_SIZE))
+		return -EINVAL;
+#endif
+
 	return __kvm_gmem_create(kvm, size, flags, kvm_gmem_mnt);
 }
 
@@ -470,7 +462,7 @@  int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
 		  unsigned int fd, loff_t offset)
 {
 	loff_t size = slot->npages << PAGE_SHIFT;
-	unsigned long start, end, flags;
+	unsigned long start, end;
 	struct kvm_gmem *gmem;
 	struct inode *inode;
 	struct file *file;
@@ -489,16 +481,9 @@  int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
 		goto err;
 
 	inode = file_inode(file);
-	flags = (unsigned long)inode->i_private;
 
-	/*
-	 * For simplicity, require the offset into the file and the size of the
-	 * memslot to be aligned to the largest possible page size used to back
-	 * the file (same as the size of the file itself).
-	 */
-	if (!kvm_gmem_is_valid_size(offset, flags) ||
-	    !kvm_gmem_is_valid_size(size, flags))
-		goto err;
+	if (offset < 0 || !PAGE_ALIGNED(offset))
+		return -EINVAL;
 
 	if (offset + size > i_size_read(inode))
 		goto err;
@@ -599,8 +584,23 @@  int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 	page = folio_file_page(folio, index);
 
 	*pfn = page_to_pfn(page);
-	if (max_order)
-		*max_order = compound_order(compound_head(page));
+	if (!max_order)
+		goto success;
+
+	*max_order = compound_order(compound_head(page));
+	if (!*max_order)
+		goto success;
+
+	/*
+	 * For simplicity, allow mapping a hugepage if and only if the entire
+	 * binding is compatible, i.e. don't bother supporting mapping interior
+	 * sub-ranges with hugepages (unless userspace comes up with a *really*
+	 * strong use case for needing hugepages within unaligned bindings).
+	 */
+	if (!IS_ALIGNED(slot->gmem.pgoff, 1ull << *max_order) ||
+	    !IS_ALIGNED(slot->npages, 1ull << *max_order))
+		*max_order = 0;
+success:
 	r = 0;
 
 out_unlock: