[3/7] KVM: x86/MMU: Move gfn_to_rmap() to rmap.c

Message ID 20221206173601.549281-4-bgardon@google.com
State New
Headers
Series KVM: x86/MMU: Factor rmap operations out of mmu.c

Commit Message

Ben Gardon Dec. 6, 2022, 5:35 p.m. UTC
  Move gfn_to_rmap() to rmap.c. While the function does not itself
manipulate the rmap, it is the main way the MMU obtains pointers to
the rmaps.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c  | 9 ---------
 arch/x86/kvm/mmu/rmap.c | 8 ++++++++
 arch/x86/kvm/mmu/rmap.h | 2 ++
 3 files changed, 10 insertions(+), 9 deletions(-)
  

Comments

David Matlack Dec. 9, 2022, 11:32 p.m. UTC | #1
On Tue, Dec 06, 2022 at 05:35:57PM +0000, Ben Gardon wrote:
> Move gfn_to_rmap() to rmap.c. While the function does not itself
> manipulate the rmap, it is the main way the MMU obtains pointers to
> the rmaps.
> 
> No functional change intended.
> 
> Signed-off-by: Ben Gardon <bgardon@google.com>
> ---
[...]
> diff --git a/arch/x86/kvm/mmu/rmap.c b/arch/x86/kvm/mmu/rmap.c
> index c3bad366b627..272e89147d96 100644
> --- a/arch/x86/kvm/mmu/rmap.c
> +++ b/arch/x86/kvm/mmu/rmap.c
> @@ -200,3 +200,11 @@ u64 *rmap_get_next(struct rmap_iterator *iter)
>  	return sptep;
>  }
>  
> +struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
> +				  const struct kvm_memory_slot *slot)
> +{
> +	unsigned long idx;
> +
> +	idx = gfn_to_index(gfn, slot->base_gfn, level);
> +	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
> +}

Optional: Since this is such a short function maybe just make it a
static inline in rmap.h?
  

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index c3a7f443a213..f8d7201210c8 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -891,15 +891,6 @@  static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
 	return true;
 }
 
-static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
-					 const struct kvm_memory_slot *slot)
-{
-	unsigned long idx;
-
-	idx = gfn_to_index(gfn, slot->base_gfn, level);
-	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
-}
-
 static bool rmap_can_add(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_memory_cache *mc;
diff --git a/arch/x86/kvm/mmu/rmap.c b/arch/x86/kvm/mmu/rmap.c
index c3bad366b627..272e89147d96 100644
--- a/arch/x86/kvm/mmu/rmap.c
+++ b/arch/x86/kvm/mmu/rmap.c
@@ -200,3 +200,11 @@  u64 *rmap_get_next(struct rmap_iterator *iter)
 	return sptep;
 }
 
+struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
+				  const struct kvm_memory_slot *slot)
+{
+	unsigned long idx;
+
+	idx = gfn_to_index(gfn, slot->base_gfn, level);
+	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
+}
diff --git a/arch/x86/kvm/mmu/rmap.h b/arch/x86/kvm/mmu/rmap.h
index 13b265f3a95e..45732eda57e5 100644
--- a/arch/x86/kvm/mmu/rmap.h
+++ b/arch/x86/kvm/mmu/rmap.h
@@ -49,4 +49,6 @@  u64 *rmap_get_next(struct rmap_iterator *iter);
 	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
 	     _spte_; _spte_ = rmap_get_next(_iter_))
 
+struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
+				  const struct kvm_memory_slot *slot);
 #endif /* __KVM_X86_MMU_RMAP_H */