Move rmap_can_add(), which checks whether an entry can be added to an
rmap, and rmap_remove(), which removes entries from an rmap, from
mmu.c to rmap.(c|h). This requires making kvm_mmu_page_get_gfn()
non-static and declaring it in mmu_internal.h so that rmap_remove()
can still look up the gfn for a given SPTE.
No functional change intended.
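
For example, existing callers in mmu.c are unaffected by the move;
drop_spte() keeps calling rmap_remove(), which is now declared in
rmap.h instead of being a local static helper. A sketch of that caller
is below, assuming drop_spte() keeps its current shape:

	static void drop_spte(struct kvm *kvm, u64 *sptep)
	{
		/* Clear the SPTE and fetch its old value. */
		u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);

		/* Only present SPTEs have an rmap entry to drop. */
		if (is_shadow_present_pte(old_spte))
			rmap_remove(kvm, sptep);
	}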
Signed-off-by: Ben Gardon <bgardon@google.com>
---
arch/x86/kvm/mmu/mmu.c | 34 +--------------------------------
arch/x86/kvm/mmu/mmu_internal.h | 1 +
arch/x86/kvm/mmu/rmap.c | 32 +++++++++++++++++++++++++++++++
arch/x86/kvm/mmu/rmap.h | 3 +++
4 files changed, 37 insertions(+), 33 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -658,7 +658,7 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
static bool sp_has_gptes(struct kvm_mmu_page *sp);
-static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
+gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
if (sp->role.passthrough)
return sp->gfn;
@@ -891,38 +891,6 @@ static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
return true;
}
-static bool rmap_can_add(struct kvm_vcpu *vcpu)
-{
- struct kvm_mmu_memory_cache *mc;
-
- mc = &vcpu->arch.mmu_pte_list_desc_cache;
- return kvm_mmu_memory_cache_nr_free_objects(mc);
-}
-
-static void rmap_remove(struct kvm *kvm, u64 *spte)
-{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *slot;
- struct kvm_mmu_page *sp;
- gfn_t gfn;
- struct kvm_rmap_head *rmap_head;
-
- sp = sptep_to_sp(spte);
- gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));
-
- /*
- * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
- * so we have to determine which memslots to use based on context
- * information in sp->role.
- */
- slots = kvm_memslots_for_spte_role(kvm, sp->role);
-
- slot = __gfn_to_memslot(slots, gfn);
- rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
-
- pte_list_remove(spte, rmap_head);
-}
-
static void drop_spte(struct kvm *kvm, u64 *sptep)
{
u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -318,4 +318,5 @@ void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
#endif /* __KVM_X86_MMU_INTERNAL_H */
diff --git a/arch/x86/kvm/mmu/rmap.c b/arch/x86/kvm/mmu/rmap.c
--- a/arch/x86/kvm/mmu/rmap.c
+++ b/arch/x86/kvm/mmu/rmap.c
@@ -208,3 +208,35 @@ struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
idx = gfn_to_index(gfn, slot->base_gfn, level);
return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
}
+
+bool rmap_can_add(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu_memory_cache *mc;
+
+ mc = &vcpu->arch.mmu_pte_list_desc_cache;
+ return kvm_mmu_memory_cache_nr_free_objects(mc);
+}
+
+void rmap_remove(struct kvm *kvm, u64 *spte)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *slot;
+ struct kvm_mmu_page *sp;
+ gfn_t gfn;
+ struct kvm_rmap_head *rmap_head;
+
+ sp = sptep_to_sp(spte);
+ gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));
+
+ /*
+ * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
+ * so we have to determine which memslots to use based on context
+ * information in sp->role.
+ */
+ slots = kvm_memslots_for_spte_role(kvm, sp->role);
+
+ slot = __gfn_to_memslot(slots, gfn);
+ rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
+
+ pte_list_remove(spte, rmap_head);
+}
diff --git a/arch/x86/kvm/mmu/rmap.h b/arch/x86/kvm/mmu/rmap.h
--- a/arch/x86/kvm/mmu/rmap.h
+++ b/arch/x86/kvm/mmu/rmap.h
@@ -51,4 +51,7 @@ u64 *rmap_get_next(struct rmap_iterator *iter);
struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
const struct kvm_memory_slot *slot);
+
+bool rmap_can_add(struct kvm_vcpu *vcpu);
+void rmap_remove(struct kvm *kvm, u64 *spte);
#endif /* __KVM_X86_MMU_RMAP_H */