kvm_mmu_hugepage_adjust() uses its "vcpu" parameter only to get at
"vcpu->kvm". Pass in "kvm" directly and convert the callers accordingly.

No functional change intended.
Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
---
 arch/x86/kvm/mmu/mmu.c          | 8 ++++----
 arch/x86/kvm/mmu/mmu_internal.h | 2 +-
 arch/x86/kvm/mmu/paging_tmpl.h  | 2 +-
 arch/x86/kvm/mmu/tdp_mmu.c      | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)
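
Note for reviewers (kept below the "---" so it stays out of the final
changelog): a minimal standalone C sketch of the calling-convention
change. The struct definitions here are illustrative stubs, not the
real KVM types from include/linux/kvm_host.h; only the
parameter-narrowing pattern is the point.

#include <stdio.h>

/* Illustrative stubs, not the real KVM definitions. */
struct kvm { int nr_memslot_pages; };
struct kvm_vcpu { struct kvm *kvm; };
struct kvm_page_fault { int max_level; int req_level; };

/* Before: took a vCPU, but only ever dereferenced vcpu->kvm. */
static void hugepage_adjust_old(struct kvm_vcpu *vcpu,
				struct kvm_page_fault *fault)
{
	fault->req_level = vcpu->kvm->nr_memslot_pages ? fault->max_level : 1;
}

/* After: takes the VM directly; callers now pass vcpu->kvm. */
static void hugepage_adjust_new(struct kvm *kvm, struct kvm_page_fault *fault)
{
	fault->req_level = kvm->nr_memslot_pages ? fault->max_level : 1;
}

int main(void)
{
	struct kvm kvm = { .nr_memslot_pages = 1 };
	struct kvm_vcpu vcpu = { .kvm = &kvm };
	struct kvm_page_fault fault = { .max_level = 2, .req_level = 0 };

	hugepage_adjust_old(&vcpu, &fault);
	hugepage_adjust_new(vcpu.kvm, &fault);	/* converted call site */
	printf("req_level = %d\n", fault.req_level);
	return 0;
}
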
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3159,7 +3159,7 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
 	return min(host_level, max_level);
 }
 
-void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+void kvm_mmu_hugepage_adjust(struct kvm *kvm, struct kvm_page_fault *fault)
 {
 	struct kvm_memory_slot *slot = fault->slot;
 	kvm_pfn_t mask;
@@ -3179,8 +3179,8 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	 * Enforce the iTLB multihit workaround after capturing the requested
 	 * level, which will be used to do precise, accurate accounting.
 	 */
-	fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
-						     fault->gfn, fault->max_level);
+	fault->req_level = kvm_mmu_max_mapping_level(kvm, slot, fault->gfn,
+						     fault->max_level);
 	if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
 		return;
 
@@ -3222,7 +3222,7 @@ static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	int ret;
 	gfn_t base_gfn = fault->gfn;
 
-	kvm_mmu_hugepage_adjust(vcpu, fault);
+	kvm_mmu_hugepage_adjust(vcpu->kvm, fault);
 
 	trace_kvm_mmu_spte_requested(fault);
 	for_each_shadow_entry(vcpu, fault->addr, it) {
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -339,7 +339,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 int kvm_mmu_max_mapping_level(struct kvm *kvm,
 			      const struct kvm_memory_slot *slot, gfn_t gfn,
 			      int max_level);
-void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
+void kvm_mmu_hugepage_adjust(struct kvm *kvm, struct kvm_page_fault *fault);
 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);
 
 void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -716,7 +716,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	 * are being shadowed by KVM, i.e. allocating a new shadow page may
 	 * affect the allowed hugepage size.
 	 */
-	kvm_mmu_hugepage_adjust(vcpu, fault);
+	kvm_mmu_hugepage_adjust(vcpu->kvm, fault);
 
 	trace_kvm_mmu_spte_requested(fault);
 
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1047,7 +1047,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	struct kvm_mmu_page *sp;
 	int ret = RET_PF_RETRY;
 
-	kvm_mmu_hugepage_adjust(vcpu, fault);
+	kvm_mmu_hugepage_adjust(vcpu->kvm, fault);
 
 	trace_kvm_mmu_spte_requested(fault);
 