@@ -2939,7 +2939,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 			was_rmapped = 1;
 	}
 
-	wrprot = make_spte(vcpu, &vcpu->arch.mmu->common,
+	wrprot = make_spte(vcpu->kvm, vcpu, &vcpu->arch.mmu->common,
 			   sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
 			   true, host_writable, &spte);
@@ -960,7 +960,7 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int
 	spte = *sptep;
 	host_writable = spte & shadow_host_writable_mask;
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-	make_spte(vcpu, &vcpu->arch.mmu->common, sp, slot, pte_access,
+	make_spte(vcpu->kvm, vcpu, &vcpu->arch.mmu->common, sp, slot, pte_access,
 		  gfn, spte_to_pfn(spte), spte, true, false, host_writable, &spte);
 
 	return mmu_spte_update(sptep, spte);
@@ -138,7 +138,7 @@ bool spte_has_volatile_bits(u64 spte)
 	return false;
 }
 
-bool make_spte(struct kvm_vcpu *vcpu,
+bool make_spte(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	       struct kvm_mmu_common *mmu_common, struct kvm_mmu_page *sp,
 	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
@@ -179,7 +179,7 @@ bool make_spte(struct kvm_vcpu *vcpu,
 	 * just to optimize a mode that is anything but performance critical.
 	 */
 	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
-	    is_nx_huge_page_enabled(vcpu->kvm)) {
+	    is_nx_huge_page_enabled(kvm)) {
 		pte_access &= ~ACC_EXEC_MASK;
 	}
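
is_nx_huge_page_enabled() consumes only VM-scoped state, so this call-site
conversion is purely mechanical. For reference, the upstream helper is
essentially:

static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{
	return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages;
}
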
@@ -194,9 +194,15 @@ bool make_spte(struct kvm_vcpu *vcpu,
 	if (level > PG_LEVEL_4K)
 		spte |= PT_PAGE_SIZE_MASK;
 
-	if (shadow_memtype_mask)
-		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
+	if (shadow_memtype_mask) {
+		if (vcpu)
+			spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
 							 kvm_is_mmio_pfn(pfn));
+		else
+			spte |= static_call(kvm_x86_get_default_mt_mask)(kvm,
+								 kvm_is_mmio_pfn(pfn));
+	}
+
 	if (host_writable)
 		spte |= shadow_host_writable_mask;
 	else
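
The else branch introduces a kvm_x86_get_default_mt_mask static call whose
backing kvm_x86_ops hook is not part of these hunks; the patch assumes it is
wired up elsewhere in the series. A minimal sketch of what a VMX
implementation could look like, modeled on the existing vmx_get_mt_mask()
but dropping the per-vCPU CR0.CD/MTRR handling that requires a vCPU (the
function name and its registration as a hook are assumptions, not taken
from this diff):

static u8 vmx_get_default_mt_mask(struct kvm *kvm, bool is_mmio)
{
	/* MMIO stays uncached, matching the vCPU-based path. */
	if (is_mmio)
		return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;

	/*
	 * Without a vCPU there is no CR0.CD or guest MTRR state to
	 * consult, so force WB and set the ignore-PAT bit unless
	 * noncoherent DMA forces KVM to honor guest memory types.
	 */
	if (!kvm_arch_has_noncoherent_dma(kvm))
		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) |
		       VMX_EPT_IPAT_BIT;

	return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
}
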
@@ -225,7 +231,7 @@ bool make_spte(struct kvm_vcpu *vcpu,
 	 * e.g. it's write-tracked (upper-level SPs) or has one or more
 	 * shadow pages and unsync'ing pages is not allowed.
 	 */
-	if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) {
+	if (mmu_try_to_unsync_pages(kvm, slot, gfn, can_unsync, prefetch)) {
 		wrprot = true;
 		pte_access &= ~ACC_WRITE_MASK;
 		spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
@@ -246,7 +252,7 @@ bool make_spte(struct kvm_vcpu *vcpu,
 	if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
 		/* Enforced by kvm_mmu_hugepage_adjust. */
 		WARN_ON_ONCE(level > PG_LEVEL_4K);
-		mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
+		mark_page_dirty_in_slot(kvm, slot, gfn);
 	}
 
 	*new_spte = spte;
@@ -530,7 +530,7 @@ static inline u64 get_mmio_spte_generation(u64 spte)
 bool spte_has_volatile_bits(u64 spte);
 
-bool make_spte(struct kvm_vcpu *vcpu,
+bool make_spte(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	       struct kvm_mmu_common *mmu_common, struct kvm_mmu_page *sp,
 	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
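
The hunk context cuts the declaration off; for readability, the full
prototype after this patch, with the tail parameters inferred from the call
sites above (the parameter names mirror the existing upstream signature):

bool make_spte(struct kvm *kvm, struct kvm_vcpu *vcpu,
	       struct kvm_mmu_common *mmu_common, struct kvm_mmu_page *sp,
	       const struct kvm_memory_slot *slot,
	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
	       u64 old_spte, bool prefetch, bool can_unsync,
	       bool host_writable, u64 *new_spte);
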
@@ -964,7 +964,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
 	if (unlikely(!fault->slot))
 		new_spte = make_mmio_spte(vcpu->kvm, vcpu, iter->gfn, ACC_ALL);
 	else
-		wrprot = make_spte(vcpu, &vcpu->arch.mmu->common, sp, fault->slot,
+		wrprot = make_spte(vcpu->kvm, vcpu, &vcpu->arch.mmu->common, sp, fault->slot,
 				   ACC_ALL, iter->gfn, fault->pfn, iter->old_spte,
 				   fault->prefetch, true, fault->map_writable,
 				   &new_spte);
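
Taken together, these changes make vcpu optional for make_spte(): existing
fault paths keep passing vcpu->kvm plus the vCPU, while a vCPU-less caller
passes the VM and NULL. An illustrative sketch of such a caller, e.g. a
VM-scoped path that builds SPTEs without a running vCPU; the caller itself
and the in-scope variables (kvm, mmu_common, sp, slot, gfn, pfn) are
assumptions, not code added by this patch:

	bool wrprot;
	u64 new_spte;

	/*
	 * No vCPU here: make_spte() takes the else branch above and
	 * derives the memtype from kvm_x86_get_default_mt_mask().
	 */
	wrprot = make_spte(kvm, NULL, mmu_common, sp, slot, ACC_ALL,
			   gfn, pfn, 0 /* old_spte */, false /* prefetch */,
			   false /* can_unsync */, true /* host_writable */,
			   &new_spte);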