@@ -1084,9 +1084,14 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
/* Update the shadowed access bits in case they changed. */
kvm_mmu_page_set_access(sp, i, pte_access);

+ /*
+ * It doesn't matter whether the SPTE is host-writable or not,
+ * since write access is being removed either way.
+ */
+ host_writable = false;
+
sptep = &sp->spt[i];
spte = *sptep;
- host_writable = spte & shadow_host_writable_mask;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
make_spte(vcpu, sp, slot, pte_access, gfn,
spte_to_pfn(spte), spte, true, false,
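For context: with host_writable forced to false, make_spte() itself strips write access (see the spte.c hunk below), so the synced SPTE can never be writable, and a later write fault re-evaluates host writability from scratch. A minimal standalone model of that interaction (illustration only, not kernel code; the bit values are assumed from the x86 definitions, where ACC_WRITE_MASK aliases PT_WRITABLE_MASK):

#include <stdbool.h>
#include <stdint.h>

#define PT_WRITABLE_MASK          (1ull << 1)
#define ACC_WRITE_MASK            PT_WRITABLE_MASK
#define SHADOW_MMU_WRITABLE_MASK  (1ull << 10)  /* DEFAULT_SPTE_MMU_WRITABLE */

/*
 * Model of the rewritten make_spte() permission logic: a mapping that is
 * not host-writable loses write access up front, so no dedicated SPTE bit
 * is needed to remember host writability afterwards.
 */
static uint64_t model_make_spte(unsigned int pte_access, bool host_writable)
{
	uint64_t spte = 0;

	if (!host_writable)
		pte_access &= ~ACC_WRITE_MASK;

	if (pte_access & ACC_WRITE_MASK)
		spte |= SHADOW_MMU_WRITABLE_MASK | PT_WRITABLE_MASK;

	return spte;
}

On the sync path above, host_writable is always false, so the model always yields a write-protected SPTE.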
@@ -24,7 +24,6 @@ static bool __ro_after_init allow_mmio_caching;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
EXPORT_SYMBOL_GPL(enable_mmio_caching);

-u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
@@ -192,9 +191,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (shadow_memtype_mask)
spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
kvm_is_mmio_pfn(pfn));
- if (host_writable)
- spte |= shadow_host_writable_mask;
- else
+ if (!host_writable)
pte_access &= ~ACC_WRITE_MASK;

if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
@@ -332,7 +329,6 @@ u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
new_spte |= (u64)new_pfn << PAGE_SHIFT;

new_spte &= ~PT_WRITABLE_MASK;
- new_spte &= ~shadow_host_writable_mask;
new_spte &= ~shadow_mmu_writable_mask;

new_spte = mark_spte_for_access_track(new_spte);
@@ -440,7 +436,6 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
*/
shadow_memtype_mask = VMX_EPT_MT_MASK | VMX_EPT_IPAT_BIT;
shadow_acc_track_mask = VMX_EPT_RWX_MASK;
- shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;

/*
@@ -500,7 +495,6 @@ void kvm_mmu_reset_all_pte_masks(void)
shadow_me_mask = 0;
shadow_me_value = 0;

-shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITABLE;
shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITABLE;

/*
@@ -75,8 +75,7 @@ static_assert(!(SPTE_TDP_AD_MASK & SHADOW_ACC_TRACK_SAVED_MASK));
* SPTE is write-protected. See is_writable_pte() for details.
*/

-/* Bits 9 and 10 are ignored by all non-EPT PTEs. */
-#define DEFAULT_SPTE_HOST_WRITABLE BIT_ULL(9)
+/* Bit 10 is ignored by all non-EPT PTEs. */
#define DEFAULT_SPTE_MMU_WRITABLE BIT_ULL(10)

/*
@@ -84,12 +83,9 @@ static_assert(!(SPTE_TDP_AD_MASK & SHADOW_ACC_TRACK_SAVED_MASK));
* to not overlap the A/D type mask or the saved access bits of access-tracked
* SPTEs when A/D bits are disabled.
*/
-#define EPT_SPTE_HOST_WRITABLE BIT_ULL(57)
#define EPT_SPTE_MMU_WRITABLE BIT_ULL(58)

-static_assert(!(EPT_SPTE_HOST_WRITABLE & SPTE_TDP_AD_MASK));
static_assert(!(EPT_SPTE_MMU_WRITABLE & SPTE_TDP_AD_MASK));
-static_assert(!(EPT_SPTE_HOST_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));
static_assert(!(EPT_SPTE_MMU_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));

/* Defined only to keep the above static asserts readable. */
@@ -148,7 +144,6 @@ static_assert(MMIO_SPTE_GEN_LOW_BITS == 8 && MMIO_SPTE_GEN_HIGH_BITS == 11);
#define MMIO_SPTE_GEN_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)

-extern u64 __read_mostly shadow_host_writable_mask;
extern u64 __read_mostly shadow_mmu_writable_mask;
extern u64 __read_mostly shadow_nx_mask;
extern u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
extern u64 __read_mostly shadow_user_mask;
@@ -383,27 +378,23 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
*
* For cases #1 and #4, KVM can safely make such SPTEs writable without taking
* mmu_lock as capturing the Accessed/Dirty state doesn't require taking it.
- * To differentiate #1 and #4 from #2 and #3, KVM uses two software-only bits
+ * To differentiate #1 and #4 from #2 and #3, KVM uses a software-only bit
* in the SPTE:
*
* shadow_mmu_writable_mask, aka MMU-writable -
* Cleared on SPTEs that KVM is currently write-protecting for shadow paging
* purposes (case 2 above).
- *
- * shadow_host_writable_mask, aka Host-writable -
- * Cleared on SPTEs that are not host-writable (case 3 above)
+ * Also cleared on SPTEs that are not host-writable (case 3 above).
*
- * Note, not all possible combinations of PT_WRITABLE_MASK,
- * shadow_mmu_writable_mask, and shadow_host_writable_mask are valid. A given
- * SPTE can be in only one of the following states, which map to the
- * aforementioned 3 cases:
+ * Note, not all possible combinations of PT_WRITABLE_MASK and
+ * shadow_mmu_writable_mask are valid. A given SPTE can be in only one of the
+ * following states, which map to the aforementioned 3 cases:
*
- * shadow_host_writable_mask | shadow_mmu_writable_mask | PT_WRITABLE_MASK
- * ------------------------- | ------------------------ | ----------------
- * 1                         | 1                        | 1 (writable)
- * 1                         | 1                        | 0 (case 1)
- * 1                         | 0                        | 0 (case 2)
- * 0                         | 0                        | 0 (case 3)
+ * shadow_mmu_writable_mask | PT_WRITABLE_MASK
+ * ------------------------ | ----------------
+ * 1                        | 1 (writable)
+ * 1                        | 0 (case 1)
+ * 0                        | 0 (cases 2 and 3)
*
* The valid combinations of these bits are checked by
* check_spte_writable_invariants() whenever an SPTE is modified.
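To make the reduced state machine concrete, here is a self-contained sketch that maps an SPTE onto the rows of the table above (the mask values are the assumed non-EPT defaults; this is a model for illustration, not kernel code):

#include <stdint.h>

#define PT_WRITABLE_MASK          (1ull << 1)
#define SHADOW_MMU_WRITABLE_MASK  (1ull << 10)  /* DEFAULT_SPTE_MMU_WRITABLE */

/* Classify an SPTE according to the table above. */
static const char *classify_spte_writability(uint64_t spte)
{
	int mmu_writable = !!(spte & SHADOW_MMU_WRITABLE_MASK);
	int writable     = !!(spte & PT_WRITABLE_MASK);

	if (mmu_writable && writable)
		return "writable";
	if (mmu_writable)
		return "case 1: write-protected for A/D tracking";
	if (!writable)
		return "cases 2 and 3: shadow write-protected or !host-writable";
	return "invalid: writable but not MMU-writable";
}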
@@ -433,13 +424,8 @@ static inline bool is_writable_pte(unsigned long pte)
/* Note: spte must be a shadow-present leaf SPTE. */
static inline void check_spte_writable_invariants(u64 spte)
{
- if (spte & shadow_mmu_writable_mask)
- WARN_ONCE(!(spte & shadow_host_writable_mask),
- "kvm: MMU-writable SPTE is not Host-writable: %llx",
- spte);
- else
- WARN_ONCE(is_writable_pte(spte),
- "kvm: Writable SPTE is not MMU-writable: %llx", spte);
+ WARN_ONCE(!(spte & shadow_mmu_writable_mask) && is_writable_pte(spte),
+ "kvm: Writable SPTE is not MMU-writable: %llx", spte);
}

static inline bool is_mmu_writable_spte(u64 spte)
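And a quick self-contained exercise of the simplified invariant, mirroring the WARN_ONCE() condition above (same assumed mask values as the sketches earlier; the only invalid combination left is a writable SPTE that is not MMU-writable):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PT_WRITABLE_MASK          (1ull << 1)
#define SHADOW_MMU_WRITABLE_MASK  (1ull << 10)

/* Invalid iff writable but not MMU-writable, exactly the WARN_ONCE() test. */
static bool spte_state_ok(uint64_t spte)
{
	return (spte & SHADOW_MMU_WRITABLE_MASK) || !(spte & PT_WRITABLE_MASK);
}

int main(void)
{
	assert(spte_state_ok(SHADOW_MMU_WRITABLE_MASK | PT_WRITABLE_MASK)); /* writable */
	assert(spte_state_ok(SHADOW_MMU_WRITABLE_MASK));                    /* case 1 */
	assert(spte_state_ok(0));                                           /* cases 2 and 3 */
	assert(!spte_state_ok(PT_WRITABLE_MASK));                           /* would WARN */
	return 0;
}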