[v3,6/7] KVM: x86/mmu: Remove handle_changed_spte_dirty_log()

Message ID 20230211014626.3659152-7-vipinsh@google.com
State New
Series Optimize clear dirty log

Commit Message

Vipin Sharma Feb. 11, 2023, 1:46 a.m. UTC
Remove handle_changed_spte_dirty_log() as there is no code flow which
sets a 4KiB SPTE writable and hits this path. This function marks the
page dirty in a memslot only if the new SPTE is 4KiB in size and
writable.

Current users of handle_changed_spte_dirty_log() are:
1. set_spte_gfn() - Creates only non-writable SPTEs.
2. write_protect_gfn() - Changes an SPTE to non-writable.
3. Zap leaf and root APIs - New SPTEs are always 0.
4. handle_removed_pt() - Sets SPTEs to REMOVED_SPTE.
5. tdp_mmu_link_sp() - Creates only non-leaf SPTEs.

There is also no path which creates a writable 4KiB SPTE without going
through make_spte(), and that function takes care of marking the SPTE
dirty in the memslot if it is PT_WRITABLE.
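
For reference, the relevant logic at the tail end of make_spte() boils
down to roughly the following (a simplified sketch, not the exact
upstream code):

	if (spte & PT_WRITABLE_MASK) {
		/*
		 * A writable SPTE means the gfn can be dirtied, so the
		 * memslot's dirty bitmap is updated here, making a second
		 * mark_page_dirty_in_slot() call in handle_changed_spte()
		 * redundant.
		 */
		mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
	}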

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 22 ----------------------
 1 file changed, 22 deletions(-)
  

Comments

David Matlack Feb. 22, 2023, 7:36 p.m. UTC | #1
On Fri, Feb 10, 2023 at 05:46:25PM -0800, Vipin Sharma wrote:
> Remove handle_changed_spte_dirty_log() as there is no code flow which
> sets a 4KiB SPTE writable and hits this path. This function marks the
> page dirty in a memslot only if the new SPTE is 4KiB in size and
> writable.
> 
> Current users of handle_changed_spte_dirty_log() are:
> 1. set_spte_gfn() - Creates only non-writable SPTEs.
> 2. write_protect_gfn() - Changes an SPTE to non-writable.
> 3. Zap leaf and root APIs - New SPTEs are always 0.
> 4. handle_removed_pt() - Sets SPTEs to REMOVED_SPTE.
> 5. tdp_mmu_link_sp() - Creates only non-leaf SPTEs.
> 
> There is also no path which creates a writable 4KiB SPTE without going
> through make_spte(), and that function takes care of marking the SPTE
> dirty in the memslot if it is PT_WRITABLE.
> 
> Signed-off-by: Vipin Sharma <vipinsh@google.com>

Aside from the comment suggestion,

Reviewed-by: David Matlack <dmatlack@google.com>

> ---
>  arch/x86/kvm/mmu/tdp_mmu.c | 22 ----------------------
>  1 file changed, 22 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index e50e869bf879..0c031319e901 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -345,24 +345,6 @@ static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
>  		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
>  }
>  
> -static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
> -					  u64 old_spte, u64 new_spte, int level)
> -{
> -	bool pfn_changed;
> -	struct kvm_memory_slot *slot;
> -
> -	if (level > PG_LEVEL_4K)
> -		return;
> -
> -	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
> -
> -	if ((!is_writable_pte(old_spte) || pfn_changed) &&
> -	    is_writable_pte(new_spte)) {
> -		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
> -		mark_page_dirty_in_slot(kvm, slot, gfn);
> -	}
> -}
> -
>  static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
>  {
>  	kvm_account_pgtable_pages((void *)sp->spt, +1);
> @@ -614,8 +596,6 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
>  	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
>  			      shared);
>  	handle_changed_spte_acc_track(old_spte, new_spte, level);
> -	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
> -				      new_spte, level);

Please leave a comment somewhere in handle_changed_spte() to document
why mark_page_dirty_in_slot() is not needed, both to help future
readers and in case something changes in the future.
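
For example, such a comment might read (wording is only a sketch):

	/*
	 * Dirty logging is handled when the SPTE is created: all writable
	 * SPTEs are built by make_spte(), which already calls
	 * mark_page_dirty_in_slot() for writable SPTEs, so there is no
	 * need to update the dirty bitmap here.
	 */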
  

Patch

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index e50e869bf879..0c031319e901 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -345,24 +345,6 @@  static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
 }
 
-static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
-					  u64 old_spte, u64 new_spte, int level)
-{
-	bool pfn_changed;
-	struct kvm_memory_slot *slot;
-
-	if (level > PG_LEVEL_4K)
-		return;
-
-	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
-
-	if ((!is_writable_pte(old_spte) || pfn_changed) &&
-	    is_writable_pte(new_spte)) {
-		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
-		mark_page_dirty_in_slot(kvm, slot, gfn);
-	}
-}
-
 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	kvm_account_pgtable_pages((void *)sp->spt, +1);
@@ -614,8 +596,6 @@  static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
 			      shared);
 	handle_changed_spte_acc_track(old_spte, new_spte, level);
-	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
-				      new_spte, level);
 }
 
 /*
@@ -727,8 +707,6 @@  static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
 
 	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
 	handle_changed_spte_acc_track(old_spte, new_spte, level);
-	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, new_spte,
-				      level);
 	return old_spte;
 }