[v2] KVM: x86/pmu/misc: Fix a typo on kvm_pmu_request_counter_reprogam()

Message ID 20230310113349.31799-1-likexu@tencent.com
State New
Headers
Series [v2] KVM: x86/pmu/misc: Fix a typo on kvm_pmu_request_counter_reprogam() |

Commit Message

Like Xu March 10, 2023, 11:33 a.m. UTC
  From: Like Xu <likexu@tencent.com>

Fix a "reprogam" typo in the kvm_pmu_request_counter_reprogam(), which
should have been fixed earlier to follow the meaning of {pmc_}reprogram_counter().

Fixes: 68fb4757e867 ("KVM: x86/pmu: Defer reprogram_counter() to kvm_pmu_handle_event()")
Signed-off-by: Like Xu <likexu@tencent.com>
---
v1: https://lore.kernel.org/all/20230308104707.27284-1-likexu@tencent.com
 arch/x86/kvm/pmu.c           | 2 +-
 arch/x86/kvm/pmu.h           | 2 +-
 arch/x86/kvm/svm/pmu.c       | 2 +-
 arch/x86/kvm/vmx/pmu_intel.c | 6 +++---
 4 files changed, 6 insertions(+), 6 deletions(-)


base-commit: 13738a3647368f7f600b30d241779bcd2a3ebbfd
  

Comments

Like Xu April 7, 2023, 9:04 a.m. UTC | #1
Sean, would you pick this up?

On 10/3/2023 7:33 pm, Like Xu wrote:
> From: Like Xu <likexu@tencent.com>
> 
> Fix a "reprogam" typo in the kvm_pmu_request_counter_reprogam(), which
> should have been fixed earlier to follow the meaning of {pmc_}reprogram_counter().
> 
> Fixes: 68fb4757e867 ("KVM: x86/pmu: Defer reprogram_counter() to kvm_pmu_handle_event()")
> Signed-off-by: Like Xu <likexu@tencent.com>
> ---
> v1: https://lore.kernel.org/all/20230308104707.27284-1-likexu@tencent.com
>   arch/x86/kvm/pmu.c           | 2 +-
>   arch/x86/kvm/pmu.h           | 2 +-
>   arch/x86/kvm/svm/pmu.c       | 2 +-
>   arch/x86/kvm/vmx/pmu_intel.c | 6 +++---
>   4 files changed, 6 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index 7b6c3ba2c8e1..bdeec0ab5e2b 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -646,7 +646,7 @@ static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
>   {
>   	pmc->prev_counter = pmc->counter;
>   	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
> -	kvm_pmu_request_counter_reprogam(pmc);
> +	kvm_pmu_request_counter_reprogram(pmc);
>   }
>   
>   static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
> index 79988dafb15b..cff0651b030b 100644
> --- a/arch/x86/kvm/pmu.h
> +++ b/arch/x86/kvm/pmu.h
> @@ -183,7 +183,7 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
>   					     KVM_PMC_MAX_FIXED);
>   }
>   
> -static inline void kvm_pmu_request_counter_reprogam(struct kvm_pmc *pmc)
> +static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
>   {
>   	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
>   	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
> diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
> index cc77a0681800..5fa939e411d8 100644
> --- a/arch/x86/kvm/svm/pmu.c
> +++ b/arch/x86/kvm/svm/pmu.c
> @@ -161,7 +161,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>   		data &= ~pmu->reserved_bits;
>   		if (data != pmc->eventsel) {
>   			pmc->eventsel = data;
> -			kvm_pmu_request_counter_reprogam(pmc);
> +			kvm_pmu_request_counter_reprogram(pmc);
>   		}
>   		return 0;
>   	}
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index e8a3be0b9df9..797fff9dbe80 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -57,7 +57,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
>   		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
>   
>   		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
> -		kvm_pmu_request_counter_reprogam(pmc);
> +		kvm_pmu_request_counter_reprogram(pmc);
>   	}
>   }
>   
> @@ -81,7 +81,7 @@ static void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
>   	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
>   		pmc = intel_pmc_idx_to_pmc(pmu, bit);
>   		if (pmc)
> -			kvm_pmu_request_counter_reprogam(pmc);
> +			kvm_pmu_request_counter_reprogram(pmc);
>   	}
>   }
>   
> @@ -482,7 +482,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>   				reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
>   			if (!(data & reserved_bits)) {
>   				pmc->eventsel = data;
> -				kvm_pmu_request_counter_reprogam(pmc);
> +				kvm_pmu_request_counter_reprogram(pmc);
>   				return 0;
>   			}
>   		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
> 
> base-commit: 13738a3647368f7f600b30d241779bcd2a3ebbfd
  
Sean Christopherson April 7, 2023, 2:37 p.m. UTC | #2
On Fri, Apr 07, 2023, Like Xu wrote:
> Sean, would you pick this up?

Heh, ya, this one and Aaron's series are sitting in my local tree, just didn't
quite get to 'em yesterday.
  
Sean Christopherson April 7, 2023, 9:30 p.m. UTC | #3
On Fri, 10 Mar 2023 19:33:49 +0800, Like Xu wrote:
> Fix a "reprogam" typo in the kvm_pmu_request_counter_reprogam(), which
> should have been fixed earlier to follow the meaning of {pmc_}reprogram_counter().

Applied to kvm-x86 pmu, thanks!

[1/1] KVM: x86/pmu/misc: Fix a typo on kvm_pmu_request_counter_reprogam()
      https://github.com/kvm-x86/linux/commit/4fa5843d81fd

--
https://github.com/kvm-x86/linux/tree/next
https://github.com/kvm-x86/linux/tree/fixes
  

Patch

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 7b6c3ba2c8e1..bdeec0ab5e2b 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -646,7 +646,7 @@  static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
 {
 	pmc->prev_counter = pmc->counter;
 	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
-	kvm_pmu_request_counter_reprogam(pmc);
+	kvm_pmu_request_counter_reprogram(pmc);
 }
 
 static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 79988dafb15b..cff0651b030b 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -183,7 +183,7 @@  static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
 					     KVM_PMC_MAX_FIXED);
 }
 
-static inline void kvm_pmu_request_counter_reprogam(struct kvm_pmc *pmc)
+static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
 {
 	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
 	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index cc77a0681800..5fa939e411d8 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -161,7 +161,7 @@  static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		data &= ~pmu->reserved_bits;
 		if (data != pmc->eventsel) {
 			pmc->eventsel = data;
-			kvm_pmu_request_counter_reprogam(pmc);
+			kvm_pmu_request_counter_reprogram(pmc);
 		}
 		return 0;
 	}
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index e8a3be0b9df9..797fff9dbe80 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -57,7 +57,7 @@  static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
 
 		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
-		kvm_pmu_request_counter_reprogam(pmc);
+		kvm_pmu_request_counter_reprogram(pmc);
 	}
 }
 
@@ -81,7 +81,7 @@  static void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
 	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
 		pmc = intel_pmc_idx_to_pmc(pmu, bit);
 		if (pmc)
-			kvm_pmu_request_counter_reprogam(pmc);
+			kvm_pmu_request_counter_reprogram(pmc);
 	}
 }
 
@@ -482,7 +482,7 @@  static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 				reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
 			if (!(data & reserved_bits)) {
 				pmc->eventsel = data;
-				kvm_pmu_request_counter_reprogam(pmc);
+				kvm_pmu_request_counter_reprogram(pmc);
 				return 0;
 			}
 		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))