[v2,8/8] KVM: x86: Move common handling of PAT MSR writes to kvm_set_msr_common()

Message ID 20230511233351.635053-9-seanjc@google.com
State New
Series: KVM: x86: Clean up MSR PAT handling

Commit Message

Sean Christopherson May 11, 2023, 11:33 p.m. UTC
Move the common check-and-set handling of PAT MSR writes out of vendor
code and into kvm_set_msr_common().  This aligns writes with reads, which
are already handled in common code, i.e. makes the handling of reads and
writes symmetrical in common code.

Alternatively, the common handling in kvm_get_msr_common() could be moved
to vendor code, but duplicating code is generally undesirable (even though
the duplicated code is trivial in this case), and guest writes to PAT
should be rare, i.e. the overhead of the extra function call is a
non-issue in practice.

Suggested-by: Kai Huang <kai.huang@intel.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/svm/svm.c | 7 ++++---
 arch/x86/kvm/vmx/vmx.c | 7 +++----
 arch/x86/kvm/x86.c     | 6 ------
 3 files changed, 7 insertions(+), 13 deletions(-)
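
For reference, the "check" half of the consolidated check-and-set logic
amounts to validating that every one of the eight one-byte PAT entries
holds an architecturally valid memory type.  A minimal standalone sketch
of that rule (illustrative only; pat_valid_sketch() is not an in-tree
helper, and the real kvm_pat_valid() encodes the same rule as a compact
bitmask check):

	/*
	 * A PAT value is valid iff every byte is one of the valid
	 * memory types: UC (0), WC (1), WT (4), WP (5), WB (6), or
	 * UC- (7).  Types 2, 3, and anything above 7 are reserved.
	 * Assumes kernel types u8/u64 from <linux/types.h>.
	 */
	static bool pat_valid_sketch(u64 data)
	{
		int i;

		for (i = 0; i < 8; i++) {
			u8 mem_type = (data >> (i * 8)) & 0xff;

			if (mem_type == 2 || mem_type == 3 || mem_type > 7)
				return false;
		}
		return true;
	}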
  

Comments

Kai Huang May 12, 2023, 10:48 a.m. UTC | #1
On Thu, 2023-05-11 at 16:33 -0700, Sean Christopherson wrote:
> Move the common check-and-set handling of PAT MSR writes out of vendor
> code and into kvm_set_msr_common().  This aligns writes with reads, which
> are already handled in common code, i.e. makes the handling of reads and
> writes symmetrical in common code.
> 
> Alternatively, the common handling in kvm_get_msr_common() could be moved
> to vendor code, but duplicating code is generally undesirable (even though
> the duplicated code is trivial in this case), and guest writes to PAT
> should be rare, i.e. the overhead of the extra function call is a
> non-issue in practice.
> 
> Suggested-by: Kai Huang <kai.huang@intel.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>

Reviewed-by: Kai Huang <kai.huang@intel.com>

> ---
>  arch/x86/kvm/svm/svm.c | 7 ++++---
>  arch/x86/kvm/vmx/vmx.c | 7 +++----
>  arch/x86/kvm/x86.c     | 6 ------
>  3 files changed, 7 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index db237ccdc957..61d329760f6c 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -2935,9 +2935,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  
>  		break;
>  	case MSR_IA32_CR_PAT:
> -		if (!kvm_pat_valid(data))
> -			return 1;
> -		vcpu->arch.pat = data;
> +		ret = kvm_set_msr_common(vcpu, msr);
> +		if (ret)
> +			break;
> +
>  		svm->vmcb01.ptr->save.g_pat = data;
>  		if (is_guest_mode(vcpu))
>  			nested_vmcb02_compute_g_pat(svm);
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 33b8625d3541..2d9d155691a7 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -2287,10 +2287,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  			return 1;
>  		goto find_uret_msr;
>  	case MSR_IA32_CR_PAT:
> -		if (!kvm_pat_valid(data))
> -			return 1;
> -
> -		vcpu->arch.pat = data;
> +		ret = kvm_set_msr_common(vcpu, msr_info);
> +		if (ret)
> +			break;
>  
>  		if (is_guest_mode(vcpu) &&
>  		    get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index d71cf924cd8f..3759737c0873 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -3701,12 +3701,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		}
>  		break;
>  	case MSR_IA32_CR_PAT:
> -		/*
> -		 * Writes to PAT should be handled by vendor code as both SVM
> -		 * and VMX track the guest's PAT in the VMCB/VMCS.
> -		 */
> -		WARN_ON_ONCE(1);
> -
>  		if (!kvm_pat_valid(data))
>  			return 1;
>  
> -- 
> 2.40.1.606.ga4b1b128d6-goog
>
  

Patch

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index db237ccdc957..61d329760f6c 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2935,9 +2935,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
 		break;
 	case MSR_IA32_CR_PAT:
-		if (!kvm_pat_valid(data))
-			return 1;
-		vcpu->arch.pat = data;
+		ret = kvm_set_msr_common(vcpu, msr);
+		if (ret)
+			break;
+
 		svm->vmcb01.ptr->save.g_pat = data;
 		if (is_guest_mode(vcpu))
 			nested_vmcb02_compute_g_pat(svm);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 33b8625d3541..2d9d155691a7 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2287,10 +2287,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		goto find_uret_msr;
 	case MSR_IA32_CR_PAT:
-		if (!kvm_pat_valid(data))
-			return 1;
-
-		vcpu->arch.pat = data;
+		ret = kvm_set_msr_common(vcpu, msr_info);
+		if (ret)
+			break;
 
 		if (is_guest_mode(vcpu) &&
 		    get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d71cf924cd8f..3759737c0873 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3701,12 +3701,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		}
 		break;
 	case MSR_IA32_CR_PAT:
-		/*
-		 * Writes to PAT should be handled by vendor code as both SVM
-		 * and VMX track the guest's PAT in the VMCB/VMCS.
-		 */
-		WARN_ON_ONCE(1);
-
 		if (!kvm_pat_valid(data))
 			return 1;
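
After this patch, the common code both validates and caches the guest
value, making the write path the mirror image of the read path that was
already in kvm_get_msr_common().  A sketch of the resulting pair (the
read case matches the existing common code; the tail of the write case
is inferred from the vendor hunks above, which now rely on
kvm_set_msr_common() to cache vcpu->arch.pat):

	/* kvm_get_msr_common(), pre-existing read path: */
	case MSR_IA32_CR_PAT:
		msr_info->data = vcpu->arch.pat;
		break;

	/* kvm_set_msr_common(), write path after this patch (sketch): */
	case MSR_IA32_CR_PAT:
		if (!kvm_pat_valid(data))
			return 1;

		vcpu->arch.pat = data;
		break;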