[v7,03/11] KVM: pfncache: add a helper to get the gpa

Message ID 20231002095740.1472907-4-paul@xen.org
State New
Series KVM: xen: update shared_info and vcpu_info handling

Commit Message

Paul Durrant Oct. 2, 2023, 9:57 a.m. UTC
  From: Paul Durrant <pdurrant@amazon.com>

A subsequent patch will rename this field since it will become overloaded.
To avoid churn in places that currently retrieve the gpa, add a helper for
that purpose now.

No functional change intended.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
---
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
---
 arch/x86/kvm/xen.c       | 15 ++++++++-------
 include/linux/kvm_host.h |  7 +++++++
 virt/kvm/pfncache.c      |  6 ++++++
 3 files changed, 21 insertions(+), 7 deletions(-)

Comments

Sean Christopherson Oct. 31, 2023, 11:37 p.m. UTC | #1
On Mon, Oct 02, 2023, Paul Durrant wrote:
> From: Paul Durrant <pdurrant@amazon.com>
> 
> A subsequent patch will rename this field since it will become overloaded.

Too. Many. Pronouns.

  Add a helper to get the gpa of a gpc cache, as a subsequent patch will
  rename "gpa" to "addr".

> To avoid churn in places that currently retrieve the gpa, add a helper for
> that purpose now.

This is silly.  If this series added any protection against incorrect usage then
I could understand the helper, but this just ends up being

	return gpc->addr_is_gpa ? gpc->addr : INVALID_GPA;

which is nasty.  IIUC, there's no WARN because kvm_xen_vcpu_get_attr() doesn't
pre-check that the cache is in the correct mode.  That's a really silly reason
to not harden the rest of KVM.
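
For concreteness, the hardened final form being hinted at might look something
like the sketch below.  The "addr" and "addr_is_gpa" names are taken from the
quoted snippet above; the WARN_ON_ONCE() placement is an illustration of the
suggested hardening, not code posted in this series:

	/*
	 * Sketch only: assumes the later rename of "gpa" to "addr" and a
	 * hypothetical "addr_is_gpa" flag, per the snippet quoted above.
	 * Warn (once) if a caller asks for the gpa of a cache that is not
	 * in gpa mode, instead of silently returning INVALID_GPA.
	 */
	static inline gpa_t kvm_gpc_gpa(struct gfn_to_pfn_cache *gpc)
	{
		if (WARN_ON_ONCE(!gpc->addr_is_gpa))
			return INVALID_GPA;

		return gpc->addr;
	}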

> diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
> index b68ed7fa56a2..17afbb464a70 100644
> --- a/virt/kvm/pfncache.c
> +++ b/virt/kvm/pfncache.c
> @@ -386,6 +386,12 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
>  }
>  EXPORT_SYMBOL_GPL(kvm_gpc_activate);
>  
> +gpa_t kvm_gpc_gpa(struct gfn_to_pfn_cache *gpc)
> +{
> +	return gpc->gpa;
> +}
> +EXPORT_SYMBOL_GPL(kvm_gpc_gpa);

Any reason not to make this a static inline?  Even in the final form, not
inlining it seems silly.

Belatedly, same question for kvm_gpc_mark_dirty() I suppose.

>  void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
>  {
>  	mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
> -- 
> 2.39.2
>
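
Taking the static-inline suggestion literally, the two trivial helpers could
live in include/linux/kvm_host.h roughly as below.  This is a sketch of the
reviewer's suggestion, not code posted in the series; it assumes that
struct gfn_to_pfn_cache and the mark_page_dirty_in_slot() declaration are
already visible at the point of definition (both come in via that header):

	/* Sketch: inline accessors instead of exported functions. */
	static inline gpa_t kvm_gpc_gpa(struct gfn_to_pfn_cache *gpc)
	{
		return gpc->gpa;
	}

	static inline void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
	{
		mark_page_dirty_in_slot(gpc->kvm, gpc->memslot,
					gpc->gpa >> PAGE_SHIFT);
	}

Inlining would also drop the EXPORT_SYMBOL_GPL() lines from virt/kvm/pfncache.c
and avoid a function call on what is effectively a field read.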

Patch

diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 33fddd29824b..8e6fdcd7bb6e 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -261,8 +261,8 @@  static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 	 * alignment (and the 32-bit ABI doesn't align the 64-bit integers
 	 * anyway, even if the overall struct had been 64-bit aligned).
 	 */
-	if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
-		user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK);
+	if ((kvm_gpc_gpa(gpc1) & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
+		user_len1 = PAGE_SIZE - (kvm_gpc_gpa(gpc1) & ~PAGE_MASK);
 		user_len2 = user_len - user_len1;
 	} else {
 		user_len1 = user_len;
@@ -343,7 +343,7 @@  static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 			 * to the second page now because the guest changed to
 			 * 64-bit mode, the second GPC won't have been set up.
 			 */
-			if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1,
+			if (kvm_gpc_activate(gpc2, kvm_gpc_gpa(gpc1) + user_len1,
 					     user_len2))
 				return;
 
@@ -677,7 +677,8 @@  int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 
 	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
 		if (kvm->arch.xen.shinfo_cache.active)
-			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
+			data->u.shared_info.gfn =
+				gpa_to_gfn(kvm_gpc_gpa(&kvm->arch.xen.shinfo_cache));
 		else
 			data->u.shared_info.gfn = KVM_XEN_INVALID_GFN;
 		r = 0;
@@ -955,7 +956,7 @@  int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 	switch (data->type) {
 	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
 		if (vcpu->arch.xen.vcpu_info_cache.active)
-			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
+			data->u.gpa = kvm_gpc_gpa(&vcpu->arch.xen.vcpu_info_cache);
 		else
 			data->u.gpa = KVM_XEN_INVALID_GPA;
 		r = 0;
@@ -963,7 +964,7 @@  int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 
 	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
 		if (vcpu->arch.xen.vcpu_time_info_cache.active)
-			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
+			data->u.gpa = kvm_gpc_gpa(&vcpu->arch.xen.vcpu_time_info_cache);
 		else
 			data->u.gpa = KVM_XEN_INVALID_GPA;
 		r = 0;
@@ -975,7 +976,7 @@  int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 			break;
 		}
 		if (vcpu->arch.xen.runstate_cache.active) {
-			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
+			data->u.gpa = kvm_gpc_gpa(&vcpu->arch.xen.runstate_cache);
 			r = 0;
 		}
 		break;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c71e8fbccaaf..4d8027fe9928 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1374,6 +1374,13 @@  void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
  */
 void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc);
 
+/**
+ * kvm_gpc_gpa - retrieve the guest physical address of a cached mapping
+ *
+ * @gpc:	   struct gfn_to_pfn_cache object.
+ */
+gpa_t kvm_gpc_gpa(struct gfn_to_pfn_cache *gpc);
+
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
 
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index b68ed7fa56a2..17afbb464a70 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -386,6 +386,12 @@  int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 
+gpa_t kvm_gpc_gpa(struct gfn_to_pfn_cache *gpc)
+{
+	return gpc->gpa;
+}
+EXPORT_SYMBOL_GPL(kvm_gpc_gpa);
+
 void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
 {
 	mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);