[v10,09/19] KVM: xen: separate initialization of shared_info cache and content
Commit Message
From: Paul Durrant <pdurrant@amazon.com>
A subsequent patch will allow shared_info to be initialized using either a
GPA or a user-space (i.e. VMM) HVA. To make that patch cleaner, separate
the initialization of the shared_info content from the activation of the
pfncache.
Signed-off-by: Paul Durrant <pdurrant@amazon.com>
---
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: x86@kernel.org
v10:
- New in this version.
---
arch/x86/kvm/xen.c | 55 +++++++++++++++++++++++++++-------------------
1 file changed, 32 insertions(+), 23 deletions(-)
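For orientation, here is a condensed sketch of the shape this patch produces (a sketch only, not the literal kernel code: set_shared_info_gfn() is a hypothetical wrapper standing in for the KVM_XEN_ATTR_TYPE_SHARED_INFO case of kvm_xen_hvm_set_attr(), and the xen_lock/SRCU handling visible in the diff below is elided). Activating the pfncache and populating the shared_info content become two distinct steps:

/* Hypothetical wrapper; locking elided. See the real diff below. */
static int set_shared_info_gfn(struct kvm *kvm, gfn_t gfn)
{
        int r;

        if (gfn == KVM_XEN_INVALID_GFN) {
                /* Tearing down: just deactivate the cache. */
                kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);
                return 0;
        }

        /* Step 1: activate the pfncache at the given GPA. */
        r = kvm_gpc_activate(&kvm->arch.xen.shinfo_cache,
                             gfn_to_gpa(gfn), PAGE_SIZE);
        if (r)
                return r;

        /* Step 2: initialize the shared_info content (wall clock). */
        return kvm_xen_shared_info_init(kvm);
}

A subsequent patch can then reuse step 2 unchanged while activating the cache from either a GPA or a user-space HVA.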
Comments
On Mon, 2023-12-04 at 14:43 +0000, Paul Durrant wrote:
> From: Paul Durrant <pdurrant@amazon.com>
>
> A subsequent patch will allow shared_info to be initialized using either a
> GPA or a user-space (i.e. VMM) HVA. To make that patch cleaner, separate
> the initialization of the shared_info content from the activation of the
> pfncache.
>
> Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Spot the difference...
> -        /*
> -         * This code mirrors kvm_write_wall_clock() except that it writes
> -         * directly through the pfn cache and doesn't mark the page dirty.
> -         */
> -        wall_nsec = kvm_get_wall_clock_epoch(kvm);

> +        /*
> +         * This code mirrors kvm_write_wall_clock() except that it writes
> +         * directly through the pfn cache and doesn't mark the page dirty.
> +         */
> +        wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);
(Hint: it's commit 5d6d6a7d7e66a)
With that fixed,
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
On 14/12/2023 13:41, David Woodhouse wrote:
> On Mon, 2023-12-04 at 14:43 +0000, Paul Durrant wrote:
>> From: Paul Durrant <pdurrant@amazon.com>
>>
>> A subsequent patch will allow shared_info to be initialized using either a
>> GPA or a user-space (i.e. VMM) HVA. To make that patch cleaner, separate
>> the initialization of the shared_info content from the activation of the
>> pfncache.
>>
>> Signed-off-by: Paul Durrant <pdurrant@amazon.com>
>
>
> Spot the difference...
>
>> -        /*
>> -         * This code mirrors kvm_write_wall_clock() except that it writes
>> -         * directly through the pfn cache and doesn't mark the page dirty.
>> -         */
>> -        wall_nsec = kvm_get_wall_clock_epoch(kvm);
>
>
>> +        /*
>> +         * This code mirrors kvm_write_wall_clock() except that it writes
>> +         * directly through the pfn cache and doesn't mark the page dirty.
>> +         */
>> +        wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);
>
> (Hint: it's commit 5d6d6a7d7e66a)
Ack.
>
> With that fixed,
>
> Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
>
Thanks.
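To make the agreed fix concrete: commit 5d6d6a7d7e66a replaced the open-coded wall-clock calculation with kvm_get_wall_clock_epoch(), as the removed lines in the hunk below show, so the relocated block should keep that call rather than reintroducing the old expression, i.e.:

        /*
         * This code mirrors kvm_write_wall_clock() except that it writes
         * directly through the pfn cache and doesn't mark the page dirty.
         */
        wall_nsec = kvm_get_wall_clock_epoch(kvm);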
@@ -34,41 +34,32 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);
 
 DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);
 
-static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
+static int kvm_xen_shared_info_init(struct kvm *kvm)
 {
         struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
         struct pvclock_wall_clock *wc;
-        gpa_t gpa = gfn_to_gpa(gfn);
         u32 *wc_sec_hi;
         u32 wc_version;
         u64 wall_nsec;
         int ret = 0;
         int idx = srcu_read_lock(&kvm->srcu);
 
-        if (gfn == KVM_XEN_INVALID_GFN) {
-                kvm_gpc_deactivate(gpc);
-                goto out;
-        }
+        read_lock_irq(&gpc->lock);
+        while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
+                read_unlock_irq(&gpc->lock);
 
-        do {
-                ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
+                ret = kvm_gpc_refresh(gpc, PAGE_SIZE);
                 if (ret)
                         goto out;
 
-                /*
-                 * This code mirrors kvm_write_wall_clock() except that it writes
-                 * directly through the pfn cache and doesn't mark the page dirty.
-                 */
-                wall_nsec = kvm_get_wall_clock_epoch(kvm);
-
-                /* It could be invalid again already, so we need to check */
                 read_lock_irq(&gpc->lock);
+        }
 
-                if (gpc->valid)
-                        break;
-
-                read_unlock_irq(&gpc->lock);
-        } while (1);
+        /*
+         * This code mirrors kvm_write_wall_clock() except that it writes
+         * directly through the pfn cache and doesn't mark the page dirty.
+         */
+        wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);
 
         /* Paranoia checks on the 32-bit struct layout */
         BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
@@ -639,12 +630,30 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                 }
                 break;
 
-        case KVM_XEN_ATTR_TYPE_SHARED_INFO:
+        case KVM_XEN_ATTR_TYPE_SHARED_INFO: {
+                int idx;
+
                 mutex_lock(&kvm->arch.xen.xen_lock);
-                r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
+
+                idx = srcu_read_lock(&kvm->srcu);
+
+                if (data->u.shared_info.gfn == KVM_XEN_INVALID_GFN) {
+                        kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);
+                        r = 0;
+                } else {
+                        r = kvm_gpc_activate(&kvm->arch.xen.shinfo_cache,
+                                             gfn_to_gpa(data->u.shared_info.gfn),
+                                             PAGE_SIZE);
+                }
+
+                srcu_read_unlock(&kvm->srcu, idx);
+
+                if (!r && kvm->arch.xen.shinfo_cache.active)
+                        r = kvm_xen_shared_info_init(kvm);
+
                 mutex_unlock(&kvm->arch.xen.xen_lock);
                 break;
-
+        }
         case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
                 if (data->u.vector && data->u.vector < 0x10)
                         r = -EINVAL;
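For completeness, a hedged sketch of the userspace side (not part of this patch): a VMM points the shared_info page at a guest frame, or tears it down again, with the existing KVM_XEN_HVM_SET_ATTR ioctl, assuming the VM has already enabled Xen support via KVM_XEN_HVM_CONFIG. The helper name is illustrative.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Illustrative helper: set (or clear, by passing KVM_XEN_INVALID_GFN) the
 * Xen shared_info GFN on a KVM VM file descriptor. Returns the ioctl
 * result: 0 on success, -1 with errno set on failure.
 */
static int set_xen_shared_info_gfn(int vm_fd, __u64 gfn)
{
        struct kvm_xen_hvm_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = KVM_XEN_ATTR_TYPE_SHARED_INFO;
        attr.u.shared_info.gfn = gfn;

        return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
}

With this patch applied, the kernel side of that ioctl activates (or deactivates) the shinfo pfncache first and only then calls kvm_xen_shared_info_init() to write the wall clock.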