[v3,11/13] KVM: selftests / xen: map shared_info using HVA rather than GFN
Commit Message
From: Paul Durrant <pdurrant@amazon.com>
Using the HVA of the shared_info page is more efficient, so if the
capability (KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) is present, use that method
to do the mapping.
NOTE: Have the juggle_shinfo_state() thread map and unmap using both
GFN and HVA, to make sure the older mechanism is not broken.
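Abridged from the diff below, the selection of the mapping method in the
test then looks roughly like this:

	int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
	struct kvm_xen_hvm_attr ha = {};

	if (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) {
		/* New method: map the shared_info page by host virtual address. */
		ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA;
		ha.u.shared_info.hva = (unsigned long)shinfo;
	} else {
		/* Fall back to the older GFN-based mapping. */
		ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO;
		ha.u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE;
	}
	vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha);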
Signed-off-by: Paul Durrant <pdurrant@amazon.com>
---
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: David Woodhouse <dwmw2@infradead.org>
v3:
- Re-work the juggle_shinfo_state() thread
v2:
- New in this version.
---
.../selftests/kvm/x86_64/xen_shinfo_test.c | 44 +++++++++++++++----
1 file changed, 35 insertions(+), 9 deletions(-)
Comments
On Mon, 2023-09-18 at 14:41 +0000, Paul Durrant wrote:
>
> for (;;) {
> - __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate);
> - __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate);
> + __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_gfn);
> pthread_testcancel();
> + __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_gfn);
> +
> + if (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) {
> + __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_hva);
> + pthread_testcancel();
> + __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_hva);
> + }
> }
>
So now the loop starts by activating it in GFN mode even if it was
already activated in HVA mode. Is that something we should even allow?
I suppose it doesn't hurt.
And it *may* leave it activated in either HVA or GFN mode.
Are both deactivate modes equivalent? I think they are, aren't they?
So it could be...
	for (;;) {
		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_gfn);
		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_gfn);
		if (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) {
			__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_hva);
			__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_hva);
		}
		pthread_testcancel();
	}
But that's just nitpicking, I suppose.
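(For reference, the two deactivate attributes used in the patch are
reproduced below, abridged from the hunk that follows; whether KVM treats
them as fully equivalent is the question above.)

	struct kvm_xen_hvm_attr cache_deactivate_gfn = {
		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
		.u.shared_info.gfn = KVM_XEN_INVALID_GFN
	};
	struct kvm_xen_hvm_attr cache_deactivate_hva = {
		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA,
		.u.shared_info.hva = 0
	};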
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
@@ -395,6 +395,7 @@ static int cmp_timespec(struct timespec *a, struct timespec *b)
return 0;
}
+static struct shared_info *shinfo;
static struct vcpu_info *vinfo;
static struct kvm_vcpu *vcpu;
@@ -410,20 +411,38 @@ static void *juggle_shinfo_state(void *arg)
{
struct kvm_vm *vm = (struct kvm_vm *)arg;
- struct kvm_xen_hvm_attr cache_activate = {
+ struct kvm_xen_hvm_attr cache_activate_gfn = {
.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
.u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE
};
- struct kvm_xen_hvm_attr cache_deactivate = {
+ struct kvm_xen_hvm_attr cache_deactivate_gfn = {
.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
.u.shared_info.gfn = KVM_XEN_INVALID_GFN
};
+ struct kvm_xen_hvm_attr cache_activate_hva = {
+ .type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA,
+ .u.shared_info.hva = (unsigned long)shinfo
+ };
+
+ struct kvm_xen_hvm_attr cache_deactivate_hva = {
+ .type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA,
+ .u.shared_info.hva = 0
+ };
+
+ int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
+
for (;;) {
- __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate);
- __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate);
+ __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_gfn);
pthread_testcancel();
+ __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_gfn);
+
+ if (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) {
+ __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_hva);
+ pthread_testcancel();
+ __vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_hva);
+ }
}
return NULL;
@@ -449,6 +468,7 @@ int main(int argc, char *argv[])
bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL);
bool do_evtchn_tests = do_eventfd_tests && !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND);
bool has_vcpu_id = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND);
+ bool has_shinfo_hva = !!(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA);
clock_gettime(CLOCK_REALTIME, &min_ts);
@@ -459,7 +479,7 @@ int main(int argc, char *argv[])
SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 3, 0);
virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 3);
- struct shared_info *shinfo = addr_gpa2hva(vm, SHINFO_VADDR);
+ shinfo = addr_gpa2hva(vm, SHINFO_VADDR);
int zero_fd = open("/dev/zero", O_RDONLY);
TEST_ASSERT(zero_fd != -1, "Failed to open /dev/zero");
@@ -503,10 +523,16 @@ int main(int argc, char *argv[])
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vid);
}
- struct kvm_xen_hvm_attr ha = {
- .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
- .u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE,
- };
+ struct kvm_xen_hvm_attr ha = {};
+
+ if (has_shinfo_hva) {
+ ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA;
+ ha.u.shared_info.hva = (unsigned long)shinfo;
+ } else {
+ ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO;
+ ha.u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE;
+ }
+
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha);
/*