On Tue, Jan 10, 2023, Peter Gonda wrote:
> Add kvm_vm.protected metadata. A protected VM's memory, and potentially
> its registers and other state, may not be accessible to KVM. This,
> combined with a new protected_phy_pages bitmap, will allow the selftests
> to check whether a given page is accessible.
>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Sean Christopherson <seanjc@google.com>
> Cc: Vishal Annapurve <vannapurve@google.com>
> Cc: Ackerley Tng <ackerleytng@google.com>
> Cc: Andrew Jones <andrew.jones@linux.dev>
> Originally-by: Michael Roth <michael.roth@amd.com>
> Signed-off-by: Peter Gonda <pgonda@google.com>
> ---
> .../selftests/kvm/include/kvm_util_base.h | 14 ++++++++++++--
> tools/testing/selftests/kvm/lib/kvm_util.c | 16 +++++++++++++---
> 2 files changed, 25 insertions(+), 5 deletions(-)
>
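
As for the new bitmap itself, the idea as I understand it is that a test
can query whether a given gfn was allocated as protected. E.g. such a
check could look like the below (hypothetical helper, not part of this
patch; assumes it lives in kvm_util.c next to the existing
userspace_mem_region_find()):

	static bool vm_gpa_is_protected(struct kvm_vm *vm, vm_paddr_t gpa)
	{
		struct userspace_mem_region *region;

		/* GPAs not backed by a memslot can't be protected. */
		region = userspace_mem_region_find(vm, gpa, gpa);
		if (!region)
			return false;

		return sparsebit_is_set(region->protected_phy_pages,
					gpa >> vm->page_shift);
	}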
> diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
> index fbc2a79369b8..015b59a0b80e 100644
> --- a/tools/testing/selftests/kvm/include/kvm_util_base.h
> +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
> @@ -45,6 +45,7 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
> struct userspace_mem_region {
> struct kvm_userspace_memory_region region;
> struct sparsebit *unused_phy_pages;
> + struct sparsebit *protected_phy_pages;
> int fd;
> off_t offset;
> enum vm_mem_backing_src_type backing_src_type;
> @@ -111,6 +112,9 @@ struct kvm_vm {
> vm_vaddr_t handlers;
> uint32_t dirty_ring_size;
>
> + /* VM protection enabled: SEV, etc. */
> + bool protected;
> +
> /* Cache of information for binary stats interface */
> int stats_fd;
> struct kvm_stats_header stats_header;
> @@ -679,10 +683,16 @@ const char *exit_reason_str(unsigned int exit_reason);
>
> vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
> uint32_t memslot);
> -vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
> - vm_paddr_t paddr_min, uint32_t memslot);
> +vm_paddr_t _vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
Two underscores, please. Ignore the terrible precedent that has been set;
we're slowly purging that crud.
> + vm_paddr_t paddr_min, uint32_t memslot, bool protected);
Wrap the new parameter to its own line; there's no strong justification for
running long in this case, since the declaration has already wrapped and the
definition wraps as well.
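
I.e. something like this (a sketch of the suggested rename plus wrapping):

	vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
					vm_paddr_t paddr_min, uint32_t memslot,
					bool protected);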
> vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
>
> +static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
> + vm_paddr_t paddr_min, uint32_t memslot)
> +{
> + return _vm_phy_pages_alloc(vm, num, paddr_min, memslot, vm->protected);
> +}
> +
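
Naming nit aside, the wrapper itself looks good: existing callers keep the
old signature and transparently pick up vm->protected. A caller that
deliberately wants an unprotected allocation in a protected VM would invoke
the inner helper directly, e.g. (sketch, assuming the double-underscore
rename suggested above):

	/* Force an unprotected allocation, even for a protected guest. */
	vm_paddr_t gpa = __vm_phy_pages_alloc(vm, 1,
					      KVM_UTIL_MIN_PFN * vm->page_size,
					      0, false);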
> /*
> * ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also
> * loads the test binary into guest memory and creates an IRQ chip (x86 only).
>
> diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> @@ -663,6 +663,7 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
> vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
> sparsebit_free(&region->unused_phy_pages);
> + sparsebit_free(&region->protected_phy_pages);
> ret = munmap(region->mmap_start, region->mmap_size);
> TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
> if (region->fd >= 0) {
> @@ -1010,6 +1011,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
> region->backing_src_type = src_type;
> region->unused_phy_pages = sparsebit_alloc();
> + region->protected_phy_pages = sparsebit_alloc();
> sparsebit_set_num(region->unused_phy_pages,
> guest_paddr >> vm->page_shift, npages);
> region->region.slot = slot;
> @@ -1799,6 +1801,10 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
> region->host_mem);
> fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
> sparsebit_dump(stream, region->unused_phy_pages, 0);
> + if (vm->protected) {
> + fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, "");
> + sparsebit_dump(stream, region->protected_phy_pages, 0);
> + }
> }
> fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
> sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
> @@ -1895,8 +1901,9 @@ const char *exit_reason_str(unsigned int exit_reason)
> * and their base address is returned. A TEST_ASSERT failure occurs if
> * not enough pages are available at or above paddr_min.
> */
> -vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
> - vm_paddr_t paddr_min, uint32_t memslot)
> +vm_paddr_t _vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
> + vm_paddr_t paddr_min, uint32_t memslot,
> + bool protected)
> {
> struct userspace_mem_region *region;
> sparsebit_idx_t pg, base;
> @@ -1929,8 +1936,11 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
> abort();
> }
> - for (pg = base; pg < base + num; ++pg)
> + for (pg = base; pg < base + num; ++pg) {
> sparsebit_clear(region->unused_phy_pages, pg);
> + if (protected)
> + sparsebit_set(region->protected_phy_pages, pg);
> + }
> return base * vm->page_size;
> }
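
With the rename, this all looks sane. E.g. a SEV test should then be able
to assert that allocations get tracked (hypothetical usage; assumes
memslot2region() is visible to the test and the VM has vm->protected set):

	struct userspace_mem_region *region = memslot2region(vm, 0);
	vm_paddr_t gpa = vm_phy_pages_alloc(vm, 1,
					    KVM_UTIL_MIN_PFN * vm->page_size, 0);

	/* Pages allocated in a protected VM should be marked protected. */
	TEST_ASSERT(sparsebit_is_set(region->protected_phy_pages,
				     gpa >> vm->page_shift),
		    "page should be tracked as protected");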