[RFC,v5,07/29] KVM: selftests: TDX: Update load_td_memory_region for VM memory backed by guest memfd
Commit Message
From: Ackerley Tng <ackerleytng@google.com>
If guest memory is backed by a restricted memfd:

+ UPM is being used, hence the encrypted memory region has to be
registered
+ We can avoid making a copy of guest memory before getting TDX to
initialize the memory region
Signed-off-by: Ackerley Tng <ackerleytng@google.com>
Signed-off-by: Ryan Afranji <afranji@google.com>
Signed-off-by: Sagi Shahar <sagis@google.com>
---
.../selftests/kvm/lib/x86_64/tdx/tdx_util.c | 41 +++++++++++++++----
1 file changed, 32 insertions(+), 9 deletions(-)
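For reference, the new register_encrypted_memory_region() helper added by this
patch is a thin wrapper around the selftest framework's vm_set_memory_attributes(),
which in turn issues the KVM_SET_MEMORY_ATTRIBUTES ioctl. Below is a minimal
sketch of the equivalent raw call, assuming kernel headers that already expose the
guest_memfd attributes uAPI; set_gpa_range_private() is an illustrative name, not
part of the patch or the framework.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#include "test_util.h"	/* TEST_ASSERT() from tools/testing/selftests/kvm */

/*
 * Mark a GPA range as private so KVM backs it from guest_memfd rather than
 * the shared userspace mapping. Error handling is reduced to an assert.
 */
static void set_gpa_range_private(int vm_fd, uint64_t gpa, uint64_t size)
{
	struct kvm_memory_attributes attr = {
		.address	= gpa,
		.size		= size,
		.attributes	= KVM_MEMORY_ATTRIBUTE_PRIVATE,
	};

	TEST_ASSERT(!ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attr),
		    "KVM_SET_MEMORY_ATTRIBUTES failed");
}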
Comments
On Tue, Dec 12, 2023 at 12:46:22PM -0800, Sagi Shahar wrote:
> From: Ackerley Tng <ackerleytng@google.com>
>
> If guest memory is backed by restricted memfd
>
> + UPM is being used, hence encrypted memory region has to be
> registered
> + Can avoid making a copy of guest memory before getting TDX to
> initialize the memory region
>
> Signed-off-by: Ackerley Tng <ackerleytng@google.com>
> Signed-off-by: Ryan Afranji <afranji@google.com>
> Signed-off-by: Sagi Shahar <sagis@google.com>
> ---
> .../selftests/kvm/lib/x86_64/tdx/tdx_util.c | 41 +++++++++++++++----
> 1 file changed, 32 insertions(+), 9 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/lib/x86_64/tdx/tdx_util.c b/tools/testing/selftests/kvm/lib/x86_64/tdx/tdx_util.c
> index 6b995c3f6153..063ff486fb86 100644
> --- a/tools/testing/selftests/kvm/lib/x86_64/tdx/tdx_util.c
> +++ b/tools/testing/selftests/kvm/lib/x86_64/tdx/tdx_util.c
> @@ -192,6 +192,21 @@ static void tdx_td_finalizemr(struct kvm_vm *vm)
> tdx_ioctl(vm->fd, KVM_TDX_FINALIZE_VM, 0, NULL);
> }
>
> +/*
> + * Other ioctls
> + */
> +
> +/**
> + * Register a memory region that may contain encrypted data in KVM.
> + */
> +static void register_encrypted_memory_region(
> + struct kvm_vm *vm, struct userspace_mem_region *region)
> +{
> + vm_set_memory_attributes(vm, region->region.guest_phys_addr,
> + region->region.memory_size,
> + KVM_MEMORY_ATTRIBUTE_PRIVATE);
> +}
> +
> /*
> * TD creation/setup/finalization
> */
> @@ -376,30 +391,38 @@ static void load_td_memory_region(struct kvm_vm *vm,
> if (!sparsebit_any_set(pages))
> return;
>
> +
> + if (region->region.guest_memfd != -1)
> + register_encrypted_memory_region(vm, region);
> +
> sparsebit_for_each_set_range(pages, i, j) {
> const uint64_t size_to_load = (j - i + 1) * vm->page_size;
> const uint64_t offset =
> (i - lowest_page_in_region) * vm->page_size;
> const uint64_t hva = hva_base + offset;
> const uint64_t gpa = gpa_base + offset;
> - void *source_addr;
> + void *source_addr = (void *)hva;
>
> /*
> * KVM_TDX_INIT_MEM_REGION ioctl cannot encrypt memory in place,
> * hence we have to make a copy if there's only one backing
> * memory source
> */
> - source_addr = mmap(NULL, size_to_load, PROT_READ | PROT_WRITE,
> - MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
> - TEST_ASSERT(
> - source_addr,
> - "Could not allocate memory for loading memory region");
> -
> - memcpy(source_addr, (void *)hva, size_to_load);
> + if (region->region.guest_memfd == -1) {
> + source_addr = mmap(NULL, size_to_load, PROT_READ | PROT_WRITE,
> + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
> + TEST_ASSERT(
> + source_addr,
> + "Could not allocate memory for loading memory region");
> +
> + memcpy(source_addr, (void *)hva, size_to_load);
> + memset((void *)hva, 0, size_to_load);
> + }
>
> tdx_init_mem_region(vm, source_addr, gpa, size_to_load);
>
> - munmap(source_addr, size_to_load);
> + if (region->region.guest_memfd == -1)
> + munmap(source_addr, size_to_load);
> }
For memslots 0, 1, and 2, when guest_memfd != -1,
is it possible to also munmap(mmap_start, mmap_size) once loading finishes?
(A rough sketch of such a helper follows after the quoted patch.)
> }
>
> --
> 2.43.0.472.g3155946c3a-goog
>
>
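The unmap suggested in the comment above could look roughly like the sketch
below. unmap_backing_after_load() is a hypothetical helper, not part of the
patch; mmap_start and mmap_size are the fields of the selftest framework's
struct userspace_mem_region, and the idea would be to call it at the end of
load_td_memory_region() for guest_memfd-backed slots.

/*
 * Hypothetical follow-up to the review comment: once the TD image has been
 * loaded from a guest_memfd-backed slot, the shared userspace alias is no
 * longer needed and could be unmapped to release address space.
 */
static void unmap_backing_after_load(struct userspace_mem_region *region)
{
	/* Only guest_memfd-backed (private) regions qualify. */
	if (region->region.guest_memfd == -1)
		return;

	TEST_ASSERT(!munmap(region->mmap_start, region->mmap_size),
		    "munmap() of region backing memory failed");
}

Whether this is actually safe depends on nothing in the framework touching the
shared alias after loading, which is essentially what the reviewer is asking.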