If guest memory is backed by restricted memfd:

+ UPM is being used, hence the encrypted memory region has to be
  registered with KVM
+ We can avoid making a copy of guest memory before getting TDX to
  initialize the memory region

Signed-off-by: Ackerley Tng <ackerleytng@google.com>
---
.../selftests/kvm/lib/x86_64/tdx/tdx_util.c | 43 +++++++++++++++----
1 file changed, 34 insertions(+), 9 deletions(-)
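
Note for reviewers: tdx_init_mem_region(), called near the end of the
second hunk, is a pre-existing helper in tdx_util.c and is not touched
by this diff. For context, a minimal sketch of the shape that helper is
assumed to take follows; the kvm_tdx_init_mem_region layout and the
KVM_TDX_MEASURE_MEMORY_REGION flag are taken from the not-yet-merged
KVM TDX series and are assumptions here, not something this patch
defines (selftest types such as struct kvm_vm come from the usual
kvm_util headers):

  /* Sketch only: assumed shape of the existing tdx_init_mem_region(). */
  struct kvm_tdx_init_mem_region {
  	__u64 source_addr;	/* hva of the pages to encrypt from */
  	__u64 gpa;		/* guest-physical destination */
  	__u64 nr_pages;
  };

  static void tdx_init_mem_region(struct kvm_vm *vm, void *source_pages,
  				uint64_t gpa, uint64_t size)
  {
  	struct kvm_tdx_init_mem_region mem_region = {
  		.source_addr = (uint64_t)source_pages,
  		.gpa = gpa,
  		.nr_pages = size / vm->page_size,
  	};

  	/* Encrypt (and measure) the source pages into the TD at gpa. */
  	tdx_ioctl(vm->fd, KVM_TDX_INIT_MEM_REGION,
  		  KVM_TDX_MEASURE_MEMORY_REGION, &mem_region);
  }

The point of this patch is that, with a restricted memfd backing the
region, source_addr can simply be the hva itself instead of a scratch
copy.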
@@ -207,6 +207,23 @@ static void tdx_td_finalizemr(struct kvm_vm *vm)
 	tdx_ioctl(vm->fd, KVM_TDX_FINALIZE_VM, 0, NULL);
 }
+/*
+ * Other ioctls
+ */
+
+/*
+ * Register a memory region that may contain encrypted data with KVM.
+ */
+static void register_encrypted_memory_region(struct kvm_vm *vm,
+					     struct userspace_mem_region *region)
+{
+	struct kvm_enc_region range = {
+		.addr = region->region.guest_phys_addr,
+		.size = region->region.memory_size,
+	};
+	vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
+}
+
 /*
  * TD creation/setup/finalization
  */
@@ -393,30 +410,38 @@ static void load_td_memory_region(struct kvm_vm *vm,
 	if (!sparsebit_any_set(pages))
 		return;
+
+	if (region->region_ext.restricted_fd != -1)
+		register_encrypted_memory_region(vm, region);
+
 	sparsebit_for_each_set_range(pages, i, j) {
 		const uint64_t size_to_load = (j - i + 1) * vm->page_size;
 		const uint64_t offset =
 			(i - lowest_page_in_region) * vm->page_size;
 		const uint64_t hva = hva_base + offset;
 		const uint64_t gpa = gpa_base + offset;
-		void *source_addr;
+		void *source_addr = (void *)hva;
 		/*
 		 * KVM_TDX_INIT_MEM_REGION ioctl cannot encrypt memory in place,
 		 * hence we have to make a copy if there's only one backing
 		 * memory source
 		 */
-		source_addr = mmap(NULL, size_to_load, PROT_READ | PROT_WRITE,
-				   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
-		TEST_ASSERT(
-			source_addr,
-			"Could not allocate memory for loading memory region");
-
-		memcpy(source_addr, (void *)hva, size_to_load);
+		if (region->region_ext.restricted_fd == -1) {
+			source_addr = mmap(NULL, size_to_load, PROT_READ | PROT_WRITE,
+					   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+			TEST_ASSERT(
+				source_addr != MAP_FAILED,
+				"Could not allocate memory for loading memory region");
+
+			memcpy(source_addr, (void *)hva, size_to_load);
+			memset((void *)hva, 0, size_to_load);
+		}
 		tdx_init_mem_region(vm, source_addr, gpa, size_to_load);
-		munmap(source_addr, size_to_load);
+		if (region->region_ext.restricted_fd == -1)
+			munmap(source_addr, size_to_load);
 	}
 }
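
For readability, the per-range loop body above amounts to the
following. load_one_range() is a hypothetical name used only for this
illustration and is not part of the patch; the logic simply restates
the diff above, including the mmap()/MAP_FAILED check:

  static void load_one_range(struct kvm_vm *vm,
  			   struct userspace_mem_region *region,
  			   uint64_t hva, uint64_t gpa, uint64_t size)
  {
  	/* With a restricted memfd, TDX can read the source pages in place. */
  	bool in_place = region->region_ext.restricted_fd != -1;
  	void *source_addr = (void *)hva;

  	if (!in_place) {
  		/* Stage the contents in a scratch buffer, then zero the hva. */
  		source_addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
  				   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  		TEST_ASSERT(source_addr != MAP_FAILED,
  			    "Could not allocate memory for loading memory region");
  		memcpy(source_addr, (void *)hva, size);
  		memset((void *)hva, 0, size);
  	}

  	tdx_init_mem_region(vm, source_addr, gpa, size);

  	if (!in_place)
  		munmap(source_addr, size);
  }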