new file mode 100644
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _TOOLS_LINUX_ASM_ARM64_KVM_HOST_H
+#define _TOOLS_LINUX_ASM_ARM64_KVM_HOST_H
+
+struct kvm_vm_arch {};
+
+#endif // _TOOLS_LINUX_ASM_ARM64_KVM_HOST_H
new file mode 100644
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _TOOLS_LINUX_ASM_RISCV_KVM_HOST_H
+#define _TOOLS_LINUX_ASM_RISCV_KVM_HOST_H
+
+struct kvm_vm_arch {};
+
+#endif // _TOOLS_LINUX_ASM_RISCV_KVM_HOST_H
new file mode 100644
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _TOOLS_LINUX_ASM_S390_KVM_HOST_H
+#define _TOOLS_LINUX_ASM_S390_KVM_HOST_H
+
+struct kvm_vm_arch {};
+
+#endif // _TOOLS_LINUX_ASM_S390_KVM_HOST_H
new file mode 100644
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _TOOLS_LINUX_ASM_X86_KVM_HOST_H
+#define _TOOLS_LINUX_ASM_X86_KVM_HOST_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
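+/*
+ * Arch-specific, per-VM state for selftests.  pte_me_mask is the memory
+ * encryption mask ORed into page-table pointers, while c_bit and s_bit
+ * are ORed into leaf PTEs to mark a page as encrypted (e.g. SEV's C-bit)
+ * or shared, respectively.
+ */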
+struct kvm_vm_arch {
+ uint64_t pte_me_mask;
+ uint64_t c_bit;
+ uint64_t s_bit;
+};
+
+#endif // _TOOLS_LINUX_ASM_X86_KVM_HOST_H
@@ -17,6 +17,8 @@
#include "linux/rbtree.h"
#include <asm/atomic.h>
+#include <asm/kvm.h>
+#include <asm/kvm_host.h>
#include <sys/ioctl.h>
@@ -90,6 +92,9 @@ struct kvm_vm {
vm_vaddr_t idt;
vm_vaddr_t handlers;
uint32_t dirty_ring_size;
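+	/* GPA bit(s) that tag a physical address as protected (e.g. encrypted) */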
+ uint64_t gpa_protected_mask;
+
+ struct kvm_vm_arch arch;
/* VM protection enabled: SEV, etc*/
bool protected;
@@ -127,6 +132,7 @@ enum vm_guest_mode {
VM_MODE_P40V48_16K,
VM_MODE_P40V48_64K,
VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */
+	VM_MODE_PXXV48_4K_SEV,	/* For 48bits VA but ANY bits PA, SEV enabled */
VM_MODE_P47V64_4K,
VM_MODE_P44V64_4K,
VM_MODE_P36V48_4K,
@@ -400,6 +406,17 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
+
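+/*
+ * Protected VMs may tag guest physical addresses, e.g. with the encryption
+ * bit.  vm_untag_gpa() strips the tag to recover the raw GPA and
+ * vm_tag_gpa() applies it.
+ */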
+static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
+{
+ return gpa & ~vm->gpa_protected_mask;
+}
+
+static inline vm_paddr_t vm_tag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
+{
+ return gpa | vm->gpa_protected_mask;
+}
+
void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);
@@ -863,4 +880,6 @@ static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}
+bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);
+
#endif /* SELFTEST_KVM_UTIL_BASE_H */
@@ -1363,9 +1363,10 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
* address providing the memory to the vm physical address is returned.
* A TEST_ASSERT failure occurs if no region containing gpa exists.
*/
-void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
+void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa_tagged)
{
struct userspace_mem_region *region;
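+	/* The caller may pass a tagged GPA; strip the tag before the lookup. */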
+ vm_paddr_t gpa = vm_untag_gpa(vm, gpa_tagged);
region = userspace_mem_region_find(vm, gpa, gpa);
if (!region) {
@@ -2042,3 +2043,22 @@ void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
break;
}
}
+
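+/*
+ * Returns true iff the VM is protected and the page containing @paddr was
+ * allocated as protected (tracked in the region's protected_phy_pages map).
+ */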
+bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
+{
+ sparsebit_idx_t pg = 0;
+ struct userspace_mem_region *region;
+
+ if (!vm->protected)
+ return false;
+
+ region = userspace_mem_region_find(vm, paddr, paddr);
+ if (!region) {
+ TEST_FAIL("No vm physical memory at 0x%lx", paddr);
+ return false;
+ }
+
+ pg = paddr >> vm->page_shift;
+ return sparsebit_is_set(region->protected_phy_pages, pg);
+}
@@ -127,6 +127,8 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
/* If needed, create page map l4 table. */
if (!vm->pgd_created) {
vm->pgd = vm_alloc_page_table(vm);
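+		/* Tag the root page table with the memory-encryption mask, if any. */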
+ vm->pgd |= vm->arch.pte_me_mask;
+
vm->pgd_created = true;
}
}
@@ -148,13 +150,17 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
int target_level)
{
uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, current_level);
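+	/* PTEs point at raw GPAs; strip any protection tag (e.g. the SEV C-bit). */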
+ uint64_t paddr_raw = vm_untag_gpa(vm, paddr);
if (!(*pte & PTE_PRESENT_MASK)) {
*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
-		if (current_level == target_level)
-			*pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
-		else
+		if (current_level == target_level) {
+			*pte |= PTE_LARGE_MASK | (paddr_raw & PHYSICAL_PAGE_MASK);
+		} else {
			*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
+			*pte |= vm->arch.pte_me_mask;
+		}
	} else {
/*
* Entry already present. Assert that the caller doesn't want
@@ -192,6 +198,8 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
"Physical address beyond maximum supported,\n"
" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
paddr, vm->max_gfn, vm->page_size);
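+	/* Callers must pass a raw, untagged GPA; the C/S-bit is applied below. */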
+ TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
+ "Unexpected bits in paddr: %lx", paddr);
/*
* Allocate upper level page tables, if not already present. Return
@@ -215,6 +223,11 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
"PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
+
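+	/*
+	 * Tag the leaf PTE: protected pages get the encrypted-page bit
+	 * (e.g. SEV's C-bit), all other pages get the shared bit.
+	 */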
+ if (vm_is_gpa_protected(vm, paddr))
+ *pte |= vm->arch.c_bit;
+ else
+ *pte |= vm->arch.s_bit;
}
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
@@ -542,7 +555,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
if (!(pte[index[0]] & PTE_PRESENT_MASK))
goto unmapped_gva;
- return (PTE_GET_PFN(pte[index[0]]) * vm->page_size) + (gva & ~PAGE_MASK);
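+	/* Strip any protection tag so callers always get a raw GPA. */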
+ return vm_untag_gpa(vm, PTE_GET_PFN(pte[index[0]]) * vm->page_size) + (gva & ~PAGE_MASK);
unmapped_gva:
TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);