@@ -402,6 +402,8 @@ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
+vm_vaddr_t vm_vaddr_alloc_1to1(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
+			       uint32_t data_memslot);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
@@ -1237,6 +1237,8 @@ static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
* vm - Virtual Machine
* sz - Size in bytes
* vaddr_min - Minimum starting virtual address
+ * paddr_min - Minimum starting physical address
+ * data_memslot - Memslot number to allocate the physical pages from
* encrypt - Whether the region should be handled as encrypted
*
* Output Args: None
@@ -1251,14 +1253,15 @@ static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
* a page.
*/
static vm_vaddr_t
-_vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, bool encrypt)
+_vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
+		vm_paddr_t paddr_min, uint32_t data_memslot, bool encrypt)
{
	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);

	virt_pgd_alloc(vm);
	vm_paddr_t paddr = _vm_phy_pages_alloc(vm, pages,
-					       KVM_UTIL_MIN_PFN * vm->page_size,
-					       0, encrypt);
+					       paddr_min,
+					       data_memslot, encrypt);

	/*
	 * Find an unused range of virtual page addresses of at least
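
The page-count computation above rounds @sz up to whole pages: the shift
contributes the number of complete pages and the modulo term adds one more
page when @sz is not page-aligned. Below is a standalone sketch of the same
arithmetic; the 4 KiB page size is an assumption for illustration, since the
library takes the real values from struct kvm_vm.

/* Sketch of the round-up used by _vm_vaddr_alloc(); not part of the patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t page_shift = 12;			/* assumed: 4 KiB pages */
	const uint64_t page_size = 1ULL << page_shift;
	const uint64_t sizes[] = { 1, 4096, 4097, 12288 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		uint64_t sz = sizes[i];
		/* Full pages, plus one if sz has a partial tail page. */
		uint64_t pages = (sz >> page_shift) + ((sz % page_size) != 0);

		printf("sz=%6lu -> %lu page(s)\n",
		       (unsigned long)sz, (unsigned long)pages);
	}
	return 0;
}
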
@@ -1281,12 +1284,34 @@ _vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, bool encrypt

vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
{
-	return _vm_vaddr_alloc(vm, sz, vaddr_min, vm->protected);
+	return _vm_vaddr_alloc(vm, sz, vaddr_min,
+			       KVM_UTIL_MIN_PFN * vm->page_size, 0,
+			       vm->protected);
}

vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
{
-	return _vm_vaddr_alloc(vm, sz, vaddr_min, false);
+	return _vm_vaddr_alloc(vm, sz, vaddr_min,
+			       KVM_UTIL_MIN_PFN * vm->page_size, 0, false);
+}
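
Both wrappers now forward the same defaults to _vm_vaddr_alloc(), a physical
floor of KVM_UTIL_MIN_PFN * vm->page_size and memslot 0, and differ only in
the encrypt argument. A minimal usage sketch follows, assuming an
already-created vm; the helper name is hypothetical, while KVM_UTIL_MIN_VADDR
and pr_info() come from the selftest headers.

#include "kvm_util.h"

/* Hypothetical selftest helper; not part of this patch. */
static void alloc_private_and_shared(struct kvm_vm *vm)
{
	/* Treated as encrypted when vm->protected is set (protected VMs). */
	vm_vaddr_t priv_gva = vm_vaddr_alloc(vm, vm->page_size,
					     KVM_UTIL_MIN_VADDR);

	/* Always treated as unencrypted/shared, even in a protected VM. */
	vm_vaddr_t shared_gva = vm_vaddr_alloc_shared(vm, vm->page_size,
						      KVM_UTIL_MIN_VADDR);

	pr_info("private gva: 0x%lx, shared gva: 0x%lx\n",
		priv_gva, shared_gva);
}
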
+
+/*
+ * Allocate memory in @vm of size @sz in the memslot with id @data_memslot,
+ * identity-mapped (gva == gpa), beginning at the desired address @vaddr_min.
+ *
+ * If there isn't enough memory at @vaddr_min, find the next possible address
+ * that can meet the requested size in the given memslot.
+ *
+ * Return the address where the memory is allocated.
+ */
+vm_vaddr_t vm_vaddr_alloc_1to1(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
+			       uint32_t data_memslot)
+{
+	vm_vaddr_t gva = _vm_vaddr_alloc(vm, sz, vaddr_min, (vm_paddr_t)vaddr_min,
+					 data_memslot, vm->protected);
+	ASSERT_EQ(gva, addr_gva2gpa(vm, gva));
+
+	return gva;
}
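
A hedged usage sketch for the new helper, assuming the test has already added
a memslot (for example with vm_userspace_mem_region_add()) whose guest-physical
range covers the requested address; the memslot id, the target address, and
the helper name are illustrative.

#include "kvm_util.h"

#define TEST_DATA_MEMSLOT	1		/* assumed memslot id */
#define TEST_DATA_GPA		0x100000	/* assumed target address */

/* Hypothetical selftest helper; not part of this patch. */
static vm_vaddr_t alloc_identity_page(struct kvm_vm *vm)
{
	/*
	 * Request one page at TEST_DATA_GPA; on success the returned guest
	 * virtual address equals the guest physical address, which
	 * vm_vaddr_alloc_1to1() asserts internally.
	 */
	vm_vaddr_t gva = vm_vaddr_alloc_1to1(vm, vm->page_size,
					     TEST_DATA_GPA, TEST_DATA_MEMSLOT);

	pr_info("identity-mapped page: gva 0x%lx, gpa 0x%lx\n",
		gva, addr_gva2gpa(vm, gva));
	return gva;
}
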
/*