@@ -108,6 +108,18 @@ int gunyah_rm_get_hyp_resources(struct gunyah_rm *rm, u16 vmid,
int gunyah_rm_get_vmid(struct gunyah_rm *rm, u16 *vmid);
+int gunyah_rm_vm_set_demand_paging(struct gunyah_rm *rm, u16 vmid, u32 count,
+ struct gunyah_rm_mem_entry *mem_entries);
+
+enum gunyah_rm_range_id {
+ GUNYAH_RM_RANGE_ID_IMAGE = 0,
+ GUNYAH_RM_RANGE_ID_FIRMWARE = 1,
+};
+
+int gunyah_rm_vm_set_address_layout(struct gunyah_rm *rm, u16 vmid,
+ enum gunyah_rm_range_id range_id,
+ u64 base_address, u64 size);
+
struct gunyah_resource *
gunyah_rm_alloc_resource(struct gunyah_rm *rm,
struct gunyah_rm_hyp_resource *hyp_resource);
@@ -24,6 +24,8 @@
#define GUNYAH_RM_RPC_VM_INIT 0x5600000B
#define GUNYAH_RM_RPC_VM_GET_HYP_RESOURCES 0x56000020
#define GUNYAH_RM_RPC_VM_GET_VMID 0x56000024
+#define GUNYAH_RM_RPC_VM_SET_DEMAND_PAGING 0x56000033
+#define GUNYAH_RM_RPC_VM_SET_ADDRESS_LAYOUT 0x56000034
/* clang-format on */
struct gunyah_rm_vm_common_vmid_req {
@@ -103,6 +105,23 @@ struct gunyah_rm_vm_config_image_req {
__le64 dtb_size;
} __packed;
+/* Call: VM_SET_DEMAND_PAGING */
+struct gunyah_rm_vm_set_demand_paging_req {
+ __le16 vmid;
+ __le16 _padding;
+ __le32 range_count;
+ DECLARE_FLEX_ARRAY(struct gunyah_rm_mem_entry, ranges);
+} __packed;
+
+/* Call: VM_SET_ADDRESS_LAYOUT */
+struct gunyah_rm_vm_set_address_layout_req {
+ __le16 vmid;
+ __le16 _padding;
+ __le32 range_id;
+ __le64 range_base;
+ __le64 range_size;
+} __packed;
+
/*
* Several RM calls take only a VMID as a parameter and give only standard
* response back. Deduplicate boilerplate code by using this common call.
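
Side note on the wire layout the __packed structs above describe: the demand paging request has an 8-byte header (vmid + padding + range_count) followed by the flexible array of ranges, and the address layout request is a fixed 24 bytes (2 + 2 + 4 + 8 + 8). A minimal sketch of compile-time checks, assuming <linux/build_bug.h> and <linux/stddef.h> are available as usual; not part of the patch:

/* Sketch: verify the packed request layouts match the sizes described
 * above.
 */
static_assert(offsetof(struct gunyah_rm_vm_set_demand_paging_req, ranges) == 8);
static_assert(sizeof(struct gunyah_rm_vm_set_address_layout_req) == 24);
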
@@ -494,3 +513,57 @@ int gunyah_rm_get_vmid(struct gunyah_rm *rm, u16 *vmid)
return ret;
}
EXPORT_SYMBOL_GPL(gunyah_rm_get_vmid);
+
+/**
+ * gunyah_rm_vm_set_demand_paging() - Enable demand paging of memory regions
+ * @rm: Handle to a Gunyah resource manager
+ * @vmid: VMID of the other VM
+ * @count: Number of demand paged memory regions
+ * @entries: Array of @count memory region entries to be demand paged
+ */
+int gunyah_rm_vm_set_demand_paging(struct gunyah_rm *rm, u16 vmid, u32 count,
+ struct gunyah_rm_mem_entry *entries)
+{
+ struct gunyah_rm_vm_set_demand_paging_req *req __free(kfree) = NULL;
+ size_t req_size;
+
+ req_size = struct_size(req, ranges, count);
+ if (req_size == SIZE_MAX)
+ return -EINVAL;
+
+ req = kzalloc(req_size, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->vmid = cpu_to_le16(vmid);
+ req->range_count = cpu_to_le32(count);
+ memcpy(req->ranges, entries, sizeof(*entries) * count);
+
+ return gunyah_rm_call(rm, GUNYAH_RM_RPC_VM_SET_DEMAND_PAGING, req,
+ req_size, NULL, NULL);
+}
+ALLOW_ERROR_INJECTION(gunyah_rm_vm_set_demand_paging, ERRNO);
+
+/**
+ * gunyah_rm_vm_set_address_layout() - Set the address and size of an image range
+ * @rm: Handle to a Gunyah resource manager
+ * @vmid: VMID of the other VM
+ * @range_id: Which image range to set: kernel image or firmware
+ * @base_address: Base address of the image range
+ * @size: Size of the image range
+ */
+int gunyah_rm_vm_set_address_layout(struct gunyah_rm *rm, u16 vmid,
+ enum gunyah_rm_range_id range_id,
+ u64 base_address, u64 size)
+{
+ struct gunyah_rm_vm_set_address_layout_req req = {
+ .vmid = cpu_to_le16(vmid),
+ .range_id = cpu_to_le32(range_id),
+ .range_base = cpu_to_le64(base_address),
+ .range_size = cpu_to_le64(size),
+ };
+
+ return gunyah_rm_call(rm, GUNYAH_RM_RPC_VM_SET_ADDRESS_LAYOUT, &req,
+ sizeof(req), NULL, NULL);
+}
+ALLOW_ERROR_INJECTION(gunyah_rm_vm_set_address_layout, ERRNO);
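
Finally, a caller-side sketch of the demand paging call. The phys_addr/size fields used to fill gunyah_rm_mem_entry are an assumption here (the entry layout is not shown in this patch), and the helper is hypothetical:

/* Illustrative only: register a single demand-pageable region with the
 * resource manager.  Field names of gunyah_rm_mem_entry are assumed.
 */
static int example_enable_demand_paging(struct gunyah_rm *rm, u16 vmid,
					u64 base, u64 size)
{
	struct gunyah_rm_mem_entry entry = {
		.phys_addr = cpu_to_le64(base),
		.size = cpu_to_le64(size),
	};

	return gunyah_rm_vm_set_demand_paging(rm, vmid, 1, &entry);
}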