@@ -436,8 +436,8 @@ static int gh_rm_send_request(struct gh_rm_rpc *rsc_mgr, u32 message_id,
* Context: Process context. Will sleep waiting for reply.
* Return: >0 is standard reply error from RM. <0 on internal error.
*/
-int gh_rm_call(struct gh_rm_rpc *rsc_mgr, u32 message_id, void *req_buff, size_t req_buff_size,
- void **resp_buf, size_t *resp_buff_size)
+static int gh_rm_call(struct gh_rm_rpc *rsc_mgr, u32 message_id, void *req_buff,
+ size_t req_buff_size, void **resp_buf, size_t *resp_buff_size)
{
struct gh_rm_connection *connection;
int ret;
@@ -566,5 +566,218 @@ void gh_rm_rpc_remove(struct gh_rm_rpc *rm)
}
EXPORT_SYMBOL_GPL(gh_rm_rpc_remove);
+/*
+ * Several RM calls take only a VMID as a parameter and return only the
+ * standard response. Deduplicate that boilerplate with this common helper.
+ */
+static int gh_rm_common_vmid_call(struct gh_rm_rpc *rm, u32 message_id, u16 vmid)
+{
+ void *resp = NULL;
+ struct gh_vm_common_vmid_req req_payload = {
+ .vmid = vmid,
+ };
+ size_t resp_size;
+ int ret;
+
+ ret = gh_rm_call(rm, message_id, &req_payload, sizeof(req_payload), &resp, &resp_size);
+ if (!ret)
+ kfree(resp);
+
+ WARN_ON(!ret && resp_size);
+
+ return ret;
+}
+
+/**
+ * gh_rm_alloc_vmid() - Allocate a new VM in Gunyah. Returns the VM identifier.
+ * @rm: Handle to a Gunyah resource manager
+ * @vmid: Use GH_VMID_INVAL to dynamically allocate a VM. A reserved VMID can also be requested
+ *        for a special-purpose platform-defined VM.
+ *
+ * Return: the allocated VMID on dynamic allocation, 0 on successful reservation of a specific
+ *         VMID, or an error (see gh_rm_call() for the error conventions).
+ */
+int gh_rm_alloc_vmid(struct gh_rm_rpc *rm, u16 vmid)
+{
+ void *resp;
+ struct gh_vm_common_vmid_req req_payload = { 0 };
+ struct gh_vm_common_vmid_req *resp_payload;
+ size_t resp_size;
+ int ret;
+
+ /* Normalize before filling the request: dynamic allocation is requested with VMID 0 */
+ if (vmid == GH_VMID_INVAL)
+ vmid = 0;
+ req_payload.vmid = vmid;
+ ret = gh_rm_call(rm, GH_RM_RPC_VM_ALLOC_VMID, &req_payload, sizeof(req_payload), &resp,
+ &resp_size);
+ if (ret)
+ return ret;
+
+ if (!vmid) {
+ if (resp_size != sizeof(*resp_payload)) {
+ ret = -EIO;
+ } else {
+ resp_payload = resp;
+ ret = resp_payload->vmid;
+ }
+ }
+ kfree(resp);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gh_rm_alloc_vmid);
+
+/**
+ * gh_rm_dealloc_vmid() - Dispose of the VMID
+ * @rm: Handle to a Gunyah resource manager
+ * @vmid: VM identifier allocated with gh_rm_alloc_vmid()
+ */
+int gh_rm_dealloc_vmid(struct gh_rm_rpc *rm, u16 vmid)
+{
+ return gh_rm_common_vmid_call(rm, GH_RM_RPC_VM_DEALLOC_VMID, vmid);
+}
+EXPORT_SYMBOL_GPL(gh_rm_dealloc_vmid);
+
+/**
+ * gh_rm_vm_start() - Move the VM into "ready to run" state
+ * @rm: Handle to a Gunyah resource manager
+ * @vmid: VM identifier
+ *
+ * On VMs which use proxy scheduling, vcpu_run is needed to actually run the VM.
+ * On VMs which use Gunyah's scheduling, the vCPUs start executing in accordance with Gunyah
+ * scheduling policies.
+ */
+int gh_rm_vm_start(struct gh_rm_rpc *rm, u16 vmid)
+{
+ return gh_rm_common_vmid_call(rm, GH_RM_RPC_VM_START, vmid);
+}
+EXPORT_SYMBOL_GPL(gh_rm_vm_start);
+
+/**
+ * gh_rm_vm_stop() - Send a request to Resource Manager VM to stop a VM.
+ * @rm: Handle to a Gunyah resource manager
+ * @vmid: VM identifier
+ */
+int gh_rm_vm_stop(struct gh_rm_rpc *rm, u16 vmid)
+{
+ struct gh_vm_stop_req req_payload = {
+ .vmid = vmid,
+ };
+ void *resp;
+ size_t resp_size;
+ int ret;
+
+ ret = gh_rm_call(rm, GH_RM_RPC_VM_STOP, &req_payload, sizeof(req_payload),
+ &resp, &resp_size);
+ if (ret)
+ return ret;
+ kfree(resp);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gh_rm_vm_stop);
+
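+/**
+ * gh_rm_vm_configure() - Describe the VM image to the Resource Manager
+ * @rm: Handle to a Gunyah resource manager
+ * @vmid: VM identifier
+ * @auth_mechanism: Authentication mechanism RM should use for the VM image
+ * @mem_handle: Handle to the memory that holds the VM image
+ * @image_offset: Offset of the VM image within the memory referenced by @mem_handle
+ * @image_size: Size of the VM image
+ * @dtb_offset: Offset of the VM's devicetree blob within the memory referenced by @mem_handle
+ * @dtb_size: Size of the VM's devicetree blob
+ */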
+int gh_rm_vm_configure(struct gh_rm_rpc *rm, u16 vmid, enum gh_rm_vm_auth_mechanism auth_mechanism,
+ u32 mem_handle, u64 image_offset, u64 image_size, u64 dtb_offset, u64 dtb_size)
+{
+ struct gh_vm_config_image_req req_payload = { 0 };
+ void *resp;
+ size_t resp_size;
+ int ret;
+
+ req_payload.vmid = vmid;
+ req_payload.auth_mech = auth_mechanism;
+ req_payload.mem_handle = mem_handle;
+ req_payload.image_offset_low = image_offset;
+ req_payload.image_offset_high = image_offset >> 32;
+ req_payload.image_size_low = image_size;
+ req_payload.image_size_high = image_size >> 32;
+ req_payload.dtb_offset_low = dtb_offset;
+ req_payload.dtb_offset_high = dtb_offset >> 32;
+ req_payload.dtb_size_low = dtb_size;
+ req_payload.dtb_size_high = dtb_size >> 32;
+
+ ret = gh_rm_call(rm, GH_RM_RPC_VM_CONFIG_IMAGE, &req_payload, sizeof(req_payload),
+ &resp, &resp_size);
+ if (ret)
+ return ret;
+ kfree(resp);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gh_rm_vm_configure);
+
+/**
+ * gh_rm_vm_init() - Move the VM to initialized state.
+ * @rm: Handle to a Gunyah resource manager
+ * @vmid: VM identifier
+ *
+ * RM will allocate needed resources for the VM. After gh_rm_vm_init, gh_rm_get_hyp_resources()
+ * can be called to learn of the capabilities we can use with the new VM.
+ */
+int gh_rm_vm_init(struct gh_rm_rpc *rm, u16 vmid)
+{
+ return gh_rm_common_vmid_call(rm, GH_RM_RPC_VM_INIT, vmid);
+}
+EXPORT_SYMBOL_GPL(gh_rm_vm_init);
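+
+/*
+ * Illustrative sketch only (not part of the driver): one way a caller might
+ * chain the calls above to bring up a VM. The rm handle, mem_handle and the
+ * image/dtb geometry are assumed to come from elsewhere, and error handling
+ * is simplified; RM-reported failures are positive values (see gh_rm_call()).
+ *
+ *   ret = gh_rm_alloc_vmid(rm, GH_VMID_INVAL);
+ *   if (ret < 0)
+ *       return ret;
+ *   vmid = ret;
+ *
+ *   ret = gh_rm_vm_configure(rm, vmid, GH_RM_VM_AUTH_NONE, mem_handle,
+ *                            image_offset, image_size, dtb_offset, dtb_size);
+ *   if (!ret)
+ *       ret = gh_rm_vm_init(rm, vmid);
+ *   if (!ret)
+ *       ret = gh_rm_vm_start(rm, vmid);
+ *   if (ret)
+ *       gh_rm_dealloc_vmid(rm, vmid);
+ */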
+
+/**
+ * gh_rm_get_hyp_resources() - Retrieve hypervisor resources (capabilities) associated with a VM
+ * @rm: Handle to a Gunyah resource manager
+ * @vmid: VMID of the other VM to get the resources of
+ * @resources: Set by gh_rm_get_hyp_resources and contains the returned hypervisor resources.
+ *             The caller must kfree() the buffer when done with it.
+ *
+ * Return: number of gh_rm_hyp_resource entries filled into *resources, or an error otherwise
+ */
+ssize_t gh_rm_get_hyp_resources(struct gh_rm_rpc *rm, u16 vmid,
+ struct gh_rm_hyp_resource **resources)
+{
+ struct gh_vm_get_hyp_resources_resp *resp;
+ size_t resp_size;
+ int ret;
+ struct gh_vm_common_vmid_req req_payload = {0};
+
+ req_payload.vmid = vmid;
+
+ ret = gh_rm_call(rm, GH_RM_RPC_VM_GET_HYP_RESOURCES,
+ &req_payload, sizeof(req_payload),
+ (void **)&resp, &resp_size);
+ if (ret)
+ return ret;
+
+ if (resp_size < sizeof(*resp) ||
+ (sizeof(*resp->entries) && (resp->n_entries > U32_MAX / sizeof(*resp->entries))) ||
+ (resp_size != sizeof(*resp) + (resp->n_entries * sizeof(*resp->entries)))) {
+ ret = -EIO;
+ goto out;
+ }
+
+ *resources = kmemdup(resp->entries, resp->n_entries * sizeof(*resp->entries),
+ GFP_KERNEL);
+ if (!*resources) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = resp->n_entries;
+
+out:
+ kfree(resp);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gh_rm_get_hyp_resources);
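+
+/*
+ * Illustrative sketch only (not part of the driver): consuming the array
+ * returned by gh_rm_get_hyp_resources(). The low/high field pairs are split
+ * halves of 64-bit values, and the caller owns (and must kfree()) the array.
+ * handle_resource() is a placeholder for whatever the caller does per entry.
+ *
+ *   struct gh_rm_hyp_resource *res;
+ *   ssize_t n, i;
+ *
+ *   n = gh_rm_get_hyp_resources(rm, vmid, &res);
+ *   if (n < 0)
+ *       return n;
+ *   for (i = 0; i < n; i++)
+ *       handle_resource(res[i].type, res[i].resource_label,
+ *                       ((u64)res[i].cap_id_high << 32) | res[i].cap_id_low,
+ *                       res[i].virq);
+ *   kfree(res);
+ */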
+
+/**
+ * gh_rm_get_vmid() - Retrieve VMID of this virtual machine
+ * @rm: Handle to a Gunyah resource manager
+ * @vmid: Filled with the VMID of this VM
+ */
+int gh_rm_get_vmid(struct gh_rm_rpc *rm, u16 *vmid)
+{
+ void *resp;
+ size_t resp_size;
+ int ret;
+ int payload = 0;
+
+ ret = gh_rm_call(rm, GH_RM_RPC_VM_GET_VMID, &payload, sizeof(payload), &resp, &resp_size);
+ if (ret)
+ return ret;
+
+ if (resp_size != sizeof(*vmid))
+ ret = -EIO;
+ else
+ *vmid = *(u16 *)resp;
+ kfree(resp);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gh_rm_get_vmid);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Gunyah Resource Manager RPC Driver");
@@ -34,4 +34,50 @@ struct platform_device;
struct gh_rm_rpc *gh_rm_rpc_init(struct platform_device *pdev);
void gh_rm_rpc_remove(struct gh_rm_rpc *rm);
+/* Message IDs: VM Management */
+#define GH_RM_RPC_VM_ALLOC_VMID 0x56000001
+#define GH_RM_RPC_VM_DEALLOC_VMID 0x56000002
+#define GH_RM_RPC_VM_START 0x56000004
+#define GH_RM_RPC_VM_STOP 0x56000005
+#define GH_RM_RPC_VM_CONFIG_IMAGE 0x56000009
+#define GH_RM_RPC_VM_INIT 0x5600000B
+#define GH_RM_RPC_VM_GET_HYP_RESOURCES 0x56000020
+#define GH_RM_RPC_VM_GET_VMID 0x56000024
+#define GH_RM_RPC_VM_SET_BOOT_CONTEXT 0x56000031
+
+/* Call: VM_ALLOC_VMID, VM_DEALLOC_VMID, VM_START, VM_INIT, VM_GET_HYP_RESOURCES */
+struct gh_vm_common_vmid_req {
+ u16 vmid;
+ u16 reserved0;
+} __packed;
+
+/* Call: VM_STOP */
+struct gh_vm_stop_req {
+ u16 vmid;
+ u8 flags;
+ u8 reserved;
+ u32 stop_reason;
+} __packed;
+
+/* Call: VM_CONFIG_IMAGE */
+struct gh_vm_config_image_req {
+ u16 vmid;
+ u16 auth_mech;
+ u32 mem_handle;
+ u32 image_offset_low;
+ u32 image_offset_high;
+ u32 image_size_low;
+ u32 image_size_high;
+ u32 dtb_offset_low;
+ u32 dtb_offset_high;
+ u32 dtb_size_low;
+ u32 dtb_size_high;
+} __packed;
+
+/* Call: GET_HYP_RESOURCES */
+struct gh_vm_get_hyp_resources_resp {
+ u32 n_entries;
+ struct gh_rm_hyp_resource entries[];
+} __packed;
+
#endif
@@ -15,4 +15,62 @@
/* Gunyah recognizes VMID0 as an alias to the current VM's ID */
#define GH_VMID_SELF 0
+struct gh_rm_rpc;
+
+enum gh_rm_vm_status {
+ GH_RM_VM_STATUS_NO_STATE = 0,
+ GH_RM_VM_STATUS_INIT = 1,
+ GH_RM_VM_STATUS_READY = 2,
+ GH_RM_VM_STATUS_RUNNING = 3,
+ GH_RM_VM_STATUS_PAUSED = 4,
+ GH_RM_VM_STATUS_LOAD = 5,
+ GH_RM_VM_STATUS_AUTH = 6,
+ GH_RM_VM_STATUS_INIT_FAILED = 8,
+ GH_RM_VM_STATUS_EXITED = 9,
+ GH_RM_VM_STATUS_RESETTING = 10,
+ GH_RM_VM_STATUS_RESET = 11,
+};
+
+/* RPC Calls */
+int gh_rm_alloc_vmid(struct gh_rm_rpc *rm, u16 vmid);
+int gh_rm_dealloc_vmid(struct gh_rm_rpc *rm, u16 vmid);
+int gh_rm_vm_start(struct gh_rm_rpc *rm, u16 vmid);
+int gh_rm_vm_stop(struct gh_rm_rpc *rm, u16 vmid);
+
+enum gh_rm_vm_auth_mechanism {
+ GH_RM_VM_AUTH_NONE = 0,
+ GH_RM_VM_AUTH_QCOM_PIL_ELF = 1,
+ GH_RM_VM_AUTH_QCOM_ANDROID_PVM = 2,
+};
+
+int gh_rm_vm_configure(struct gh_rm_rpc *rm, u16 vmid, enum gh_rm_vm_auth_mechanism auth_mechanism,
+ u32 mem_handle, u64 image_offset, u64 image_size,
+ u64 dtb_offset, u64 dtb_size);
+int gh_rm_vm_init(struct gh_rm_rpc *rm, u16 vmid);
+
+struct gh_rm_hyp_resource {
+ u8 type;
+ u8 reserved;
+ u16 partner_vmid;
+ u32 resource_handle;
+ u32 resource_label;
+ u32 cap_id_low;
+ u32 cap_id_high;
+ u32 virq_handle;
+ u32 virq;
+ u32 base_low;
+ u32 base_high;
+ u32 size_low;
+ u32 size_high;
+} __packed;
+
+ssize_t gh_rm_get_hyp_resources(struct gh_rm_rpc *rm, u16 vmid,
+ struct gh_rm_hyp_resource **resources);
+int gh_rm_get_vmid(struct gh_rm_rpc *rm, u16 *vmid);
+
+#define GH_RM_BOOT_CONTEXT_REG_SET_REGISTERS 0
+#define GH_RM_BOOT_CONTEXT_REG_SET_PC 1
+#define GH_RM_BOOT_CONTEXT_REG_SET_SP_ELx 2
+int gh_rm_set_boot_context(struct gh_rm_rpc *rm, u16 vmid, u8 reg_set, u8 reg_idx, u64 value);
+
#endif