@@ -183,6 +183,99 @@ void gh_vm_function_unregister(struct gh_vm_function *fn)
}
EXPORT_SYMBOL_GPL(gh_vm_function_unregister);
+int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket)
+{
+ struct gh_vm_resource_ticket *iter;
+ struct gh_resource *ghrsc, *rsc_iter;
+ int ret = 0;
+
+ mutex_lock(&ghvm->resources_lock);
+ list_for_each_entry(iter, &ghvm->resource_tickets, vm_list) {
+ if (iter->resource_type == ticket->resource_type && iter->label == ticket->label) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+
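+	/* Pin the ticket owner's module so populate()/unpopulate() stay callable */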
+ if (!try_module_get(ticket->owner)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ list_add(&ticket->vm_list, &ghvm->resource_tickets);
+ INIT_LIST_HEAD(&ticket->resources);
+
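+	/* Offer any resources that were discovered before this ticket was registered */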
+ list_for_each_entry_safe(ghrsc, rsc_iter, &ghvm->resources, list) {
+ if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
+ if (ticket->populate(ticket, ghrsc))
+ list_move(&ghrsc->list, &ticket->resources);
+ }
+ }
+out:
+ mutex_unlock(&ghvm->resources_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gh_vm_add_resource_ticket);
+
+void gh_vm_remove_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket)
+{
+ struct gh_resource *ghrsc, *iter;
+
+ mutex_lock(&ghvm->resources_lock);
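+	/* Give this ticket's resources back to the VM's general pool */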
+ list_for_each_entry_safe(ghrsc, iter, &ticket->resources, list) {
+ ticket->unpopulate(ticket, ghrsc);
+ list_move(&ghrsc->list, &ghvm->resources);
+ }
+
+ module_put(ticket->owner);
+ list_del(&ticket->vm_list);
+ mutex_unlock(&ghvm->resources_lock);
+}
+EXPORT_SYMBOL_GPL(gh_vm_remove_resource_ticket);
+
+static void gh_vm_add_resource(struct gh_vm *ghvm, struct gh_resource *ghrsc)
+{
+ struct gh_vm_resource_ticket *ticket;
+
+ mutex_lock(&ghvm->resources_lock);
+ list_for_each_entry(ticket, &ghvm->resource_tickets, vm_list) {
+ if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
+ if (ticket->populate(ticket, ghrsc))
+ list_add(&ghrsc->list, &ticket->resources);
+ else
+ list_add(&ghrsc->list, &ghvm->resources);
+			/*
+			 * Unconditional goto: duplicate tickets with the same
+			 * type and label cannot be registered, so no other
+			 * ticket in the list can match this resource even if
+			 * populate() failed.
+			 */
+ goto found;
+ }
+ }
+ list_add(&ghrsc->list, &ghvm->resources);
+found:
+ mutex_unlock(&ghvm->resources_lock);
+}
+
+static void gh_vm_clean_resources(struct gh_vm *ghvm)
+{
+ struct gh_vm_resource_ticket *ticket, *titer;
+ struct gh_resource *ghrsc, *riter;
+
+	if (!list_empty(&ghvm->resource_tickets)) {
+		dev_warn(ghvm->parent, "Dangling resource tickets:\n");
+		list_for_each_entry_safe(ticket, titer, &ghvm->resource_tickets, vm_list) {
+			dev_warn(ghvm->parent, " %pS\n", ticket->populate);
+			gh_vm_remove_resource_ticket(ghvm, ticket);
+		}
+	}
+
+	/* gh_vm_remove_resource_ticket() takes resources_lock, so only grab it now */
+	mutex_lock(&ghvm->resources_lock);
+ list_for_each_entry_safe(ghrsc, riter, &ghvm->resources, list) {
+ gh_rm_free_resource(ghrsc);
+ }
+ mutex_unlock(&ghvm->resources_lock);
+}
+
static int gh_vm_rm_notification_status(struct gh_vm *ghvm, void *data)
{
struct gh_rm_vm_status_payload *payload = data;
@@ -265,6 +358,9 @@ static __must_check struct gh_vm *gh_vm_alloc(struct gh_rm *rm)
init_waitqueue_head(&ghvm->vm_status_wait);
INIT_WORK(&ghvm->free_work, gh_vm_free);
kref_init(&ghvm->kref);
+ mutex_init(&ghvm->resources_lock);
+ INIT_LIST_HEAD(&ghvm->resources);
+ INIT_LIST_HEAD(&ghvm->resource_tickets);
INIT_LIST_HEAD(&ghvm->functions);
ghvm->vm_status = GH_RM_VM_STATUS_NO_STATE;
@@ -274,9 +370,11 @@ static __must_check struct gh_vm *gh_vm_alloc(struct gh_rm *rm)
static int gh_vm_start(struct gh_vm *ghvm)
{
struct gh_vm_mem *mapping;
+ struct gh_rm_hyp_resources *resources;
+ struct gh_resource *ghrsc;
u64 dtb_offset;
u32 mem_handle;
- int ret;
+ int ret, i, n;
down_write(&ghvm->status_lock);
if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) {
@@ -336,6 +434,22 @@ static int gh_vm_start(struct gh_vm *ghvm)
}
ghvm->vm_status = GH_RM_VM_STATUS_READY;
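+	/*
+	 * Fetch the resources the resource manager created for this VM and
+	 * offer each one to any matching resource ticket.
+	 */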
+ ret = gh_rm_get_hyp_resources(ghvm->rm, ghvm->vmid, &resources);
+ if (ret) {
+ dev_warn(ghvm->parent, "Failed to get hypervisor resources for VM: %d\n", ret);
+ goto err;
+ }
+
+ for (i = 0, n = le32_to_cpu(resources->n_entries); i < n; i++) {
+ ghrsc = gh_rm_alloc_resource(ghvm->rm, &resources->entries[i]);
+ if (!ghrsc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ gh_vm_add_resource(ghvm, ghrsc);
+ }
+
ret = gh_rm_vm_start(ghvm->rm, ghvm->vmid);
if (ret) {
dev_warn(ghvm->parent, "Failed to start VM: %d\n", ret);
@@ -457,6 +571,7 @@ static void gh_vm_free(struct work_struct *work)
gh_vm_stop(ghvm);
gh_vm_remove_functions(ghvm);
+ gh_vm_clean_resources(ghvm);
if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE &&
ghvm->vm_status != GH_RM_VM_STATUS_LOAD &&
@@ -7,6 +7,7 @@
#define _GH_VM_MGR_H
#include <linux/gunyah_rsc_mgr.h>
+#include <linux/gunyah_vm_mgr.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
@@ -52,6 +53,9 @@ struct gh_vm {
struct list_head memory_mappings;
struct mutex fn_lock;
struct list_head functions;
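+	/* resources_lock protects the resources and resource_tickets lists */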
+ struct mutex resources_lock;
+ struct list_head resources;
+ struct list_head resource_tickets;
};
int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *region);
@@ -94,4 +94,35 @@ void gh_vm_function_unregister(struct gh_vm_function *f);
module_gh_vm_function(_name); \
MODULE_ALIAS_GH_VM_FUNCTION(_type, _idx)
+/**
+ * struct gh_vm_resource_ticket - Represents a ticket to reserve exclusive access to VM resource(s)
+ * @vm_list: for @gh_vm->resource_tickets
+ * @resources: List of resource(s) associated with this ticket (members are from @gh_resource->list)
+ * @resource_type: Type of resource this ticket reserves
+ * @label: Label of the resource from resource manager this ticket reserves.
+ * @owner: owner of the ticket
+ * @populate: callback provided by the ticket owner and called when a resource is found that
+ * matches @resource_type and @label. Note that this callback could be called
+ * multiple times if userspace created multiple resources with the same type/label.
+ * This callback may also run long after gh_vm_add_resource_ticket() returns,
+ * since the ticket can be registered before the VM starts.
+ * @unpopulate: callback provided by the ticket owner and called when the ticket owner should
+ * no longer use the resource provided in the argument. When unpopulate() returns,
+ * the ticket owner must not use the resource any more, as the resource
+ * may be freed.
+ */
+struct gh_vm_resource_ticket {
+ struct list_head vm_list;
+ struct list_head resources;
+ enum gh_resource_type resource_type;
+ u32 label;
+
+ struct module *owner;
+ bool (*populate)(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc);
+ void (*unpopulate)(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc);
+};
+
+int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket);
+void gh_vm_remove_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket);
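+
+/*
+ * Example (illustrative sketch only, not part of this interface): how a
+ * hypothetical ticket owner might claim a vCPU resource. The names gh_my_vcpu,
+ * gh_my_vcpu_bind() and the chosen resource type value are assumptions for the
+ * example; only the gh_vm_resource_ticket fields and
+ * gh_vm_add_resource_ticket()/gh_vm_remove_resource_ticket() come from this
+ * header.
+ *
+ *	static bool gh_my_vcpu_populate(struct gh_vm_resource_ticket *ticket,
+ *					struct gh_resource *ghrsc)
+ *	{
+ *		struct gh_my_vcpu *vcpu = container_of(ticket, struct gh_my_vcpu, ticket);
+ *
+ *		vcpu->rsc = ghrsc;	// remember the resource for later use
+ *		return true;		// accept: resource moves onto ticket->resources
+ *	}
+ *
+ *	static void gh_my_vcpu_unpopulate(struct gh_vm_resource_ticket *ticket,
+ *					  struct gh_resource *ghrsc)
+ *	{
+ *		struct gh_my_vcpu *vcpu = container_of(ticket, struct gh_my_vcpu, ticket);
+ *
+ *		vcpu->rsc = NULL;	// stop using it; it may be freed after this returns
+ *	}
+ *
+ *	static int gh_my_vcpu_bind(struct gh_vm *ghvm, struct gh_my_vcpu *vcpu, u32 label)
+ *	{
+ *		vcpu->ticket.resource_type = GH_RESOURCE_TYPE_VCPU;	// assumed enum gh_resource_type value
+ *		vcpu->ticket.label = label;
+ *		vcpu->ticket.owner = THIS_MODULE;
+ *		vcpu->ticket.populate = gh_my_vcpu_populate;
+ *		vcpu->ticket.unpopulate = gh_my_vcpu_unpopulate;
+ *
+ *		return gh_vm_add_resource_ticket(ghvm, &vcpu->ticket);
+ *	}
+ */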
+
#endif