[drm-misc-next,3/5] drm/imagination: vm: fix drm_gpuvm reference count

Message ID 20231124233650.152653-4-dakr@redhat.com
State New
Headers
Series PowerVR VM fixes |

Commit Message

Danilo Krummrich Nov. 24, 2023, 11:36 p.m. UTC
  The driver-specific reference count indicates whether the VM should be
torn down, whereas GPUVM's reference count indicates whether the VM
structure can finally be freed.

Hence, free the VM structure in pvr_gpuvm_free() and drop the last
GPUVM reference after tearing down the VM. Generally, this prevents
lifetime issues such as the VM being freed while drm_gpuvm_bo
structures still hold references to the VM.

Fixes: ff5f643de0bf ("drm/imagination: Add GEM and VM related code")
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
---
 drivers/gpu/drm/imagination/pvr_vm.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
  

Comments

Donald Robson Nov. 28, 2023, 10:36 a.m. UTC | #1
Thanks Danilo. It's obvious now you've pointed it out!

Reviewed-by: Donald Robson <donald.robson@imgtec.com>

On Sat, 2023-11-25 at 00:36 +0100, Danilo Krummrich wrote:
> *** CAUTION: This email originates from a source not known to Imagination Technologies. Think before you click a link or open an attachment ***
> 
> The driver specific reference count indicates whether the VM should be
> teared down, whereas GPUVM's reference count indicates whether the VM
> structure can finally be freed.
> 
> Hence, free the VM structure in pvr_gpuvm_free() and drop the last
> GPUVM reference after tearing down the VM. Generally, this prevents
> lifetime issues such as the VM being freed as long as drm_gpuvm_bo
> structures still hold references to the VM.
> 
> Fixes: ff5f643de0bf ("drm/imagination: Add GEM and VM related code")
> Signed-off-by: Danilo Krummrich <dakr@redhat.com>
> ---
>  drivers/gpu/drm/imagination/pvr_vm.c | 11 ++++++++---
>  1 file changed, 8 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
> index 1e89092c3dcc..e0d74d9a6190 100644
> --- a/drivers/gpu/drm/imagination/pvr_vm.c
> +++ b/drivers/gpu/drm/imagination/pvr_vm.c
> @@ -64,6 +64,12 @@ struct pvr_vm_context {
>  	struct drm_gem_object dummy_gem;
>  };
>  
> +static inline
> +struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
> +{
> +	return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
> +}
> +
>  struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
>  {
>  	if (vm_ctx)
> @@ -535,7 +541,7 @@ pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
>  
>  void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
>  {
> -
> +	kfree(to_pvr_vm_context(gpuvm));
>  }
>  
>  static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
> @@ -655,12 +661,11 @@ pvr_vm_context_release(struct kref *ref_count)
>  	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
>  			     vm_ctx->gpuvm_mgr.mm_range));
>  
> -	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
>  	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
>  	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
>  	mutex_destroy(&vm_ctx->lock);
>  
> -	kfree(vm_ctx);
> +	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
>  }
>  
>  /**
  
Maxime Ripard Nov. 28, 2023, 1:01 p.m. UTC | #2
On Sat, 25 Nov 2023 00:36:38 +0100, Danilo Krummrich wrote:
> The driver specific reference count indicates whether the VM should be
> teared down, whereas GPUVM's reference count indicates whether the VM
> structure can finally be freed.
> 
> Hence, free the VM structure in pvr_gpuvm_free() and drop the last
> GPUVM reference after tearing down the VM. Generally, this prevents
> lifetime issues such as the VM being freed as long as drm_gpuvm_bo
> structures still hold references to the VM.
> 
> [...]

Applied to drm/drm-misc (drm-misc-next).

Thanks!
Maxime
  

Patch

diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 1e89092c3dcc..e0d74d9a6190 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -64,6 +64,12 @@  struct pvr_vm_context {
 	struct drm_gem_object dummy_gem;
 };
 
+static inline
+struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
+{
+	return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
+}
+
 struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
 {
 	if (vm_ctx)
@@ -535,7 +541,7 @@  pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
 
 void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
 {
-
+	kfree(to_pvr_vm_context(gpuvm));
 }
 
 static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
@@ -655,12 +661,11 @@  pvr_vm_context_release(struct kref *ref_count)
 	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
 			     vm_ctx->gpuvm_mgr.mm_range));
 
-	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
 	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
 	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
 	mutex_destroy(&vm_ctx->lock);
 
-	kfree(vm_ctx);
+	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
 }
 
 /**