[v18,18/26] drm/shmem-helper: Change sgt allocation policy

Message ID: 20231029230205.93277-19-dmitry.osipenko@collabora.com
State: New
Series: Add generic memory shrinker to VirtIO-GPU and Panfrost DRM drivers

Commit Message

Dmitry Osipenko Oct. 29, 2023, 11:01 p.m. UTC
In preparation for the addition of drm-shmem memory shrinker support, change
the SGT allocation policy as follows:

1. An SGT can be allocated only if the shmem pages are pinned at the
time of allocation; otherwise the allocation fails.

2. Drivers must ensure that the pages stay pinned for as long as the SGT
is in use, and must get a new SGT if the pages were unpinned.

The new policy is required by the shrinker because it will move pages
to/from swap while they are unpinned, invalidating the SGT pointer once
the pages are relocated.

Previous patches prepared the drivers for the new policy.

Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 51 +++++++++++++-------------
 1 file changed, 26 insertions(+), 25 deletions(-)
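
For illustration, a minimal driver-side sketch of the new contract, assuming
the drm_gem_shmem_pin()/drm_gem_shmem_unpin() and drm_gem_shmem_get_pages_sgt()
helpers as used elsewhere in this series; my_drv_bo_map() and its teardown
counterpart are hypothetical:

#include <drm/drm_gem_shmem_helper.h>

/* Hypothetical mapping path: pin the pages first, then fetch the SGT.
 * The SGT is valid only while the pages stay pinned, so they are kept
 * pinned until the h/w mapping is torn down.
 */
static int my_drv_bo_map(struct drm_gem_shmem_object *shmem)
{
	struct sg_table *sgt;
	int ret;

	ret = drm_gem_shmem_pin(shmem);	/* pages can't be swapped out now */
	if (ret)
		return ret;

	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	if (IS_ERR(sgt)) {
		drm_gem_shmem_unpin(shmem);
		return PTR_ERR(sgt);
	}

	/* ... program the GPU MMU from sgt ... */

	/* drm_gem_shmem_unpin() is called only from the (hypothetical)
	 * my_drv_bo_unmap(), once the MMU mapping is gone.
	 */
	return 0;
}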
  

Comments

Boris Brezillon Nov. 10, 2023, 11:15 a.m. UTC | #1
On Mon, 30 Oct 2023 02:01:57 +0300
Dmitry Osipenko <dmitry.osipenko@collabora.com> wrote:

> In preparation for the addition of drm-shmem memory shrinker support, change
> the SGT allocation policy as follows:
> 
> 1. An SGT can be allocated only if the shmem pages are pinned at the
> time of allocation; otherwise the allocation fails.
> 
> 2. Drivers must ensure that the pages stay pinned for as long as the SGT
> is in use, and must get a new SGT if the pages were unpinned.

In general, I would discourage drivers from caching the sgt returned by
drm_gem_shmem_get_pages_sgt[_locked](): the GEM SHMEM layer does the
caching already, so calling drm_gem_shmem_get_pages_sgt_locked() should
be pretty cheap. This implies that any portion of the code using an sgt
returned by drm_gem_shmem_get_pages_sgt_locked() must be surrounded by
get/pin_pages()/put/unpin_pages() calls, unless the pages are known to
be pinned for the whole BO lifetime. And of course, as soon as an MMU
mapping is created, the pages must remain pinned until the MMU mapping
is torn down, even if the sgt is no longer accessed.
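
A rough sketch of that transient-use pattern (my_drv_consume_sgt() is a
hypothetical consumer; this assumes no MMU mapping outlives the call):

static int my_drv_transient_dma(struct drm_gem_shmem_object *shmem)
{
	struct sg_table *sgt;
	int ret;

	/* Pin around each use instead of caching the sgt: the shmem
	 * helper caches it internally, so re-fetching is cheap.
	 */
	ret = drm_gem_shmem_pin(shmem);
	if (ret)
		return ret;

	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	if (IS_ERR(sgt)) {
		drm_gem_shmem_unpin(shmem);
		return PTR_ERR(sgt);
	}

	my_drv_consume_sgt(sgt);

	/* The sgt must not be used past this point. */
	drm_gem_shmem_unpin(shmem);
	return 0;
}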

> 
> The new policy is required by the shrinker because it will move pages
> to/from swap while they are unpinned, invalidating the SGT pointer once
> the pages are relocated.
> 
> Previous patches prepared the drivers for the new policy.
> 
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>

Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>

> ---
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 51 +++++++++++++-------------
>  1 file changed, 26 insertions(+), 25 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index f371ebc6f85c..1420d2166b76 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -133,6 +133,14 @@ drm_gem_shmem_free_pages(struct drm_gem_shmem_object *shmem)
>  {
>  	struct drm_gem_object *obj = &shmem->base;
>  
> +	if (shmem->sgt) {
> +		dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
> +				  DMA_BIDIRECTIONAL, 0);
> +		sg_free_table(shmem->sgt);
> +		kfree(shmem->sgt);
> +		shmem->sgt = NULL;
> +	}
> +
>  #ifdef CONFIG_X86
>  	if (shmem->map_wc)
>  		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
> @@ -155,23 +163,12 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
>  {
>  	struct drm_gem_object *obj = &shmem->base;
>  
> -	if (obj->import_attach) {
> +	if (obj->import_attach)
>  		drm_prime_gem_destroy(obj, shmem->sgt);
> -	} else {
> -		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
> -
> -		if (shmem->sgt) {
> -			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
> -					  DMA_BIDIRECTIONAL, 0);
> -			sg_free_table(shmem->sgt);
> -			kfree(shmem->sgt);
> -		}
> -		if (shmem->pages)
> -			drm_gem_shmem_put_pages_locked(shmem);
>  
> -		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
> -		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
> -	}
> +	drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
> +	drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
> +	drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
>  
>  	drm_gem_object_release(obj);
>  	kfree(shmem);
> @@ -705,6 +702,9 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
>  
>  	drm_WARN_ON(obj->dev, obj->import_attach);
>  
> +	if (drm_WARN_ON(obj->dev, !shmem->pages))
> +		return ERR_PTR(-ENOMEM);
> +
>  	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
> @@ -720,15 +720,10 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
>  
>  	drm_WARN_ON(obj->dev, obj->import_attach);
>  
> -	ret = drm_gem_shmem_get_pages_locked(shmem);
> -	if (ret)
> -		return ERR_PTR(ret);
> -
>  	sgt = drm_gem_shmem_get_sg_table(shmem);
> -	if (IS_ERR(sgt)) {
> -		ret = PTR_ERR(sgt);
> -		goto err_put_pages;
> -	}
> +	if (IS_ERR(sgt))
> +		return sgt;
> +
>  	/* Map the pages for use by the h/w. */
>  	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
>  	if (ret)
> @@ -741,8 +736,6 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
>  err_free_sgt:
>  	sg_free_table(sgt);
>  	kfree(sgt);
> -err_put_pages:
> -	drm_gem_shmem_put_pages_locked(shmem);
>  	return ERR_PTR(ret);
>  }
>  
> @@ -759,6 +752,14 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
>   * and difference between dma-buf imported and natively allocated objects.
>   * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
>   *
> + * Drivers should adhere to these SGT usage rules:
> + *
> + * 1. SGT should be allocated only if shmem pages are pinned at the
> + *    time of allocation, otherwise allocation will fail.
> + *
> + * 2. Drivers should ensure that pages are pinned during the time of
> + *    SGT usage and should get new SGT if pages were unpinned.
> + *
>   * Returns:
>   * A pointer to the scatter/gather table of pinned pages or errno on failure.
>   */
  

Patch

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index f371ebc6f85c..1420d2166b76 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -133,6 +133,14 @@  drm_gem_shmem_free_pages(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
+	if (shmem->sgt) {
+		dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
+				  DMA_BIDIRECTIONAL, 0);
+		sg_free_table(shmem->sgt);
+		kfree(shmem->sgt);
+		shmem->sgt = NULL;
+	}
+
 #ifdef CONFIG_X86
 	if (shmem->map_wc)
 		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
@@ -155,23 +163,12 @@  void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	if (obj->import_attach) {
+	if (obj->import_attach)
 		drm_prime_gem_destroy(obj, shmem->sgt);
-	} else {
-		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
-
-		if (shmem->sgt) {
-			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
-					  DMA_BIDIRECTIONAL, 0);
-			sg_free_table(shmem->sgt);
-			kfree(shmem->sgt);
-		}
-		if (shmem->pages)
-			drm_gem_shmem_put_pages_locked(shmem);
 
-		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
-		drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
-	}
+	drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
+	drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
+	drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
 
 	drm_gem_object_release(obj);
 	kfree(shmem);
@@ -705,6 +702,9 @@  struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
 
 	drm_WARN_ON(obj->dev, obj->import_attach);
 
+	if (drm_WARN_ON(obj->dev, !shmem->pages))
+		return ERR_PTR(-ENOMEM);
+
 	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
@@ -720,15 +720,10 @@  static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
 
 	drm_WARN_ON(obj->dev, obj->import_attach);
 
-	ret = drm_gem_shmem_get_pages_locked(shmem);
-	if (ret)
-		return ERR_PTR(ret);
-
 	sgt = drm_gem_shmem_get_sg_table(shmem);
-	if (IS_ERR(sgt)) {
-		ret = PTR_ERR(sgt);
-		goto err_put_pages;
-	}
+	if (IS_ERR(sgt))
+		return sgt;
+
 	/* Map the pages for use by the h/w. */
 	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
 	if (ret)
@@ -741,8 +736,6 @@  static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
 err_free_sgt:
 	sg_free_table(sgt);
 	kfree(sgt);
-err_put_pages:
-	drm_gem_shmem_put_pages_locked(shmem);
 	return ERR_PTR(ret);
 }
 
@@ -759,6 +752,14 @@  static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
  * and difference between dma-buf imported and natively allocated objects.
  * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
  *
+ * Drivers should adhere to these SGT usage rules:
+ *
+ * 1. SGT should be allocated only if shmem pages are pinned at the
+ *    time of allocation, otherwise allocation will fail.
+ *
+ * 2. Drivers should ensure that pages are pinned during the time of
+ *    SGT usage and should get new SGT if pages were unpinned.
+ *
  * Returns:
  * A pointer to the scatter/gather table of pinned pages or errno on failure.
  */