[v2,05/15] dmaengine: idxd: Export wq resource management functions

Message ID 20230328153535.126223-6-tom.zanussi@linux.intel.com
State New
Series crypto: Add Intel Analytics Accelerator (IAA) crypto compression driver

Commit Message

Tom Zanussi March 28, 2023, 3:35 p.m. UTC
To allow idxd sub-drivers to access the wq resource management
functions, export them.

Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
---
 drivers/dma/idxd/device.c | 5 +++++
 1 file changed, 5 insertions(+)
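
For context only, not part of this patch: a sub-driver built as a separate
module must import the IDXD symbol namespace before it can use these
exports, and would typically pair idxd_wq_alloc_resources() and
idxd_wq_init_percpu_ref() at setup with idxd_wq_quiesce() and
idxd_wq_free_resources() at teardown. A minimal sketch of such a consumer
follows; the my_drv_* names, the error handling, and the include path for
idxd.h are illustrative assumptions, not code from this series.

/* Hypothetical idxd sub-driver module (names are illustrative). */
#include <linux/module.h>
#include "idxd.h"	/* struct idxd_wq and the exported prototypes; include path is an assumption */

MODULE_IMPORT_NS(IDXD);		/* required to use symbols exported into the IDXD namespace */
MODULE_LICENSE("GPL");		/* GPL-only exports need a GPL-compatible consumer */

static int my_drv_wq_setup(struct idxd_wq *wq)
{
	int rc;

	/* Allocate descriptors, completion records and the descriptor sbitmap. */
	rc = idxd_wq_alloc_resources(wq);
	if (rc)
		return rc;

	/* Set up the percpu reference used to track in-flight work on the wq. */
	rc = idxd_wq_init_percpu_ref(wq);
	if (rc) {
		idxd_wq_free_resources(wq);
		return rc;
	}

	return 0;
}

static void my_drv_wq_teardown(struct idxd_wq *wq)
{
	/* Takes wq->wq_lock and waits for outstanding work to drain. */
	idxd_wq_quiesce(wq);
	idxd_wq_free_resources(wq);
}

Whether teardown calls idxd_wq_quiesce() or the unlocked __idxd_wq_quiesce()
depends on whether the caller already holds wq->wq_lock; the patch exports
both variants.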
  

Comments

Dave Jiang March 28, 2023, 4:04 p.m. UTC | #1
On 3/28/23 8:35 AM, Tom Zanussi wrote:
> To allow idxd sub-drivers to access the wq resource management
> functions, export them.
> 
> Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>

> ---
>   drivers/dma/idxd/device.c | 5 +++++
>   1 file changed, 5 insertions(+)
> 
> diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
> index af1aa88a66c1..7837d0a56314 100644
> --- a/drivers/dma/idxd/device.c
> +++ b/drivers/dma/idxd/device.c
> @@ -161,6 +161,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
>   	free_hw_descs(wq);
>   	return rc;
>   }
> +EXPORT_SYMBOL_NS_GPL(idxd_wq_alloc_resources, IDXD);
>   
>   void idxd_wq_free_resources(struct idxd_wq *wq)
>   {
> @@ -174,6 +175,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
>   	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
>   	sbitmap_queue_free(&wq->sbq);
>   }
> +EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, IDXD);
>   
>   int idxd_wq_enable(struct idxd_wq *wq)
>   {
> @@ -422,6 +424,7 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
>   	reinit_completion(&wq->wq_resurrect);
>   	return 0;
>   }
> +EXPORT_SYMBOL_NS_GPL(idxd_wq_init_percpu_ref, IDXD);
>   
>   void __idxd_wq_quiesce(struct idxd_wq *wq)
>   {
> @@ -431,6 +434,7 @@ void __idxd_wq_quiesce(struct idxd_wq *wq)
>   	complete_all(&wq->wq_resurrect);
>   	wait_for_completion(&wq->wq_dead);
>   }
> +EXPORT_SYMBOL_NS_GPL(__idxd_wq_quiesce, IDXD);
>   
>   void idxd_wq_quiesce(struct idxd_wq *wq)
>   {
> @@ -438,6 +442,7 @@ void idxd_wq_quiesce(struct idxd_wq *wq)
>   	__idxd_wq_quiesce(wq);
>   	mutex_unlock(&wq->wq_lock);
>   }
> +EXPORT_SYMBOL_NS_GPL(idxd_wq_quiesce, IDXD);
>   
>   /* Device control bits */
>   static inline bool idxd_is_enabled(struct idxd_device *idxd)
  

Patch

diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index af1aa88a66c1..7837d0a56314 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -161,6 +161,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
 	free_hw_descs(wq);
 	return rc;
 }
+EXPORT_SYMBOL_NS_GPL(idxd_wq_alloc_resources, IDXD);
 
 void idxd_wq_free_resources(struct idxd_wq *wq)
 {
@@ -174,6 +175,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
 	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 	sbitmap_queue_free(&wq->sbq);
 }
+EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, IDXD);
 
 int idxd_wq_enable(struct idxd_wq *wq)
 {
@@ -422,6 +424,7 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
 	reinit_completion(&wq->wq_resurrect);
 	return 0;
 }
+EXPORT_SYMBOL_NS_GPL(idxd_wq_init_percpu_ref, IDXD);
 
 void __idxd_wq_quiesce(struct idxd_wq *wq)
 {
@@ -431,6 +434,7 @@ void __idxd_wq_quiesce(struct idxd_wq *wq)
 	complete_all(&wq->wq_resurrect);
 	wait_for_completion(&wq->wq_dead);
 }
+EXPORT_SYMBOL_NS_GPL(__idxd_wq_quiesce, IDXD);
 
 void idxd_wq_quiesce(struct idxd_wq *wq)
 {
@@ -438,6 +442,7 @@ void idxd_wq_quiesce(struct idxd_wq *wq)
 	__idxd_wq_quiesce(wq);
 	mutex_unlock(&wq->wq_lock);
 }
+EXPORT_SYMBOL_NS_GPL(idxd_wq_quiesce, IDXD);
 
 /* Device control bits */
 static inline bool idxd_is_enabled(struct idxd_device *idxd)