[v5,08/16] ufs: core: mcq: Allocate memory for mcq mode

Message ID ba753579ac3a349ee4ab61d3b0a8f705db2a9670.1669176158.git.quic_asutoshd@quicinc.com
State New
Series [v5,01/16] ufs: core: Optimize duplicate code to read extended feature

Commit Message

Asutosh Das Nov. 23, 2022, 4:10 a.m. UTC
  The device descriptor is fetched in Single Doorbell (SDB) mode to read
bQueueDepth, so the host memory is initially sized for the SDB queue depth.
That allocation may not be enough for MCQ mode because the number of tags
supported in MCQ mode may be larger than in SDB mode.
Hence, release the memory allocated in SDB mode and allocate memory sized
for MCQ mode operation.
Also define the UFS hardware queue and the Completion Queue Entry structure.

Co-developed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Asutosh Das <quic_asutoshd@quicinc.com>
---
 drivers/ufs/core/ufs-mcq.c     | 58 ++++++++++++++++++++++++++++++++++++++++--
 drivers/ufs/core/ufshcd-priv.h |  1 +
 drivers/ufs/core/ufshcd.c      | 42 +++++++++++++++++++++++++++---
 include/ufs/ufshcd.h           | 19 ++++++++++++++
 include/ufs/ufshci.h           | 22 ++++++++++++++++
 5 files changed, 137 insertions(+), 5 deletions(-)
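
For orientation, a condensed view of the resulting ufshcd_alloc_mcq() flow, paraphrased from the diff below with error handling omitted:

	old_nutrs = hba->nutrs;
	hba->nutrs = ret;                             /* MCQ queue depth decided earlier in the function */
	ufshcd_mcq_init(hba);                         /* allocate hba->uhq[] and set per-queue max_entries */
	if (hba->nutrs != old_nutrs) {
		ufshcd_release_sdb_queue(hba, old_nutrs); /* free the SDB-sized ucdl/utrdl/lrb */
		ufshcd_memory_alloc(hba);                 /* re-allocate them for the MCQ queue depth */
		ufshcd_host_memory_configure(hba);
	}
	ufshcd_mcq_memory_alloc(hba);                 /* per-queue SQE and CQE rings */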
  

Comments

Bart Van Assche Nov. 26, 2022, 1:20 a.m. UTC | #1
On 11/22/22 20:10, Asutosh Das wrote:
> To read the bqueuedepth, the device descriptor is fetched
> in Single Doorbell Mode. This allocated memory may not be
> enough for MCQ mode because the number of tags supported
> in MCQ mode may be larger than in SDB mode.
> Hence, release the memory allocated in SDB mode and allocate
> memory for MCQ mode operation.
> Define the ufs hardware queue and Completion Queue Entry.

Reviewed-by: Bart Van Assche <bvanassche@acm.org>
  
Manivannan Sadhasivam Nov. 28, 2022, 3:48 p.m. UTC | #2
On Tue, Nov 22, 2022 at 08:10:21PM -0800, Asutosh Das wrote:
> To read the bqueuedepth, the device descriptor is fetched
> in Single Doorbell Mode. This allocated memory may not be
> enough for MCQ mode because the number of tags supported
> in MCQ mode may be larger than in SDB mode.
> Hence, release the memory allocated in SDB mode and allocate
> memory for MCQ mode operation.
> Define the ufs hardware queue and Completion Queue Entry.
> 
> Co-developed-by: Can Guo <quic_cang@quicinc.com>
> Signed-off-by: Can Guo <quic_cang@quicinc.com>
> Signed-off-by: Asutosh Das <quic_asutoshd@quicinc.com>
> ---
>  drivers/ufs/core/ufs-mcq.c     | 58 ++++++++++++++++++++++++++++++++++++++++--
>  drivers/ufs/core/ufshcd-priv.h |  1 +
>  drivers/ufs/core/ufshcd.c      | 42 +++++++++++++++++++++++++++---
>  include/ufs/ufshcd.h           | 19 ++++++++++++++
>  include/ufs/ufshci.h           | 22 ++++++++++++++++
>  5 files changed, 137 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
> index e95f748..51f0e40 100644
> --- a/drivers/ufs/core/ufs-mcq.c
> +++ b/drivers/ufs/core/ufs-mcq.c
> @@ -247,15 +247,69 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
>  	return 0;
>  }
>  
> +int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
> +{
> +	struct ufs_hw_queue *hwq;
> +	size_t utrdl_size, cqe_size;
> +	int i;
> +
> +	for (i = 0; i < hba->nr_hw_queues; i++) {
> +		hwq = &hba->uhq[i];
> +
> +		utrdl_size = sizeof(struct utp_transfer_req_desc) *
> +			     hwq->max_entries;
> +		hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
> +							 &hwq->sqe_dma_addr,
> +							 GFP_KERNEL);
> +		if (!hwq->sqe_dma_addr) {
> +			dev_err(hba->dev, "SQE allocation failed\n");
> +			return -ENOMEM;
> +		}
> +
> +		cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
> +		hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
> +							 &hwq->cqe_dma_addr,
> +							 GFP_KERNEL);
> +		if (!hwq->cqe_dma_addr) {
> +			dev_err(hba->dev, "CQE allocation failed\n");
> +			return -ENOMEM;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
>  int ufshcd_mcq_init(struct ufs_hba *hba)
>  {
> -	int ret;
> +	struct ufs_hw_queue *hwq;
> +	int ret, i;
>  
>  	ret = ufshcd_mcq_config_nr_queues(hba);
>  	if (ret)
>  		return ret;
>  
>  	ret = ufshcd_mcq_config_resource(hba);
> -	return ret;
> +	if (ret)
> +		return ret;
> +
> +	hba->uhq = devm_kzalloc(hba->dev,
> +				hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
> +				GFP_KERNEL);
> +	if (!hba->uhq) {
> +		dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
> +		return -ENOMEM;
> +	}
> +
> +	for (i = 0; i < hba->nr_hw_queues; i++) {
> +		hwq = &hba->uhq[i];
> +		hwq->max_entries = hba->nutrs;
> +	}
> +
> +	/* The very first HW queue serves device commands */
> +	hba->dev_cmd_queue = &hba->uhq[0];
> +	/* Give dev_cmd_queue the minimal number of entries */
> +	hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
> +
> +	return 0;
>  }
>  
> diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
> index 9f40fa5..4d2bde2 100644
> --- a/drivers/ufs/core/ufshcd-priv.h
> +++ b/drivers/ufs/core/ufshcd-priv.h
> @@ -63,6 +63,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
>  void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
>  int ufshcd_mcq_init(struct ufs_hba *hba);
>  int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
> +int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
>  
>  #define SD_ASCII_STD true
>  #define SD_RAW false
> diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
> index ae065da..45686e8 100644
> --- a/drivers/ufs/core/ufshcd.c
> +++ b/drivers/ufs/core/ufshcd.c
> @@ -3740,6 +3740,12 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
>  	}
>  
>  	/*
> +	 * Not freed if MCQ is configured see ufshcd_release_sdb_queue() and
> +	 * ufshcd_config_mcq()

The comment is vague. Use something like,

"Skip allocating memory for utmrdl if it has been allocated during the first
pass (i.e., prior to MCQ enablement)"
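
Applied to the hunk above, that would read roughly as:

	/*
	 * Skip allocating memory for utmrdl if it has already been allocated
	 * during the first pass, i.e. prior to MCQ enablement.
	 */
	if (hba->utmrdl_base_addr)
		goto skip_utmrdl;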

> +	 */
> +	if (hba->utmrdl_base_addr)
> +		goto skip_utmrdl;
> +	/*
>  	 * Allocate memory for UTP Task Management descriptors
>  	 * UFSHCI requires 1024 byte alignment of UTMRD
>  	 */
> @@ -3755,6 +3761,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
>  		goto out;
>  	}
>  
> +skip_utmrdl:
>  	/* Allocate memory for local reference block */
>  	hba->lrb = devm_kcalloc(hba->dev,
>  				hba->nutrs, sizeof(struct ufshcd_lrb),
> @@ -8197,6 +8204,22 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
>  	return ret;
>  }
>  
> +/* SDB - Single Doorbell */
> +static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
> +{
> +	size_t ucdl_size, utrdl_size;
> +
> +	ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
> +	dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
> +			   hba->ucdl_dma_addr);
> +
> +	utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
> +	dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
> +			   hba->utrdl_dma_addr);
> +
> +	devm_kfree(hba->dev, hba->lrb);
> +}
> +
>  static int ufshcd_alloc_mcq(struct ufs_hba *hba)
>  {
>  	int ret;
> @@ -8208,12 +8231,25 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
>  
>  	hba->nutrs = ret;
>  	ret = ufshcd_mcq_init(hba);
> -	if (ret) {
> -		hba->nutrs = old_nutrs;
> -		return ret;
> +	if (ret)
> +		goto err;
> +

A comment should be added here on why the allocation happens again, even though
the reason is given in the commit description. That helps when reading the code
later.
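
For example (exact wording is up to the author), something like this above the
hba->nutrs != old_nutrs check:

	/*
	 * Host memory was initially sized for the SDB queue depth. If the MCQ
	 * queue depth differs, release that memory and re-allocate it for the
	 * new hba->nutrs before setting up the MCQ queues.
	 */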

> +	if (hba->nutrs != old_nutrs) {
> +		ufshcd_release_sdb_queue(hba, old_nutrs);
> +		ret = ufshcd_memory_alloc(hba);
> +		if (ret)
> +			goto err;
> +		ufshcd_host_memory_configure(hba);
>  	}
>  
> +	ret = ufshcd_mcq_memory_alloc(hba);
> +	if (ret)
> +		goto err;
> +
>  	return 0;
> +err:
> +	hba->nutrs = old_nutrs;
> +	return ret;
>  }
>  
>  /**
> diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
> index e03b310..e478bab 100644
> --- a/include/ufs/ufshcd.h
> +++ b/include/ufs/ufshcd.h
> @@ -863,6 +863,8 @@ enum ufshcd_res {
>   * @nr_queues: number of Queues of different queue types
>   * @res: array of resource info of MCQ registers
>   * @mcq_base: Multi circular queue registers base address
> + * @uhq: array of supported hardware queues
> + * @dev_cmd_queue: Queue for issuing device management commands
>   */
>  struct ufs_hba {
>  	void __iomem *mmio_base;
> @@ -1018,6 +1020,23 @@ struct ufs_hba {
>  	unsigned int nr_queues[HCTX_MAX_TYPES];
>  	struct ufshcd_res_info res[RES_MAX];
>  	void __iomem *mcq_base;
> +	struct ufs_hw_queue *uhq;
> +	struct ufs_hw_queue *dev_cmd_queue;
> +};
> +
> +/**

Kernel-doc requires a description of the struct itself.
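
i.e. the kernel-doc header should name the struct, something along the lines of
(description wording is illustrative):

/**
 * struct ufs_hw_queue - per hardware queue structure
 * @sqe_base_addr: submission queue entry base address
 * ...
 */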

> + * @sqe_base_addr: submission queue entry base address
> + * @sqe_dma_addr: submission queue dma address
> + * @cqe_base_addr: completion queue base address
> + * @cqe_dma_addr: completion queue dma address
> + * @max_entries: max number of slots in this hardware queue
> + */
> +struct ufs_hw_queue {
> +	void *sqe_base_addr;
> +	dma_addr_t sqe_dma_addr;
> +	struct cq_entry *cqe_base_addr;
> +	dma_addr_t cqe_dma_addr;
> +	u32 max_entries;
>  };
>  
>  /* Returns true if clocks can be gated. Otherwise false */
> diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
> index 67fcebd..1aae5b2 100644
> --- a/include/ufs/ufshci.h
> +++ b/include/ufs/ufshci.h
> @@ -486,6 +486,28 @@ struct utp_transfer_req_desc {
>  	__le16  prd_table_offset;
>  };
>  
> +/* MCQ Completion Queue Entry */
> +struct cq_entry {
> +	/* DW 0-1 */
> +	__le64 command_desc_base_addr;
> +
> +	/* DW 2 */
> +	__le16  response_upiu_length;
> +	__le16  response_upiu_offset;
> +
> +	/* DW 3 */
> +	__le16  prd_table_length;
> +	__le16  prd_table_offset;
> +
> +	/* DW 4 */
> +	__le32 status;
> +
> +	/* DW 5-7 */
> +	u32 reserved[3];

It'd be good to use __le32 for the sake of uniformity, even though these 3
DWORDs are reserved.

Thanks,
Mani

> +};
> +
> +static_assert(sizeof(struct cq_entry) == 32);
> +
>  /*
>   * UTMRD structure.
>   */
> -- 
> 2.7.4
>
  

Patch

diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index e95f748..51f0e40 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -247,15 +247,69 @@  static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
 	return 0;
 }
 
+int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
+{
+	struct ufs_hw_queue *hwq;
+	size_t utrdl_size, cqe_size;
+	int i;
+
+	for (i = 0; i < hba->nr_hw_queues; i++) {
+		hwq = &hba->uhq[i];
+
+		utrdl_size = sizeof(struct utp_transfer_req_desc) *
+			     hwq->max_entries;
+		hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
+							 &hwq->sqe_dma_addr,
+							 GFP_KERNEL);
+		if (!hwq->sqe_dma_addr) {
+			dev_err(hba->dev, "SQE allocation failed\n");
+			return -ENOMEM;
+		}
+
+		cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
+		hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
+							 &hwq->cqe_dma_addr,
+							 GFP_KERNEL);
+		if (!hwq->cqe_dma_addr) {
+			dev_err(hba->dev, "CQE allocation failed\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
 int ufshcd_mcq_init(struct ufs_hba *hba)
 {
-	int ret;
+	struct ufs_hw_queue *hwq;
+	int ret, i;
 
 	ret = ufshcd_mcq_config_nr_queues(hba);
 	if (ret)
 		return ret;
 
 	ret = ufshcd_mcq_config_resource(hba);
-	return ret;
+	if (ret)
+		return ret;
+
+	hba->uhq = devm_kzalloc(hba->dev,
+				hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
+				GFP_KERNEL);
+	if (!hba->uhq) {
+		dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < hba->nr_hw_queues; i++) {
+		hwq = &hba->uhq[i];
+		hwq->max_entries = hba->nutrs;
+	}
+
+	/* The very first HW queue serves device commands */
+	hba->dev_cmd_queue = &hba->uhq[0];
+	/* Give dev_cmd_queue the minimal number of entries */
+	hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
+
+	return 0;
 }
 
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 9f40fa5..4d2bde2 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -63,6 +63,7 @@  int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
 int ufshcd_mcq_init(struct ufs_hba *hba);
 int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
+int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
 
 #define SD_ASCII_STD true
 #define SD_RAW false
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index ae065da..45686e8 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -3740,6 +3740,12 @@  static int ufshcd_memory_alloc(struct ufs_hba *hba)
 	}
 
 	/*
+	 * Not freed if MCQ is configured see ufshcd_release_sdb_queue() and
+	 * ufshcd_config_mcq()
+	 */
+	if (hba->utmrdl_base_addr)
+		goto skip_utmrdl;
+	/*
 	 * Allocate memory for UTP Task Management descriptors
 	 * UFSHCI requires 1024 byte alignment of UTMRD
 	 */
@@ -3755,6 +3761,7 @@  static int ufshcd_memory_alloc(struct ufs_hba *hba)
 		goto out;
 	}
 
+skip_utmrdl:
 	/* Allocate memory for local reference block */
 	hba->lrb = devm_kcalloc(hba->dev,
 				hba->nutrs, sizeof(struct ufshcd_lrb),
@@ -8197,6 +8204,22 @@  static int ufshcd_add_lus(struct ufs_hba *hba)
 	return ret;
 }
 
+/* SDB - Single Doorbell */
+static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
+{
+	size_t ucdl_size, utrdl_size;
+
+	ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
+	dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
+			   hba->ucdl_dma_addr);
+
+	utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
+	dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
+			   hba->utrdl_dma_addr);
+
+	devm_kfree(hba->dev, hba->lrb);
+}
+
 static int ufshcd_alloc_mcq(struct ufs_hba *hba)
 {
 	int ret;
@@ -8208,12 +8231,25 @@  static int ufshcd_alloc_mcq(struct ufs_hba *hba)
 
 	hba->nutrs = ret;
 	ret = ufshcd_mcq_init(hba);
-	if (ret) {
-		hba->nutrs = old_nutrs;
-		return ret;
+	if (ret)
+		goto err;
+
+	if (hba->nutrs != old_nutrs) {
+		ufshcd_release_sdb_queue(hba, old_nutrs);
+		ret = ufshcd_memory_alloc(hba);
+		if (ret)
+			goto err;
+		ufshcd_host_memory_configure(hba);
 	}
 
+	ret = ufshcd_mcq_memory_alloc(hba);
+	if (ret)
+		goto err;
+
 	return 0;
+err:
+	hba->nutrs = old_nutrs;
+	return ret;
 }
 
 /**
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index e03b310..e478bab 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -863,6 +863,8 @@  enum ufshcd_res {
  * @nr_queues: number of Queues of different queue types
  * @res: array of resource info of MCQ registers
  * @mcq_base: Multi circular queue registers base address
+ * @uhq: array of supported hardware queues
+ * @dev_cmd_queue: Queue for issuing device management commands
  */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -1018,6 +1020,23 @@  struct ufs_hba {
 	unsigned int nr_queues[HCTX_MAX_TYPES];
 	struct ufshcd_res_info res[RES_MAX];
 	void __iomem *mcq_base;
+	struct ufs_hw_queue *uhq;
+	struct ufs_hw_queue *dev_cmd_queue;
+};
+
+/**
+ * @sqe_base_addr: submission queue entry base address
+ * @sqe_dma_addr: submission queue dma address
+ * @cqe_base_addr: completion queue base address
+ * @cqe_dma_addr: completion queue dma address
+ * @max_entries: max number of slots in this hardware queue
+ */
+struct ufs_hw_queue {
+	void *sqe_base_addr;
+	dma_addr_t sqe_dma_addr;
+	struct cq_entry *cqe_base_addr;
+	dma_addr_t cqe_dma_addr;
+	u32 max_entries;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index 67fcebd..1aae5b2 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -486,6 +486,28 @@  struct utp_transfer_req_desc {
 	__le16  prd_table_offset;
 };
 
+/* MCQ Completion Queue Entry */
+struct cq_entry {
+	/* DW 0-1 */
+	__le64 command_desc_base_addr;
+
+	/* DW 2 */
+	__le16  response_upiu_length;
+	__le16  response_upiu_offset;
+
+	/* DW 3 */
+	__le16  prd_table_length;
+	__le16  prd_table_offset;
+
+	/* DW 4 */
+	__le32 status;
+
+	/* DW 5-7 */
+	u32 reserved[3];
+};
+
+static_assert(sizeof(struct cq_entry) == 32);
+
 /*
  * UTMRD structure.
  */