[v5,15/16] ufs: core: mcq: Add completion support in poll
Commit Message
Complete CQE requests in poll. The assumption is that
several poll completions may happen on different CPUs
for the same completion queue, hence spin lock
protection is added.
Co-developed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Asutosh Das <quic_asutoshd@quicinc.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
---
drivers/ufs/core/ufs-mcq.c | 13 +++++++++++++
drivers/ufs/core/ufshcd-priv.h | 2 ++
drivers/ufs/core/ufshcd.c | 7 +++++++
include/ufs/ufshcd.h | 2 ++
4 files changed, 24 insertions(+)
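
For reference, a condensed sketch of the call path this patch adds (function and
field names are taken from the diff quoted below; the bodies are abridged, not
the exact hunks): ufshcd_poll() detects MCQ mode, maps the blk-mq poll queue
number to a hardware queue, and defers to the locked CQ poller so that pollers
running on different CPUs serialize on the per-queue cq_lock.

	/* Abridged from the diff below: poll entry point, MCQ branch only. */
	static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
	{
		struct ufs_hba *hba = shost_priv(shost);
		struct ufs_hw_queue *hwq;

		if (is_mcq_enabled(hba)) {
			/* Queue 0 serves device commands, so offset the poll queue. */
			hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
			return ufshcd_mcq_poll_cqe_lock(hba, hwq);
		}

		/* Legacy doorbell-based path elided in this sketch. */
		return 0;
	}

	/*
	 * Serialize pollers that may run on different CPUs against the same
	 * completion queue, then reuse the existing lock-free CQ walker.
	 */
	unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
					       struct ufs_hw_queue *hwq)
	{
		unsigned long completed_reqs;

		spin_lock(&hwq->cq_lock);
		completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
		spin_unlock(&hwq->cq_lock);

		return completed_reqs;
	}

As the second hunk shows, cq_lock itself is initialized alongside sq_lock in
ufshcd_mcq_init().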
Comments
On Tue, Nov 22, 2022 at 08:10:28PM -0800, Asutosh Das wrote:
> Complete CQE requests in poll. The assumption is that
> several poll completions may happen on different CPUs
> for the same completion queue, hence spin lock
> protection is added.
>
> Co-developed-by: Can Guo <quic_cang@quicinc.com>
> Signed-off-by: Can Guo <quic_cang@quicinc.com>
> Signed-off-by: Asutosh Das <quic_asutoshd@quicinc.com>
Reviewed-by: Manivannan Sadhasivam <mani@kernel.org>
Thanks,
Mani
> Reviewed-by: Bart Van Assche <bvanassche@acm.org>
> ---
> drivers/ufs/core/ufs-mcq.c | 13 +++++++++++++
> drivers/ufs/core/ufshcd-priv.h | 2 ++
> drivers/ufs/core/ufshcd.c | 7 +++++++
> include/ufs/ufshcd.h | 2 ++
> 4 files changed, 24 insertions(+)
>
> diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
> index 365ad98..5311857 100644
> --- a/drivers/ufs/core/ufs-mcq.c
> +++ b/drivers/ufs/core/ufs-mcq.c
> @@ -387,6 +387,18 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
> return completed_reqs;
> }
>
> +unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
> + struct ufs_hw_queue *hwq)
> +{
> + unsigned long completed_reqs;
> +
> + spin_lock(&hwq->cq_lock);
> + completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
> + spin_unlock(&hwq->cq_lock);
> +
> + return completed_reqs;
> +}
> +
> void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
> {
> struct ufs_hw_queue *hwq;
> @@ -483,6 +495,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
> hwq = &hba->uhq[i];
> hwq->max_entries = hba->nutrs;
> spin_lock_init(&hwq->sq_lock);
> + spin_lock_init(&hwq->cq_lock);
> }
>
> /* The very first HW queue serves device commands */
> diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
> index c5b5bf3..73ce8a2 100644
> --- a/drivers/ufs/core/ufshcd-priv.h
> +++ b/drivers/ufs/core/ufshcd-priv.h
> @@ -75,6 +75,8 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
> struct ufs_hw_queue *hwq);
> struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
> struct request *req);
> +unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
> + struct ufs_hw_queue *hwq);
>
> #define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
> #define SD_ASCII_STD true
> diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
> index 7fb7c5f..8416d42 100644
> --- a/drivers/ufs/core/ufshcd.c
> +++ b/drivers/ufs/core/ufshcd.c
> @@ -5453,6 +5453,13 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
> struct ufs_hba *hba = shost_priv(shost);
> unsigned long completed_reqs, flags;
> u32 tr_doorbell;
> + struct ufs_hw_queue *hwq;
> +
> + if (is_mcq_enabled(hba)) {
> + hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
> +
> + return ufshcd_mcq_poll_cqe_lock(hba, hwq);
> + }
>
> spin_lock_irqsave(&hba->outstanding_lock, flags);
> tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
> diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
> index d5fde64..a709391 100644
> --- a/include/ufs/ufshcd.h
> +++ b/include/ufs/ufshcd.h
> @@ -1069,6 +1069,7 @@ struct ufs_hba {
> * @sq_lock: serialize submission queue access
> * @cq_tail_slot: current slot to which CQ tail pointer is pointing
> * @cq_head_slot: current slot to which CQ head pointer is pointing
> + * @cq_lock: Synchronize between multiple polling instances
> */
> struct ufs_hw_queue {
> void __iomem *mcq_sq_head;
> @@ -1086,6 +1087,7 @@ struct ufs_hw_queue {
> spinlock_t sq_lock;
> u32 cq_tail_slot;
> u32 cq_head_slot;
> + spinlock_t cq_lock;
> };
>
> static inline bool is_mcq_enabled(struct ufs_hba *hba)
> --
> 2.7.4
>