Hi Asutosh,
I love your patch! Perhaps something to improve:
[auto build test WARNING on jejb-scsi/for-next]
[also build test WARNING on mkp-scsi/for-next linus/master v6.1-rc4 next-20221111]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Asutosh-Das/Add-Multi-Circular-Queue-Support/20221110-034847
base: https://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git for-next
patch link: https://lore.kernel.org/r/c1883a3cc18cc0103a6664cde7afbeef6d5424d8.1668022680.git.quic_asutoshd%40quicinc.com
patch subject: [PATCH v4 14/16] ufs: mcq: Add completion support of a cqe
config: i386-randconfig-a005
compiler: gcc-11 (Debian 11.3.0-8) 11.3.0
reproduce (this is a W=1 build):
# https://github.com/intel-lab-lkp/linux/commit/3d6ff8e043038d2fbaae0f2ca6547ae3bf457283
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Asutosh-Das/Add-Multi-Circular-Queue-Support/20221110-034847
git checkout 3d6ff8e043038d2fbaae0f2ca6547ae3bf457283
# save the config file
mkdir build_dir && cp config build_dir/.config
make W=1 O=build_dir ARCH=i386 SHELL=/bin/bash drivers/ufs/core/
If you fix the issue, kindly add the following tag where applicable:
| Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
In file included from include/linux/ratelimit_types.h:5,
from include/linux/printk.h:9,
from include/asm-generic/bug.h:22,
from arch/x86/include/asm/bug.h:87,
from include/linux/bug.h:5,
from include/linux/fortify-string.h:5,
from include/linux/string.h:253,
from include/linux/dma-mapping.h:6,
from drivers/ufs/core/ufs-mcq.c:11:
drivers/ufs/core/ufs-mcq.c: In function 'ufshcd_mcq_get_tag':
>> include/linux/bits.h:36:18: warning: right shift count is negative [-Wshift-count-negative]
36 | (~UL(0) >> (BITS_PER_LONG - 1 - (h))))
| ^~
include/linux/bits.h:38:38: note: in expansion of macro '__GENMASK'
38 | (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
| ^~~~~~~~~
drivers/ufs/core/ufs-mcq.c:31:20: note: in expansion of macro 'GENMASK'
31 | #define CQE_UCD_BA GENMASK(63, 7)
| ^~~~~~~
drivers/ufs/core/ufs-mcq.c:345:63: note: in expansion of macro 'CQE_UCD_BA'
345 | dma_addr = le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA;
| ^~~~~~~~~~
vim +36 include/linux/bits.h
295bcca84916cb Rikard Falkeborn 2020-04-06 33
295bcca84916cb Rikard Falkeborn 2020-04-06 34 #define __GENMASK(h, l) \
95b980d62d52c4 Masahiro Yamada 2019-07-16 35 (((~UL(0)) - (UL(1) << (l)) + 1) & \
95b980d62d52c4 Masahiro Yamada 2019-07-16 @36 (~UL(0) >> (BITS_PER_LONG - 1 - (h))))
295bcca84916cb Rikard Falkeborn 2020-04-06 37 #define GENMASK(h, l) \
295bcca84916cb Rikard Falkeborn 2020-04-06 38 (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
8bd9cb51daac89 Will Deacon 2018-06-19 39
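
The root cause is the ARCH=i386 randconfig: with BITS_PER_LONG == 32,
__GENMASK(63, 7) right-shifts ~UL(0) by 32 - 1 - 63 = -32, which is the
negative shift count both compilers flag. Since the mask is applied to a
le64_to_cpu() value, the likely fix is to build the mask in 64-bit
arithmetic. A minimal sketch (an assumption about the intended change,
not taken from the posted patch):

	/* drivers/ufs/core/ufs-mcq.c -- hypothetical fix sketch */
	#include <linux/bits.h>

	/*
	 * GENMASK_ULL() builds the mask as unsigned long long, so bit 63
	 * stays representable even when BITS_PER_LONG == 32.
	 */
	#define CQE_UCD_BA	GENMASK_ULL(63, 7)

With that, le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA should
no longer trip -Wshift-count-negative on 32-bit builds.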
Hi Asutosh,
I love your patch! Perhaps something to improve:
[auto build test WARNING on jejb-scsi/for-next]
[also build test WARNING on mkp-scsi/for-next linus/master v6.1-rc4]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Asutosh-Das/Add-Multi-Circular-Queue-Support/20221110-034847
base: https://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git for-next
patch link: https://lore.kernel.org/r/c1883a3cc18cc0103a6664cde7afbeef6d5424d8.1668022680.git.quic_asutoshd%40quicinc.com
patch subject: [PATCH v4 14/16] ufs: mcq: Add completion support of a cqe
config: i386-randconfig-a002
compiler: clang version 14.0.6 (https://github.com/llvm/llvm-project f28c006a5895fc0e329fe15fead81e37457cb1d1)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/intel-lab-lkp/linux/commit/3d6ff8e043038d2fbaae0f2ca6547ae3bf457283
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Asutosh-Das/Add-Multi-Circular-Queue-Support/20221110-034847
git checkout 3d6ff8e043038d2fbaae0f2ca6547ae3bf457283
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=i386 SHELL=/bin/bash drivers/ufs/core/
If you fix the issue, kindly add the following tag where applicable:
| Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
>> drivers/ufs/core/ufs-mcq.c:345:56: warning: shift count is negative [-Wshift-count-negative]
dma_addr = le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA;
^~~~~~~~~~
drivers/ufs/core/ufs-mcq.c:31:20: note: expanded from macro 'CQE_UCD_BA'
#define CQE_UCD_BA GENMASK(63, 7)
^~~~~~~~~~~~~~
include/linux/bits.h:38:31: note: expanded from macro 'GENMASK'
(GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
^~~~~~~~~~~~~~~
include/linux/bits.h:36:11: note: expanded from macro '__GENMASK'
(~UL(0) >> (BITS_PER_LONG - 1 - (h))))
^ ~~~~~~~~~~~~~~~~~~~~~~~~~
1 warning generated.
vim +345 drivers/ufs/core/ufs-mcq.c
334
335 static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
336 struct ufs_hw_queue *hwq,
337 struct cq_entry *cqe)
338 {
339 dma_addr_t dma_addr;
340
341 /* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
342 BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));
343
344 /* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
> 345 dma_addr = le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA;
346
347 return (dma_addr - hba->ucdl_dma_addr) /
348 sizeof(struct utp_transfer_cmd_desc);
349 }
350
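
For context, ufshcd_mcq_get_tag() above recovers the request tag from
the completed CQE: it masks the UCD base address out of
command_desc_base_addr and divides the offset from hba->ucdl_dma_addr
by the descriptor size (which the BUILD_BUG_ON() guarantees is a
multiple of 128, so the masked-off low 7 bits never contribute). A
standalone sketch of that arithmetic, using made-up addresses and an
assumed 1024-byte descriptor:

	#include <stdint.h>
	#include <stdio.h>

	#define UCD_SIZE	1024ULL			/* assumed sizeof(struct utp_transfer_cmd_desc) */
	#define CQE_UCD_BA	0xFFFFFFFFFFFFFF80ULL	/* bits 63:7, i.e. GENMASK_ULL(63, 7)           */

	int main(void)
	{
		uint64_t ucdl_dma_addr = 0x80000000ULL;			/* made-up UCD list base      */
		uint64_t cqe_field = ucdl_dma_addr + 3 * UCD_SIZE + 2;	/* tag 3, SQ ID 2 in bits 4:0 */
		uint64_t dma_addr = cqe_field & CQE_UCD_BA;		/* strip SQ ID/reserved bits  */
		unsigned int tag = (dma_addr - ucdl_dma_addr) / UCD_SIZE;

		printf("tag = %u\n", tag);	/* prints: tag = 3 */
		return 0;
	}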
@@ -28,6 +28,7 @@
((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
#define MCQ_QCFG_SIZE 0x40
#define MCQ_ENTRY_SIZE_IN_DWORD 8
+#define CQE_UCD_BA GENMASK(63, 7)
static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
@@ -321,6 +322,59 @@ static void __iomem *mcq_opr_base(struct ufs_hba *hba,
return opr->base + opr->stride * i;
}
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
+{
+ return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
+}
+
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
+{
+ writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
+}
+
+static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq,
+ struct cq_entry *cqe)
+{
+ dma_addr_t dma_addr;
+
+ /* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
+ BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));
+
+ /* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
+ dma_addr = le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA;
+
+ return (dma_addr - hba->ucdl_dma_addr) /
+ sizeof(struct utp_transfer_cmd_desc);
+}
+
+static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
+ int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);
+
+ ufshcd_compl_one_cqe(hba, tag, cqe);
+}
+
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ unsigned long completed_reqs = 0;
+
+ ufshcd_mcq_update_cq_tail_slot(hwq);
+ while (!ufshcd_mcq_is_cq_empty(hwq)) {
+ ufshcd_mcq_process_cqe(hba, hwq);
+ ufshcd_mcq_inc_cq_head_slot(hwq);
+ completed_reqs++;
+ }
+
+ if (completed_reqs)
+ ufshcd_mcq_update_cq_head(hwq);
+
+ return completed_reqs;
+}
+
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
struct ufs_hw_queue *hwq;
@@ -69,6 +69,10 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba);
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
struct request *req);
@@ -253,6 +257,15 @@ static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
+static inline int ufshcd_vops_get_outstanding_cqs(struct ufs_hba *hba,
+ unsigned long *ocqs)
+{
+ if (hba->vops && hba->vops->get_outstanding_cqs)
+ return hba->vops->get_outstanding_cqs(hba, ocqs);
+
+ return -EOPNOTSUPP;
+}
+
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
@@ -339,4 +352,34 @@ static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
writel(val, q->mcq_sq_tail);
}
+static inline void ufshcd_mcq_update_cq_tail_slot(struct ufs_hw_queue *q)
+{
+ u32 val = readl(q->mcq_cq_tail);
+
+ q->cq_tail_slot = val / sizeof(struct cq_entry);
+}
+
+static inline bool ufshcd_mcq_is_cq_empty(struct ufs_hw_queue *q)
+{
+ return q->cq_head_slot == q->cq_tail_slot;
+}
+
+static inline void ufshcd_mcq_inc_cq_head_slot(struct ufs_hw_queue *q)
+{
+ q->cq_head_slot++;
+ if (q->cq_head_slot == q->max_entries)
+ q->cq_head_slot = 0;
+}
+
+static inline void ufshcd_mcq_update_cq_head(struct ufs_hw_queue *q)
+{
+ writel(q->cq_head_slot * sizeof(struct cq_entry), q->mcq_cq_head);
+}
+
+static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
+{
+ struct cq_entry *cqe = q->cqe_base_addr;
+
+ return cqe + q->cq_head_slot;
+}
#endif /* _UFSHCD_PRIV_H_ */
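
As an aside, these inline helpers treat the CQ head/tail doorbell
registers as byte offsets and convert them to slot indices: the queue is
empty when head == tail, and the head wraps at max_entries. A standalone
sketch of that bookkeeping (entry size and queue depth are assumptions,
not values read from the driver):

	#include <stdint.h>
	#include <stdio.h>

	#define CQ_ENTRY_SIZE	32U	/* assumed sizeof(struct cq_entry): 8 dwords */
	#define MAX_ENTRIES	32U	/* assumed queue depth                       */

	int main(void)
	{
		uint32_t tail_reg = 3 * CQ_ENTRY_SIZE;		/* pretend readl(q->mcq_cq_tail) */
		uint32_t cq_tail_slot = tail_reg / CQ_ENTRY_SIZE;
		uint32_t cq_head_slot = 0;
		unsigned long completed_reqs = 0;

		while (cq_head_slot != cq_tail_slot) {		/* "CQ is not empty" */
			/* a real poll loop would process cqe_base_addr[cq_head_slot] here */
			if (++cq_head_slot == MAX_ENTRIES)
				cq_head_slot = 0;
			completed_reqs++;
		}

		/* the driver would now writel(cq_head_slot * CQ_ENTRY_SIZE, q->mcq_cq_head) */
		printf("completed %lu, head slot %u\n", completed_reqs, cq_head_slot);
		return 0;
	}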
@@ -6672,6 +6672,40 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
}
/**
+ * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
+ * @hba: per adapter instance
+ *
+ * Returns IRQ_HANDLED if interrupt is handled
+ */
+static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ unsigned long outstanding_cqs;
+ unsigned int nr_queues;
+ int i, ret;
+ u32 events;
+
+ ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
+ if (ret)
+ outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
+
+ /* Exclude the poll queues */
+ nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+ for_each_set_bit(i, &outstanding_cqs, nr_queues) {
+ hwq = &hba->uhq[i];
+
+ events = ufshcd_mcq_read_cqis(hba, i);
+ if (events)
+ ufshcd_mcq_write_cqis(hba, events, i);
+
+ if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
+ ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
@@ -6696,6 +6730,9 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
if (intr_status & UTP_TRANSFER_REQ_COMPL)
retval |= ufshcd_transfer_req_compl(hba);
+ if (intr_status & MCQ_CQ_EVENT_STATUS)
+ retval |= ufshcd_handle_mcq_cq_events(hba);
+
return retval;
}
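
One more note on ufshcd_handle_mcq_cq_events() above: the vendor op
reports which CQs have pending events as a bitmap, and when the op is
not implemented the fallback sets a bit per hardware queue so every
non-poll queue is scanned. A tiny worked example of the
for_each_set_bit() walk (values are made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned long outstanding_cqs = 0x5UL;	/* pretend CQ0 and CQ2 raised events */
		unsigned int nr_queues = 3;		/* hw queues minus the poll queues   */

		/* open-coded equivalent of for_each_set_bit(i, &outstanding_cqs, nr_queues) */
		for (unsigned int i = 0; i < nr_queues; i++)
			if (outstanding_cqs & (1UL << i))
				printf("read/ack CQIS and poll CQ %u\n", i);	/* CQ 0 and CQ 2 */

		return 0;
	}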
@@ -1454,6 +1454,21 @@ static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
return MAX_SUPP_MAC;
}
+static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
+ unsigned long *ocqs)
+{
+ u32 cqis_vs;
+ struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
+
+ if (!mcq_vs_res->base)
+ return -EINVAL;
+
+ cqis_vs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
+ *ocqs = cqis_vs;
+
+ return 0;
+}
+
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1479,6 +1494,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.program_key = ufs_qcom_ice_program_key,
.get_hba_mac = ufs_qcom_get_hba_mac,
.op_runtime_config = ufs_qcom_op_runtime_config,
+ .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
};
/**
@@ -72,6 +72,10 @@ enum {
UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
};
+enum {
+ UFS_MEM_CQIS_VS = 0x8,
+};
+
#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x)
#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
@@ -300,6 +300,7 @@ struct ufs_pwr_mode_info {
* @event_notify: called to notify important events
* @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode
* @op_runtime_config: called to config Operation and runtime regs Pointers
+ * @get_outstanding_cqs: called to get outstanding completion queues
*/
struct ufs_hba_variant_ops {
const char *name;
@@ -340,6 +341,8 @@ struct ufs_hba_variant_ops {
enum ufs_event_type evt, void *data);
int (*get_hba_mac)(struct ufs_hba *hba);
int (*op_runtime_config)(struct ufs_hba *hba);
+ int (*get_outstanding_cqs)(struct ufs_hba *hba,
+ unsigned long *ocqs);
};
/* clock gating state */
@@ -1064,6 +1067,8 @@ struct ufs_hba {
* @id: hardware queue ID
* @sq_tp_slot: current slot to which SQ tail pointer is pointing
* @sq_lock: serialize submission queue access
+ * @cq_tail_slot: current slot to which CQ tail pointer is pointing
+ * @cq_head_slot: current slot to which CQ head pointer is pointing
*/
struct ufs_hw_queue {
void __iomem *mcq_sq_head;
@@ -1079,6 +1084,8 @@ struct ufs_hw_queue {
u32 id;
u32 sq_tail_slot;
spinlock_t sq_lock;
+ u32 cq_tail_slot;
+ u32 cq_head_slot;
};
static inline bool is_mcq_enabled(struct ufs_hba *hba)
@@ -262,6 +262,9 @@ enum {
/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
#define UTP_TASK_REQ_LIST_RUN_STOP_BIT 0x1
+/* CQISy - CQ y Interrupt Status Register */
+#define UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS 0x1
+
/* UICCMD - UIC Command */
#define COMMAND_OPCODE_MASK 0xFF
#define GEN_SELECTOR_INDEX_MASK 0xFFFF