On 11/9/22 11:41, Asutosh Das wrote:
> +static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
> +{
> + int i;
> + u32 hba_maxq, rem, tot_queues;
> + struct Scsi_Host *host = hba->host;
> +
> + hba_maxq = FIELD_GET(GENMASK(7, 0), hba->mcq_capabilities);
> +
> + if (!rw_queues)
> + rw_queues = num_possible_cpus();
> +
> + tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
> + rw_queues;
> +
> + if (hba_maxq < tot_queues) {
> + dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
> + tot_queues, hba_maxq);
> + return -EOPNOTSUPP;
> + }
This function can fail with the default kernel module parameter values,
e.g. if num_possible_cpus() == 8 and hba_maxq == 4: tot_queues becomes
8 rw + 0 read + 1 poll + 1 device command = 10 and exceeds the controller
capacity. That is not acceptable. A better solution is required, e.g.
reducing rw_queues if the user has not set that kernel module parameter.
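Something along these lines (untested, reusing the names from this patch)
would keep the default configuration working by clamping rw_queues instead
of failing:

	if (!rw_queues) {
		/* Shrink the default so the total fits the controller. */
		rw_queues = min_t(unsigned int, num_possible_cpus(),
				  hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES -
				  read_queues - poll_queues);
	}

With that change -EOPNOTSUPP would only be returned when the user has
explicitly requested more queues than the host controller supports.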
Thanks,
Bart.
--- a/drivers/ufs/core/Makefile
+++ b/drivers/ufs/core/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
new file mode 100644
--- /dev/null
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
+ *
+ * Authors:
+ * Asutosh Das <quic_asutoshd@quicinc.com>
+ * Can Guo <quic_cang@quicinc.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "ufshcd-priv.h"
+
+#define UFS_MCQ_MIN_RW_QUEUES 2
+#define UFS_MCQ_MIN_READ_QUEUES 0
+#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
+#define UFS_MCQ_MIN_POLL_QUEUES 0
+
+static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops rw_queue_count_ops = {
+ .set = rw_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int rw_queues;
+module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
+MODULE_PARM_DESC(rw_queues,
+ "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");
+
+static int read_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops read_queue_count_ops = {
+ .set = read_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int read_queues;
+module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
+MODULE_PARM_DESC(read_queues,
+ "Number of interrupt driven read queues used for read. Default value is 0");
+
+static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops poll_queue_count_ops = {
+ .set = poll_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int poll_queues = 1;
+module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
+MODULE_PARM_DESC(poll_queues,
+ "Number of poll queues used for r/w. Default value is 1");
+
+static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
+{
+ int i;
+ u32 hba_maxq, rem, tot_queues;
+ struct Scsi_Host *host = hba->host;
+
+ hba_maxq = FIELD_GET(GENMASK(7, 0), hba->mcq_capabilities);
+
+ if (!rw_queues)
+ rw_queues = num_possible_cpus();
+
+ tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
+ rw_queues;
+
+ if (hba_maxq < tot_queues) {
+ dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
+ tot_queues, hba_maxq);
+ return -EOPNOTSUPP;
+ }
+
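+	/*
+	 * One queue is reserved for device management commands; distribute
+	 * the rest among the queue types, giving read/write queues priority
+	 * over poll queues and poll queues priority over read-only queues.
+	 */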
+ rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES;
+ hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
+ num_possible_cpus());
+ rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
+ hba->nr_queues[HCTX_TYPE_POLL] = min(rem, poll_queues);
+ rem -= hba->nr_queues[HCTX_TYPE_POLL];
+ hba->nr_queues[HCTX_TYPE_READ] = min(rem, read_queues);
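+	/*
+	 * Worked example with hypothetical numbers (at least 4 CPUs):
+	 * hba_maxq = 8, rw_queues = 4, poll_queues = 1 and read_queues = 0
+	 * yield DEFAULT = 4, POLL = 1 and READ = 0, so host->nr_hw_queues
+	 * below sums to 5 and hba->nr_hw_queues to 6 including the device
+	 * command queue.
+	 */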
+
+ for (i = 0; i < HCTX_MAX_TYPES; i++)
+ host->nr_hw_queues += hba->nr_queues[i];
+
+ hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES;
+ return 0;
+}
+
+int ufshcd_mcq_init(struct ufs_hba *hba)
+{
+ int ret;
+
+ ret = ufshcd_mcq_config_nr_queues(hba);
+
+ return ret;
+}
+
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -61,6 +61,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
+int ufshcd_mcq_init(struct ufs_hba *hba);
#define SD_ASCII_STD true
#define SD_RAW false
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -8196,6 +8196,11 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
return ret;
}
+static int ufshcd_alloc_mcq(struct ufs_hba *hba)
+{
+ return ufshcd_mcq_init(hba);
+}
+
/**
* ufshcd_probe_hba - probe hba to detect device and initialize it
* @hba: per-adapter instance
@@ -8245,6 +8250,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
goto out;
if (is_mcq_supported(hba)) {
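+		/*
+		 * Configure the queue counts before scsi_add_host() so that
+		 * the SCSI midlayer sees the final nr_hw_queues value when it
+		 * allocates the blk-mq tag set.
+		 */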
+ ret = ufshcd_alloc_mcq(hba);
+ if (ret)
+ goto out;
ret = scsi_add_host(host, hba->dev);
if (ret) {
dev_err(hba->dev, "scsi_add_host failed\n");
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -833,6 +833,8 @@ struct ufs_hba_monitor {
* ufshcd_resume_complete()
 * @ext_iid_sup: is EXT_IID supported by UFSHC
* @mcq_sup: is mcq supported by UFSHC
+ * @nr_hw_queues: number of hardware queues configured
+ * @nr_queues: number of queues per queue type
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -984,6 +986,8 @@ struct ufs_hba {
bool complete_put;
bool ext_iid_sup;
bool mcq_sup;
+ unsigned int nr_hw_queues;
+ unsigned int nr_queues[HCTX_MAX_TYPES];
};
/* Returns true if clocks can be gated. Otherwise false */