On 11/22/22 20:10, Asutosh Das wrote:
> +/* Resources */
> +static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
> + {.name = "ufs_mem",},
> + {.name = "mcq",},
> + /* Submission Queue DAO */
> + {.name = "mcq_sqd",},
> + /* Submission Queue Interrupt Status */
> + {.name = "mcq_sqis",},
> + /* Completion Queue DAO */
> + {.name = "mcq_cqd",},
> + /* Completion Queue Interrupt Status */
> + {.name = "mcq_cqis",},
> + /* MCQ vendor specific */
> + {.name = "mcq_vs",},
> +};
The names to associate with device tree nodes for the UFS MCQ
resources have not been standardized. Additionally, the UFS driver is
used on platforms that do not support the device tree (e.g. Intel
x86 platforms). So I don't think that the platform_get_resource_byname()
calls should occur in the UFS driver core. How about moving the code
that queries the device tree nodes into the Qualcomm driver until a
second user of this code is added? If a second user appears, this code
could e.g. become a kernel module shared by both UFS host controller
drivers.
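Something along these lines (untested sketch, just to illustrate the
direction; ufs_qcom_mcq_config_resource() is a made-up name, and it
reuses the hba->res[] array and RES_* enum introduced by this patch)
could live in drivers/ufs/host/ufs-qcom.c and be called from
ufs_qcom_init():

#include <linux/io.h>
#include <linux/platform_device.h>
#include <ufs/ufshcd.h>

/* Hypothetical: DT lookups done in the Qualcomm glue driver. */
static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
{
	struct platform_device *pdev = to_platform_device(hba->dev);
	/* Names below match the proposed (not yet standardized) bindings. */
	static const char * const res_names[RES_MAX] = {
		[RES_UFS]      = "ufs_mem",
		[RES_MCQ]      = "mcq",
		[RES_MCQ_SQD]  = "mcq_sqd",
		[RES_MCQ_SQIS] = "mcq_sqis",
		[RES_MCQ_CQD]  = "mcq_cqd",
		[RES_MCQ_CQIS] = "mcq_cqis",
		[RES_MCQ_VS]   = "mcq_vs",
	};
	struct ufshcd_res_info *res;
	int i, ret;

	for (i = 0; i < RES_MAX; i++) {
		res = &hba->res[i];
		res->name = res_names[i];
		res->resource = platform_get_resource_byname(pdev,
							     IORESOURCE_MEM,
							     res->name);
		if (!res->resource) {
			dev_info(hba->dev, "Resource %s not provided\n",
				 res->name);
			if (i == RES_UFS)
				return -ENODEV;
			continue;
		}
		if (i == RES_UFS) {
			/* Main register space is already mapped by the core. */
			res->base = hba->mmio_base;
			continue;
		}
		res->base = devm_ioremap_resource(hba->dev, res->resource);
		if (IS_ERR(res->base)) {
			ret = PTR_ERR(res->base);
			res->base = NULL;
			return ret;
		}
	}
	return 0;
}

The core would then only consume hba->res[...].base, and how the regions
are described (DT, ACPI, ...) would remain a decision of the platform
glue driver.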
Thanks,
Bart.
@@ -18,6 +18,11 @@
#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
#define UFS_MCQ_MIN_POLL_QUEUES 0
+#define MCQ_QCFGPTR_MASK GENMASK(7, 0)
+#define MCQ_QCFGPTR_UNIT 0x200
+#define MCQ_SQATTR_OFFSET(c) \
+ ((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
+#define MCQ_QCFG_SIZE 0x40
static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
@@ -67,6 +72,102 @@ module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues,
"Number of poll queues used for r/w. Default value is 1");
+/* Resources */
+static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
+ {.name = "ufs_mem",},
+ {.name = "mcq",},
+ /* Submission Queue DAO */
+ {.name = "mcq_sqd",},
+ /* Submission Queue Interrupt Status */
+ {.name = "mcq_sqis",},
+ /* Completion Queue DAO */
+ {.name = "mcq_cqd",},
+ /* Completion Queue Interrupt Status */
+ {.name = "mcq_cqis",},
+ /* MCQ vendor specific */
+ {.name = "mcq_vs",},
+};
+
+static int ufshcd_mcq_config_resource(struct ufs_hba *hba)
+{
+ struct platform_device *pdev = to_platform_device(hba->dev);
+ struct ufshcd_res_info *res;
+ struct resource *res_mem, *res_mcq;
+ int i, ret = 0;
+
+ memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
+
+ for (i = 0; i < RES_MAX; i++) {
+ res = &hba->res[i];
+ res->resource = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ res->name);
+ if (!res->resource) {
+ dev_info(hba->dev, "Resource %s not provided\n", res->name);
+ if (i == RES_UFS)
+ return -ENODEV;
+ continue;
+ } else if (i == RES_UFS) {
+ res_mem = res->resource;
+ res->base = hba->mmio_base;
+ continue;
+ }
+
+ res->base = devm_ioremap_resource(hba->dev, res->resource);
+ if (IS_ERR(res->base)) {
+ dev_err(hba->dev, "Failed to map res %s, err=%d\n",
+ res->name, (int)PTR_ERR(res->base));
+ ret = PTR_ERR(res->base);
+ res->base = NULL;
+ return ret;
+ }
+ }
+
+ /* MCQ resource provided in DT */
+ res = &hba->res[RES_MCQ];
+ /* Bail if MCQ resource is provided */
+ if (res->base)
+ goto out;
+
+ /* Explicitly allocate MCQ resource from ufs_mem */
+ res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
+ if (!res_mcq) {
+ dev_err(hba->dev, "Failed to allocate MCQ resource\n");
+ return -ENOMEM;
+ }
+
+ res_mcq->start = res_mem->start +
+ MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
+ res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
+ res_mcq->flags = res_mem->flags;
+ res_mcq->name = "mcq";
+
+ ret = insert_resource(&iomem_resource, res_mcq);
+ if (ret) {
+ dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
+ ret);
+ goto insert_res_err;
+ }
+
+ res->base = devm_ioremap_resource(hba->dev, res_mcq);
+ if (IS_ERR(res->base)) {
+ dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
+ (int)PTR_ERR(res->base));
+ ret = PTR_ERR(res->base);
+ goto ioremap_err;
+ }
+
+out:
+ hba->mcq_base = res->base;
+ return 0;
+ioremap_err:
+ res->base = NULL;
+ remove_resource(res_mcq);
+insert_res_err:
+ devm_kfree(hba->dev, res_mcq);
+ return ret;
+}
+
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{
int i;
@@ -119,7 +220,10 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
int ret;
ret = ufshcd_mcq_config_nr_queues(hba);
+ if (ret)
+ return ret;
+ ret = ufshcd_mcq_config_resource(hba);
return ret;
}
@@ -724,6 +724,30 @@ struct ufs_hba_monitor {
};
/**
+ * struct ufshcd_res_info - MCQ related resource regions
+ *
+ * @name: resource name
+ * @resource: pointer to resource region
+ * @base: register base address
+ */
+struct ufshcd_res_info {
+ const char *name;
+ struct resource *resource;
+ void __iomem *base;
+};
+
+enum ufshcd_res {
+ RES_UFS,
+ RES_MCQ,
+ RES_MCQ_SQD,
+ RES_MCQ_SQIS,
+ RES_MCQ_CQD,
+ RES_MCQ_CQIS,
+ RES_MCQ_VS,
+ RES_MAX,
+};
+
+/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
* @ucdl_base_addr: UFS Command Descriptor base address
@@ -835,6 +859,8 @@ struct ufs_hba_monitor {
* @mcq_sup: is mcq supported by UFSHC
* @nr_hw_queues: number of hardware queues configured
* @nr_queues: number of Queues of different queue types
+ * @res: array of resource info of MCQ registers
+ * @mcq_base: Multi circular queue registers base address
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -988,6 +1014,8 @@ struct ufs_hba {
bool mcq_sup;
unsigned int nr_hw_queues;
unsigned int nr_queues[HCTX_MAX_TYPES];
+ struct ufshcd_res_info res[RES_MAX];
+ void __iomem *mcq_base;
};
/* Returns true if clocks can be gated. Otherwise false */