[v4,1/5] mtd: rawnand: qcom: Implement exec_op()
Commit Message
Implement exec_op() so we can later get rid of the legacy interface
implementation.
Co-developed-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
---
Change in [v4]
* No change for this patch; it is reposted as part of the new
  exec_op series posting.
Change in [v3]
* Removed the NAND_CMD_STATUS check in pre_command and moved
  it to the status exec_op handler.
* Removed min(), since this check is not needed.
* Removed all the dummy exec_op APIs and added each one in the
  same patch where it is actually implemented.
* Added a qcom_check_op() API to check for features unsupported
  by the controller in the check_only path.
Change in [v2]
* Missed posting the cover letter, so posting the v2 patch with a cover letter.
Change in [v1]
* Added initial support for exec_op().
drivers/mtd/nand/raw/qcom_nandc.c | 159 ++++++++++++++++++++++++++++++
1 file changed, 159 insertions(+)
Comments
Hi Md,
kernel test robot noticed the following build warnings:
[auto build test WARNING on mtd/nand/next]
[also build test WARNING on linus/master v6.4-rc7 next-20230620]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Md-Sadre-Alam/mtd-rawnand-qcom-Add-support-for-reset-readid-status-exec_op/20230615-153448
base: https://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git nand/next
patch link: https://lore.kernel.org/r/20230615073143.25079-1-quic_mdalam%40quicinc.com
patch subject: [PATCH v4 1/5] mtd: rawnand: qcom: Implement exec_op()
config: arm-allmodconfig (https://download.01.org/0day-ci/archive/20230620/202306201734.SmmrhWYJ-lkp@intel.com/config)
compiler: arm-linux-gnueabi-gcc (GCC) 12.3.0
reproduce: (https://download.01.org/0day-ci/archive/20230620/202306201734.SmmrhWYJ-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202306201734.SmmrhWYJ-lkp@intel.com/
All warnings (new ones prefixed by >>):
drivers/mtd/nand/raw/qcom_nandc.c: In function 'qcom_parse_instructions':
>> drivers/mtd/nand/raw/qcom_nandc.c:2944:38: warning: variable 'naddrs' set but not used [-Wunused-but-set-variable]
2944 | unsigned int offset, naddrs;
| ^~~~~~
drivers/mtd/nand/raw/qcom_nandc.c: At top level:
drivers/mtd/nand/raw/qcom_nandc.c:2932:13: warning: 'qcom_parse_instructions' defined but not used [-Wunused-function]
2932 | static void qcom_parse_instructions(struct nand_chip *chip,
| ^~~~~~~~~~~~~~~~~~~~~~~
vim +/naddrs +2944 drivers/mtd/nand/raw/qcom_nandc.c
2930
2931 /* NAND framework ->exec_op() hooks and related helpers */
2932 static void qcom_parse_instructions(struct nand_chip *chip,
2933 const struct nand_subop *subop,
2934 struct qcom_op *q_op)
2935 {
2936 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2937 const struct nand_op_instr *instr = NULL;
2938 unsigned int op_id;
2939 int i;
2940
2941 memset(q_op, 0, sizeof(*q_op));
2942
2943 for (op_id = 0; op_id < subop->ninstrs; op_id++) {
> 2944 unsigned int offset, naddrs;
2945 const u8 *addrs;
2946
2947 instr = &subop->instrs[op_id];
2948
2949 switch (instr->type) {
2950 case NAND_OP_CMD_INSTR:
2951 q_op->cmd_reg = qcom_op_cmd_mapping(nandc, instr->ctx.cmd.opcode, q_op);
2952 q_op->rdy_delay_ns = instr->delay_ns;
2953 break;
2954
2955 case NAND_OP_ADDR_INSTR:
2956 offset = nand_subop_get_addr_start_off(subop, op_id);
2957 naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
2958 addrs = &instr->ctx.addr.addrs[offset];
2959 for (i = 0; i < MAX_ADDRESS_CYCLE; i++) {
2960 if (i < 4)
2961 q_op->addr1_reg |= (u32)addrs[i] << i * 8;
2962 else
2963 q_op->addr2_reg |= addrs[i];
2964 }
2965 q_op->rdy_delay_ns = instr->delay_ns;
2966 break;
2967
2968 case NAND_OP_DATA_IN_INSTR:
2969 q_op->data_instr = instr;
2970 q_op->data_instr_idx = op_id;
2971 q_op->rdy_delay_ns = instr->delay_ns;
2972 fallthrough;
2973 case NAND_OP_DATA_OUT_INSTR:
2974 q_op->rdy_delay_ns = instr->delay_ns;
2975 break;
2976
2977 case NAND_OP_WAITRDY_INSTR:
2978 q_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
2979 q_op->rdy_delay_ns = instr->delay_ns;
2980 break;
2981 }
2982 }
2983 }
2984
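The first warning flags that naddrs is computed but never read, and the second one appears because qcom_parse_instructions() is not yet called from qcom_nand_exec_op() in this patch. A minimal sketch of one way to silence the naddrs warning, assuming the eventual ->exec_op() parser patterns cap the address cycles at MAX_ADDRESS_CYCLE (an illustration only, not necessarily the fix posted in a later revision):

        case NAND_OP_ADDR_INSTR:
                offset = nand_subop_get_addr_start_off(subop, op_id);
                naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
                addrs = &instr->ctx.addr.addrs[offset];

                /*
                 * Bound the loop by the number of address cycles actually
                 * present; the op parser pattern is assumed to guarantee
                 * naddrs <= MAX_ADDRESS_CYCLE.
                 */
                for (i = 0; i < naddrs; i++) {
                        if (i < 4)
                                q_op->addr1_reg |= (u32)addrs[i] << i * 8;
                        else
                                q_op->addr2_reg |= addrs[i];
                }
                q_op->rdy_delay_ns = instr->delay_ns;
                break;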
Hi,
quic_mdalam@quicinc.com wrote on Thu, 15 Jun 2023 13:01:39 +0530:
> Implement exec_op() so we can later get rid of the legacy interface
> implementation.
>
> Co-developed-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
> Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
> Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
> ---
> Change in [v4]
>
> * No change for this patch, since this is part of exec_op
> series posting new patch.
>
> Change in [v3]
>
> * Removed NAND_CMD_STATUS check in pre_command and move
> it to status exec_op.
>
> * Removed min() , since this check not needed
>
> * Removed all the dummy APIs of exec_ops, and added it
> into same patch where its getting added.
>
> * Added qcom_check_op() API to check for unsupported feature
> by controller in check_only path.
>
> Change in [v2]
>
> * Missed to post Cover-letter, so posting v2 patch with cover-letter
>
> Change in [v1]
>
> * Added initial support for exec_ops.
>
> drivers/mtd/nand/raw/qcom_nandc.c | 159 ++++++++++++++++++++++++++++++
> 1 file changed, 159 insertions(+)
>
> diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
> index 72d6168d8a1b..d9c4c9fe2fe8 100644
> --- a/drivers/mtd/nand/raw/qcom_nandc.c
> +++ b/drivers/mtd/nand/raw/qcom_nandc.c
> @@ -157,6 +157,7 @@
> #define OP_PAGE_PROGRAM_WITH_ECC 0x7
> #define OP_PROGRAM_PAGE_SPARE 0x9
> #define OP_BLOCK_ERASE 0xa
> +#define OP_CHECK_STATUS 0xc
> #define OP_FETCH_ID 0xb
> #define OP_RESET_DEVICE 0xd
>
> @@ -235,6 +236,8 @@ nandc_set_reg(chip, reg, \
> */
> #define NAND_ERASED_CW_SET BIT(4)
>
> +#define MAX_ADDRESS_CYCLE 5
> +#define MAX_CHUNK_SIZE SZ_8K
New line.
> /*
> * This data type corresponds to the BAM transaction which will be used for all
> * NAND transfers.
> @@ -447,6 +450,29 @@ struct qcom_nand_boot_partition {
> u32 page_size;
> };
>
> +/*
> + * Qcom op for each exec_op transfer
> + *
> + * @data_instr: data instruction pointer
> + * @data_instr_idx: data instruction index
> + * @rdy_timeout_ms: wait ready timeout in ms
> + * @rdy_delay_ns: Additional delay in ns
> + * @addr1_reg: Address1 register value
> + * @addr2_reg: Address2 register value
> + * @cmd_reg: CMD register value
> + * @flag: flag for misc instruction
> + */
> +struct qcom_op {
> + const struct nand_op_instr *data_instr;
> + unsigned int data_instr_idx;
> + unsigned int rdy_timeout_ms;
> + unsigned int rdy_delay_ns;
> + u32 addr1_reg;
> + u32 addr2_reg;
> + u32 cmd_reg;
> + u8 flag;
> +};
> +
> /*
> * NAND chip structure
> *
> @@ -2867,8 +2893,141 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
> return 0;
> }
>
> +static int qcom_op_cmd_mapping(struct qcom_nand_controller *nandc, u8 cmd,
> + struct qcom_op *q_op)
> +{
> + int ret;
> +
> + switch (cmd) {
> + case NAND_CMD_RESET:
> + ret = OP_RESET_DEVICE;
> + break;
> + case NAND_CMD_READID:
> + ret = OP_FETCH_ID;
> + break;
> + case NAND_CMD_PARAM:
> + if (nandc->props->qpic_v2)
> + ret = OP_PAGE_READ_ONFI_READ;
> + else
> + ret = OP_PAGE_READ;
> + break;
> + case NAND_CMD_ERASE1:
> + case NAND_CMD_ERASE2:
> + ret = OP_BLOCK_ERASE;
> + break;
> + case NAND_CMD_STATUS:
> + ret = OP_CHECK_STATUS;
> + break;
> + case NAND_CMD_PAGEPROG:
> + ret = OP_PROGRAM_PAGE;
> + break;
> + default:
Again, this is not a supported case; you should handle it. And this
must be checked in the check_only path as well.
> + break;
> + }
> +
> + return ret;
> +}
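For the unsupported default case flagged above, a hedged sketch of one possible shape (not necessarily what a later revision does): return an error code from the mapping helper and let callers check for a negative value, which also implies qcom_parse_instructions() would have to return int instead of void.

        static int qcom_op_cmd_mapping(struct qcom_nand_controller *nandc, u8 cmd,
                                       struct qcom_op *q_op)
        {
                switch (cmd) {
                case NAND_CMD_RESET:
                        return OP_RESET_DEVICE;
                case NAND_CMD_READID:
                        return OP_FETCH_ID;
                case NAND_CMD_PARAM:
                        return nandc->props->qpic_v2 ? OP_PAGE_READ_ONFI_READ : OP_PAGE_READ;
                case NAND_CMD_ERASE1:
                case NAND_CMD_ERASE2:
                        return OP_BLOCK_ERASE;
                case NAND_CMD_STATUS:
                        return OP_CHECK_STATUS;
                case NAND_CMD_PAGEPROG:
                        return OP_PROGRAM_PAGE;
                default:
                        /* Reject any opcode the controller cannot map. */
                        dev_err(nandc->dev, "Opcode not supported: %u\n", cmd);
                        return -EOPNOTSUPP;
                }
        }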
> +
> +/* NAND framework ->exec_op() hooks and related helpers */
> +static void qcom_parse_instructions(struct nand_chip *chip,
> + const struct nand_subop *subop,
> + struct qcom_op *q_op)
> +{
> + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
> + const struct nand_op_instr *instr = NULL;
> + unsigned int op_id;
> + int i;
> +
> + memset(q_op, 0, sizeof(*q_op));
> +
> + for (op_id = 0; op_id < subop->ninstrs; op_id++) {
> + unsigned int offset, naddrs;
> + const u8 *addrs;
> +
> + instr = &subop->instrs[op_id];
> +
> + switch (instr->type) {
> + case NAND_OP_CMD_INSTR:
> + q_op->cmd_reg = qcom_op_cmd_mapping(nandc, instr->ctx.cmd.opcode, q_op);
> + q_op->rdy_delay_ns = instr->delay_ns;
> + break;
> +
> + case NAND_OP_ADDR_INSTR:
> + offset = nand_subop_get_addr_start_off(subop, op_id);
> + naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
> + addrs = &instr->ctx.addr.addrs[offset];
> + for (i = 0; i < MAX_ADDRESS_CYCLE; i++) {
> + if (i < 4)
> + q_op->addr1_reg |= (u32)addrs[i] << i * 8;
> + else
> + q_op->addr2_reg |= addrs[i];
> + }
> + q_op->rdy_delay_ns = instr->delay_ns;
> + break;
> +
> + case NAND_OP_DATA_IN_INSTR:
> + q_op->data_instr = instr;
> + q_op->data_instr_idx = op_id;
> + q_op->rdy_delay_ns = instr->delay_ns;
> + fallthrough;
> + case NAND_OP_DATA_OUT_INSTR:
> + q_op->rdy_delay_ns = instr->delay_ns;
> + break;
> +
> + case NAND_OP_WAITRDY_INSTR:
> + q_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
> + q_op->rdy_delay_ns = instr->delay_ns;
> + break;
> + }
> + }
> +}
> +
> +static int qcom_check_op(struct nand_chip *chip,
> + const struct nand_operation *op)
> +{
> + const struct nand_op_instr *instr;
> + int op_id;
> +
> + for (op_id = 0; op_id < op->ninstrs; op_id++) {
> + instr = &op->instrs[op_id];
> +
> + switch (instr->type) {
> + case NAND_OP_CMD_INSTR:
> + if (instr->ctx.cmd.opcode == NAND_CMD_READCACHESEQ ||
> + instr->ctx.cmd.opcode == NAND_CMD_READCACHEEND)
> + return -ENOTSUPP;
Do you really need this check? These operations have a specific pattern,
no? I believe you should not need this check.
> + break;
> + case NAND_OP_ADDR_INSTR:
> + if (instr->ctx.addr.naddrs > MAX_ADDRESS_CYCLE)
> + return -ENOTSUPP;
This one is not needed either, as long as you properly define the
patterns.
> +
> + break;
> + case NAND_OP_DATA_IN_INSTR:
> + case NAND_OP_DATA_OUT_INSTR:
> + if (instr->ctx.data.len > MAX_CHUNK_SIZE)
Same.
> + return -ENOTSUPP;
> + break;
> + default:
> + break;
> + }
> + }
> +
> + return 0;
> +}
> +
> +static int qcom_nand_exec_op(struct nand_chip *chip,
> + const struct nand_operation *op,
> + bool check_only)
> +{
> + if (check_only)
> + return qcom_check_op(chip, op);
> +
> + return 0;
> +}
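To make the "patterns" remarks above concrete, a minimal sketch of what a pattern-based dispatcher could look like; the exec handler names are hypothetical placeholders, and the constraints simply reuse MAX_ADDRESS_CYCLE and MAX_CHUNK_SIZE from this patch. With such a parser, nand_op_parser_exec_op() already rejects operations that do not match any pattern, including in the check_only case, so the explicit opcode/cycle/length checks in qcom_check_op() become unnecessary.

        /* Handler names below are placeholders, not part of this patch. */
        static const struct nand_op_parser qcom_op_parser = NAND_OP_PARSER(
                NAND_OP_PARSER_PATTERN(
                        qcom_read_id_type_exec_op,
                        NAND_OP_PARSER_PAT_CMD_ELEM(false),
                        NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
                        NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
                NAND_OP_PARSER_PATTERN(
                        qcom_misc_cmd_type_exec_op,
                        NAND_OP_PARSER_PAT_CMD_ELEM(false),
                        NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
                NAND_OP_PARSER_PATTERN(
                        qcom_data_read_type_exec_op,
                        NAND_OP_PARSER_PAT_CMD_ELEM(false),
                        NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
                        NAND_OP_PARSER_PAT_CMD_ELEM(false),
                        NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
                        NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
                );

        static int qcom_nand_exec_op(struct nand_chip *chip,
                                     const struct nand_operation *op,
                                     bool check_only)
        {
                return nand_op_parser_exec_op(chip, &qcom_op_parser, op, check_only);
        }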
> +
> static const struct nand_controller_ops qcom_nandc_ops = {
> .attach_chip = qcom_nand_attach_chip,
> + .exec_op = qcom_nand_exec_op,
I understand the idea of making the series easier to review, and I
thank you for that, but in practice the series is not bisectable. I
doubt the driver works right after patch 1.
You will likely need two patches, one to add exec_op, one to remove the
legacy implementation.
> };
>
> static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
Thanks,
Miquèl
On 7/4/2023 8:20 PM, Miquel Raynal wrote:
> Hi,
>
> quic_mdalam@quicinc.com wrote on Thu, 15 Jun 2023 13:01:39 +0530:
>
>> Implement exec_op() so we can later get rid of the legacy interface
>> implementation.
>>
>> Co-developed-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
>> Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
>> Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
>> ---
>> Change in [v4]
>>
>> * No change for this patch, since this is part of exec_op
>> series posting new patch.
>>
>> Change in [v3]
>>
>> * Removed NAND_CMD_STATUS check in pre_command and move
>> it to status exec_op.
>>
>> * Removed min() , since this check not needed
>>
>> * Removed all the dummy APIs of exec_ops, and added it
>> into same patch where its getting added.
>>
>> * Added qcom_check_op() API to check for unsupported feature
>> by controller in check_only path.
>>
>> Change in [v2]
>>
>> * Missed to post Cover-letter, so posting v2 patch with cover-letter
>>
>> Change in [v1]
>>
>> * Added initial support for exec_ops.
>>
>> drivers/mtd/nand/raw/qcom_nandc.c | 159 ++++++++++++++++++++++++++++++
>> 1 file changed, 159 insertions(+)
>>
>> diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
>> index 72d6168d8a1b..d9c4c9fe2fe8 100644
>> --- a/drivers/mtd/nand/raw/qcom_nandc.c
>> +++ b/drivers/mtd/nand/raw/qcom_nandc.c
>> @@ -157,6 +157,7 @@
>> #define OP_PAGE_PROGRAM_WITH_ECC 0x7
>> #define OP_PROGRAM_PAGE_SPARE 0x9
>> #define OP_BLOCK_ERASE 0xa
>> +#define OP_CHECK_STATUS 0xc
>> #define OP_FETCH_ID 0xb
>> #define OP_RESET_DEVICE 0xd
>>
>> @@ -235,6 +236,8 @@ nandc_set_reg(chip, reg, \
>> */
>> #define NAND_ERASED_CW_SET BIT(4)
>>
>> +#define MAX_ADDRESS_CYCLE 5
>> +#define MAX_CHUNK_SIZE SZ_8K
>
> New line.
Will fix it in the next patch (v5).
>
>> /*
>> * This data type corresponds to the BAM transaction which will be used for all
>> * NAND transfers.
>> @@ -447,6 +450,29 @@ struct qcom_nand_boot_partition {
>> u32 page_size;
>> };
>>
>> +/*
>> + * Qcom op for each exec_op transfer
>> + *
>> + * @data_instr: data instruction pointer
>> + * @data_instr_idx: data instruction index
>> + * @rdy_timeout_ms: wait ready timeout in ms
>> + * @rdy_delay_ns: Additional delay in ns
>> + * @addr1_reg: Address1 register value
>> + * @addr2_reg: Address2 register value
>> + * @cmd_reg: CMD register value
>> + * @flag: flag for misc instruction
>> + */
>> +struct qcom_op {
>> + const struct nand_op_instr *data_instr;
>> + unsigned int data_instr_idx;
>> + unsigned int rdy_timeout_ms;
>> + unsigned int rdy_delay_ns;
>> + u32 addr1_reg;
>> + u32 addr2_reg;
>> + u32 cmd_reg;
>> + u8 flag;
>> +};
>> +
>> /*
>> * NAND chip structure
>> *
>> @@ -2867,8 +2893,141 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
>> return 0;
>> }
>>
>> +static int qcom_op_cmd_mapping(struct qcom_nand_controller *nandc, u8 cmd,
>> + struct qcom_op *q_op)
>> +{
>> + int ret;
>> +
>> + switch (cmd) {
>> + case NAND_CMD_RESET:
>> + ret = OP_RESET_DEVICE;
>> + break;
>> + case NAND_CMD_READID:
>> + ret = OP_FETCH_ID;
>> + break;
>> + case NAND_CMD_PARAM:
>> + if (nandc->props->qpic_v2)
>> + ret = OP_PAGE_READ_ONFI_READ;
>> + else
>> + ret = OP_PAGE_READ;
>> + break;
>> + case NAND_CMD_ERASE1:
>> + case NAND_CMD_ERASE2:
>> + ret = OP_BLOCK_ERASE;
>> + break;
>> + case NAND_CMD_STATUS:
>> + ret = OP_CHECK_STATUS;
>> + break;
>> + case NAND_CMD_PAGEPROG:
>> + ret = OP_PROGRAM_PAGE;
>> + break;
>> + default:
>
> Again, this is not a supported case, you should handle it. And this
> must be checked upon check_only conditions as well.
>
Yes, understood. Will handle this case in the next patch (v5).
>> + break;
>> + }
>> +
>> + return ret;
>> +}
>> +
>> +/* NAND framework ->exec_op() hooks and related helpers */
>> +static void qcom_parse_instructions(struct nand_chip *chip,
>> + const struct nand_subop *subop,
>> + struct qcom_op *q_op)
>> +{
>> + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
>> + const struct nand_op_instr *instr = NULL;
>> + unsigned int op_id;
>> + int i;
>> +
>> + memset(q_op, 0, sizeof(*q_op));
>> +
>> + for (op_id = 0; op_id < subop->ninstrs; op_id++) {
>> + unsigned int offset, naddrs;
>> + const u8 *addrs;
>> +
>> + instr = &subop->instrs[op_id];
>> +
>> + switch (instr->type) {
>> + case NAND_OP_CMD_INSTR:
>> + q_op->cmd_reg = qcom_op_cmd_mapping(nandc, instr->ctx.cmd.opcode, q_op);
>> + q_op->rdy_delay_ns = instr->delay_ns;
>> + break;
>> +
>> + case NAND_OP_ADDR_INSTR:
>> + offset = nand_subop_get_addr_start_off(subop, op_id);
>> + naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
>> + addrs = &instr->ctx.addr.addrs[offset];
>> + for (i = 0; i < MAX_ADDRESS_CYCLE; i++) {
>> + if (i < 4)
>> + q_op->addr1_reg |= (u32)addrs[i] << i * 8;
>> + else
>> + q_op->addr2_reg |= addrs[i];
>> + }
>> + q_op->rdy_delay_ns = instr->delay_ns;
>> + break;
>> +
>> + case NAND_OP_DATA_IN_INSTR:
>> + q_op->data_instr = instr;
>> + q_op->data_instr_idx = op_id;
>> + q_op->rdy_delay_ns = instr->delay_ns;
>> + fallthrough;
>> + case NAND_OP_DATA_OUT_INSTR:
>> + q_op->rdy_delay_ns = instr->delay_ns;
>> + break;
>> +
>> + case NAND_OP_WAITRDY_INSTR:
>> + q_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
>> + q_op->rdy_delay_ns = instr->delay_ns;
>> + break;
>> + }
>> + }
>> +}
>> +
>> +static int qcom_check_op(struct nand_chip *chip,
>> + const struct nand_operation *op)
>> +{
>> + const struct nand_op_instr *instr;
>> + int op_id;
>> +
>> + for (op_id = 0; op_id < op->ninstrs; op_id++) {
>> + instr = &op->instrs[op_id];
>> +
>> + switch (instr->type) {
>> + case NAND_OP_CMD_INSTR:
>> + if (instr->ctx.cmd.opcode == NAND_CMD_READCACHESEQ ||
>> + instr->ctx.cmd.opcode == NAND_CMD_READCACHEEND)
>> + return -ENOTSUPP;
>
> Do you really need this check? These operations have specific pattern,
> no? I believe you should not need this check.
Yes, you are correct, these commands have a specific pattern.
It is not needed here; will remove it in the next patch (v5).
>> + break;
>> + case NAND_OP_ADDR_INSTR:
>> + if (instr->ctx.addr.naddrs > MAX_ADDRESS_CYCLE)
>> + return -ENOTSUPP;
>
> This one is not needed either, as long as you properly define the
> patterns.
Yes, this one is not needed either. Will remove it in the next patch (v5).
>
>> +
>> + break;
>> + case NAND_OP_DATA_IN_INSTR:
>> + case NAND_OP_DATA_OUT_INSTR:
>> + if (instr->ctx.data.len > MAX_CHUNK_SIZE)
>
> Same.
Will remove it in the next patch (v5).
>> + return -ENOTSUPP;
>> + break;
>> + default:
>> + break;
>> + }
>> + }
>> +
>> + return 0;
>> +}
>> +
>> +static int qcom_nand_exec_op(struct nand_chip *chip,
>> + const struct nand_operation *op,
>> + bool check_only)
>> +{
>> + if (check_only)
>> + return qcom_check_op(chip, op);
>> +
>> + return 0;
>> +}
>> +
>> static const struct nand_controller_ops qcom_nandc_ops = {
>> .attach_chip = qcom_nand_attach_chip,
>> + .exec_op = qcom_nand_exec_op,
>
> I understand the idea of making the series easier to review, and I
> thank you for that, but in practice the series is not bisectable. I
> doubt the driver works right after patch 1.
Yes, you are right. The patches in this series are not bisectable; each one
compiles individually, but boot-up will not work after patch 1.
>
> You will likely need two patches, one to add exec_op, one to remove the
> legacy implementation.
Will combine everything into two patches: one implementing exec_op() and
another removing the legacy implementation. Will do this in the next version (v5).
>
>> };
>>
>> static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
>
>
> Thanks,
> Miquèl
@@ -157,6 +157,7 @@
#define OP_PAGE_PROGRAM_WITH_ECC 0x7
#define OP_PROGRAM_PAGE_SPARE 0x9
#define OP_BLOCK_ERASE 0xa
+#define OP_CHECK_STATUS 0xc
#define OP_FETCH_ID 0xb
#define OP_RESET_DEVICE 0xd
@@ -235,6 +236,8 @@ nandc_set_reg(chip, reg, \
*/
#define NAND_ERASED_CW_SET BIT(4)
+#define MAX_ADDRESS_CYCLE 5
+#define MAX_CHUNK_SIZE SZ_8K
/*
* This data type corresponds to the BAM transaction which will be used for all
* NAND transfers.
@@ -447,6 +450,29 @@ struct qcom_nand_boot_partition {
u32 page_size;
};
+/*
+ * Qcom op for each exec_op transfer
+ *
+ * @data_instr: data instruction pointer
+ * @data_instr_idx: data instruction index
+ * @rdy_timeout_ms: wait ready timeout in ms
+ * @rdy_delay_ns: Additional delay in ns
+ * @addr1_reg: Address1 register value
+ * @addr2_reg: Address2 register value
+ * @cmd_reg: CMD register value
+ * @flag: flag for misc instruction
+ */
+struct qcom_op {
+ const struct nand_op_instr *data_instr;
+ unsigned int data_instr_idx;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ u32 addr1_reg;
+ u32 addr2_reg;
+ u32 cmd_reg;
+ u8 flag;
+};
+
/*
* NAND chip structure
*
@@ -2867,8 +2893,141 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
return 0;
}
+static int qcom_op_cmd_mapping(struct qcom_nand_controller *nandc, u8 cmd,
+ struct qcom_op *q_op)
+{
+ int ret;
+
+ switch (cmd) {
+ case NAND_CMD_RESET:
+ ret = OP_RESET_DEVICE;
+ break;
+ case NAND_CMD_READID:
+ ret = OP_FETCH_ID;
+ break;
+ case NAND_CMD_PARAM:
+ if (nandc->props->qpic_v2)
+ ret = OP_PAGE_READ_ONFI_READ;
+ else
+ ret = OP_PAGE_READ;
+ break;
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ ret = OP_BLOCK_ERASE;
+ break;
+ case NAND_CMD_STATUS:
+ ret = OP_CHECK_STATUS;
+ break;
+ case NAND_CMD_PAGEPROG:
+ ret = OP_PROGRAM_PAGE;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static void qcom_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct qcom_op *q_op)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id;
+ int i;
+
+ memset(q_op, 0, sizeof(*q_op));
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ q_op->cmd_reg = qcom_op_cmd_mapping(nandc, instr->ctx.cmd.opcode, q_op);
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ for (i = 0; i < MAX_ADDRESS_CYCLE; i++) {
+ if (i < 4)
+ q_op->addr1_reg |= (u32)addrs[i] << i * 8;
+ else
+ q_op->addr2_reg |= addrs[i];
+ }
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ q_op->data_instr = instr;
+ q_op->data_instr_idx = op_id;
+ q_op->rdy_delay_ns = instr->delay_ns;
+ fallthrough;
+ case NAND_OP_DATA_OUT_INSTR:
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ q_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+}
+
+static int qcom_check_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr;
+ int op_id;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (instr->ctx.cmd.opcode == NAND_CMD_READCACHESEQ ||
+ instr->ctx.cmd.opcode == NAND_CMD_READCACHEEND)
+ return -ENOTSUPP;
+ break;
+ case NAND_OP_ADDR_INSTR:
+ if (instr->ctx.addr.naddrs > MAX_ADDRESS_CYCLE)
+ return -ENOTSUPP;
+
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ if (instr->ctx.data.len > MAX_CHUNK_SIZE)
+ return -ENOTSUPP;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ if (check_only)
+ return qcom_check_op(chip, op);
+
+ return 0;
+}
+
static const struct nand_controller_ops qcom_nandc_ops = {
.attach_chip = qcom_nand_attach_chip,
+ .exec_op = qcom_nand_exec_op,
};
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)