[net-next,2/2] octeontx2-pf: TC flower offload support for mirror

Message ID 20231116101601.3188711-3-sumang@marvell.com
State New
Series octeontx2: Multicast/mirror offload changes

Commit Message

Suman Ghosh Nov. 16, 2023, 10:16 a.m. UTC
  This patch extends TC flower offload support for mirroring ingress/egress
traffic to a different PF/VF. Below is an example command,

'tc filter add dev eth1 ingress protocol ip flower src_ip <ip-addr>
skip_sw action mirred ingress mirror dev eth2'
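
Once the filter is accepted by hardware, it should be reported with the
'in_hw' flag by 'tc filter show dev eth1 ingress' (verification hint only;
exact output depends on the iproute2 version).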

Signed-off-by: Suman Ghosh <sumang@marvell.com>
---
 .../ethernet/marvell/octeontx2/nic/otx2_tc.c  | 110 +++++++++++++++++-
 1 file changed, 108 insertions(+), 2 deletions(-)
  

Comments

Wojciech Drewek Nov. 16, 2023, 10:58 a.m. UTC | #1
On 16.11.2023 11:16, Suman Ghosh wrote:
> This patch extends TC flower offload support for mirroring ingress/egress
> traffic to a different PF/VF. Below is an example command,
> 
> 'tc filter add dev eth1 ingress protocol ip flower src_ip <ip-addr>
> skip_sw action mirred ingress mirror dev eth2'
> 
> Signed-off-by: Suman Ghosh <sumang@marvell.com>
> ---

Only small nits,
Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>

>  .../ethernet/marvell/octeontx2/nic/otx2_tc.c  | 110 +++++++++++++++++-
>  1 file changed, 108 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
> index 8a5e3987a482..cfcf935b1003 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
> @@ -29,6 +29,8 @@
>  
>  #define OTX2_UNSUPP_LSE_DEPTH		GENMASK(6, 4)
>  
> +#define MCAST_INVALID_GRP		(-1U)
> +
>  struct otx2_tc_flow_stats {
>  	u64 bytes;
>  	u64 pkts;
> @@ -47,6 +49,7 @@ struct otx2_tc_flow {
>  	bool				is_act_police;
>  	u32				prio;
>  	struct npc_install_flow_req	req;
> +	u32				mcast_grp_idx;
>  };
>  
>  static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
> @@ -336,22 +339,95 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
>  	return rc;
>  }
>  
> +static int otx2_tc_update_mcast(struct otx2_nic *nic,
> +				struct npc_install_flow_req *req,
> +				struct netlink_ext_ack *extack,
> +				struct otx2_tc_flow *node,
> +				struct nix_mcast_grp_update_req *ureq,
> +				u8 num_intf)
> +{
> +	struct nix_mcast_grp_update_req *grp_update_req;
> +	struct nix_mcast_grp_create_req *creq;
> +	struct nix_mcast_grp_create_rsp *crsp;
> +	u32 grp_index;
> +	int rc;
> +
> +	mutex_lock(&nic->mbox.lock);
> +	creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
> +	if (!creq) {
> +		mutex_unlock(&nic->mbox.lock);

I would put mutex_unlock at the end and use goto but it's up to you.
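E.g. a rough, untested sketch (same identifiers as in the patch, only the
structure changes), where every failure path jumps to a single unlock:

	mutex_lock(&nic->mbox.lock);
	creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
	if (!creq) {
		rc = -ENOMEM;
		goto error;
	}

	creq->dir = NIX_MCAST_INGRESS;
	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
		goto error;
	}

	/* ... remaining AF requests follow the same "goto error" pattern ... */

	mutex_unlock(&nic->mbox.lock);
	req->op = NIX_RX_ACTIONOP_MCAST;
	req->index = grp_index;
	node->mcast_grp_idx = grp_index;
	return 0;

error:
	mutex_unlock(&nic->mbox.lock);
	return rc;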

> +		return -ENOMEM;
> +	}
> +
> +	creq->dir = NIX_MCAST_INGRESS;
> +	/* Send message to AF */
> +	rc = otx2_sync_mbox_msg(&nic->mbox);
> +	if (rc) {
> +		NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
> +		mutex_unlock(&nic->mbox.lock);
> +		return rc;
> +	}
> +
> +	crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
> +			0,
> +			&creq->hdr);
> +	if (IS_ERR(crsp)) {
> +		mutex_unlock(&nic->mbox.lock);
> +		return PTR_ERR(crsp);
> +	}
> +
> +	grp_index = crsp->mcast_grp_idx;
> +	grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox);
> +	if (!grp_update_req) {
> +		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
> +		mutex_unlock(&nic->mbox.lock);
> +		return -ENOMEM;
> +	}
> +
> +	ureq->op = NIX_MCAST_OP_ADD_ENTRY;
> +	ureq->mcast_grp_idx = grp_index;
> +	ureq->num_mce_entry = num_intf;
> +	ureq->pcifunc[0] = nic->pcifunc;
> +	ureq->channel[0] = nic->hw.tx_chan_base;
> +
> +	ureq->dest_type[0] = NIX_RX_RSS;
> +	ureq->rq_rss_index[0] = 0;
> +	memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr));
> +	memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req));
> +
> +	/* Send message to AF */
> +	rc = otx2_sync_mbox_msg(&nic->mbox);
> +	if (rc) {
> +		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
> +		mutex_unlock(&nic->mbox.lock);
> +		return rc;
> +	}
> +
> +	mutex_unlock(&nic->mbox.lock);
> +	req->op = NIX_RX_ACTIONOP_MCAST;
> +	req->index = grp_index;
> +	node->mcast_grp_idx = grp_index;
> +	return 0;
> +}
> +
>  static int otx2_tc_parse_actions(struct otx2_nic *nic,
>  				 struct flow_action *flow_action,
>  				 struct npc_install_flow_req *req,
>  				 struct flow_cls_offload *f,
>  				 struct otx2_tc_flow *node)
>  {
> +	struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 };
>  	struct netlink_ext_ack *extack = f->common.extack;
> +	bool pps = false, mcast = false;
>  	struct flow_action_entry *act;
>  	struct net_device *target;
>  	struct otx2_nic *priv;
>  	u32 burst, mark = 0;
>  	u8 nr_police = 0;
> -	bool pps = false;
> +	u8 num_intf = 1;
> +	int rc, i;
>  	u64 rate;
>  	int err;
> -	int i;
>  
>  	if (!flow_action_has_entries(flow_action)) {
>  		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
> @@ -423,11 +499,30 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
>  			req->index = act->rx_queue;
>  			break;
>  
> +		case FLOW_ACTION_MIRRED_INGRESS:
> +			target = act->dev;
> +			priv = netdev_priv(target);
> +			dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc;
> +			dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base;
> +			dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS;
> +			dummy_grp_update_req.rq_rss_index[num_intf] = 0;
> +			mcast = true;
> +			num_intf++;
> +			break;
> +
>  		default:
>  			return -EOPNOTSUPP;
>  		}
>  	}
>  
> +	if (mcast) {
> +		rc = otx2_tc_update_mcast(nic, req, extack, node,
> +					  &dummy_grp_update_req,
> +					  num_intf);
> +		if (rc)
> +			return rc;

I think we can use err here, no need for new variable
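i.e. something like (illustration only, reusing the existing variable):

	err = otx2_tc_update_mcast(nic, req, extack, node,
				   &dummy_grp_update_req, num_intf);
	if (err)
		return err;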

> +	}
> +
>  	if (nr_police > 1) {
>  		NL_SET_ERR_MSG_MOD(extack,
>  				   "rate limit police offload requires a single action");
> @@ -1033,6 +1128,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
>  			    struct flow_cls_offload *tc_flow_cmd)
>  {
>  	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
> +	struct nix_mcast_grp_destroy_req *grp_destroy_req;
>  	struct otx2_tc_flow *flow_node;
>  	int err;
>  
> @@ -1064,6 +1160,15 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
>  		mutex_unlock(&nic->mbox.lock);
>  	}
>  
> +	/* Remove the multicast/mirror related nodes */
> +	if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) {
> +		mutex_lock(&nic->mbox.lock);
> +		grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox);
> +		grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx;
> +		otx2_sync_mbox_msg(&nic->mbox);
> +		mutex_unlock(&nic->mbox.lock);
> +	}
> +
>  	otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
>  	otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
>  	kfree_rcu(flow_node, rcu);
> @@ -1096,6 +1201,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
>  	spin_lock_init(&new_node->lock);
>  	new_node->cookie = tc_flow_cmd->cookie;
>  	new_node->prio = tc_flow_cmd->common.prio;
> +	new_node->mcast_grp_idx = MCAST_INVALID_GRP;
>  
>  	memset(&dummy, 0, sizeof(struct npc_install_flow_req));
>
  
Suman Ghosh Nov. 18, 2023, 5:52 p.m. UTC | #2
>
>Only small nits,
>Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
>

[...]

>> +	mutex_lock(&nic->mbox.lock);
>> +	creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
>> +	if (!creq) {
>> +		mutex_unlock(&nic->mbox.lock);
>
>I would put mutex_unlock at the end and use goto but it's up to you.
[Suman] Ack. This would make the code more readable, will update in v2.

[...]

>> +	if (mcast) {
>> +		rc = otx2_tc_update_mcast(nic, req, extack, node,
>> +					  &dummy_grp_update_req,
>> +					  num_intf);
>> +		if (rc)
>> +			return rc;
>
>I think we can use err here, no need for new variable
[Suman] Ack, will update in v2.
  

Patch

diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 8a5e3987a482..cfcf935b1003 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -29,6 +29,8 @@ 
 
 #define OTX2_UNSUPP_LSE_DEPTH		GENMASK(6, 4)
 
+#define MCAST_INVALID_GRP		(-1U)
+
 struct otx2_tc_flow_stats {
 	u64 bytes;
 	u64 pkts;
@@ -47,6 +49,7 @@  struct otx2_tc_flow {
 	bool				is_act_police;
 	u32				prio;
 	struct npc_install_flow_req	req;
+	u32				mcast_grp_idx;
 };
 
 static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
@@ -336,22 +339,95 @@  static int otx2_tc_act_set_police(struct otx2_nic *nic,
 	return rc;
 }
 
+static int otx2_tc_update_mcast(struct otx2_nic *nic,
+				struct npc_install_flow_req *req,
+				struct netlink_ext_ack *extack,
+				struct otx2_tc_flow *node,
+				struct nix_mcast_grp_update_req *ureq,
+				u8 num_intf)
+{
+	struct nix_mcast_grp_update_req *grp_update_req;
+	struct nix_mcast_grp_create_req *creq;
+	struct nix_mcast_grp_create_rsp *crsp;
+	u32 grp_index;
+	int rc;
+
+	mutex_lock(&nic->mbox.lock);
+	creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
+	if (!creq) {
+		mutex_unlock(&nic->mbox.lock);
+		return -ENOMEM;
+	}
+
+	creq->dir = NIX_MCAST_INGRESS;
+	/* Send message to AF */
+	rc = otx2_sync_mbox_msg(&nic->mbox);
+	if (rc) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
+		mutex_unlock(&nic->mbox.lock);
+		return rc;
+	}
+
+	crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
+			0,
+			&creq->hdr);
+	if (IS_ERR(crsp)) {
+		mutex_unlock(&nic->mbox.lock);
+		return PTR_ERR(crsp);
+	}
+
+	grp_index = crsp->mcast_grp_idx;
+	grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox);
+	if (!grp_update_req) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
+		mutex_unlock(&nic->mbox.lock);
+		return -ENOMEM;
+	}
+
+	ureq->op = NIX_MCAST_OP_ADD_ENTRY;
+	ureq->mcast_grp_idx = grp_index;
+	ureq->num_mce_entry = num_intf;
+	ureq->pcifunc[0] = nic->pcifunc;
+	ureq->channel[0] = nic->hw.tx_chan_base;
+
+	ureq->dest_type[0] = NIX_RX_RSS;
+	ureq->rq_rss_index[0] = 0;
+	memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr));
+	memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req));
+
+	/* Send message to AF */
+	rc = otx2_sync_mbox_msg(&nic->mbox);
+	if (rc) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
+		mutex_unlock(&nic->mbox.lock);
+		return rc;
+	}
+
+	mutex_unlock(&nic->mbox.lock);
+	req->op = NIX_RX_ACTIONOP_MCAST;
+	req->index = grp_index;
+	node->mcast_grp_idx = grp_index;
+	return 0;
+}
+
 static int otx2_tc_parse_actions(struct otx2_nic *nic,
 				 struct flow_action *flow_action,
 				 struct npc_install_flow_req *req,
 				 struct flow_cls_offload *f,
 				 struct otx2_tc_flow *node)
 {
+	struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 };
 	struct netlink_ext_ack *extack = f->common.extack;
+	bool pps = false, mcast = false;
 	struct flow_action_entry *act;
 	struct net_device *target;
 	struct otx2_nic *priv;
 	u32 burst, mark = 0;
 	u8 nr_police = 0;
-	bool pps = false;
+	u8 num_intf = 1;
+	int rc, i;
 	u64 rate;
 	int err;
-	int i;
 
 	if (!flow_action_has_entries(flow_action)) {
 		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
@@ -423,11 +499,30 @@  static int otx2_tc_parse_actions(struct otx2_nic *nic,
 			req->index = act->rx_queue;
 			break;
 
+		case FLOW_ACTION_MIRRED_INGRESS:
+			target = act->dev;
+			priv = netdev_priv(target);
+			dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc;
+			dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base;
+			dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS;
+			dummy_grp_update_req.rq_rss_index[num_intf] = 0;
+			mcast = true;
+			num_intf++;
+			break;
+
 		default:
 			return -EOPNOTSUPP;
 		}
 	}
 
+	if (mcast) {
+		rc = otx2_tc_update_mcast(nic, req, extack, node,
+					  &dummy_grp_update_req,
+					  num_intf);
+		if (rc)
+			return rc;
+	}
+
 	if (nr_police > 1) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "rate limit police offload requires a single action");
@@ -1033,6 +1128,7 @@  static int otx2_tc_del_flow(struct otx2_nic *nic,
 			    struct flow_cls_offload *tc_flow_cmd)
 {
 	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+	struct nix_mcast_grp_destroy_req *grp_destroy_req;
 	struct otx2_tc_flow *flow_node;
 	int err;
 
@@ -1064,6 +1160,15 @@  static int otx2_tc_del_flow(struct otx2_nic *nic,
 		mutex_unlock(&nic->mbox.lock);
 	}
 
+	/* Remove the multicast/mirror related nodes */
+	if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) {
+		mutex_lock(&nic->mbox.lock);
+		grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox);
+		grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx;
+		otx2_sync_mbox_msg(&nic->mbox);
+		mutex_unlock(&nic->mbox.lock);
+	}
+
 	otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
 	otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
 	kfree_rcu(flow_node, rcu);
@@ -1096,6 +1201,7 @@  static int otx2_tc_add_flow(struct otx2_nic *nic,
 	spin_lock_init(&new_node->lock);
 	new_node->cookie = tc_flow_cmd->cookie;
 	new_node->prio = tc_flow_cmd->common.prio;
+	new_node->mcast_grp_idx = MCAST_INVALID_GRP;
 
 	memset(&dummy, 0, sizeof(struct npc_install_flow_req));