@@ -34,6 +34,13 @@ static int sparx5_dcb_app_validate(struct net_device *dev,
int err = 0;
switch (app->selector) {
+ /* Dscp checks */
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ if (app->protocol > 63)
+ err = -EINVAL;
+ else if (app->priority >= SPX5_PRIOS)
+ err = -ERANGE;
+ break;
/* Pcp checks */
case DCB_APP_SEL_PCP:
if (app->protocol > 15)
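For reference, a DSCP app entry as validated above carries the DSCP code point (0..63) in dcb_app.protocol and the classified priority (0..SPX5_PRIOS - 1) in dcb_app.priority. A minimal sketch with arbitrary example values; struct dcb_app and IEEE_8021QAZ_APP_SEL_DSCP come from <linux/dcbnl.h>, everything else here is illustrative and not part of the patch:

#include <linux/dcbnl.h>

/* Illustrative only: DSCP 46 (EF) classified to priority 5 */
static const struct dcb_app example_dscp_app = {
	.selector = IEEE_8021QAZ_APP_SEL_DSCP,
	.protocol = 46,	/* DSCP value, must be <= 63 */
	.priority = 5,	/* must be < SPX5_PRIOS */
};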
@@ -104,17 +111,27 @@ static bool sparx5_dcb_apptrust_contains(int portno, u8 selector)
static int sparx5_dcb_app_update(struct net_device *dev)
{
- struct dcb_app app_itr = { .selector = DCB_APP_SEL_PCP };
struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5_port_qos_dscp_map *dscp_map;
struct sparx5_port_qos_pcp_map *pcp_map;
struct sparx5_port_qos qos = {0};
+ struct dcb_app app_itr = {0};
int portno = port->portno;
int i;
+ dscp_map = &qos.dscp.map;
pcp_map = &qos.pcp.map;
+ /* Get dscp ingress mapping */
+ for (i = 0; i < ARRAY_SIZE(dscp_map->map); i++) {
+ app_itr.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ app_itr.protocol = i;
+ dscp_map->map[i] = dcb_getapp(dev, &app_itr);
+ }
+
/* Get pcp ingress mapping */
for (i = 0; i < ARRAY_SIZE(pcp_map->map); i++) {
+ app_itr.selector = DCB_APP_SEL_PCP;
app_itr.protocol = i;
pcp_map->map[i] = dcb_getapp(dev, &app_itr);
}
@@ -125,9 +142,44 @@ static int sparx5_dcb_app_update(struct net_device *dev)
qos.pcp.dp_enable = qos.pcp.qos_enable;
}
+ /* Enable use of dscp for queue classification? */
+ if (sparx5_dcb_apptrust_contains(portno, IEEE_8021QAZ_APP_SEL_DSCP)) {
+ qos.dscp.qos_enable = true;
+ qos.dscp.dp_enable = qos.dscp.qos_enable;
+ }
+
return sparx5_port_qos_set(port, &qos);
}
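The loop above builds the 64-entry ingress map straight from the DCB app table. As a minimal sketch of that lookup (the wrapper function below is invented for illustration and is not part of the driver): dcb_getapp() returns the priority of the matching selector/protocol entry, or 0 when no entry exists, so unconfigured DSCP values end up mapped to priority 0.

#include <linux/netdevice.h>
#include <net/dcbnl.h>

/* Illustrative only: fetch the priority configured for one DSCP value */
static u8 example_dscp_to_prio(struct net_device *dev, u16 dscp)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.protocol = dscp,
	};

	return dcb_getapp(dev, &app);
}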
+/* Set or delete dscp app entry.
+ *
+ * Dscp mapping is global for all ports, so set and delete app entries are
+ * replicated for each port.
+ */
+static int sparx5_dcb_ieee_dscp_setdel_app(struct net_device *dev,
+ struct dcb_app *app, bool del)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5_port *port_itr;
+ struct dcb_app app_itr;
+ int err, i;
+
+ for (i = 0; i < SPX5_PORTS; i++) {
+ port_itr = port->sparx5->ports[i];
+ if (!port_itr)
+ continue;
+ memcpy(&app_itr, app, sizeof(struct dcb_app));
+ if (del)
+ err = dcb_ieee_delapp(port_itr->ndev, &app_itr);
+ else
+ err = dcb_ieee_setapp(port_itr->ndev, &app_itr);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
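Because the hardware DSCP table is shared by all ports, one logical mapping becomes an app entry on every port netdev. A hedged sketch of pushing a single mapping through the helper above (the wrapper name and the DSCP/priority values are made up for illustration):

/* Illustrative only: map DSCP 46 to priority 5 across all ports */
static int example_map_dscp_globally(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.protocol = 46,
		.priority = 5,
	};

	return sparx5_dcb_ieee_dscp_setdel_app(dev, &app, false);
}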
+
static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
struct dcb_app app_itr;
@@ -146,7 +198,11 @@ static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
dcb_ieee_delapp(dev, &app_itr);
}
- err = dcb_ieee_setapp(dev, app);
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel_app(dev, app, false);
+ else
+ err = dcb_ieee_setapp(dev, app);
+
if (err)
goto out;
@@ -160,7 +216,11 @@ static int sparx5_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
int err;
- err = dcb_ieee_delapp(dev, app);
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel_app(dev, app, true);
+ else
+ err = dcb_ieee_delapp(dev, app);
+
if (err < 0)
return err;
@@ -1149,6 +1149,7 @@ void sparx5_port_enable(struct sparx5_port *port, bool enable)
int sparx5_port_qos_set(struct sparx5_port *port,
struct sparx5_port_qos *qos)
{
+ sparx5_port_qos_dscp_set(port, &qos->dscp);
sparx5_port_qos_pcp_set(port, &qos->pcp);
return 0;
@@ -1181,3 +1182,41 @@ int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
return 0;
}
+
+int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_dscp *qos)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ u8 *dscp = qos->map.map;
+ int i;
+
+ /* Enable/disable dscp and dp for qos classification.
+ * Disable rewrite of dscp values for now.
+ */
+ spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
+ ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
+ ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
+ ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
+ ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
+ ANA_CL_QOS_CFG(port->portno));
+
+ /* Map each dscp value to priority and dp */
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(dscp[i]) |
+ ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
+ ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
+ ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
+ ANA_CL_DSCP_CFG(i));
+ }
+
+ /* Set per-dscp trust */
+ if (qos->qos_enable) {
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
+ ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
+ ANA_CL_DSCP_CFG(i));
+ }
+ }
+
+ return 0;
+}
@@ -95,14 +95,25 @@ struct sparx5_port_qos_pcp_map {
u8 map[16];
};
+struct sparx5_port_qos_dscp_map {
+ u8 map[64];
+};
+
struct sparx5_port_qos_pcp {
struct sparx5_port_qos_pcp_map map;
bool qos_enable;
bool dp_enable;
};
+struct sparx5_port_qos_dscp {
+ struct sparx5_port_qos_dscp_map map;
+ bool qos_enable;
+ bool dp_enable;
+};
+
struct sparx5_port_qos {
struct sparx5_port_qos_pcp pcp;
+ struct sparx5_port_qos_dscp dscp;
};
int sparx5_port_qos_set(struct sparx5_port *port, struct sparx5_port_qos *qos);
@@ -110,4 +121,6 @@ int sparx5_port_qos_set(struct sparx5_port *port, struct sparx5_port_qos *qos);
int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
struct sparx5_port_qos_pcp *qos);
+int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_dscp *qos);
#endif /* __SPARX5_PORT_H__ */
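Taken together, the DCB glue above fills a struct sparx5_port_qos and hands it to sparx5_port_qos_set(). A rough sketch of that flow with hand-rolled values, assuming the driver's own headers (sparx5_main.h / sparx5_port.h) are in scope; the helper name and the round-robin mapping are invented for illustration, while in the driver the map comes from the DCB app table in sparx5_dcb_app_update():

/* Illustrative only: enable DSCP classification with a demo mapping */
static int example_apply_dscp_qos(struct sparx5_port *port)
{
	struct sparx5_port_qos qos = {0};
	int i;

	qos.dscp.qos_enable = true;
	qos.dscp.dp_enable = true;

	/* Spread DSCP 0..63 across the available priorities */
	for (i = 0; i < ARRAY_SIZE(qos.dscp.map.map); i++)
		qos.dscp.map.map[i] = i % SPX5_PRIOS;

	return sparx5_port_qos_set(port, &qos);
}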