author    Vladimir Oltean <vladimir.oltean@nxp.com>    2021-01-15 04:11:20 +0200
committer Jakub Kicinski <kuba@kernel.org>    2021-01-15 20:02:35 -0800
commit    f59fd9cab7305266f4148776c3b66329551a2a3a (patch)
tree      e5985aceec4f8ed86f8665daf0520bf14130d79c /drivers/net/ethernet/mscc/ocelot_net.c
parent    a4ae997adcbdf8ead133bafa5e9e2d6925c576b6 (diff)
net: mscc: ocelot: configure watermarks using devlink-sb
Using devlink-sb, we can configure 12/16 (the important 75%) of the
switch's controlling watermarks for congestion drops, and we can monitor
50% of the watermark occupancies (we can monitor the reservation
watermarks, but not the sharing watermarks, which are exposed as pool
sizes).

The following definitions can be made:

SB_BUF=0   # The devlink-sb for frame buffers
SB_REF=1   # The devlink-sb for frame references
POOL_ING=0 # The pool for ingress traffic. Both devlink-sb instances
           # have one of these.
POOL_EGR=1 # The pool for egress traffic. Both devlink-sb instances
           # have one of these.

Editing the hardware watermarks is done in the following way:

BUF_xxxx_I is accessed when sb=$SB_BUF and pool=$POOL_ING
REF_xxxx_I is accessed when sb=$SB_REF and pool=$POOL_ING
BUF_xxxx_E is accessed when sb=$SB_BUF and pool=$POOL_EGR
REF_xxxx_E is accessed when sb=$SB_REF and pool=$POOL_EGR

Configuring the sharing watermarks for COL_SHR(dp=0) is done implicitly
by modifying the corresponding pool size. By default, the pool size has
maximum size, so this can be skipped.

devlink sb pool set pci/0000:00:00.5 sb $SB_BUF pool $POOL_ING \
        size 129840 thtype static

Since by default there is no buffer reservation, the above command has
maxed out BUF_COL_SHR_I(dp=0).

Configuring the per-port reservation watermark (P_RSRV) is done in the
following way:

devlink sb port pool set pci/0000:00:00.5/0 sb $SB_BUF \
        pool $POOL_ING th 1000

The above command sets BUF_P_RSRV_I(port 0) to 1000 bytes. After this
command, the sharing watermarks are internally reconfigured with 1000
bytes less, i.e. from 129840 bytes to 128840 bytes.

Configuring the per-port-tc reservation watermarks (Q_RSRV) is done in
the following way:

for tc in {0..7}; do
        devlink sb tc bind set pci/0000:00:00.5/0 sb 0 tc $tc \
                type ingress pool $POOL_ING \
                th 3000
done

The above command sets BUF_Q_RSRV_I(port 0, tc 0..7) to 3000 bytes.
The sharing watermarks are again reconfigured with 24000 bytes less.

Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
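The reservation-watermark occupancies mentioned above (the 50% that can
be monitored) can be read back through the generic devlink-sb occupancy
commands, which land in the .sb_occ_snapshot, .sb_occ_max_clear,
.sb_occ_port_pool_get and .sb_occ_tc_port_bind_get callbacks wired up in
the diff below. A minimal sketch, reusing the pci/0000:00:00.5 device,
port 0 and the $SB_BUF definition from the examples above:

devlink sb occupancy snapshot pci/0000:00:00.5 sb $SB_BUF   # freeze current/max values (.sb_occ_snapshot)
devlink sb occupancy show pci/0000:00:00.5/0 sb $SB_BUF     # per-pool and per-tc occupancy of port 0
devlink sb occupancy clearmax pci/0000:00:00.5 sb $SB_BUF   # reset the max watermarks (.sb_occ_max_clear)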
Diffstat (limited to 'drivers/net/ethernet/mscc/ocelot_net.c')
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c  140
1 file changed, 140 insertions, 0 deletions
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 4485faefc2b1..a520fd485912 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1,14 +1,154 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Microsemi Ocelot Switch driver
*
+ * This contains glue logic between the switchdev driver operations and the
+ * mscc_ocelot_switch_lib.
+ *
* Copyright (c) 2017, 2019 Microsemi Corporation
+ * Copyright 2020-2021 NXP Semiconductors
*/
#include <linux/if_bridge.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
+static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
+{
+ return devlink_priv(dlp->devlink);
+}
+
+static int devlink_port_to_port(struct devlink_port *dlp)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+
+ return dlp - ocelot->devlink_ports;
+}
+
+static int ocelot_devlink_sb_pool_get(struct devlink *dl,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
+}
+
+static int ocelot_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
+ threshold_type, extack);
+}
+
+static int ocelot_devlink_sb_port_pool_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_threshold);
+}
+
+static int ocelot_devlink_sb_port_pool_set(struct devlink_port *dlp,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
+ threshold, extack);
+}
+
+static int
+ocelot_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
+ pool_type, p_pool_index,
+ p_threshold);
+}
+
+static int
+ocelot_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
+ pool_type, pool_index, threshold,
+ extack);
+}
+
+static int ocelot_devlink_sb_occ_snapshot(struct devlink *dl,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_occ_snapshot(ocelot, sb_index);
+}
+
+static int ocelot_devlink_sb_occ_max_clear(struct devlink *dl,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_occ_max_clear(ocelot, sb_index);
+}
+
+static int ocelot_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
+ unsigned int sb_index,
+ u16 pool_index, u32 *p_cur,
+ u32 *p_max)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_cur, p_max);
+}
+
+static int
+ocelot_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index,
+ tc_index, pool_type,
+ p_cur, p_max);
+}
+
const struct devlink_ops ocelot_devlink_ops = {
+ .sb_pool_get = ocelot_devlink_sb_pool_get,
+ .sb_pool_set = ocelot_devlink_sb_pool_set,
+ .sb_port_pool_get = ocelot_devlink_sb_port_pool_get,
+ .sb_port_pool_set = ocelot_devlink_sb_port_pool_set,
+ .sb_tc_pool_bind_get = ocelot_devlink_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = ocelot_devlink_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = ocelot_devlink_sb_occ_snapshot,
+ .sb_occ_max_clear = ocelot_devlink_sb_occ_max_clear,
+ .sb_occ_port_pool_get = ocelot_devlink_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = ocelot_devlink_sb_occ_tc_port_bind_get,
};
int ocelot_port_devlink_init(struct ocelot *ocelot, int port,