[dpdk-dev] [PATCH v2 11/11] net/octeontx2: add tm capability callbacks

Nithin Dabilpuram nithind1988 at gmail.com
Thu Apr 2 21:34:53 CEST 2020


From: Krzysztof Kanas <kkanas at marvell.com>

Add Traffic Management capability callbacks to report global,
per-level and per-node capabilities. This patch also adds
documentation on Traffic Management support.

Signed-off-by: Nithin Dabilpuram <ndabilpuram at marvell.com>
Signed-off-by: Krzysztof Kanas <kkanas at marvell.com>
---
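[Note for reviewers, illustration only and not part of the patch: once
.tm_ops_get is wired up below, an application reaches these capability
callbacks through the generic rte_tm API. A minimal sketch, assuming
'port_id' names an already configured OCTEON TX2 port and 'show_tm_caps'
is a hypothetical helper:]

#include <stdint.h>
#include <stdio.h>

#include <rte_tm.h>

/* Hypothetical helper: query the global capabilities that
 * otx2_nix_tm_capa_get() fills in for the given port.
 */
static int
show_tm_caps(uint16_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error err;
	int rc;

	rc = rte_tm_capabilities_get(port_id, &cap, &err);
	if (rc) {
		printf("capabilities_get failed: %s\n",
		       err.message ? err.message : "unknown");
		return rc;
	}

	printf("nodes max %u, levels max %u, SP priorities max %u\n",
	       cap.n_nodes_max, cap.n_levels_max,
	       cap.sched_sp_n_priorities_max);
	return 0;
}

[On failure the rte_tm_error object carries back the error type and
message set by the driver, as in the callbacks below.]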
 doc/guides/nics/features/octeontx2.ini |   1 +
 doc/guides/nics/octeontx2.rst          |  15 +++
 doc/guides/rel_notes/release_20_05.rst |   8 ++
 drivers/net/octeontx2/otx2_ethdev.c    |   1 +
 drivers/net/octeontx2/otx2_tm.c        | 232 +++++++++++++++++++++++++++++++++
 drivers/net/octeontx2/otx2_tm.h        |   1 +
 6 files changed, 258 insertions(+)
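[Likewise illustrative only: a sketch walking the per-level capabilities
and reading one node's capabilities, matching the level and node callbacks
added in otx2_tm.c. 'node_id' is assumed to refer to an existing TM node,
e.g. one created earlier with rte_tm_node_add():]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#include <rte_tm.h>

/* Hypothetical helper: dump per-level capabilities for every level the
 * PMD reports, then the capabilities of one node.
 */
static void
show_level_and_node_caps(uint16_t port_id, uint32_t node_id)
{
	struct rte_tm_level_capabilities lcap;
	struct rte_tm_node_capabilities ncap;
	struct rte_tm_capabilities cap;
	struct rte_tm_error err;
	uint32_t lvl;

	if (rte_tm_capabilities_get(port_id, &cap, &err) != 0)
		return;

	for (lvl = 0; lvl < cap.n_levels_max; lvl++) {
		if (rte_tm_level_capabilities_get(port_id, lvl, &lcap, &err))
			continue;
		printf("level %u: nodes max %u (non-leaf %u, leaf %u)\n",
		       lvl, lcap.n_nodes_max, lcap.n_nodes_nonleaf_max,
		       lcap.n_nodes_leaf_max);
	}

	if (rte_tm_node_capabilities_get(port_id, node_id, &ncap, &err) == 0)
		printf("node %u: stats mask 0x%" PRIx64 "\n",
		       node_id, ncap.stats_mask);
}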

diff --git a/doc/guides/nics/features/octeontx2.ini b/doc/guides/nics/features/octeontx2.ini
index 473fe56..fb13517 100644
--- a/doc/guides/nics/features/octeontx2.ini
+++ b/doc/guides/nics/features/octeontx2.ini
@@ -31,6 +31,7 @@ Inline protocol      = Y
 VLAN filter          = Y
 Flow control         = Y
 Flow API             = Y
+Rate limitation      = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
 VLAN offload         = Y
diff --git a/doc/guides/nics/octeontx2.rst b/doc/guides/nics/octeontx2.rst
index 60187ec..6b885d6 100644
--- a/doc/guides/nics/octeontx2.rst
+++ b/doc/guides/nics/octeontx2.rst
@@ -39,6 +39,7 @@ Features of the OCTEON TX2 Ethdev PMD are:
 - HW offloaded `ethdev Rx queue` to `eventdev event queue` packet injection
 - Support Rx interrupt
 - Inline IPsec processing support
+- :ref:`Traffic Management API <tmapi>`
 
 Prerequisites
 -------------
@@ -213,6 +214,20 @@ Runtime Config Options
    parameters to all the PCIe devices if application requires to configure on
    all the ethdev ports.
 
+.. _tmapi:
+
+Traffic Management API
+----------------------
+
+The OCTEON TX2 PMD supports the generic DPDK Traffic Management API, which
+allows configuring the following features:
+
+1. Hierarchical scheduling
+2. Single rate - two color, two rate - three color shaping
+
+Both DWRR and Static Priority (SP) hierarchical scheduling are supported.
+Every parent can have at most 10 SP children and an unlimited number of DWRR
+children. Both the PF and VF support the traffic management API, with the PF
+supporting 6 levels and the VF supporting 5 levels of topology.
+
 Limitations
 -----------
 
diff --git a/doc/guides/rel_notes/release_20_05.rst b/doc/guides/rel_notes/release_20_05.rst
index 000bbf5..47a9825 100644
--- a/doc/guides/rel_notes/release_20_05.rst
+++ b/doc/guides/rel_notes/release_20_05.rst
@@ -62,6 +62,14 @@ New Features
 
   * Added support for matching on IPv4 Time To Live and IPv6 Hop Limit.
 
+* **Updated Marvell OCTEON TX2 ethdev driver.**
+
+  Updated Marvell OCTEON TX2 ethdev driver with traffic manager support,
+  including the following features:
+
+  * Hierarchical scheduling with DWRR and SP.
+  * Single rate - two color, two rate - three color shaping.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index 78b7f3a..599a14c 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -2026,6 +2026,7 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
 	.link_update              = otx2_nix_link_update,
 	.tx_queue_setup           = otx2_nix_tx_queue_setup,
 	.tx_queue_release         = otx2_nix_tx_queue_release,
+	.tm_ops_get               = otx2_nix_tm_ops_get,
 	.rx_queue_setup           = otx2_nix_rx_queue_setup,
 	.rx_queue_release         = otx2_nix_rx_queue_release,
 	.dev_start                = otx2_nix_dev_start,
diff --git a/drivers/net/octeontx2/otx2_tm.c b/drivers/net/octeontx2/otx2_tm.c
index de5a59b..a2369e3 100644
--- a/drivers/net/octeontx2/otx2_tm.c
+++ b/drivers/net/octeontx2/otx2_tm.c
@@ -1834,7 +1834,217 @@ otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
 		*is_leaf = true;
 	else
 		*is_leaf = false;
+	return 0;
+}
 
+static int
+otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
+		     struct rte_tm_capabilities *cap,
+		     struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_mbox *mbox = dev->mbox;
+	int rc, max_nr_nodes = 0, i;
+	struct free_rsrcs_rsp *rsp;
+
+	memset(cap, 0, sizeof(*cap));
+
+	otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "unexpected fatal error";
+		return rc;
+	}
+
+	for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
+		max_nr_nodes += rsp->schq[i];
+
+	cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
+	/* TL1 level is reserved for PF */
+	cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
+				OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1;
+	cap->non_leaf_nodes_identical = 1;
+	cap->leaf_nodes_identical = 1;
+
+	/* Shaper Capabilities */
+	cap->shaper_private_n_max = max_nr_nodes;
+	cap->shaper_n_max = max_nr_nodes;
+	cap->shaper_private_dual_rate_n_max = max_nr_nodes;
+	cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+	cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+	cap->shaper_pkt_length_adjust_min = 0;
+	cap->shaper_pkt_length_adjust_max = 0;
+
+	/* Scheduling Capabilities */
+	cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ];
+	cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX;
+	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
+	cap->sched_wfq_n_groups_max = 1;
+	cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+
+	cap->dynamic_update_mask =
+		RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
+		RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
+	cap->stats_mask =
+		RTE_TM_STATS_N_PKTS |
+		RTE_TM_STATS_N_BYTES |
+		RTE_TM_STATS_N_PKTS_RED_DROPPED |
+		RTE_TM_STATS_N_BYTES_RED_DROPPED;
+
+	for (i = 0; i < RTE_COLORS; i++) {
+		cap->mark_vlan_dei_supported[i] = false;
+		cap->mark_ip_ecn_tcp_supported[i] = false;
+		cap->mark_ip_dscp_supported[i] = false;
+	}
+
+	return 0;
+}
+
+static int
+otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
+				   struct rte_tm_level_capabilities *cap,
+				   struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_mbox *mbox = dev->mbox;
+	struct free_rsrcs_rsp *rsp;
+	uint16_t hw_lvl;
+	int rc;
+
+	memset(cap, 0, sizeof(*cap));
+
+	otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "unexpected fatal error";
+		return rc;
+	}
+
+	hw_lvl = nix_tm_lvl2nix(dev, lvl);
+
+	if (nix_tm_is_leaf(dev, lvl)) {
+		/* Leaf */
+		cap->n_nodes_max = dev->tm_leaf_cnt;
+		cap->n_nodes_leaf_max = dev->tm_leaf_cnt;
+		cap->leaf_nodes_identical = 1;
+		cap->leaf.stats_mask =
+			RTE_TM_STATS_N_PKTS |
+			RTE_TM_STATS_N_BYTES;
+
+	} else if (lvl == OTX2_TM_LVL_ROOT) {
+		/* Root node, aka TL2(vf)/TL1(pf) */
+		cap->n_nodes_max = 1;
+		cap->n_nodes_nonleaf_max = 1;
+		cap->non_leaf_nodes_identical = 1;
+
+		cap->nonleaf.shaper_private_supported = true;
+		cap->nonleaf.shaper_private_dual_rate_supported =
+			nix_tm_have_tl1_access(dev) ? false : true;
+		cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+		cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+
+		cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
+		cap->nonleaf.sched_sp_n_priorities_max =
+					nix_max_prio(dev, hw_lvl) + 1;
+		cap->nonleaf.sched_wfq_n_groups_max = 1;
+		cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+
+		if (nix_tm_have_tl1_access(dev))
+			cap->nonleaf.stats_mask =
+				RTE_TM_STATS_N_PKTS_RED_DROPPED |
+				RTE_TM_STATS_N_BYTES_RED_DROPPED;
+	} else if ((lvl < OTX2_TM_LVL_MAX) &&
+		   (hw_lvl < NIX_TXSCH_LVL_CNT)) {
+		/* TL2, TL3, TL4, MDQ */
+		cap->n_nodes_max = rsp->schq[hw_lvl];
+		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+		cap->non_leaf_nodes_identical = 1;
+
+		cap->nonleaf.shaper_private_supported = true;
+		cap->nonleaf.shaper_private_dual_rate_supported = true;
+		cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+		cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+
+		/* MDQ doesn't support Strict Priority */
+		if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+			cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
+		else
+			cap->nonleaf.sched_n_children_max =
+				rsp->schq[hw_lvl - 1];
+		cap->nonleaf.sched_sp_n_priorities_max =
+			nix_max_prio(dev, hw_lvl) + 1;
+		cap->nonleaf.sched_wfq_n_groups_max = 1;
+		cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+	} else {
+		/* unsupported level */
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int
+otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+			  struct rte_tm_node_capabilities *cap,
+			  struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_mbox *mbox = dev->mbox;
+	struct otx2_nix_tm_node *tm_node;
+	struct free_rsrcs_rsp *rsp;
+	int rc, hw_lvl, lvl;
+
+	memset(cap, 0, sizeof(*cap));
+
+	tm_node = nix_tm_node_search(dev, node_id, true);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	hw_lvl = tm_node->hw_lvl;
+	lvl = tm_node->lvl;
+
+	/* Leaf node */
+	if (nix_tm_is_leaf(dev, lvl)) {
+		cap->stats_mask = RTE_TM_STATS_N_PKTS |
+					RTE_TM_STATS_N_BYTES;
+		return 0;
+	}
+
+	otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "unexpected fatal error";
+		return rc;
+	}
+
+	/* Non Leaf Shaper */
+	cap->shaper_private_supported = true;
+	cap->shaper_private_dual_rate_supported =
+		(hw_lvl == NIX_TXSCH_LVL_TL1) ? false : true;
+	cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+	cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+
+	/* Non Leaf Scheduler */
+	if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+		cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
+	else
+		cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
+
+	cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
+	cap->nonleaf.sched_wfq_n_children_per_group_max =
+		cap->nonleaf.sched_n_children_max;
+	cap->nonleaf.sched_wfq_n_groups_max = 1;
+	cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+
+	if (hw_lvl == NIX_TXSCH_LVL_TL1)
+		cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+			RTE_TM_STATS_N_BYTES_RED_DROPPED;
 	return 0;
 }
 
@@ -2515,6 +2725,10 @@ otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
 const struct rte_tm_ops otx2_tm_ops = {
 	.node_type_get = otx2_nix_tm_node_type_get,
 
+	.capabilities_get = otx2_nix_tm_capa_get,
+	.level_capabilities_get = otx2_nix_tm_level_capa_get,
+	.node_capabilities_get = otx2_nix_tm_node_capa_get,
+
 	.shaper_profile_add = otx2_nix_tm_shaper_profile_add,
 	.shaper_profile_delete = otx2_nix_tm_shaper_profile_delete,
 
@@ -2911,6 +3125,24 @@ otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
 }
 
 int
+otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+	if (!arg)
+		return -EINVAL;
+
+	/* Check for supported revisions */
+	if (otx2_dev_is_95xx_Ax(dev) ||
+	    otx2_dev_is_96xx_Ax(dev))
+		return -EINVAL;
+
+	*(const void **)arg = &otx2_tm_ops;
+
+	return 0;
+}
+
+int
 otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
 {
 	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
diff --git a/drivers/net/octeontx2/otx2_tm.h b/drivers/net/octeontx2/otx2_tm.h
index 7b1672e..9675182 100644
--- a/drivers/net/octeontx2/otx2_tm.h
+++ b/drivers/net/octeontx2/otx2_tm.h
@@ -19,6 +19,7 @@ struct otx2_eth_dev;
 void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev);
 int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev);
 int otx2_nix_tm_fini(struct rte_eth_dev *eth_dev);
+int otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
 int otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
 			      uint32_t *rr_quantum, uint16_t *smq);
 int otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
-- 
2.8.4


