[dpdk-dev] [PATCH v7 3/5] net/softnic: add TM capabilities ops

Jasvinder Singh jasvinder.singh at intel.com
Mon Oct 9 14:58:44 CEST 2017


Implement the ethdev Traffic Management (TM) capability callbacks
(capabilities_get, level_capabilities_get and node_capabilities_get)
in the SoftNIC PMD.

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu at intel.com>
Signed-off-by: Jasvinder Singh <jasvinder.singh at intel.com>

Acked-by: Lu, Wenzhuo <wenzhuo.lu at intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu at intel.com>
Acked-by: Thomas Monjalon <thomas at monjalon.net>

---
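
For reference, a minimal sketch of how an application would exercise these
ops through the generic ethdev TM API (rte_tm.h). Port setup and most error
handling are omitted, and the width of port_id (uint8_t vs uint16_t) depends
on the DPDK version targeted:

	#include <stdio.h>
	#include <rte_tm.h>

	static int
	query_tm_caps(uint16_t port_id)
	{
		struct rte_tm_capabilities cap;
		struct rte_tm_error error;
		int ret;

		/* Dispatches to pmd_tm_capabilities_get() through pmd_tm_ops */
		ret = rte_tm_capabilities_get(port_id, &cap, &error);
		if (ret != 0)
			return ret; /* error.message describes the failure */

		printf("TM: %u levels, up to %u nodes\n",
			cap.n_levels_max, cap.n_nodes_max);
		return 0;
	}

The level and node variants follow the same pattern through
rte_tm_level_capabilities_get() and rte_tm_node_capabilities_get().
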
 drivers/net/softnic/rte_eth_softnic.c           |  12 +-
 drivers/net/softnic/rte_eth_softnic_internals.h |  32 ++
 drivers/net/softnic/rte_eth_softnic_tm.c        | 500 ++++++++++++++++++++++++
 3 files changed, 543 insertions(+), 1 deletion(-)

diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index 2f19159..34dceae 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -43,6 +43,7 @@
 #include <rte_errno.h>
 #include <rte_ring.h>
 #include <rte_sched.h>
+#include <rte_tm_driver.h>
 
 #include "rte_eth_softnic.h"
 #include "rte_eth_softnic_internals.h"
@@ -224,6 +225,15 @@ pmd_link_update(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+static int
+pmd_tm_ops_get(struct rte_eth_dev *dev, void *arg)
+{
+	*(const struct rte_tm_ops **)arg =
+		(tm_enabled(dev)) ? &pmd_tm_ops : NULL;
+
+	return 0;
+}
+
 static const struct eth_dev_ops pmd_ops = {
 	.dev_configure = pmd_dev_configure,
 	.dev_start = pmd_dev_start,
@@ -233,7 +243,7 @@ static const struct eth_dev_ops pmd_ops = {
 	.dev_infos_get = pmd_dev_infos_get,
 	.rx_queue_setup = pmd_rx_queue_setup,
 	.tx_queue_setup = pmd_tx_queue_setup,
-	.tm_ops_get = NULL,
+	.tm_ops_get = pmd_tm_ops_get,
 };
 
 static uint16_t
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
index fd9cbbe..75d9387 100644
--- a/drivers/net/softnic/rte_eth_softnic_internals.h
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -39,6 +39,7 @@
 #include <rte_mbuf.h>
 #include <rte_sched.h>
 #include <rte_ethdev.h>
+#include <rte_tm_driver.h>
 
 #include "rte_eth_softnic.h"
 
@@ -137,8 +138,26 @@ enum tm_node_level {
 	TM_NODE_LEVEL_MAX,
 };
 
+/* TM Node */
+struct tm_node {
+	TAILQ_ENTRY(tm_node) node;
+	uint32_t node_id;
+	uint32_t parent_node_id;
+	uint32_t priority;
+	uint32_t weight;
+	uint32_t level;
+	struct tm_node *parent_node;
+	struct rte_tm_node_params params;
+	struct rte_tm_node_stats stats;
+	uint32_t n_children;
+};
+
+TAILQ_HEAD(tm_node_list, tm_node);
+
 /* TM Hierarchy Specification */
 struct tm_hierarchy {
+	struct tm_node_list nodes;
+
 	uint32_t n_tm_nodes[TM_NODE_LEVEL_MAX];
 };
 
@@ -191,6 +210,11 @@ struct pmd_rx_queue {
 	} hard;
 };
 
+/**
+ * Traffic Management (TM) Operations
+ */
+extern const struct rte_tm_ops pmd_tm_ops;
+
 int
 tm_params_check(struct pmd_params *params, uint32_t hard_rate);
 
@@ -207,6 +231,14 @@ void
 tm_stop(struct pmd_internals *p);
 
 static inline int
+tm_enabled(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+
+	return (p->params.soft.flags & PMD_FEATURE_TM);
+}
+
+static inline int
 tm_used(struct rte_eth_dev *dev)
 {
 	struct pmd_internals *p = dev->data->dev_private;
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index 165abfe..73274d4 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -179,3 +179,503 @@ tm_stop(struct pmd_internals *p)
 	if (p->soft.tm.sched)
 		rte_sched_port_free(p->soft.tm.sched);
 }
+
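+/* Linear search of the TM hierarchy node list by node id. */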
+static struct tm_node *
+tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_node_list *nl = &p->soft.tm.h.nodes;
+	struct tm_node *n;
+
+	TAILQ_FOREACH(n, nl, node)
+		if (n->node_id == node_id)
+			return n;
+
+	return NULL;
+}
+
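+/* Max number of nodes per hierarchy level, derived from the number of
+ * scheduler queues configured for the port (e.g. with the rte_sched
+ * defaults of 4 queues per TC and 4 TCs per pipe, 65536 queues yield
+ * at most 16384 TC nodes and 4096 pipe/subport nodes).
+ */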
+static uint32_t
+tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	uint32_t n_queues_max = p->params.soft.tm.nb_queues;
+	uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
+	uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+	uint32_t n_subports_max = n_pipes_max;
+	uint32_t n_root_max = 1;
+
+	switch (level) {
+	case TM_NODE_LEVEL_PORT:
+		return n_root_max;
+	case TM_NODE_LEVEL_SUBPORT:
+		return n_subports_max;
+	case TM_NODE_LEVEL_PIPE:
+		return n_pipes_max;
+	case TM_NODE_LEVEL_TC:
+		return n_tc_max;
+	case TM_NODE_LEVEL_QUEUE:
+	default:
+		return n_queues_max;
+	}
+}
+
+#ifdef RTE_SCHED_RED
+#define WRED_SUPPORTED						1
+#else
+#define WRED_SUPPORTED						0
+#endif
+
+#define STATS_MASK_DEFAULT					\
+	(RTE_TM_STATS_N_PKTS |					\
+	RTE_TM_STATS_N_BYTES |					\
+	RTE_TM_STATS_N_PKTS_GREEN_DROPPED |			\
+	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
+
+#define STATS_MASK_QUEUE						\
+	(STATS_MASK_DEFAULT |					\
+	RTE_TM_STATS_N_PKTS_QUEUED)
+
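+/* Port-independent TM capability defaults; the device-dependent fields
+ * are computed at run time in pmd_tm_capabilities_get().
+ */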
+static const struct rte_tm_capabilities tm_cap = {
+	.n_nodes_max = UINT32_MAX,
+	.n_levels_max = TM_NODE_LEVEL_MAX,
+
+	.non_leaf_nodes_identical = 0,
+	.leaf_nodes_identical = 1,
+
+	.shaper_n_max = UINT32_MAX,
+	.shaper_private_n_max = UINT32_MAX,
+	.shaper_private_dual_rate_n_max = 0,
+	.shaper_private_rate_min = 1,
+	.shaper_private_rate_max = UINT32_MAX,
+
+	.shaper_shared_n_max = UINT32_MAX,
+	.shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
+	.shaper_shared_n_shapers_per_node_max = 1,
+	.shaper_shared_dual_rate_n_max = 0,
+	.shaper_shared_rate_min = 1,
+	.shaper_shared_rate_max = UINT32_MAX,
+
+	.shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
+	.shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
+
+	.sched_n_children_max = UINT32_MAX,
+	.sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+	.sched_wfq_n_children_per_group_max = UINT32_MAX,
+	.sched_wfq_n_groups_max = 1,
+	.sched_wfq_weight_max = UINT32_MAX,
+
+	.cman_head_drop_supported = 0,
+	.cman_wred_context_n_max = 0,
+	.cman_wred_context_private_n_max = 0,
+	.cman_wred_context_shared_n_max = 0,
+	.cman_wred_context_shared_n_nodes_per_context_max = 0,
+	.cman_wred_context_shared_n_contexts_per_node_max = 0,
+
+	.mark_vlan_dei_supported = {0, 0, 0},
+	.mark_ip_ecn_tcp_supported = {0, 0, 0},
+	.mark_ip_ecn_sctp_supported = {0, 0, 0},
+	.mark_ip_dscp_supported = {0, 0, 0},
+
+	.dynamic_update_mask = 0,
+
+	.stats_mask = STATS_MASK_QUEUE,
+};
+
+/* Traffic manager capabilities get */
+static int
+pmd_tm_capabilities_get(struct rte_eth_dev *dev,
+	struct rte_tm_capabilities *cap,
+	struct rte_tm_error *error)
+{
+	if (cap == NULL)
+		return -rte_tm_error_set(error,
+		   EINVAL,
+		   RTE_TM_ERROR_TYPE_CAPABILITIES,
+		   NULL,
+		   rte_strerror(EINVAL));
+
+	memcpy(cap, &tm_cap, sizeof(*cap));
+
+	cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
+		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
+		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
+		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
+		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
+
+	cap->shaper_private_n_max =
+		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
+		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
+		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
+		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);
+
+	cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
+		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);
+
+	cap->shaper_n_max = cap->shaper_private_n_max +
+		cap->shaper_shared_n_max;
+
+	cap->shaper_shared_n_nodes_per_shaper_max =
+		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);
+
+	cap->sched_n_children_max = RTE_MAX(
+		tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
+		(uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
+
+	cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
+
+	if (WRED_SUPPORTED)
+		cap->cman_wred_context_private_n_max =
+			tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
+
+	cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
+		cap->cman_wred_context_shared_n_max;
+
+	return 0;
+}
+
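+/* Per-level capability defaults, indexed by tm_node_level; the node
+ * count limits are refined at run time in pmd_tm_level_capabilities_get().
+ */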
+static const struct rte_tm_level_capabilities tm_level_cap[] = {
+	[TM_NODE_LEVEL_PORT] = {
+		.n_nodes_max = 1,
+		.n_nodes_nonleaf_max = 1,
+		.n_nodes_leaf_max = 0,
+		.non_leaf_nodes_identical = 1,
+		.leaf_nodes_identical = 0,
+
+		.nonleaf = {
+			.shaper_private_supported = 1,
+			.shaper_private_dual_rate_supported = 0,
+			.shaper_private_rate_min = 1,
+			.shaper_private_rate_max = UINT32_MAX,
+			.shaper_shared_n_max = 0,
+
+			.sched_n_children_max = UINT32_MAX,
+			.sched_sp_n_priorities_max = 1,
+			.sched_wfq_n_children_per_group_max = UINT32_MAX,
+			.sched_wfq_n_groups_max = 1,
+			.sched_wfq_weight_max = 1,
+
+			.stats_mask = STATS_MASK_DEFAULT,
+		},
+	},
+
+	[TM_NODE_LEVEL_SUBPORT] = {
+		.n_nodes_max = UINT32_MAX,
+		.n_nodes_nonleaf_max = UINT32_MAX,
+		.n_nodes_leaf_max = 0,
+		.non_leaf_nodes_identical = 1,
+		.leaf_nodes_identical = 0,
+
+		.nonleaf = {
+			.shaper_private_supported = 1,
+			.shaper_private_dual_rate_supported = 0,
+			.shaper_private_rate_min = 1,
+			.shaper_private_rate_max = UINT32_MAX,
+			.shaper_shared_n_max = 0,
+
+			.sched_n_children_max = UINT32_MAX,
+			.sched_sp_n_priorities_max = 1,
+			.sched_wfq_n_children_per_group_max = UINT32_MAX,
+			.sched_wfq_n_groups_max = 1,
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+			.sched_wfq_weight_max = UINT32_MAX,
+#else
+			.sched_wfq_weight_max = 1,
+#endif
+			.stats_mask = STATS_MASK_DEFAULT,
+		},
+	},
+
+	[TM_NODE_LEVEL_PIPE] = {
+		.n_nodes_max = UINT32_MAX,
+		.n_nodes_nonleaf_max = UINT32_MAX,
+		.n_nodes_leaf_max = 0,
+		.non_leaf_nodes_identical = 1,
+		.leaf_nodes_identical = 0,
+
+		.nonleaf = {
+			.shaper_private_supported = 1,
+			.shaper_private_dual_rate_supported = 0,
+			.shaper_private_rate_min = 1,
+			.shaper_private_rate_max = UINT32_MAX,
+			.shaper_shared_n_max = 0,
+
+			.sched_n_children_max =
+				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+			.sched_sp_n_priorities_max =
+				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+			.sched_wfq_n_children_per_group_max = 1,
+			.sched_wfq_n_groups_max = 0,
+			.sched_wfq_weight_max = 1,
+
+			.stats_mask = STATS_MASK_DEFAULT,
+		},
+	},
+
+	[TM_NODE_LEVEL_TC] = {
+		.n_nodes_max = UINT32_MAX,
+		.n_nodes_nonleaf_max = UINT32_MAX,
+		.n_nodes_leaf_max = 0,
+		.non_leaf_nodes_identical = 1,
+		.leaf_nodes_identical = 0,
+
+		.nonleaf = {
+			.shaper_private_supported = 1,
+			.shaper_private_dual_rate_supported = 0,
+			.shaper_private_rate_min = 1,
+			.shaper_private_rate_max = UINT32_MAX,
+			.shaper_shared_n_max = 1,
+
+			.sched_n_children_max =
+				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+			.sched_sp_n_priorities_max = 1,
+			.sched_wfq_n_children_per_group_max =
+				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+			.sched_wfq_n_groups_max = 1,
+			.sched_wfq_weight_max = UINT32_MAX,
+
+			.stats_mask = STATS_MASK_DEFAULT,
+		},
+	},
+
+	[TM_NODE_LEVEL_QUEUE] = {
+		.n_nodes_max = UINT32_MAX,
+		.n_nodes_nonleaf_max = 0,
+		.n_nodes_leaf_max = UINT32_MAX,
+		.non_leaf_nodes_identical = 0,
+		.leaf_nodes_identical = 1,
+
+		.leaf = {
+			.shaper_private_supported = 0,
+			.shaper_private_dual_rate_supported = 0,
+			.shaper_private_rate_min = 0,
+			.shaper_private_rate_max = 0,
+			.shaper_shared_n_max = 0,
+
+			.cman_head_drop_supported = 0,
+			.cman_wred_context_private_supported = WRED_SUPPORTED,
+			.cman_wred_context_shared_n_max = 0,
+
+			.stats_mask = STATS_MASK_QUEUE,
+		},
+	},
+};
+
+/* Traffic manager level capabilities get */
+static int
+pmd_tm_level_capabilities_get(struct rte_eth_dev *dev,
+	uint32_t level_id,
+	struct rte_tm_level_capabilities *cap,
+	struct rte_tm_error *error)
+{
+	if (cap == NULL)
+		return -rte_tm_error_set(error,
+		   EINVAL,
+		   RTE_TM_ERROR_TYPE_CAPABILITIES,
+		   NULL,
+		   rte_strerror(EINVAL));
+
+	if (level_id >= TM_NODE_LEVEL_MAX)
+		return -rte_tm_error_set(error,
+		   EINVAL,
+		   RTE_TM_ERROR_TYPE_LEVEL_ID,
+		   NULL,
+		   rte_strerror(EINVAL));
+
+	memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));
+
+	switch (level_id) {
+	case TM_NODE_LEVEL_PORT:
+		cap->nonleaf.sched_n_children_max =
+			tm_level_get_max_nodes(dev,
+				TM_NODE_LEVEL_SUBPORT);
+		cap->nonleaf.sched_wfq_n_children_per_group_max =
+			cap->nonleaf.sched_n_children_max;
+		break;
+
+	case TM_NODE_LEVEL_SUBPORT:
+		cap->n_nodes_max = tm_level_get_max_nodes(dev,
+			TM_NODE_LEVEL_SUBPORT);
+		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+		cap->nonleaf.sched_n_children_max =
+			tm_level_get_max_nodes(dev,
+				TM_NODE_LEVEL_PIPE);
+		cap->nonleaf.sched_wfq_n_children_per_group_max =
+			cap->nonleaf.sched_n_children_max;
+		break;
+
+	case TM_NODE_LEVEL_PIPE:
+		cap->n_nodes_max = tm_level_get_max_nodes(dev,
+			TM_NODE_LEVEL_PIPE);
+		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+		break;
+
+	case TM_NODE_LEVEL_TC:
+		cap->n_nodes_max = tm_level_get_max_nodes(dev,
+			TM_NODE_LEVEL_TC);
+		cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+		break;
+
+	case TM_NODE_LEVEL_QUEUE:
+	default:
+		cap->n_nodes_max = tm_level_get_max_nodes(dev,
+			TM_NODE_LEVEL_QUEUE);
+		cap->n_nodes_leaf_max = cap->n_nodes_max;
+		break;
+	}
+
+	return 0;
+}
+
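+/* Per-node capability defaults, indexed by the level of the node; the
+ * fan-out limits are refined at run time in pmd_tm_node_capabilities_get().
+ */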
+static const struct rte_tm_node_capabilities tm_node_cap[] = {
+	[TM_NODE_LEVEL_PORT] = {
+		.shaper_private_supported = 1,
+		.shaper_private_dual_rate_supported = 0,
+		.shaper_private_rate_min = 1,
+		.shaper_private_rate_max = UINT32_MAX,
+		.shaper_shared_n_max = 0,
+
+		.nonleaf = {
+			.sched_n_children_max = UINT32_MAX,
+			.sched_sp_n_priorities_max = 1,
+			.sched_wfq_n_children_per_group_max = UINT32_MAX,
+			.sched_wfq_n_groups_max = 1,
+			.sched_wfq_weight_max = 1,
+		},
+
+		.stats_mask = STATS_MASK_DEFAULT,
+	},
+
+	[TM_NODE_LEVEL_SUBPORT] = {
+		.shaper_private_supported = 1,
+		.shaper_private_dual_rate_supported = 0,
+		.shaper_private_rate_min = 1,
+		.shaper_private_rate_max = UINT32_MAX,
+		.shaper_shared_n_max = 0,
+
+		.nonleaf = {
+			.sched_n_children_max = UINT32_MAX,
+			.sched_sp_n_priorities_max = 1,
+			.sched_wfq_n_children_per_group_max = UINT32_MAX,
+			.sched_wfq_n_groups_max = 1,
+			.sched_wfq_weight_max = UINT32_MAX,
+		},
+
+		.stats_mask = STATS_MASK_DEFAULT,
+	},
+
+	[TM_NODE_LEVEL_PIPE] = {
+		.shaper_private_supported = 1,
+		.shaper_private_dual_rate_supported = 0,
+		.shaper_private_rate_min = 1,
+		.shaper_private_rate_max = UINT32_MAX,
+		.shaper_shared_n_max = 0,
+
+		.nonleaf = {
+			.sched_n_children_max =
+				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+			.sched_sp_n_priorities_max =
+				RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+			.sched_wfq_n_children_per_group_max = 1,
+			.sched_wfq_n_groups_max = 0,
+			.sched_wfq_weight_max = 1,
+		},
+
+		.stats_mask = STATS_MASK_DEFAULT,
+	},
+
+	[TM_NODE_LEVEL_TC] = {
+		.shaper_private_supported = 1,
+		.shaper_private_dual_rate_supported = 0,
+		.shaper_private_rate_min = 1,
+		.shaper_private_rate_max = UINT32_MAX,
+		.shaper_shared_n_max = 1,
+
+		.nonleaf = {
+			.sched_n_children_max =
+				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+			.sched_sp_n_priorities_max = 1,
+			.sched_wfq_n_children_per_group_max =
+				RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
+			.sched_wfq_n_groups_max = 1,
+			.sched_wfq_weight_max = UINT32_MAX,
+		},
+
+		.stats_mask = STATS_MASK_DEFAULT,
+	},
+
+	[TM_NODE_LEVEL_QUEUE] = {
+		.shaper_private_supported = 0,
+		.shaper_private_dual_rate_supported = 0,
+		.shaper_private_rate_min = 0,
+		.shaper_private_rate_max = 0,
+		.shaper_shared_n_max = 0,
+
+		.leaf = {
+			.cman_head_drop_supported = 0,
+			.cman_wred_context_private_supported = WRED_SUPPORTED,
+			.cman_wred_context_shared_n_max = 0,
+		},
+
+		.stats_mask = STATS_MASK_QUEUE,
+	},
+};
+
+/* Traffic manager node capabilities get */
+static int
+pmd_tm_node_capabilities_get(struct rte_eth_dev *dev,
+	uint32_t node_id,
+	struct rte_tm_node_capabilities *cap,
+	struct rte_tm_error *error)
+{
+	struct tm_node *tm_node;
+
+	if (cap == NULL)
+		return -rte_tm_error_set(error,
+		   EINVAL,
+		   RTE_TM_ERROR_TYPE_CAPABILITIES,
+		   NULL,
+		   rte_strerror(EINVAL));
+
+	tm_node = tm_node_search(dev, node_id);
+	if (tm_node == NULL)
+		return -rte_tm_error_set(error,
+		   EINVAL,
+		   RTE_TM_ERROR_TYPE_NODE_ID,
+		   NULL,
+		   rte_strerror(EINVAL));
+
+	memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));
+
+	switch (tm_node->level) {
+	case TM_NODE_LEVEL_PORT:
+		cap->nonleaf.sched_n_children_max =
+			tm_level_get_max_nodes(dev,
+				TM_NODE_LEVEL_SUBPORT);
+		cap->nonleaf.sched_wfq_n_children_per_group_max =
+			cap->nonleaf.sched_n_children_max;
+		break;
+
+	case TM_NODE_LEVEL_SUBPORT:
+		cap->nonleaf.sched_n_children_max =
+			tm_level_get_max_nodes(dev,
+				TM_NODE_LEVEL_PIPE);
+		cap->nonleaf.sched_wfq_n_children_per_group_max =
+			cap->nonleaf.sched_n_children_max;
+		break;
+
+	case TM_NODE_LEVEL_PIPE:
+	case TM_NODE_LEVEL_TC:
+	case TM_NODE_LEVEL_QUEUE:
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+const struct rte_tm_ops pmd_tm_ops = {
+	.capabilities_get = pmd_tm_capabilities_get,
+	.level_capabilities_get = pmd_tm_level_capabilities_get,
+	.node_capabilities_get = pmd_tm_node_capabilities_get,
+};
-- 
2.9.3


