[dpdk-dev] [PATCH 10/11] net/octeontx2: add tx queue ratelimit callback

Nithin Dabilpuram ndabilpuram at marvell.com
Thu Mar 12 12:19:06 CET 2020


From: Krzysztof Kanas <kkanas at marvell.com>

Add Tx queue rate limiting support. This support is mutually
exclusive with TM support, i.e. once a TM hierarchy is
configured, the Tx queue rate limit configuration is no longer valid.

Signed-off-by: Krzysztof Kanas <kkanas at marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram at marvell.com>
---
 drivers/net/octeontx2/otx2_ethdev.c |   1 +
 drivers/net/octeontx2/otx2_tm.c     | 241 +++++++++++++++++++++++++++++++++++-
 drivers/net/octeontx2/otx2_tm.h     |   3 +
 3 files changed, 243 insertions(+), 2 deletions(-)
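
A minimal usage sketch (not part of the diff): applications reach this
callback through the generic ethdev API rte_eth_set_queue_rate_limit(),
whose tx_rate argument is in Mbps. The helper below is hypothetical and
assumes port_id refers to a configured octeontx2 port; as the driver code
shows, the PMD returns -EBUSY when switching to the rate-limit topology
requires the port to be stopped first.

    #include <rte_ethdev.h>

    static int
    limit_txq(uint16_t port_id, uint16_t queue_idx, uint16_t rate_mbps)
    {
            /* Dispatches to .set_queue_rate_limit, i.e.
             * otx2_nix_tm_set_queue_rate_limit() for this PMD.
             */
            int rc = rte_eth_set_queue_rate_limit(port_id, queue_idx,
                                                  rate_mbps);

            if (rc == -EBUSY) {
                    /* The topology change needs a stopped port:
                     * stop, retry, restart.
                     */
                    rte_eth_dev_stop(port_id);
                    rc = rte_eth_set_queue_rate_limit(port_id, queue_idx,
                                                      rate_mbps);
                    rte_eth_dev_start(port_id);
            }
            return rc;
    }

For this PMD, passing rate_mbps == 0 parks the queue via SW XOFF, as
otx2_nix_tm_rate_limit_mdq() below shows.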

diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index 6896797..78b7f3a 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -2071,6 +2071,7 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
 	.rx_descriptor_status     = otx2_nix_rx_descriptor_status,
 	.tx_descriptor_status     = otx2_nix_tx_descriptor_status,
 	.tx_done_cleanup          = otx2_nix_tx_done_cleanup,
+	.set_queue_rate_limit     = otx2_nix_tm_set_queue_rate_limit,
 	.pool_ops_supported       = otx2_nix_pool_ops_supported,
 	.filter_ctrl              = otx2_nix_dev_filter_ctrl,
 	.get_module_info          = otx2_nix_get_module_info,
diff --git a/drivers/net/octeontx2/otx2_tm.c b/drivers/net/octeontx2/otx2_tm.c
index 29c61de..bafb9aa 100644
--- a/drivers/net/octeontx2/otx2_tm.c
+++ b/drivers/net/octeontx2/otx2_tm.c
@@ -2195,14 +2195,15 @@ nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Delete default/ratelimit tree */
-	if (dev->tm_flags & (NIX_TM_DEFAULT_TREE)) {
+	if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) {
 		rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
 		if (rc) {
 			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 			error->message = "failed to free default resources";
 			return rc;
 		}
-		dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE);
+		dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE |
+				   NIX_TM_RATE_LIMIT_TREE);
 	}
 
 	/* Free up user alloc'ed resources */
@@ -2663,6 +2664,242 @@ int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
+static int
+nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	uint32_t def = eth_dev->data->nb_tx_queues;
+	struct rte_tm_node_params params;
+	uint32_t leaf_parent, i;
+	int rc = 0;
+
+	memset(&params, 0, sizeof(params));
+
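+	/*
+	 * Build one chain of scheduler levels that fans out to a dedicated
+	 * SMQ per Tx queue; the per-queue rate limit is later programmed on
+	 * that SMQ/MDQ. Leaf (queue) nodes keep ids 0..nb_tx_queues - 1, so
+	 * internal nodes are numbered upwards from def == nb_tx_queues to
+	 * keep the two id ranges disjoint.
+	 */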
+	if (nix_tm_have_tl1_access(dev)) {
+		dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
+		rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+					DEFAULT_RR_WEIGHT,
+					NIX_TXSCH_LVL_TL1,
+					OTX2_TM_LVL_ROOT, false, &params);
+		if (rc)
+			goto error;
+		rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+					DEFAULT_RR_WEIGHT,
+					NIX_TXSCH_LVL_TL2,
+					OTX2_TM_LVL_SCH1, false, &params);
+		if (rc)
+			goto error;
+		rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+					DEFAULT_RR_WEIGHT,
+					NIX_TXSCH_LVL_TL3,
+					OTX2_TM_LVL_SCH2, false, &params);
+		if (rc)
+			goto error;
+		rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
+					DEFAULT_RR_WEIGHT,
+					NIX_TXSCH_LVL_TL4,
+					OTX2_TM_LVL_SCH3, false, &params);
+		if (rc)
+			goto error;
+		leaf_parent = def + 3;
+
+		/* Add per queue SMQ nodes */
+		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+			rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
+						leaf_parent,
+						0, DEFAULT_RR_WEIGHT,
+						NIX_TXSCH_LVL_SMQ,
+						OTX2_TM_LVL_SCH4,
+						false, &params);
+			if (rc)
+				goto error;
+		}
+
+		/* Add leaf nodes */
+		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+			rc = nix_tm_node_add_to_list(dev, i,
+						     leaf_parent + 1 + i, 0,
+						     DEFAULT_RR_WEIGHT,
+						     NIX_TXSCH_LVL_CNT,
+						     OTX2_TM_LVL_QUEUE,
+						     false, &params);
+			if (rc)
+				goto error;
+		}
+
+		return 0;
+	}
+
+	dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
+	rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+				DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
+				OTX2_TM_LVL_ROOT, false, &params);
+	if (rc)
+		goto error;
+	rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+				DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
+				OTX2_TM_LVL_SCH1, false, &params);
+	if (rc)
+		goto error;
+	rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+				     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
+				     OTX2_TM_LVL_SCH2, false, &params);
+	if (rc)
+		goto error;
+	leaf_parent = def + 2;
+
+	/* Add per queue SMQ nodes */
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
+					     leaf_parent,
+					     0, DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_SMQ,
+					     OTX2_TM_LVL_SCH3,
+					     false, &params);
+		if (rc)
+			goto error;
+	}
+
+	/* Add leaf nodes */
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_CNT,
+					     OTX2_TM_LVL_SCH4,
+					     false, &params);
+		if (rc)
+			break;
+	}
+error:
+	return rc;
+}
+
+static int
+otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev,
+			   struct otx2_nix_tm_node *tm_node,
+			   uint64_t tx_rate)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_shaper_profile profile;
+	struct otx2_mbox *mbox = dev->mbox;
+	volatile uint64_t *reg, *regval;
+	struct nix_txschq_config *req;
+	uint16_t flags;
+	uint8_t k = 0;
+	int rc;
+
+	flags = tm_node->flags;
+
+	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+	req->lvl = NIX_TXSCH_LVL_MDQ;
+	reg = req->reg;
+	regval = req->regval;
+
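+	/* A requested rate of 0 parks the queue: assert SW XOFF on the MDQ */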
+	if (tx_rate == 0) {
+		k += prepare_tm_sw_xoff(tm_node, true, &reg[k], &regval[k]);
+		flags &= ~NIX_TM_NODE_ENABLED;
+		goto exit;
+	}
+
+	if (!(flags & NIX_TM_NODE_ENABLED)) {
+		k += prepare_tm_sw_xoff(tm_node, false, &reg[k], &regval[k]);
+		flags |= NIX_TM_NODE_ENABLED;
+	}
+
+	/* Use only PIR for rate limit */
+	memset(&profile, 0, sizeof(profile));
+	profile.params.peak.rate = tx_rate;
+	/* Minimum burst: ~4 us worth of Tx bytes, at least one max frame */
+	profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS,
+					   (4ull * tx_rate) / (1E6 * 8));
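+	/* e.g. tx_rate = 25 Gbps: 4e-6 s * 25e9 bit/s / 8 = 12500 B burst */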
+	if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate)
+		dev->tm_rate_min = tx_rate;
+
+	k += prepare_tm_shaper_reg(tm_node, &profile, &reg[k], &regval[k]);
+exit:
+	req->num_regs = k;
+	rc = otx2_mbox_process(mbox);
+	if (rc)
+		return rc;
+
+	tm_node->flags = flags;
+	return 0;
+}
+
+int
+otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+				uint16_t queue_idx, uint16_t tx_rate_mbps)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
+	struct otx2_nix_tm_node *tm_node;
+	int rc;
+
+	/* Check for supported revisions */
+	if (otx2_dev_is_95xx_Ax(dev) ||
+	    otx2_dev_is_96xx_Ax(dev))
+		return -EINVAL;
+
+	if (queue_idx >= eth_dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
+	    !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE))
+		goto error;
+
+	if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
+	    eth_dev->data->nb_tx_queues > 1) {
+		/* The ethdev must be stopped to change the TM topology */
+		if (eth_dev->data->dev_started)
+			return -EBUSY;
+
+		/*
+		 * Disable xmit; it will be re-enabled once the
+		 * new topology is in place.
+		 */
+		rc = nix_xmit_disable(eth_dev);
+		if (rc) {
+			otx2_err("failed to disable TX, rc=%d", rc);
+			return -EIO;
+		}
+
+		rc = nix_tm_free_resources(dev, 0, 0, false);
+		if (rc < 0) {
+			otx2_tm_dbg("failed to free default resources, rc %d",
+				   rc);
+			return -EIO;
+		}
+
+		rc = nix_tm_prepare_rate_limited_tree(eth_dev);
+		if (rc < 0) {
+			otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc);
+			return rc;
+		}
+
+		rc = nix_tm_alloc_resources(eth_dev, true);
+		if (rc != 0) {
+			otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc);
+			return rc;
+		}
+
+		dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
+		dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE;
+	}
+
+	tm_node = nix_tm_node_search(dev, queue_idx, false);
+
+	/* Check that we found a valid leaf node with a HW-backed parent */
+	if (!tm_node ||
+	    !nix_tm_is_leaf(dev, tm_node->lvl) ||
+	    !tm_node->parent ||
+	    tm_node->parent->hw_id == UINT32_MAX)
+		return -EIO;
+
+	return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate);
+error:
+	otx2_tm_dbg("Unsupported TM tree 0x%0x", dev->tm_flags);
+	return -EINVAL;
+}
+
 int
 otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
 {
diff --git a/drivers/net/octeontx2/otx2_tm.h b/drivers/net/octeontx2/otx2_tm.h
index d5d58ec..7b1672e 100644
--- a/drivers/net/octeontx2/otx2_tm.h
+++ b/drivers/net/octeontx2/otx2_tm.h
@@ -11,6 +11,7 @@
 
 #define NIX_TM_DEFAULT_TREE	BIT_ULL(0)
 #define NIX_TM_COMMITTED	BIT_ULL(1)
+#define NIX_TM_RATE_LIMIT_TREE	BIT_ULL(2)
 #define NIX_TM_TL1_NO_SP	BIT_ULL(3)
 
 struct otx2_eth_dev;
@@ -20,6 +21,8 @@ int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev);
 int otx2_nix_tm_fini(struct rte_eth_dev *eth_dev);
 int otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
 			      uint32_t *rr_quantum, uint16_t *smq);
+int otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+				     uint16_t queue_idx, uint16_t tx_rate);
 int otx2_nix_sq_flush_pre(void *_txq, bool dev_started);
 int otx2_nix_sq_flush_post(void *_txq);
 int otx2_nix_sq_enable(void *_txq);
-- 
2.8.4


