[PATCH v4 07/10] net/mlx5: add testpmd command to query per-queue rate limit

Slava Ovsiienko viacheslavo at nvidia.com
Mon Mar 23 14:19:26 CET 2026


Acked-by: Viacheslav Ovsiienko <viacheslavo at nvidia.com>

> -----Original Message-----
> From: Vincent Jardin <vjardin at free.fr>
> Sent: Sunday, March 22, 2026 3:46 PM
> To: dev at dpdk.org
> Cc: Raslan Darawsheh <rasland at nvidia.com>; NBU-Contact-Thomas Monjalon
> (EXTERNAL) <thomas at monjalon.net>; andrew.rybchenko at oktetlabs.ru;
> Dariusz Sosnowski <dsosnowski at nvidia.com>; Slava Ovsiienko
> <viacheslavo at nvidia.com>; Bing Zhao <bingz at nvidia.com>; Ori Kam
> <orika at nvidia.com>; Suanming Mou <suanmingm at nvidia.com>; Matan Azrad
> <matan at nvidia.com>; stephen at networkplumber.org;
> aman.deep.singh at intel.com; Vincent Jardin <vjardin at free.fr>
> Subject: [PATCH v4 07/10] net/mlx5: add testpmd command to query per-
> queue rate limit
> 
> Add a new testpmd command to display the per-queue packet pacing rate limit
> state, including the PP index from both driver state and FW SQ context
> readback:
> 
>   testpmd> mlx5 port <port_id> txq <queue_id> rate show
> 
> This helps verify that the FW actually applied the PP index to the SQ after setting
> a per-queue rate limit.
> 
> Expose a new PMD API rte_pmd_mlx5_txq_rate_limit_query() that queries
> txq_ctrl->rate_limit for driver state and
> mlx5_devx_cmd_query_sq() for the FW
> packet_pacing_rate_limit_index field.
> 
> Signed-off-by: Vincent Jardin <vjardin at free.fr>
> ---
>  drivers/net/mlx5/mlx5_testpmd.c | 93 +++++++++++++++++++++++++++++++++
>  drivers/net/mlx5/mlx5_tx.c      | 40 +++++++++++++-
>  drivers/net/mlx5/rte_pmd_mlx5.h | 30 +++++++++++
>  3 files changed, 162 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/mlx5/mlx5_testpmd.c b/drivers/net/mlx5/mlx5_testpmd.c
> index 1bb5a89559..fd3efecc5d 100644
> --- a/drivers/net/mlx5/mlx5_testpmd.c
> +++ b/drivers/net/mlx5/mlx5_testpmd.c
> @@ -1365,6 +1365,94 @@ cmdline_parse_inst_t mlx5_cmd_dump_rq_context_options = {
>  	}
>  };
> 
> +/* Show per-queue rate limit PP index for a given port/queue */
> +struct mlx5_cmd_show_rate_limit_options {
> +	cmdline_fixed_string_t mlx5;
> +	cmdline_fixed_string_t port;
> +	portid_t port_id;
> +	cmdline_fixed_string_t txq;
> +	queueid_t queue_id;
> +	cmdline_fixed_string_t rate;
> +	cmdline_fixed_string_t show;
> +};
> +
> +cmdline_parse_token_string_t mlx5_cmd_show_rate_limit_mlx5 =
> +	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_show_rate_limit_options,
> +				 mlx5, "mlx5");
> +cmdline_parse_token_string_t mlx5_cmd_show_rate_limit_port =
> +	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_show_rate_limit_options,
> +				 port, "port");
> +cmdline_parse_token_num_t mlx5_cmd_show_rate_limit_port_id =
> +	TOKEN_NUM_INITIALIZER(struct mlx5_cmd_show_rate_limit_options,
> +			      port_id, RTE_UINT16);
> +cmdline_parse_token_string_t mlx5_cmd_show_rate_limit_txq =
> +	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_show_rate_limit_options,
> +				 txq, "txq");
> +cmdline_parse_token_num_t mlx5_cmd_show_rate_limit_queue_id =
> +	TOKEN_NUM_INITIALIZER(struct mlx5_cmd_show_rate_limit_options,
> +			      queue_id, RTE_UINT16);
> +cmdline_parse_token_string_t mlx5_cmd_show_rate_limit_rate =
> +	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_show_rate_limit_options,
> +				 rate, "rate");
> +cmdline_parse_token_string_t mlx5_cmd_show_rate_limit_show =
> +	TOKEN_STRING_INITIALIZER(struct mlx5_cmd_show_rate_limit_options,
> +				 show, "show");
> +
> +static void
> +mlx5_cmd_show_rate_limit_parsed(void *parsed_result,
> +				__rte_unused struct cmdline *cl,
> +				__rte_unused void *data)
> +{
> +	struct mlx5_cmd_show_rate_limit_options *res = parsed_result;
> +	struct rte_pmd_mlx5_txq_rate_limit_info info;
> +	int ret;
> +
> +	ret = rte_pmd_mlx5_txq_rate_limit_query(res->port_id, res->queue_id,
> +						 &info);
> +	switch (ret) {
> +	case 0:
> +		break;
> +	case -ENODEV:
> +		fprintf(stderr, "invalid port_id %u\n", res->port_id);
> +		return;
> +	case -EINVAL:
> +		fprintf(stderr, "invalid queue index (%u), out of range\n",
> +			res->queue_id);
> +		return;
> +	case -EIO:
> +		fprintf(stderr, "failed to query SQ context\n");
> +		return;
> +	default:
> +		fprintf(stderr, "query failed (%d)\n", ret);
> +		return;
> +	}
> +	fprintf(stdout, "Port %u Txq %u rate limit info:\n",
> +		res->port_id, res->queue_id);
> +	if (info.rate_mbps > 0)
> +		fprintf(stdout, "  Configured rate: %u Mbps\n",
> +			info.rate_mbps);
> +	else
> +		fprintf(stdout, "  Configured rate: disabled\n");
> +	fprintf(stdout, "  PP index (driver): %u\n", info.pp_index);
> +	fprintf(stdout, "  PP index (FW readback): %u\n", info.fw_pp_index);
> +}
> +
> +cmdline_parse_inst_t mlx5_cmd_show_rate_limit = {
> +	.f = mlx5_cmd_show_rate_limit_parsed,
> +	.data = NULL,
> +	.help_str = "mlx5 port <port_id> txq <queue_id> rate show",
> +	.tokens = {
> +		(void *)&mlx5_cmd_show_rate_limit_mlx5,
> +		(void *)&mlx5_cmd_show_rate_limit_port,
> +		(void *)&mlx5_cmd_show_rate_limit_port_id,
> +		(void *)&mlx5_cmd_show_rate_limit_txq,
> +		(void *)&mlx5_cmd_show_rate_limit_queue_id,
> +		(void *)&mlx5_cmd_show_rate_limit_rate,
> +		(void *)&mlx5_cmd_show_rate_limit_show,
> +		NULL,
> +	}
> +};
> +
>  static struct testpmd_driver_commands mlx5_driver_cmds = {
>  	.commands = {
>  		{
> @@ -1440,6 +1528,11 @@ static struct testpmd_driver_commands mlx5_driver_cmds = {
>  			.help = "mlx5 port (port_id) queue (queue_id) dump rq_context (file_name)\n"
>  				"    Dump mlx5 RQ Context\n\n",
>  		},
> +		{
> +			.ctx = &mlx5_cmd_show_rate_limit,
> +			.help = "mlx5 port (port_id) txq (queue_id) rate show\n"
> +				"    Show per-queue rate limit PP index\n\n",
> +		},
>  		{
>  			.ctx = NULL,
>  		},
> diff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c
> index 8085b5c306..7d71782d33 100644
> --- a/drivers/net/mlx5/mlx5_tx.c
> +++ b/drivers/net/mlx5/mlx5_tx.c
> @@ -800,7 +800,7 @@ int rte_pmd_mlx5_txq_dump_contexts(uint16_t port_id, uint16_t queue_id, const ch
>  	if (!rte_eth_dev_is_valid_port(port_id))
>  		return -ENODEV;
> 
> -	if (rte_eth_tx_queue_is_valid(port_id, queue_id))
> +	if (rte_eth_tx_queue_is_valid(port_id, queue_id) != 0)
>  		return -EINVAL;
> 
>  	fd = fopen(path, "w");
> @@ -848,3 +848,41 @@ int rte_pmd_mlx5_txq_dump_contexts(uint16_t port_id, uint16_t queue_id, const ch
>  	fclose(fd);
>  	return ret;
>  }
> +
> +RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_pmd_mlx5_txq_rate_limit_query, 26.07)
> +int rte_pmd_mlx5_txq_rate_limit_query(uint16_t port_id, uint16_t queue_id,
> +				       struct rte_pmd_mlx5_txq_rate_limit_info *info)
> +{
> +	struct rte_eth_dev *dev;
> +	struct mlx5_priv *priv;
> +	struct mlx5_txq_data *txq_data;
> +	struct mlx5_txq_ctrl *txq_ctrl;
> +	uint32_t sq_out[MLX5_ST_SZ_DW(query_sq_out)] = {0};
> +	int ret;
> +
> +	if (info == NULL)
> +		return -EINVAL;
> +	if (!rte_eth_dev_is_valid_port(port_id))
> +		return -ENODEV;
> +	if (rte_eth_tx_queue_is_valid(port_id, queue_id) != 0)
> +		return -EINVAL;
> +	dev = &rte_eth_devices[port_id];
> +	priv = dev->data->dev_private;
> +	txq_data = (*priv->txqs)[queue_id];
> +	txq_ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
> +	info->rate_mbps = txq_ctrl->rate_limit.rate_mbps;
> +	info->pp_index = txq_ctrl->rate_limit.pp_id;
> +	if (txq_ctrl->obj == NULL) {
> +		info->fw_pp_index = 0;
> +		return 0;
> +	}
> +	ret = mlx5_devx_cmd_query_sq(txq_ctrl->obj->sq_obj.sq,
> +				     sq_out, sizeof(sq_out));
> +	if (ret)
> +		return -EIO;
> +	info->fw_pp_index = MLX5_GET(sqc,
> +				     MLX5_ADDR_OF(query_sq_out, sq_out,
> +						  sq_context),
> +				     packet_pacing_rate_limit_index);
> +	return 0;
> +}
> diff --git a/drivers/net/mlx5/rte_pmd_mlx5.h b/drivers/net/mlx5/rte_pmd_mlx5.h
> index 7acfdae97d..698d7d2032 100644
> --- a/drivers/net/mlx5/rte_pmd_mlx5.h
> +++ b/drivers/net/mlx5/rte_pmd_mlx5.h
> @@ -420,6 +420,36 @@ __rte_experimental
>  int
>  rte_pmd_mlx5_txq_dump_contexts(uint16_t port_id, uint16_t queue_id,
> const char *filename);
> 
> +/**
> + * Per-queue rate limit information.
> + */
> +struct rte_pmd_mlx5_txq_rate_limit_info {
> +	uint32_t rate_mbps;	/**< Configured rate in Mbps, 0 = disabled. */
> +	uint16_t pp_index;	/**< PP index from driver state. */
> +	uint16_t fw_pp_index;	/**< PP index read back from FW SQ context. */
> +};
> +
> +/**
> + * Query per-queue rate limit state for a given Tx queue.
> + *
> + * @param[in] port_id
> + *   Port ID.
> + * @param[in] queue_id
> + *   Tx queue ID.
> + * @param[out] info
> + *   Rate limit information.
> + *
> + * @return
> + *   0 on success, negative errno on failure:
> + *   - -ENODEV: invalid port_id.
> + *   - -EINVAL: invalid queue_id.
> + *   - -EIO: FW query failed.
> + */
> +__rte_experimental
> +int
> +rte_pmd_mlx5_txq_rate_limit_query(uint16_t port_id, uint16_t queue_id,
> +				  struct rte_pmd_mlx5_txq_rate_limit_info *info);
> +
>  /** Type of mlx5 driver event for which custom callback is called. */
>  enum rte_pmd_mlx5_driver_event_cb_type {
>  	/** Called after HW Rx queue is created. */
> --
> 2.43.0



More information about the dev mailing list