<div dir="ltr"><div dir="ltr"><br></div><br><div class="gmail_quote gmail_quote_container"><div dir="ltr" class="gmail_attr">сб, 30 авг. 2025 г. в 20:52, Ivan Malov <<a href="mailto:ivan.malov@arknetworks.am">ivan.malov@arknetworks.am</a>>:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">Hi Vladimir,<br>
<br>
On Sat, 30 Aug 2025, Vladimir Medvedkin wrote:<br>
<br>
> Currently there are two structutes defined for DCB configuration, one for<br>
<br>
Typo: structuRes.<br>
<br>
> RX and one for TX. They do have slight semantic difference, but in terms<br>
> of their structure they are identical. Refactor DCB configuration API to<br>
> use common structute for both TX and RX.<br>
><br>
> Additionally, current structure do not reflect everything that is<br>
> required by the DCB specification, such as per Traffic Class bandwidth<br>
> allocation and Traffic Selection Algorithm (TSA). Extend rte_eth_dcb_conf<br>
> with additional DCB settings<br>
><br>
> Signed-off-by: Vladimir Medvedkin <<a href="mailto:vladimir.medvedkin@intel.com" target="_blank">vladimir.medvedkin@intel.com</a>><br>
> ---<br>
> app/test-pmd/testpmd.c | 19 ++++++-<br>
> drivers/net/intel/ice/ice_ethdev.c | 80 ++++++++++++++++++++----------<br>
> lib/ethdev/rte_ethdev.h | 25 ++++++----<br>
> 3 files changed, 85 insertions(+), 39 deletions(-)<br>
><br>
> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c<br>
> index bb88555328..d64a7dcac5 100644<br>
> --- a/app/test-pmd/testpmd.c<br>
> +++ b/app/test-pmd/testpmd.c<br>
> @@ -4134,9 +4134,9 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf, enum dcb_mode_enable dcb_mode,<br>
> (rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);<br>
> eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;<br>
> } else {<br>
> - struct rte_eth_dcb_rx_conf *rx_conf =<br>
> + struct rte_eth_dcb_conf *rx_conf =<br>
> &eth_conf->rx_adv_conf.dcb_rx_conf;<br>
> - struct rte_eth_dcb_tx_conf *tx_conf =<br>
> + struct rte_eth_dcb_conf *tx_conf =<br>
> &eth_conf->tx_adv_conf.dcb_tx_conf;<br>
><br>
> rx_conf->nb_tcs = num_tcs;<br>
> @@ -4148,6 +4148,21 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf, enum dcb_mode_enable dcb_mode,<br>
> tx_conf->dcb_tc[i] = dcb_tc_val;<br>
> }<br>
><br>
> + const int bw_share_percent = 100 / num_tcs;<br>
> + const int bw_share_left = 100 - bw_share_percent * num_tcs;<br>
> + for (i = 0; i < num_tcs; i++) {<br>
> + rx_conf->dcb_tc_bw[i] = bw_share_percent;<br>
> + tx_conf->dcb_tc_bw[i] = bw_share_percent;<br>
> +<br>
> + rx_conf->dcb_tsa[i] = RTE_ETH_DCB_TSA_ETS;<br>
> + tx_conf->dcb_tsa[i] = RTE_ETH_DCB_TSA_ETS;<br>
> + }<br>
> +<br>
> + for (i = 0; i < bw_share_left; i++) {<br>
> + rx_conf->dcb_tc_bw[i]++;<br>
> + tx_conf->dcb_tc_bw[i]++;<br>
> + }<br>
<br>
A brief comment would make the purpose clearer.<br>
<br>
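</blockquote><div><br></div><div>Good point - the intent there is to give every TC an equal ETS share and then spread the remainder of 100 / num_tcs over the first TCs, so the shares always add up to exactly 100%. A short comment along these lines could be added in v2 (wording is only a suggestion):</div><div><br></div><div>/*<br>
 * Split the bandwidth evenly across the TCs. 100 is usually not divisible<br>
 * by num_tcs, so spread the remainder over the first TCs to make the ETS<br>
 * shares add up to exactly 100%.<br>
 */<br>
const int bw_share_percent = 100 / num_tcs;<br>
const int bw_share_left = 100 - bw_share_percent * num_tcs;</div><div><br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">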
> +<br>
> eth_conf->rxmode.mq_mode =<br>
> (enum rte_eth_rx_mq_mode)<br>
> (rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);<br>
> diff --git a/drivers/net/intel/ice/ice_ethdev.c b/drivers/net/intel/ice/ice_ethdev.c<br>
> index 8ab0da3549..7ba25049d7 100644<br>
> --- a/drivers/net/intel/ice/ice_ethdev.c<br>
> +++ b/drivers/net/intel/ice/ice_ethdev.c<br>
> @@ -3760,10 +3760,13 @@ static int ice_init_rss(struct ice_pf *pf)<br>
> }<br>
><br>
> static int<br>
> -check_dcb_conf(int is_8_ports, struct rte_eth_dcb_rx_conf *dcb_conf)<br>
> +check_dcb_conf(int is_8_ports, struct rte_eth_dcb_conf *dcb_conf)<br>
> {<br>
> uint32_t tc_map = 0;<br>
> int i;<br>
> + int total_bw_allocated = 0;<br>
> + bool ets_seen = false;<br>
> + int nb_tc_used;<br>
><br>
> enum rte_eth_nb_tcs nb_tcs = dcb_conf->nb_tcs;<br>
> if (nb_tcs != RTE_ETH_4_TCS && is_8_ports) {<br>
> @@ -3784,7 +3787,31 @@ check_dcb_conf(int is_8_ports, struct rte_eth_dcb_rx_conf *dcb_conf)<br>
> return -1;<br>
> }<br>
><br>
> - return rte_popcount32(tc_map);<br>
> + nb_tc_used = rte_popcount32(tc_map);<br>
> +<br>
> + /* calculate total ETS Bandwidth allocation */<br>
> + for (i = 0; i < nb_tc_used; i++) {<br>
> + if (dcb_conf->dcb_tsa[i] == RTE_ETH_DCB_TSA_ETS) {<br>
> + if (dcb_conf->dcb_tc_bw[i] == 0) {<br>
> + PMD_DRV_LOG(ERR,<br>
> + "Bad ETS BW configuration, can not allocate 0%%");<br>
> + return -1;<br>
> + }<br>
> + total_bw_allocated += dcb_conf->dcb_tc_bw[i];<br>
> + ets_seen = true;<br>
> + } else if (dcb_conf->dcb_tsa[i] != RTE_ETH_DCB_TSA_STRICT) {<br>
> + PMD_DRV_LOG(ERR, "Invalid TC TSA setting - only Strict and ETS are supported");<br>
> + return -1;<br>
> + }<br>
> + }<br>
> +<br>
> + /* total ETS BW allocation must add up to 100% */<br>
> + if (ets_seen && total_bw_allocated != 100) {<br>
> + PMD_DRV_LOG(ERR, "Invalid TC Bandwidth allocation configuration");<br>
> + return -1;<br>
> + }<br>
> +<br>
> + return nb_tc_used;<br>
> }<br>
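</blockquote><div><br></div><div>(Side note for reviewers, to make the accepted combinations concrete: a configuration like the one below passes this check. The values are purely illustrative application-side settings, using the RTE_ETH_DCB_TSA_* values added further down in this patch.)</div><div><br></div><div>struct rte_eth_dcb_conf conf = {<br>
        .nb_tcs = RTE_ETH_4_TCS,<br>
        /* UPs 0..3 map to TCs 0..3, the remaining UPs share TC 3 */<br>
        .dcb_tc = { 0, 1, 2, 3, 3, 3, 3, 3 },<br>
        /* TC0 is strict priority, TC1..TC3 use ETS */<br>
        .dcb_tsa = { RTE_ETH_DCB_TSA_STRICT, RTE_ETH_DCB_TSA_ETS,<br>
                     RTE_ETH_DCB_TSA_ETS, RTE_ETH_DCB_TSA_ETS },<br>
        /* ETS shares must sum to 100%; the strict-priority TC takes none */<br>
        .dcb_tc_bw = { 0, 50, 30, 20 },<br>
};</div><div><br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">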
><br>
> static int<br>
> @@ -3819,15 +3846,22 @@ ice_dev_configure(struct rte_eth_dev *dev)<br>
> struct ice_qos_cfg *qos_cfg = &port_info->qos_cfg;<br>
> struct ice_dcbx_cfg *local_dcb_conf = &qos_cfg->local_dcbx_cfg;<br>
> struct ice_vsi_ctx ctxt;<br>
> - struct rte_eth_dcb_rx_conf *dcb_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;<br>
> + struct rte_eth_dcb_conf *rx_dcb_conf =<br>
> + &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;<br>
> + struct rte_eth_dcb_conf *tx_dcb_conf =<br>
> + &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;<br>
> int i;<br>
> - enum rte_eth_nb_tcs nb_tcs = dcb_conf->nb_tcs;<br>
> - int nb_tc_used, queues_per_tc;<br>
> + enum rte_eth_nb_tcs nb_tcs = rx_dcb_conf->nb_tcs;<br>
> + int nb_tc_used_rx, nb_tc_used_tx, queues_per_tc;<br>
> uint16_t total_q_nb;<br>
><br>
> - nb_tc_used = check_dcb_conf(ice_get_port_max_cgd(hw) == ICE_4_CGD_PER_PORT,<br>
> - dcb_conf);<br>
> - if (nb_tc_used < 0)<br>
> + nb_tc_used_rx = check_dcb_conf(ice_get_port_max_cgd(hw) == ICE_4_CGD_PER_PORT,<br>
> + rx_dcb_conf);<br>
> + if (nb_tc_used_rx < 0)<br>
> + return -EINVAL;<br>
> + nb_tc_used_tx = check_dcb_conf(ice_get_port_max_cgd(hw) == ICE_4_CGD_PER_PORT,<br>
> + tx_dcb_conf);<br>
> + if (nb_tc_used_tx < 0)<br>
> return -EINVAL;<br>
><br>
> ctxt.info = vsi->info;<br>
> @@ -3837,8 +3871,8 @@ ice_dev_configure(struct rte_eth_dev *dev)<br>
> }<br>
><br>
> total_q_nb = dev->data->nb_rx_queues;<br>
> - queues_per_tc = total_q_nb / nb_tc_used;<br>
> - if (total_q_nb % nb_tc_used != 0) {<br>
> + queues_per_tc = total_q_nb / nb_tc_used_rx;<br>
> + if (total_q_nb % nb_tc_used_rx != 0) {<br>
> PMD_DRV_LOG(ERR, "For DCB, number of queues must be evenly divisble by number of used TCs");<br>
> return -EINVAL;<br>
> } else if (!rte_is_power_of_2(queues_per_tc)) {<br>
> @@ -3846,7 +3880,7 @@ ice_dev_configure(struct rte_eth_dev *dev)<br>
> return -EINVAL;<br>
> }<br>
><br>
> - for (i = 0; i < nb_tc_used; i++) {<br>
> + for (i = 0; i < nb_tc_used_rx; i++) {<br>
> ctxt.info.tc_mapping[i] =<br>
> rte_cpu_to_le_16(((i * queues_per_tc) << ICE_AQ_VSI_TC_Q_OFFSET_S) |<br>
> (rte_log2_u32(queues_per_tc) << ICE_AQ_VSI_TC_Q_NUM_S));<br>
> @@ -3858,29 +3892,21 @@ ice_dev_configure(struct rte_eth_dev *dev)<br>
><br>
> /* Associate each VLAN UP with particular TC */<br>
> for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {<br>
> - local_dcb_conf->etscfg.prio_table[i] = dcb_conf->dcb_tc[i];<br>
> - local_dcb_conf->etsrec.prio_table[i] = dcb_conf->dcb_tc[i];<br>
> + local_dcb_conf->etscfg.prio_table[i] = rx_dcb_conf->dcb_tc[i];<br>
> + local_dcb_conf->etsrec.prio_table[i] = tx_dcb_conf->dcb_tc[i];<br>
> }<br>
><br>
> - /*<br>
> - * Since current API does not support setting ETS BW Share and Scheduler<br>
> - * configure all TC as ETS and evenly share load across all existing TC<br>
> - **/<br>
> - const int bw_share_percent = 100 / nb_tc_used;<br>
> - const int bw_share_left = 100 - bw_share_percent * nb_tc_used;<br>
> - for (i = 0; i < nb_tc_used; i++) {<br>
> + for (i = 0; i < nb_tc_used_rx; i++) {<br>
> /* Per TC bandwidth table (all valued must add up to 100%), valid on ETS */<br>
> - local_dcb_conf->etscfg.tcbwtable[i] = bw_share_percent;<br>
> - local_dcb_conf->etsrec.tcbwtable[i] = bw_share_percent;<br>
> + local_dcb_conf->etscfg.tcbwtable[i] = rx_dcb_conf->dcb_tc_bw[i];<br>
><br>
> /**< Transmission Selection Algorithm. 0 - Strict prio, 2 - ETS */<br>
> - local_dcb_conf->etscfg.tsatable[i] = 2;<br>
> - local_dcb_conf->etsrec.tsatable[i] = 2;<br>
> + local_dcb_conf->etscfg.tsatable[i] = rx_dcb_conf->dcb_tsa[i];<br>
> }<br>
><br>
> - for (i = 0; i < bw_share_left; i++) {<br>
> - local_dcb_conf->etscfg.tcbwtable[i]++;<br>
> - local_dcb_conf->etsrec.tcbwtable[i]++;<br>
> + for (i = 0; i < nb_tc_used_tx; i++) {<br>
> + local_dcb_conf->etsrec.tcbwtable[i] = tx_dcb_conf->dcb_tc_bw[i];<br>
> + local_dcb_conf->etsrec.tsatable[i] = tx_dcb_conf->dcb_tsa[i];<br>
> }<br>
><br>
> local_dcb_conf->pfc.pfccap = nb_tcs;<br>
> diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h<br>
> index f9fb6ae549..13b1a41d3b 100644<br>
> --- a/lib/ethdev/rte_ethdev.h<br>
> +++ b/lib/ethdev/rte_ethdev.h<br>
> @@ -853,6 +853,7 @@ rte_eth_rss_hf_refine(uint64_t rss_hf)<br>
> /**@{@name VMDq and DCB maximums */<br>
> #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64 /**< Maximum nb. of VMDq VLAN filters. */<br>
> #define RTE_ETH_DCB_NUM_USER_PRIORITIES 8 /**< Maximum nb. of DCB priorities. */<br>
> +#define RTE_ETH_DCB_NUM_TCS 8 /**< Maximum nb. of DCB traffic classes. */<br>
> #define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128 /**< Maximum nb. of VMDq DCB queues. */<br>
> #define RTE_ETH_DCB_NUM_QUEUES 128 /**< Maximum nb. of DCB queues. */<br>
> /**@}*/<br>
> @@ -929,11 +930,21 @@ enum rte_eth_nb_pools {<br>
> RTE_ETH_64_POOLS = 64 /**< 64 VMDq pools. */<br>
> };<br>
><br>
> +#define RTE_ETH_DCB_TSA_STRICT 0<br>
> +#define RTE_ETH_DCB_TSA_ETS 2<br>
<br>
Why not enum?<br></blockquote><div><br></div><div>Agree, an enum will be better.</div>
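<div><br></div><div>Something along these lines, perhaps (the name is only a suggestion; the values are kept from the defines so the TSA encoding used by the driver stays the same):</div><div><br></div><div>enum rte_eth_dcb_tsa {<br>
        RTE_ETH_DCB_TSA_STRICT = 0, /**< Strict priority. */<br>
        RTE_ETH_DCB_TSA_ETS = 2,    /**< Enhanced Transmission Selection. */<br>
};</div><div> </div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">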
<br>
> +<br>
> /* This structure may be extended in future. */<br>
> -struct rte_eth_dcb_rx_conf {<br>
> +struct rte_eth_dcb_conf {<br>
> enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */<br>
> - /** Traffic class each UP mapped to. */<br>
> + /** Traffic class each UP mapped to.<br>
<br>
Perhaps keep '/**' on a separate line in a multi-line comment.<br>
<br>
Thank you.<br>
<br>
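</blockquote><div><br></div><div>Ack, will keep '/**' on its own line in v2, i.e. something like:</div><div><br></div><div>/**<br>
 * Traffic class each UP mapped to.<br>
 * Rx packets VLAN UP for Rx configuration<br>
 * Rx PFC Pause frames UP for Tx configuration<br>
 */<br>
uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];</div><div><br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">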
> + * Rx packets VLAN UP for Rx configuration<br>
> + * Rx PFC Pause frames UP for Tx configuration<br>
> + */<br>
> uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];<br>
> + /** Traffic class selector algorithm */<br>
> + uint8_t dcb_tsa[RTE_ETH_DCB_NUM_TCS];<br>
> + /** Traffic class relative bandwidth in percents */<br>
> + uint8_t dcb_tc_bw[RTE_ETH_DCB_NUM_TCS];<br>
> };<br>
><br>
> struct rte_eth_vmdq_dcb_tx_conf {<br>
> @@ -942,12 +953,6 @@ struct rte_eth_vmdq_dcb_tx_conf {<br>
> uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];<br>
> };<br>
><br>
> -struct rte_eth_dcb_tx_conf {<br>
> - enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */<br>
> - /** Traffic class each UP mapped to. */<br>
> - uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];<br>
> -};<br>
> -<br>
> struct rte_eth_vmdq_tx_conf {<br>
> enum rte_eth_nb_pools nb_queue_pools; /**< VMDq mode, 64 pools. */<br>
> };<br>
> @@ -1531,7 +1536,7 @@ struct rte_eth_conf {<br>
> /** Port VMDq+DCB configuration. */<br>
> struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;<br>
> /** Port DCB Rx configuration. */<br>
> - struct rte_eth_dcb_rx_conf dcb_rx_conf;<br>
> + struct rte_eth_dcb_conf dcb_rx_conf;<br>
> /** Port VMDq Rx configuration. */<br>
> struct rte_eth_vmdq_rx_conf vmdq_rx_conf;<br>
> } rx_adv_conf; /**< Port Rx filtering configuration. */<br>
> @@ -1539,7 +1544,7 @@ struct rte_eth_conf {<br>
> /** Port VMDq+DCB Tx configuration. */<br>
> struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;<br>
> /** Port DCB Tx configuration. */<br>
> - struct rte_eth_dcb_tx_conf dcb_tx_conf;<br>
> + struct rte_eth_dcb_conf dcb_tx_conf;<br>
> /** Port VMDq Tx configuration. */<br>
> struct rte_eth_vmdq_tx_conf vmdq_tx_conf;<br>
> } tx_adv_conf; /**< Port Tx DCB configuration (union). */<br>
> -- <br>
> 2.43.0<br>
><br>
><br>
</blockquote></div><div><br clear="all"></div><br><span class="gmail_signature_prefix">-- </span><br><div dir="ltr" class="gmail_signature"><div dir="ltr"><div>Regards,<br></div>Vladimir<br></div></div></div>