[dpdk-dev] [PATCH v1 39/42] net/txgbe: configure DCB HW resources

Jiawen Wu jiawenwu at trustnetic.com
Tue Sep 1 13:51:10 CEST 2020


Add DCB transmit and receive mode configurations, and set up the DCB
hardware resources (arbiters, credits and PFC thresholds) they rely on.

Signed-off-by: Jiawen Wu <jiawenwu at trustnetic.com>
---
 drivers/net/txgbe/base/meson.build    |   1 +
 drivers/net/txgbe/base/txgbe_dcb.c    | 180 ++++++++++++
 drivers/net/txgbe/base/txgbe_dcb.h    |  27 ++
 drivers/net/txgbe/base/txgbe_dcb_hw.c | 283 +++++++++++++++++++
 drivers/net/txgbe/base/txgbe_dcb_hw.h |  23 ++
 drivers/net/txgbe/base/txgbe_hw.c     |   1 +
 drivers/net/txgbe/txgbe_ethdev.c      |   6 +
 drivers/net/txgbe/txgbe_ethdev.h      |  10 +
 drivers/net/txgbe/txgbe_rxtx.c        | 383 ++++++++++++++++++++++++++
 9 files changed, 914 insertions(+)
 create mode 100644 drivers/net/txgbe/base/txgbe_dcb_hw.c
 create mode 100644 drivers/net/txgbe/base/txgbe_dcb_hw.h
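
Note on the CEE credit math in txgbe_dcb_calculate_tc_credits_cee() below:
it is easier to follow with concrete numbers. A minimal standalone sketch,
assuming a 1518-byte max frame and an invented two-TC bandwidth split; the
three defines mirror txgbe_dcb.h, everything else is illustrative:

#include <stdio.h>

#define CREDIT_QUANTUM     64                  /* bytes per credit */
#define MAX_CREDIT_REFILL  200                 /* 200 * 64B = 12800B */
#define MAX_CREDIT         (2 * MAX_CREDIT_REFILL)

int main(void)
{
	unsigned int max_frame = 1518;
	/* refill credits must cover half the max frame, rounded up */
	unsigned int min_credit =
		((max_frame / 2) + CREDIT_QUANTUM - 1) / CREDIT_QUANTUM;
	unsigned int link_pct[2] = { 10, 90 };  /* invented per-TC shares */
	unsigned int min_pct = 10;              /* smallest non-zero share */
	/* smallest multiplier keeping every refill above min_credit */
	unsigned int mult = min_credit / min_pct + 1;
	int tc;

	for (tc = 0; tc < 2; tc++) {
		unsigned int refill = link_pct[tc] * mult;
		unsigned int max = link_pct[tc] * MAX_CREDIT / 100;

		if (refill > MAX_CREDIT_REFILL)
			refill = MAX_CREDIT_REFILL;
		if (refill < min_credit)
			refill = min_credit;
		if (max < min_credit)
			max = min_credit;
		printf("TC%d: refill=%u max=%u\n", tc, refill, max);
	}
	return 0;
}

With these inputs min_credit is 12 and the multiplier is 2, giving TC0
refill=20/max=40 and TC1 refill=180/max=360 credits.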

diff --git a/drivers/net/txgbe/base/meson.build b/drivers/net/txgbe/base/meson.build
index 13b418f19..d240a4335 100644
--- a/drivers/net/txgbe/base/meson.build
+++ b/drivers/net/txgbe/base/meson.build
@@ -2,6 +2,7 @@
 # Copyright(c) 2015-2020
 
 sources = [
 	'txgbe_dcb.c',
+	'txgbe_dcb_hw.c',
 	'txgbe_eeprom.c',
 	'txgbe_hw.c',
diff --git a/drivers/net/txgbe/base/txgbe_dcb.c b/drivers/net/txgbe/base/txgbe_dcb.c
index 6366da92a..7e9a16cfe 100644
--- a/drivers/net/txgbe/base/txgbe_dcb.c
+++ b/drivers/net/txgbe/base/txgbe_dcb.c
@@ -5,6 +5,7 @@
 #include "txgbe_type.h"
 #include "txgbe_hw.h"
 #include "txgbe_dcb.h"
+#include "txgbe_dcb_hw.h"
 
 /**
  *  txgbe_pfc_enable - Enable flow control
@@ -146,6 +147,177 @@ txgbe_dcb_pfc_enable(struct txgbe_hw *hw, uint8_t tc_num)
 	return ret_val;
 }
 
+/**
+ * txgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits
+ * @hw: pointer to hardware structure
+ * @dcb_config: Struct containing DCB settings
+ * @max_frame_size: Maximum frame size
+ * @direction: Configuring either Tx or Rx
+ *
+ * This function calculates the credits allocated to each traffic class.
+ * It should be called only after the rules are checked by
+ * txgbe_dcb_check_config_cee().
+ */
+s32 txgbe_dcb_calculate_tc_credits_cee(struct txgbe_hw *hw,
+				   struct txgbe_dcb_config *dcb_config,
+				   u32 max_frame_size, u8 direction)
+{
+	struct txgbe_dcb_tc_path *p;
+	u32 min_multiplier	= 0;
+	u16 min_percent		= 100;
+	s32 ret_val =		0;
+	/* Initialization values default for Tx settings */
+	u32 min_credit		= 0;
+	u32 credit_refill	= 0;
+	u32 credit_max		= 0;
+	u16 link_percentage	= 0;
+	u8  bw_percent		= 0;
+	u8  i;
+
+	UNREFERENCED_PARAMETER(hw);
+
+	if (dcb_config == NULL) {
+		ret_val = TXGBE_ERR_CONFIG;
+		goto out;
+	}
+
+	min_credit = ((max_frame_size / 2) + TXGBE_DCB_CREDIT_QUANTUM - 1) /
+		     TXGBE_DCB_CREDIT_QUANTUM;
+
+	/* Find smallest link percentage */
+	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+		p = &dcb_config->tc_config[i].path[direction];
+		bw_percent = dcb_config->bw_percentage[p->bwg_id][direction];
+		link_percentage = p->bwg_percent;
+
+		link_percentage = (link_percentage * bw_percent) / 100;
+
+		if (link_percentage && link_percentage < min_percent)
+			min_percent = link_percentage;
+	}
+
+	/*
+	 * The ratio between traffic classes will control the bandwidth
+	 * percentages seen on the wire. To calculate this ratio we use
+	 * a multiplier. The refill credits must be larger than the max
+	 * frame size, so here we find the smallest multiplier that
+	 * keeps the refill credits for every traffic class above the
+	 * max frame size.
+	 */
+	min_multiplier = (min_credit / min_percent) + 1;
+
+	/* Find out the link percentage for each TC first */
+	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+		p = &dcb_config->tc_config[i].path[direction];
+		bw_percent = dcb_config->bw_percentage[p->bwg_id][direction];
+
+		link_percentage = p->bwg_percent;
+		/* Must be careful of integer division for very small numbers */
+		link_percentage = (link_percentage * bw_percent) / 100;
+		if (p->bwg_percent > 0 && link_percentage == 0)
+			link_percentage = 1;
+
+		/* Save link_percentage for reference */
+		p->link_percent = (u8)link_percentage;
+
+		/* Calculate credit refill ratio using multiplier */
+		credit_refill = min(link_percentage * min_multiplier,
+				    (u32)TXGBE_DCB_MAX_CREDIT_REFILL);
+
+		/* Refill at least minimum credit */
+		if (credit_refill < min_credit)
+			credit_refill = min_credit;
+
+		p->data_credits_refill = (u16)credit_refill;
+
+		/* Calculate maximum credit for the TC */
+		credit_max = (link_percentage * TXGBE_DCB_MAX_CREDIT) / 100;
+
+		/*
+		 * Adjustment based on rule checking, if the percentage
+		 * of a TC is too small, the maximum credit may not be
+		 * enough to send out a jumbo frame in data plane arbitration.
+		 */
+		if (credit_max < min_credit)
+			credit_max = min_credit;
+
+		if (direction == TXGBE_DCB_TX_CONFIG) {
+			dcb_config->tc_config[i].desc_credits_max =
+								(u16)credit_max;
+		}
+
+		p->data_credits_max = (u16)credit_max;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * txgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info
+ * @cfg: dcb configuration to unpack into hardware consumable fields
+ * @map: user priority to traffic class map
+ * @pfc_up: u8 to store user priority PFC bitmask
+ *
+ * This unpacks the dcb configuration PFC info, which is stored per
+ * traffic class, into an 8-bit user priority bitmask that can be
+ * consumed by hardware routines. The priority to tc map must be
+ * updated beforehand so the current UP-to-TC maps are used.
+ */
+void txgbe_dcb_unpack_pfc_cee(struct txgbe_dcb_config *cfg, u8 *map, u8 *pfc_up)
+{
+	struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int up;
+
+	/*
+	 * If the TC for this user priority has PFC enabled then set the
+	 * matching bit in 'pfc_up' to reflect that PFC is enabled.
+	 */
+	for (*pfc_up = 0, up = 0; up < TXGBE_DCB_UP_MAX; up++) {
+		if (tc_config[map[up]].pfc != txgbe_dcb_pfc_disabled)
+			*pfc_up |= 1 << up;
+	}
+}
+
+void txgbe_dcb_unpack_refill_cee(struct txgbe_dcb_config *cfg, int direction,
+			     u16 *refill)
+{
+	struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int tc;
+
+	for (tc = 0; tc < TXGBE_DCB_TC_MAX; tc++)
+		refill[tc] = tc_config[tc].path[direction].data_credits_refill;
+}
+
+void txgbe_dcb_unpack_max_cee(struct txgbe_dcb_config *cfg, u16 *max)
+{
+	struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int tc;
+
+	for (tc = 0; tc < TXGBE_DCB_TC_MAX; tc++)
+		max[tc] = tc_config[tc].desc_credits_max;
+}
+
+void txgbe_dcb_unpack_bwgid_cee(struct txgbe_dcb_config *cfg, int direction,
+			    u8 *bwgid)
+{
+	struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int tc;
+
+	for (tc = 0; tc < TXGBE_DCB_TC_MAX; tc++)
+		bwgid[tc] = tc_config[tc].path[direction].bwg_id;
+}
+
+void txgbe_dcb_unpack_tsa_cee(struct txgbe_dcb_config *cfg, int direction,
+			   u8 *tsa)
+{
+	struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int tc;
+
+	for (tc = 0; tc < TXGBE_DCB_TC_MAX; tc++)
+		tsa[tc] = tc_config[tc].path[direction].tsa;
+}
+
 u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *cfg, int direction, u8 up)
 {
 	struct txgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
@@ -178,3 +350,11 @@ void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *cfg, int direction,
 		map[up] = txgbe_dcb_get_tc_from_up(cfg, direction, up);
 }
 
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 txgbe_dcb_config_pfc(struct txgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+	int ret;
+	ret = txgbe_dcb_config_pfc_raptor(hw, pfc_en, map);
+	return ret;
+}
+
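
As a quick reference for the PFC unpack above: with priorities 6 and 7
mapped to the only PFC-enabled TC, txgbe_dcb_unpack_pfc_cee() produces the
bitmask 0xC0. A minimal sketch of the same loop, with an invented two-TC
map (types and values are illustrative, not from the driver):

#include <stdio.h>

#define UP_MAX 8   /* as TXGBE_DCB_UP_MAX */

int main(void)
{
	/* invented map: priorities 0-5 on TC0, 6 and 7 on TC1 */
	unsigned char map[UP_MAX] = { 0, 0, 0, 0, 0, 0, 1, 1 };
	unsigned char tc_pfc_en[2] = { 0, 1 };  /* PFC on TC1 only */
	unsigned char pfc_up = 0;
	int up;

	for (up = 0; up < UP_MAX; up++)
		if (tc_pfc_en[map[up]])
			pfc_up |= 1 << up;

	printf("pfc_up = 0x%02X\n", pfc_up);    /* prints 0xC0 */
	return 0;
}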
diff --git a/drivers/net/txgbe/base/txgbe_dcb.h b/drivers/net/txgbe/base/txgbe_dcb.h
index 67de5c54b..c679f1d75 100644
--- a/drivers/net/txgbe/base/txgbe_dcb.h
+++ b/drivers/net/txgbe/base/txgbe_dcb.h
@@ -7,6 +7,17 @@
 
 #include "txgbe_type.h"
 
+/* DCB defines */
+/* DCB credit calculation defines */
+#define TXGBE_DCB_CREDIT_QUANTUM	64
+#define TXGBE_DCB_MAX_CREDIT_REFILL	200   /* 200 * 64B = 12800B */
+#define TXGBE_DCB_MAX_TSO_SIZE		(32 * 1024) /* Max TSO pkt size in DCB */
+#define TXGBE_DCB_MAX_CREDIT		(2 * TXGBE_DCB_MAX_CREDIT_REFILL)
+
+/* 513 for 32KB TSO packet */
+#define TXGBE_DCB_MIN_TSO_CREDIT	\
+	((TXGBE_DCB_MAX_TSO_SIZE / TXGBE_DCB_CREDIT_QUANTUM) + 1)
+
 #define TXGBE_DCB_TX_CONFIG		0
 #define TXGBE_DCB_RX_CONFIG		1
 
@@ -80,7 +91,23 @@ struct txgbe_dcb_config {
 };
 
 int txgbe_dcb_pfc_enable(struct txgbe_hw *hw, u8 tc_num);
+
+/* DCB credits calculation */
+s32 txgbe_dcb_calculate_tc_credits_cee(struct txgbe_hw *,
+				       struct txgbe_dcb_config *, u32, u8);
+
+/* DCB PFC */
+s32 txgbe_dcb_config_pfc(struct txgbe_hw *, u8, u8 *);
+
+/* DCB unpack routines */
+void txgbe_dcb_unpack_pfc_cee(struct txgbe_dcb_config *, u8 *, u8 *);
+void txgbe_dcb_unpack_refill_cee(struct txgbe_dcb_config *, int, u16 *);
+void txgbe_dcb_unpack_max_cee(struct txgbe_dcb_config *, u16 *);
+void txgbe_dcb_unpack_bwgid_cee(struct txgbe_dcb_config *, int, u8 *);
+void txgbe_dcb_unpack_tsa_cee(struct txgbe_dcb_config *, int, u8 *);
 void txgbe_dcb_unpack_map_cee(struct txgbe_dcb_config *, int, u8 *);
 u8 txgbe_dcb_get_tc_from_up(struct txgbe_dcb_config *, int, u8);
 
+#include "txgbe_dcb_hw.h"
+
 #endif /* _TXGBE_DCB_H_ */
diff --git a/drivers/net/txgbe/base/txgbe_dcb_hw.c b/drivers/net/txgbe/base/txgbe_dcb_hw.c
new file mode 100644
index 000000000..68901012b
--- /dev/null
+++ b/drivers/net/txgbe/base/txgbe_dcb_hw.c
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include "txgbe_type.h"
+
+#include "txgbe_dcb.h"
+
+/**
+ * txgbe_dcb_config_rx_arbiter_raptor - Config Rx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Rx Packet Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_rx_arbiter_raptor(struct txgbe_hw *hw, u16 *refill,
+				      u16 *max, u8 *bwg_id, u8 *tsa,
+				      u8 *map)
+{
+	u32 reg = 0;
+	u32 credit_refill = 0;
+	u32 credit_max = 0;
+	u8  i = 0;
+
+	/*
+	 * Disable the arbiter before changing parameters
+	 * (always enable recycle mode; WSP)
+	 */
+	reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP |
+	      TXGBE_ARBRXCTL_DIA;
+	wr32(hw, TXGBE_ARBRXCTL, reg);
+
+	/*
+	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding
+	 * bits set for the UPs that need to be mapped to that TC.
+	 * e.g. if priorities 6 and 7 are to be mapped to a TC then the
+	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+	 */
+	reg = 0;
+	for (i = 0; i < TXGBE_DCB_UP_MAX; i++)
+		reg |= (map[i] << (i * TXGBE_RPUP2TC_UP_SHIFT));
+
+	wr32(hw, TXGBE_RPUP2TC, reg);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+		credit_refill = refill[i];
+		credit_max = max[i];
+		reg = TXGBE_QARBRXCFG_CRQ(credit_refill) |
+		      TXGBE_QARBRXCFG_MCL(credit_max) |
+		      TXGBE_QARBRXCFG_BWG(bwg_id[i]);
+
+		if (tsa[i] == txgbe_dcb_tsa_strict)
+			reg |= TXGBE_QARBRXCFG_LSP;
+
+		wr32(hw, TXGBE_QARBRXCFG(i), reg);
+	}
+
+	/*
+	 * Configure Rx packet plane (recycle mode; WSP) and
+	 * enable arbiter
+	 */
+	reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP;
+	wr32(hw, TXGBE_ARBRXCTL, reg);
+
+	return 0;
+}
+
+/**
+ * txgbe_dcb_config_tx_desc_arbiter_raptor - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_tx_desc_arbiter_raptor(struct txgbe_hw *hw, u16 *refill,
+					   u16 *max, u8 *bwg_id, u8 *tsa)
+{
+	u32 reg, max_credits;
+	u8  i;
+
+	/* Clear the per-Tx queue credits; we use per-TC instead */
+	for (i = 0; i < 128; i++) {
+		wr32(hw, TXGBE_QARBTXCRED(i), 0);
+	}
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+		max_credits = max[i];
+		reg = TXGBE_QARBTXCFG_MCL(max_credits) |
+		      TXGBE_QARBTXCFG_CRQ(refill[i]) |
+		      TXGBE_QARBTXCFG_BWG(bwg_id[i]);
+
+		if (tsa[i] == txgbe_dcb_tsa_group_strict_cee)
+			reg |= TXGBE_QARBTXCFG_GSP;
+
+		if (tsa[i] == txgbe_dcb_tsa_strict)
+			reg |= TXGBE_QARBTXCFG_LSP;
+
+		wr32(hw, TXGBE_QARBTXCFG(i), reg);
+	}
+
+	/*
+	 * Configure Tx descriptor plane (recycle mode; WSP) and
+	 * enable arbiter
+	 */
+	reg = TXGBE_ARBTXCTL_WSP | TXGBE_ARBTXCTL_RRM;
+	wr32(hw, TXGBE_ARBTXCTL, reg);
+
+	return 0;
+}
+
+/**
+ * txgbe_dcb_config_tx_data_arbiter_raptor - Config Tx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits index by traffic class
+ * @max: max credits index by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Tx Packet Arbiter and credits for each traffic class.
+ */
+s32 txgbe_dcb_config_tx_data_arbiter_raptor(struct txgbe_hw *hw, u16 *refill,
+					   u16 *max, u8 *bwg_id, u8 *tsa,
+					   u8 *map)
+{
+	u32 reg;
+	u8 i;
+
+	/*
+	 * Disable the arbiter before changing parameters
+	 * (always enable recycle mode; SP; arb delay)
+	 */
+	reg = TXGBE_PARBTXCTL_SP |
+	      TXGBE_PARBTXCTL_RECYC |
+	      TXGBE_PARBTXCTL_DA;
+	wr32(hw, TXGBE_PARBTXCTL, reg);
+
+	/*
+	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding
+	 * bits set for the UPs that need to be mapped to that TC.
+	 * e.g. if priorities 6 and 7 are to be mapped to a TC then the
+	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+	 */
+	reg = 0;
+	for (i = 0; i < TXGBE_DCB_UP_MAX; i++)
+		reg |= TXGBE_DCBUP2TC_MAP(i, map[i]);
+
+	wr32(hw, TXGBE_PBRXUP2TC, reg);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+		reg = TXGBE_PARBTXCFG_CRQ(refill[i]) |
+		      TXGBE_PARBTXCFG_MCL(max[i]) |
+		      TXGBE_PARBTXCFG_BWG(bwg_id[i]);
+
+		if (tsa[i] == txgbe_dcb_tsa_group_strict_cee)
+			reg |= TXGBE_PARBTXCFG_GSP;
+
+		if (tsa[i] == txgbe_dcb_tsa_strict)
+			reg |= TXGBE_PARBTXCFG_LSP;
+
+		wr32(hw, TXGBE_PARBTXCFG(i), reg);
+	}
+
+	/*
+	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
+	 * enable arbiter
+	 */
+	reg = TXGBE_PARBTXCTL_SP | TXGBE_PARBTXCTL_RECYC;
+	wr32(hw, TXGBE_PARBTXCTL, reg);
+
+	return 0;
+}
+
+/**
+ * txgbe_dcb_config_pfc_raptor - Configure priority flow control
+ * @hw: pointer to hardware structure
+ * @pfc_en: enabled pfc bitmask
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Priority Flow Control (PFC) for each traffic class.
+ */
+s32 txgbe_dcb_config_pfc_raptor(struct txgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+	u32 i, j, fcrtl, reg;
+	u8 max_tc = 0;
+
+	/* Enable Transmit Priority Flow Control */
+	wr32(hw, TXGBE_TXFCCFG, TXGBE_TXFCCFG_PFC);
+
+	/* Enable Receive Priority Flow Control */
+	wr32m(hw, TXGBE_RXFCCFG, TXGBE_RXFCCFG_PFC,
+		pfc_en ? TXGBE_RXFCCFG_PFC : 0);
+
+	for (i = 0; i < TXGBE_DCB_UP_MAX; i++) {
+		if (map[i] > max_tc)
+			max_tc = map[i];
+	}
+
+	/* Configure PFC Tx thresholds per TC */
+	for (i = 0; i <= max_tc; i++) {
+		int enabled = 0;
+
+		for (j = 0; j < TXGBE_DCB_UP_MAX; j++) {
+			if ((map[j] == i) && (pfc_en & (1 << j))) {
+				enabled = 1;
+				break;
+			}
+		}
+
+		if (enabled) {
+			reg = TXGBE_FCWTRHI_TH(hw->fc.high_water[i]) |
+			      TXGBE_FCWTRHI_XOFF;
+			fcrtl = TXGBE_FCWTRLO_TH(hw->fc.low_water[i]) |
+				TXGBE_FCWTRLO_XON;
+			wr32(hw, TXGBE_FCWTRLO(i), fcrtl);
+		} else {
+			/*
+			 * In order to prevent Tx hangs when the internal Tx
+			 * switch is enabled we must set the high water mark
+			 * to the Rx packet buffer size - 24KB.  This allows
+			 * the Tx switch to function even under heavy Rx
+			 * workloads.
+			 */
+			reg = rd32(hw, TXGBE_PBRXSIZE(i)) - 24576;
+			wr32(hw, TXGBE_FCWTRLO(i), 0);
+		}
+
+		wr32(hw, TXGBE_FCWTRHI(i), reg);
+	}
+
+	for (; i < TXGBE_DCB_TC_MAX; i++) {
+		wr32(hw, TXGBE_FCWTRLO(i), 0);
+		wr32(hw, TXGBE_FCWTRHI(i), 0);
+	}
+
+	/* Configure pause time (2 TCs per register) */
+	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+	for (i = 0; i < (TXGBE_DCB_TC_MAX / 2); i++)
+		wr32(hw, TXGBE_FCXOFFTM(i), reg);
+
+	/* Configure flow control refresh threshold value */
+	wr32(hw, TXGBE_RXFCRFSH, hw->fc.pause_time / 2);
+
+	return 0;
+}
+
+/**
+ * txgbe_dcb_config_tc_stats_raptor - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ *
+ * Configure queue statistics registers; all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+s32 txgbe_dcb_config_tc_stats_raptor(struct txgbe_hw *hw,
+				    struct txgbe_dcb_config *dcb_config)
+{
+	u8 tc_count = 8;
+	bool vt_mode = false;
+
+	UNREFERENCED_PARAMETER(hw);
+
+	if (dcb_config != NULL) {
+		tc_count = dcb_config->num_tcs.pg_tcs;
+		vt_mode = dcb_config->vt_mode;
+	}
+
+	if (!((tc_count == 8 && vt_mode == false) || tc_count == 4))
+		return TXGBE_ERR_PARAM;
+
+	return 0;
+}
+
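
The watermarks that txgbe_dcb_config_pfc_raptor() programs are computed
later in txgbe_dcb_hw_configure() (txgbe_rxtx.c below) as a 3/4 / 1/4
split of the per-TC Rx packet buffer. A sketch of that arithmetic for the
default 8-TC case, reusing NIC_RX_BUFFER_SIZE from txgbe_rxtx.c (units as
used there):

#include <stdio.h>

#define NIC_RX_BUFFER_SIZE 0x200   /* as defined in txgbe_rxtx.c below */

int main(void)
{
	unsigned int nb_tcs = 8;
	unsigned int pbsize = NIC_RX_BUFFER_SIZE / nb_tcs;  /* 64 */
	unsigned int high_water = pbsize * 3 / 4;           /* 48 */
	unsigned int low_water = pbsize / 4;                /* 16 */

	printf("pbsize=%u high_water=%u low_water=%u\n",
	       pbsize, high_water, low_water);
	return 0;
}

This is where the "high_water is 48 ... low_water is 16" defaults
mentioned in that function's comment come from.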
diff --git a/drivers/net/txgbe/base/txgbe_dcb_hw.h b/drivers/net/txgbe/base/txgbe_dcb_hw.h
new file mode 100644
index 000000000..d31a70f1d
--- /dev/null
+++ b/drivers/net/txgbe/base/txgbe_dcb_hw.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef _TXGBE_DCB_HW_H_
+#define _TXGBE_DCB_HW_H_
+
+/* DCB PFC */
+s32 txgbe_dcb_config_pfc_raptor(struct txgbe_hw *, u8, u8 *);
+
+/* DCB stats */
+s32 txgbe_dcb_config_tc_stats_raptor(struct txgbe_hw *,
+				    struct txgbe_dcb_config *);
+
+/* DCB config arbiters */
+s32 txgbe_dcb_config_tx_desc_arbiter_raptor(struct txgbe_hw *, u16 *, u16 *,
+					   u8 *, u8 *);
+s32 txgbe_dcb_config_tx_data_arbiter_raptor(struct txgbe_hw *, u16 *, u16 *,
+					   u8 *, u8 *, u8 *);
+s32 txgbe_dcb_config_rx_arbiter_raptor(struct txgbe_hw *, u16 *, u16 *, u8 *,
+				      u8 *, u8 *);
+
+#endif /* _TXGBE_DCB_HW_H_ */
diff --git a/drivers/net/txgbe/base/txgbe_hw.c b/drivers/net/txgbe/base/txgbe_hw.c
index 15ab0213d..465106009 100644
--- a/drivers/net/txgbe/base/txgbe_hw.c
+++ b/drivers/net/txgbe/base/txgbe_hw.c
@@ -4,6 +4,7 @@
 
 #include "txgbe_type.h"
 #include "txgbe_phy.h"
+#include "txgbe_dcb.h"
 #include "txgbe_vf.h"
 #include "txgbe_eeprom.h"
 #include "txgbe_mng.h"
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index a72994d08..7a2f16d63 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -401,6 +401,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
 	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
+	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	const struct rte_memzone *mz;
 	uint32_t ctrl_ext;
@@ -600,6 +601,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 	/* enable support intr */
 	txgbe_enable_intr(eth_dev);
 
+	/* initialize bandwidth configuration info */
+	memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
+
 	return 0;
 }
 
@@ -1181,8 +1185,10 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
+	/* Configure DCB hw */
 	txgbe_configure_pb(dev);
 	txgbe_configure_port(dev);
+	txgbe_configure_dcb(dev);
 
 	err = txgbe_dev_rxtx_start(dev);
 	if (err < 0) {
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 1166c151d..8a3c56a56 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -82,6 +82,11 @@ struct txgbe_vf_info {
 	uint16_t switch_domain_id;
 };
 
+/* The configuration of bandwidth */
+struct txgbe_bw_conf {
+	uint8_t tc_num; /* Number of TCs. */
+};
+
 /*
  * Structure to store private data for each driver instance (for each port).
  */
@@ -94,6 +99,7 @@ struct txgbe_adapter {
 	struct txgbe_hwstrip        hwstrip;
 	struct txgbe_dcb_config     dcb_config;
 	struct txgbe_vf_info        *vfdata;
+	struct txgbe_bw_conf        bw_conf;
 	bool rx_bulk_alloc_allowed;
 };
 
@@ -132,6 +138,9 @@ int txgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);
 
 #define TXGBE_DEV_VFDATA(dev) \
 	(&((struct txgbe_adapter *)(dev)->data->dev_private)->vfdata)
+#define TXGBE_DEV_BW_CONF(dev) \
+	(&((struct txgbe_adapter *)(dev)->data->dev_private)->bw_conf)
+
 
 /*
  * RX/TX function prototypes
@@ -211,6 +220,7 @@ void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
 
 void txgbe_configure_pb(struct rte_eth_dev *dev);
 void txgbe_configure_port(struct rte_eth_dev *dev);
+void txgbe_configure_dcb(struct rte_eth_dev *dev);
 
 int
 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
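
For context, applications reach the new txgbe_configure_dcb() path through
the standard mq_mode fields. A hedged sketch against the DPDK 20.08-era
ethdev API (the 4-TC round-robin UP mapping is illustrative, not a driver
requirement):

#include <string.h>
#include <rte_ethdev.h>

/* Request 4-TC DCB with PFC on both Rx and Tx; UPs 0-7 -> TCs 0-3. */
static void dcb_port_conf_init(struct rte_eth_conf *conf)
{
	int i;

	memset(conf, 0, sizeof(*conf));
	conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
	conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	conf->rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
	conf->tx_adv_conf.dcb_tx_conf.nb_tcs = ETH_4_TCS;
	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
		conf->rx_adv_conf.dcb_rx_conf.dcb_tc[i] = i % 4;
		conf->tx_adv_conf.dcb_tx_conf.dcb_tc[i] = i % 4;
	}
	/* take the PFC branch at the end of txgbe_dcb_hw_configure() */
	conf->dcb_capability_en = ETH_DCB_PFC_SUPPORT;
}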
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index e2ab86568..a1d1c83da 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -2760,6 +2760,365 @@ txgbe_dev_free_queues(struct rte_eth_dev *dev)
 	dev->data->nb_tx_queues = 0;
 }
 
+#define NUM_VFTA_REGISTERS 128
+#define NIC_RX_BUFFER_SIZE 0x200
+
+/**
+ * txgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
+ * @dev: pointer to eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static void
+txgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
+		       struct txgbe_dcb_config *dcb_config)
+{
+	uint32_t reg;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Disable the Tx desc arbiter */
+	reg = rd32(hw, TXGBE_ARBTXCTL);
+	reg |= TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, reg);
+
+	/* Enable DCB for Tx with 8 TCs */
+	reg = rd32(hw, TXGBE_PORTCTL);
+	reg &= ~TXGBE_PORTCTL_NUMTC_MASK;
+	reg |= TXGBE_PORTCTL_DCB;
+	if (dcb_config->num_tcs.pg_tcs == 8) {
+		reg |= TXGBE_PORTCTL_NUMTC_8;
+	} else {
+		reg |= TXGBE_PORTCTL_NUMTC_4;
+	}
+	wr32(hw, TXGBE_PORTCTL, reg);
+
+	/* Enable the Tx desc arbiter */
+	reg = rd32(hw, TXGBE_ARBTXCTL);
+	reg &= ~TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, reg);
+}
+
+static void
+txgbe_dcb_rx_config(struct rte_eth_dev *dev,
+		struct txgbe_dcb_config *dcb_config)
+{
+	struct rte_eth_dcb_rx_conf *rx_conf =
+			&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+	struct txgbe_dcb_tc_config *tc;
+	uint8_t i, j;
+
+	dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
+
+	/* Initialize User Priority to Traffic Class mapping */
+	for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+	}
+
+	/* User Priority to Traffic Class mapping */
+	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = rx_conf->dcb_tc[i];
+		tc = &dcb_config->tc_config[j];
+		tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+						(uint8_t)(1 << i);
+	}
+}
+
+static void
+txgbe_dcb_tx_config(struct rte_eth_dev *dev,
+		struct txgbe_dcb_config *dcb_config)
+{
+	struct rte_eth_dcb_tx_conf *tx_conf =
+			&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+	struct txgbe_dcb_tc_config *tc;
+	uint8_t i, j;
+
+	dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
+	dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
+
+	/* Initialize User Priority to Traffic Class mapping */
+	for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+	}
+
+	/* User Priority to Traffic Class mapping */
+	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		j = tx_conf->dcb_tc[i];
+		tc = &dcb_config->tc_config[j];
+		tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+						(uint8_t)(1 << i);
+	}
+}
+
+/**
+ * txgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
+ * @dev: pointer to eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static void
+txgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
+		       struct txgbe_dcb_config *dcb_config)
+{
+	uint32_t reg;
+	uint32_t vlanctrl;
+	uint8_t i;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	PMD_INIT_FUNC_TRACE();
+	/*
+	 * Disable the arbiter before changing parameters
+	 * (always enable recycle mode; WSP)
+	 */
+	reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP | TXGBE_ARBRXCTL_DIA;
+	wr32(hw, TXGBE_ARBRXCTL, reg);
+
+	reg = rd32(hw, TXGBE_PORTCTL);
+	reg &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
+	if (dcb_config->num_tcs.pg_tcs == 4) {
+		reg |= TXGBE_PORTCTL_NUMTC_4;
+		if (dcb_config->vt_mode) {
+			reg |= TXGBE_PORTCTL_NUMVT_32;
+		} else {
+			wr32(hw, TXGBE_POOLCTL, 0);
+		}
+	}
+
+	if (dcb_config->num_tcs.pg_tcs == 8) {
+		reg |= TXGBE_PORTCTL_NUMTC_8;
+		if (dcb_config->vt_mode) {
+			reg |= TXGBE_PORTCTL_NUMVT_16;
+		} else {
+			wr32(hw, TXGBE_POOLCTL, 0);
+		}
+	}
+
+	wr32(hw, TXGBE_PORTCTL, reg);
+
+	/* VLNCTL: enable vlan filtering and allow all vlan tags through */
+	vlanctrl = rd32(hw, TXGBE_VLANCTL);
+	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+	wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+	/* VLANTBL - enable all vlan filters */
+	for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
+		wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
+	}
+
+	/*
+	 * Configure Rx packet plane (recycle mode; WSP) and
+	 * enable arbiter
+	 */
+	reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP;
+	wr32(hw, TXGBE_ARBRXCTL, reg);
+}
+
+static void
+txgbe_dcb_hw_arbite_rx_config(struct txgbe_hw *hw, uint16_t *refill,
+			uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+	txgbe_dcb_config_rx_arbiter_raptor(hw, refill, max, bwg_id,
+					  tsa, map);
+}
+
+static void
+txgbe_dcb_hw_arbite_tx_config(struct txgbe_hw *hw, uint16_t *refill, uint16_t *max,
+			    uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+	switch (hw->mac.type) {
+	case txgbe_mac_raptor:
+		txgbe_dcb_config_tx_desc_arbiter_raptor(hw, refill, max, bwg_id, tsa);
+		txgbe_dcb_config_tx_data_arbiter_raptor(hw, refill, max, bwg_id, tsa, map);
+		break;
+	default:
+		break;
+	}
+}
+
+#define DCB_RX_CONFIG  1
+#define DCB_TX_CONFIG  1
+#define DCB_TX_PB      1024
+/**
+ * txgbe_dcb_hw_configure - Enable DCB and configure
+ * general DCB in VT mode and non-VT mode parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to txgbe_dcb_config structure
+ */
+static int
+txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
+			struct txgbe_dcb_config *dcb_config)
+{
+	int     ret = 0;
+	uint8_t i, pfc_en, nb_tcs;
+	uint16_t pbsize, rx_buffer_size;
+	uint8_t config_dcb_rx = 0;
+	uint8_t config_dcb_tx = 0;
+	uint8_t tsa[TXGBE_DCB_TC_MAX] = {0};
+	uint8_t bwgid[TXGBE_DCB_TC_MAX] = {0};
+	uint16_t refill[TXGBE_DCB_TC_MAX] = {0};
+	uint16_t max[TXGBE_DCB_TC_MAX] = {0};
+	uint8_t map[TXGBE_DCB_TC_MAX] = {0};
+	struct txgbe_dcb_tc_config *tc;
+	uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
+
+	switch (dev->data->dev_conf.rxmode.mq_mode) {
+	case ETH_MQ_RX_DCB:
+	case ETH_MQ_RX_DCB_RSS:
+		dcb_config->vt_mode = false;
+		config_dcb_rx = DCB_RX_CONFIG;
+		/* Get DCB Rx configuration parameters from rte_eth_conf */
+		txgbe_dcb_rx_config(dev, dcb_config);
+		/* Configure general DCB Rx parameters */
+		txgbe_dcb_rx_hw_config(dev, dcb_config);
+		break;
+	default:
+		PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
+		break;
+	}
+	switch (dev->data->dev_conf.txmode.mq_mode) {
+	case ETH_MQ_TX_DCB:
+		dcb_config->vt_mode = false;
+		config_dcb_tx = DCB_TX_CONFIG;
+		/* Get DCB Tx configuration parameters from rte_eth_conf */
+		txgbe_dcb_tx_config(dev, dcb_config);
+		/* Configure general DCB Tx parameters */
+		txgbe_dcb_tx_hw_config(dev, dcb_config);
+		break;
+	default:
+		PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
+		break;
+	}
+
+	nb_tcs = dcb_config->num_tcs.pfc_tcs;
+	/* Unpack map */
+	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
+	if (nb_tcs == ETH_4_TCS) {
+		/* Avoid un-configured priority mapping to TC0 */
+		uint8_t j = 4;
+		uint8_t mask = 0xFF;
+
+		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+			mask = (uint8_t)(mask & (~(1 << map[i])));
+		for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
+			if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+				map[j++] = i;
+			mask >>= 1;
+		}
+		/* Re-configure 4 TCs BW */
+		for (i = 0; i < nb_tcs; i++) {
+			tc = &dcb_config->tc_config[i];
+			if (bw_conf->tc_num != nb_tcs)
+				tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
+					(uint8_t)(100 / nb_tcs);
+			tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
+						(uint8_t)(100 / nb_tcs);
+		}
+		for (; i < TXGBE_DCB_TC_MAX; i++) {
+			tc = &dcb_config->tc_config[i];
+			tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+			tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = 0;
+		}
+	} else {
+		/* Re-configure 8 TCs BW */
+		for (i = 0; i < nb_tcs; i++) {
+			tc = &dcb_config->tc_config[i];
+			if (bw_conf->tc_num != nb_tcs)
+				tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
+					(uint8_t)(100 / nb_tcs + (i & 1));
+			tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
+				(uint8_t)(100 / nb_tcs + (i & 1));
+		}
+	}
+
+	rx_buffer_size = NIC_RX_BUFFER_SIZE;
+
+	if (config_dcb_rx) {
+		/* Set RX buffer size */
+		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
+		uint32_t rxpbsize = pbsize << 10;
+
+		for (i = 0; i < nb_tcs; i++) {
+			wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
+		}
+		/* zero alloc all unused TCs */
+		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+			wr32(hw, TXGBE_PBRXSIZE(i), 0);
+		}
+	}
+	if (config_dcb_tx) {
+		/* Only support an equally distributed
+		 * Tx packet buffer strategy.
+		 */
+		uint32_t txpktsize = TXGBE_PBTXSIZE_MAX / nb_tcs;
+		uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - TXGBE_TXPKT_SIZE_MAX;
+
+		for (i = 0; i < nb_tcs; i++) {
+			wr32(hw, TXGBE_PBTXSIZE(i), txpktsize);
+			wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
+		}
+		/* Clear unused TCs, if any, to zero buffer size */
+		for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+			wr32(hw, TXGBE_PBTXSIZE(i), 0);
+			wr32(hw, TXGBE_PBTXDMATH(i), 0);
+		}
+	}
+
+	/* Calculate traffic class credits */
+	txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
+				TXGBE_DCB_TX_CONFIG);
+	txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
+				TXGBE_DCB_RX_CONFIG);
+
+	if (config_dcb_rx) {
+		/* Unpack CEE standard containers */
+		txgbe_dcb_unpack_refill_cee(dcb_config, TXGBE_DCB_RX_CONFIG, refill);
+		txgbe_dcb_unpack_max_cee(dcb_config, max);
+		txgbe_dcb_unpack_bwgid_cee(dcb_config, TXGBE_DCB_RX_CONFIG, bwgid);
+		txgbe_dcb_unpack_tsa_cee(dcb_config, TXGBE_DCB_RX_CONFIG, tsa);
+		/* Configure PG(ETS) RX */
+		txgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
+	}
+
+	if (config_dcb_tx) {
+		/* Unpack CEE standard containers */
+		txgbe_dcb_unpack_refill_cee(dcb_config, TXGBE_DCB_TX_CONFIG, refill);
+		txgbe_dcb_unpack_max_cee(dcb_config, max);
+		txgbe_dcb_unpack_bwgid_cee(dcb_config, TXGBE_DCB_TX_CONFIG, bwgid);
+		txgbe_dcb_unpack_tsa_cee(dcb_config, TXGBE_DCB_TX_CONFIG, tsa);
+		/* Configure PG(ETS) TX */
+		txgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
+	}
+
+	/* Configure queue statistics registers */
+	txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
+
+	/* Check if the PFC is supported */
+	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+		pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
+		for (i = 0; i < nb_tcs; i++) {
+			/*
+			 * If the TC count is 8, the default high_water
+			 * is 48 and the default low_water is 16.
+			 */
+			hw->fc.high_water[i] = (pbsize * 3) / 4;
+			hw->fc.low_water[i] = pbsize / 4;
+			/* Enable pfc for this TC */
+			tc = &dcb_config->tc_config[i];
+			tc->pfc = txgbe_dcb_pfc_enabled;
+		}
+		txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+		if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+			pfc_en &= 0x0F;
+		ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
+	}
+
+	return ret;
+}
+
 void txgbe_configure_pb(struct rte_eth_dev *dev)
 {
 	struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
@@ -2811,6 +3170,30 @@ void txgbe_configure_port(struct rte_eth_dev *dev)
 	wr32(hw, TXGBE_VXLANPORT, 4789);
 }
 
+/**
+ * txgbe_configure_dcb - Configure DCB hardware
+ * @dev: pointer to rte_eth_dev
+ */
+void txgbe_configure_dcb(struct rte_eth_dev *dev)
+{
+	struct txgbe_dcb_config *dcb_cfg = TXGBE_DEV_DCB_CONFIG(dev);
+	struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* check supported mq_mode for DCB */
+	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
+	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
+	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
+		return;
+
+	if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
+		return;
+
+	/* Configure DCB hardware */
+	txgbe_dcb_hw_configure(dev, dcb_cfg);
+}
+
 static int __rte_cold
 txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
 {
-- 
2.18.4