[dpdk-dev] [RFC 1/2] net/bonding: add Tx prepare for bonding

Chengchang Tang tangchengchang at huawei.com
Fri Apr 16 13:04:30 CEST 2021


To use the HW offload capabilities (e.g. checksum and TSO) in the Tx
direction, upper-layer users need to call rte_eth_tx_prepare to make some
adjustments to the packets before sending them (e.g. processing pseudo
headers when Tx checksum offload is enabled). But the tx_prepare callback
of the bond driver is not implemented, so the related offloads cannot be
used unless the upper-layer users process the packets properly in their
own applications. This harms portability.
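
For reference, the usual application-side pattern today looks roughly like
this (a minimal sketch; port_id, queue_id, pkts and nb_pkts stand for the
caller's own setup):

    /* Prepare the burst for the offloads requested in mbuf ol_flags
     * (e.g. pseudo-header fixup for Tx checksum/TSO), then send only
     * the packets that passed preparation.
     */
    uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
    uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
    /* pkts[nb_prep..nb_pkts-1] failed preparation; rte_errno tells why. */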

However, it is difficult to design a tx_prepare callback for the bonding
driver, because when a bonded device sends packets it distributes them to
its slave devices based on the real-time link status and the bonding mode.
That is, it is very difficult for the bonding device to determine which
slave device's prepare function should be invoked. In addition, if the
link status changes after the packets are prepared, the packets may fail
to be sent because the packet distribution may change.

So, in this patch, the tx_prepare callback of the bonding driver is not
implemented. Instead, the prepare function of the slave device is called
in the tx_burst callback, and a per-device flag is introduced to control
whether the bonded device needs to call rte_eth_tx_prepare. If upper-layer
users need the related offloads, they should enable this preparation
function; the bonded device will then call rte_eth_tx_prepare for the
fast-path packets in the tx_burst callback.
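
With this patch applied, the intended usage would be roughly as follows (a
sketch; assumes bonded_port_id refers to an already configured bonded
device):

    /* Enable packet preparation once at setup time ... */
    if (rte_eth_bond_tx_prepare_enable(bonded_port_id) != 0)
        rte_panic("cannot enable Tx prepare on bonded device\n");

    /* ... after which the fast path needs no explicit
     * rte_eth_tx_prepare(); the bonding PMD calls it per slave
     * inside its tx_burst callback.
     */
    uint16_t nb_tx = rte_eth_tx_burst(bonded_port_id, queue_id, pkts, nb_pkts);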

Note:
rte_eth_tx_prepare is not added to bond mode 3 (Broadcast). In broadcast
mode, a packet needs to be sent by all slave ports, and different PMDs
process the packets differently in tx_prepare, so a packet prepared for
one slave may be incorrect when sent by another.

Signed-off-by: Chengchang Tang <tangchengchang at huawei.com>
---
 drivers/net/bonding/eth_bond_private.h |  1 +
 drivers/net/bonding/rte_eth_bond.h     | 29 +++++++++++++++++++++++++++++
 drivers/net/bonding/rte_eth_bond_api.c | 28 ++++++++++++++++++++++++++++
 drivers/net/bonding/rte_eth_bond_pmd.c | 33 +++++++++++++++++++++++++++++----
 drivers/net/bonding/version.map        |  5 +++++
 5 files changed, 92 insertions(+), 4 deletions(-)

diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index 75fb8dc..72ec4a0 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -126,6 +126,7 @@ struct bond_dev_private {
 	/**< Flag for whether MAC address is user defined or not */

 	uint8_t link_status_polling_enabled;
+	uint8_t tx_prepare_enabled;
 	uint32_t link_status_polling_interval_ms;

 	uint32_t link_down_delay_ms;
diff --git a/drivers/net/bonding/rte_eth_bond.h b/drivers/net/bonding/rte_eth_bond.h
index 874aa91..8ec09eb 100644
--- a/drivers/net/bonding/rte_eth_bond.h
+++ b/drivers/net/bonding/rte_eth_bond.h
@@ -343,6 +343,35 @@ rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id,
 int
 rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id);

+/**
+ * Enable Tx prepare for bonded port
+ *
+ * To perform some HW offloads in the Tx direction, some PMDs need to call
+ * rte_eth_tx_prepare to make adjustments to the packets. This function
+ * enables packet preparation in the fast path for the bonded device.
+ *
+ * @param bonded_port_id      Bonded device id
+ *
+ * @return
+ *   0 on success, negative value otherwise.
+ */
+__rte_experimental
+int
+rte_eth_bond_tx_prepare_enable(uint16_t bonded_port_id);
+
+/**
+ * Disable Tx prepare for bonded port
+ *
+ * This function disables Tx prepare for the fast path packets.
+ *
+ * @param bonded_port_id      Bonded device id
+ *
+ * @return
+ *   0 on success, negative value otherwise.
+ */
+__rte_experimental
+int
+rte_eth_bond_tx_prepare_disable(uint16_t bonded_port_id);

 #ifdef __cplusplus
 }
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 17e6ff8..b04806a 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -1050,3 +1050,31 @@ rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id)

 	return internals->link_up_delay_ms;
 }
+
+int
+rte_eth_bond_tx_prepare_enable(uint16_t bonded_port_id)
+{
+	struct bond_dev_private *internals;
+
+	if (valid_bonded_port_id(bonded_port_id) != 0)
+		return -1;
+
+	internals = rte_eth_devices[bonded_port_id].data->dev_private;
+	internals->tx_prepare_enabled = 1;
+
+	return 0;
+}
+
+int
+rte_eth_bond_tx_prepare_disable(uint16_t bonded_port_id)
+{
+	struct bond_dev_private *internals;
+
+	if (valid_bonded_port_id(bonded_port_id) != 0)
+		return -1;
+
+	internals = rte_eth_devices[bonded_port_id].data->dev_private;
+	internals->tx_prepare_enabled = 0;
+
+	return 0;
+}
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 2e9cea5..3b7870f 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -606,8 +606,14 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 	/* Send packet burst on each slave device */
 	for (i = 0; i < num_of_slaves; i++) {
 		if (slave_nb_pkts[i] > 0) {
+			int nb_prep_pkts = slave_nb_pkts[i];
+			if (internals->tx_prepare_enabled)
+				nb_prep_pkts = rte_eth_tx_prepare(slaves[i],
+						bd_tx_q->queue_id,
+						slave_bufs[i], nb_prep_pkts);
+
 			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
-					slave_bufs[i], slave_nb_pkts[i]);
+					slave_bufs[i], nb_prep_pkts);

 			/* if tx burst fails move packets to end of bufs */
 			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
@@ -632,6 +638,7 @@ bond_ethdev_tx_burst_active_backup(void *queue,
 {
 	struct bond_dev_private *internals;
 	struct bond_tx_queue *bd_tx_q;
+	int nb_prep_pkts = nb_pkts;

 	bd_tx_q = (struct bond_tx_queue *)queue;
 	internals = bd_tx_q->dev_private;
@@ -639,8 +646,13 @@ bond_ethdev_tx_burst_active_backup(void *queue,
 	if (internals->active_slave_count < 1)
 		return 0;

+	if (internals->tx_prepare_enabled)
+		nb_prep_pkts =
+			rte_eth_tx_prepare(internals->current_primary_port,
+				bd_tx_q->queue_id, bufs, nb_prep_pkts);
+
 	return rte_eth_tx_burst(internals->current_primary_port, bd_tx_q->queue_id,
-			bufs, nb_pkts);
+			bufs, nb_prep_pkts);
 }

 static inline uint16_t
@@ -939,6 +951,7 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	}

 	for (i = 0; i < num_of_slaves; i++) {
+		int nb_prep_pkts;
 		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
 		for (j = num_tx_total; j < nb_pkts; j++) {
 			if (j + 3 < nb_pkts)
@@ -955,8 +968,14 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 		}

+		nb_prep_pkts = nb_pkts - num_tx_total;
+		if (internals->tx_prepare_enabled)
+			nb_prep_pkts = rte_eth_tx_prepare(slaves[i],
+					bd_tx_q->queue_id, bufs + num_tx_total,
+					nb_prep_pkts);
+
 		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
-				bufs + num_tx_total, nb_pkts - num_tx_total);
+				bufs + num_tx_total, nb_prep_pkts);

 		if (num_tx_total == nb_pkts)
 			break;
@@ -1159,12 +1178,18 @@ tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,

 	/* Send packet burst on each slave device */
 	for (i = 0; i < slave_count; i++) {
+		int nb_prep_pkts;
 		if (slave_nb_bufs[i] == 0)
 			continue;
+		nb_prep_pkts = slave_nb_bufs[i];
+		if (internals->tx_prepare_enabled)
+			nb_prep_pkts = rte_eth_tx_prepare(slave_port_ids[i],
+					bd_tx_q->queue_id, slave_bufs[i],
+					nb_prep_pkts);

 		slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
 				bd_tx_q->queue_id, slave_bufs[i],
-				slave_nb_bufs[i]);
+				nb_prep_pkts);

 		total_tx_count += slave_tx_count;

diff --git a/drivers/net/bonding/version.map b/drivers/net/bonding/version.map
index df81ee7..b642729 100644
--- a/drivers/net/bonding/version.map
+++ b/drivers/net/bonding/version.map
@@ -31,3 +31,8 @@ DPDK_21 {

 	local: *;
 };
+
+EXPERIMENTAL {
+	rte_eth_bond_tx_prepare_disable;
+	rte_eth_bond_tx_prepare_enable;
+};
--
2.7.4


