[PATCH v3] net/axgbe: support TSO
Jesna K E
jesna.k.e at amd.com
Thu Nov 16 17:03:26 CET 2023
Add TSO (TCP Segmentation Offload) support to the axgbe PMD.

This is an initial implementation of the TSO feature. Known
limitation: currently only the packet headers are transmitted to
the receiver on the tester side; the payload segments do not yet
go out on the wire.
Signed-off-by: Jesna K E <jesna.k.e at amd.com>
---
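Notes (not part of the commit message):
For reviewers, here is a minimal sketch of how an application is
expected to request TSO from this PMD, using only standard ethdev
and mbuf APIs. The port id, variable names and segment size are
illustrative assumptions; also note this patch currently keys off
the port-level offload flag (pdata->tso_tx) rather than the
per-mbuf RTE_MBUF_F_TX_TCP_SEG flag.

	#include <rte_ethdev.h>
	#include <rte_ip.h>
	#include <rte_tcp.h>

	/* Port setup: request the TSO Tx offload */
	struct rte_eth_conf port_conf = { 0 };
	port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
	rte_eth_dev_configure(port_id, 1, 1, &port_conf); /* port_id assumed */

	/* Per packet: mark the mbuf for segmentation and describe the
	 * headers so the PMD can locate the payload. For IPv4, the IP
	 * checksum flags are normally set alongside the TSO flag.
	 */
	m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4 |
		       RTE_MBUF_F_TX_IP_CKSUM;
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr); /* no TCP options */
	m->tso_segsz = 800; /* MSS; matches the value this patch
			     * hard-codes into DMA_CH_CR */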
doc/guides/nics/features/axgbe.ini | 1 +
drivers/net/axgbe/axgbe_common.h | 12 ++++
drivers/net/axgbe/axgbe_dev.c | 13 +++++
drivers/net/axgbe/axgbe_ethdev.c | 3 +
drivers/net/axgbe/axgbe_ethdev.h | 1 +
drivers/net/axgbe/axgbe_rxtx.c | 88 +++++++++++++++++++++++++-----
6 files changed, 104 insertions(+), 14 deletions(-)
diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
index 5e2d6498e5..5c30c967bc 100644
--- a/doc/guides/nics/features/axgbe.ini
+++ b/doc/guides/nics/features/axgbe.ini
@@ -7,6 +7,7 @@
Speed capabilities = Y
Link status = Y
Scattered Rx = Y
+TSO = Y
Promiscuous mode = Y
Allmulticast mode = Y
RSS hash = Y
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index a5d11c5832..c30efe4c02 100644
--- a/drivers/net/axgbe/axgbe_common.h
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -161,6 +161,10 @@
#define DMA_CH_CARBR_LO 0x5c
#define DMA_CH_SR 0x60
+/* MSS register entry bit positions and sizes for TSO */
+#define DMA_CH_CR_MSS_INDEX 0
+#define DMA_CH_CR_MSS_WIDTH 14
+
/* DMA channel register entry bit positions and sizes */
#define DMA_CH_CR_PBLX8_INDEX 16
#define DMA_CH_CR_PBLX8_WIDTH 1
@@ -1232,6 +1236,14 @@
#define TX_CONTEXT_DESC3_VT_INDEX 0
#define TX_CONTEXT_DESC3_VT_WIDTH 16
+/* TSO-related descriptor bit positions and sizes */
+#define TX_NORMAL_DESC3_TPL_INDEX 0
+#define TX_NORMAL_DESC3_TPL_WIDTH 18
+#define TX_NORMAL_DESC3_THL_INDEX 19
+#define TX_NORMAL_DESC3_THL_WIDTH 4
+#define TX_CONTEXT_DESC3_OSTC_INDEX 27
+#define TX_CONTEXT_DESC3_OSTC_WIDTH 1
+
#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
#define TX_NORMAL_DESC2_IC_INDEX 31
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index 6a7fddffca..eef453fab0 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -808,6 +808,18 @@ int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
return 0;
}
+static void xgbe_config_tso_mode(struct axgbe_port *pdata)
+{
+ unsigned int i;
+ struct axgbe_tx_queue *txq;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, 1);
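+ /* NOTE: the MSS programmed below is a hard-coded 800-byte
+ * placeholder (assumption: to be replaced); a complete
+ * implementation would derive it from the per-packet
+ * mbuf->tso_segsz.
+ */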
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, MSS, 800);
+ }
+}
+
static int axgbe_enable_rss(struct axgbe_port *pdata)
{
int ret;
@@ -1314,6 +1326,7 @@ static int axgbe_init(struct axgbe_port *pdata)
axgbe_config_rx_pbl_val(pdata);
axgbe_config_rx_buffer_size(pdata);
axgbe_config_rss(pdata);
+ xgbe_config_tso_mode(pdata);
wrapper_tx_desc_init(pdata);
ret = wrapper_rx_desc_init(pdata);
if (ret)
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 3717166384..0a4901aabc 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -12,6 +12,8 @@
#include "eal_filesystem.h"
+#include <rte_vect.h>
+
#ifdef RTE_ARCH_X86
#include <cpuid.h>
#else
@@ -1237,6 +1239,7 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_TSO |
RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
if (pdata->hw_feat.rss) {
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
index 7f19321d88..31a583c2c6 100644
--- a/drivers/net/axgbe/axgbe_ethdev.h
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -583,6 +583,7 @@ struct axgbe_port {
unsigned int tx_osp_mode;
unsigned int tx_max_fifo_size;
unsigned int multi_segs_tx;
+ unsigned int tso_tx;
/* Rx settings */
unsigned int rx_sf_mode;
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index a9ff291cef..b0cafcbdda 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -627,6 +627,9 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
pdata->multi_segs_tx = true;
+ if ((dev_data->dev_conf.txmode.offloads &
+ RTE_ETH_TX_OFFLOAD_TCP_TSO))
+ pdata->tso_tx = true;
return 0;
}
@@ -827,6 +830,7 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
desc = &txq->desc[idx];
+ PMD_DRV_LOG(DEBUG, "tso:Inside %s /n", __func__);
/* Update buffer address and length */
desc->baddr = rte_mbuf_data_iova(mbuf);
@@ -873,7 +877,6 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
rte_wmb();
-
/* Save mbuf */
txq->sw_ring[idx] = mbuf;
/* Update current index*/
@@ -884,6 +887,7 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
return 0;
}
+
/* Tx Descriptor formation for segmented mbuf
* Each mbuf will require multiple descriptors
*/
@@ -899,9 +903,26 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
uint32_t pkt_len = 0;
int nb_desc_free;
struct rte_mbuf *tx_pkt;
+ uint64_t l2_len = 0;
+ uint64_t l3_len = 0;
+ uint64_t l4_len = 0;
+ uint64_t total_hdr_len;
+ int tso = 0;
+
+ /* Header lengths required for TSO */
+ l2_len = mbuf->l2_len;
+ l3_len = mbuf->l3_len;
+ l4_len = mbuf->l4_len;
+ total_hdr_len = l2_len + l3_len + l4_len;
+
+ tso = txq->pdata->tso_tx ? 1 : 0;
- nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ PMD_DRV_LOG(DEBUG, "tso::Inside %s\n", __func__);
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
if (mbuf->nb_segs > nb_desc_free) {
axgbe_xmit_cleanup_seg(txq);
nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
@@ -913,23 +934,27 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
desc = &txq->desc[idx];
/* Saving the start index for setting the OWN bit finally */
start_index = idx;
-
tx_pkt = mbuf;
/* Max_pkt len = 9018 ; need to update it according to Jumbo pkt size */
pkt_len = tx_pkt->pkt_len;
/* Update buffer address and length */
- desc->baddr = rte_mbuf_data_iova(tx_pkt);
- AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
- tx_pkt->data_len);
- /* Total msg length to transmit */
- AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
- tx_pkt->pkt_len);
+ desc->baddr = rte_pktmbuf_iova_offset(mbuf, 0);
+ /* For TSO, the first buffer contains only the headers */
+ if (tso)
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ total_hdr_len);
+ else
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ tx_pkt->data_len);
+ rte_wmb();
+
/* Timestamp enablement check */
if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
rte_wmb();
+
/* Mark it as First Descriptor */
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
/* Mark it as a NORMAL descriptor */
@@ -959,19 +984,55 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
}
rte_wmb();
+ /* Descriptor settings for TSO */
+ if (tso) {
+ PMD_DRV_LOG(DEBUG, "tso: applying TSO descriptor settings\n");
+ /* Enable TSO */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TSE, 1);
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TPL,
+ ((mbuf->pkt_len) - total_hdr_len));
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, THL,
+ l4_len);
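+ /* Assumption to verify: some XGMAC implementations (e.g. the
+ * Linux amd-xgbe driver) program THL in 32-bit words, i.e.
+ * tcp_header_len / 4, not in bytes as done above.
+ */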
+ } else {
+ /* Enable CRC and Pad Insertion */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CPC, 0);
+ /* Total msg length to transmit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+ mbuf->pkt_len);
+ }
+
/* Save mbuf */
txq->sw_ring[idx] = tx_pkt;
/* Update current index*/
txq->cur++;
+ /* For TSO, one more descriptor is needed to hold the payload.
+ * Known issue: once this extra descriptor is added, the packets
+ * are not transmitted (only the headers reach the receiver).
+ * Guard it with the tso flag so the non-TSO multi-segment path
+ * is unaffected.
+ */
+ if (tso) {
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ desc = &txq->desc[idx];
+ desc->baddr = rte_pktmbuf_iova_offset(mbuf, total_hdr_len);
+ AXGMAC_SET_BITS_LE(desc->desc2,
+ TX_NORMAL_DESC2, HL_B1L, mbuf->pkt_len - total_hdr_len);
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+
+ txq->cur++;
+ }
tx_pkt = tx_pkt->next;
while (tx_pkt != NULL) {
idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
desc = &txq->desc[idx];
- /* Update buffer address and length */
- desc->baddr = rte_mbuf_data_iova(tx_pkt);
+ /* Update buffer address and length */
+ if (tso)
+ desc->baddr = rte_pktmbuf_iova_offset(mbuf, total_hdr_len);
+ else
+ desc->baddr = rte_mbuf_data_iova(tx_pkt);
AXGMAC_SET_BITS_LE(desc->desc2,
TX_NORMAL_DESC2, HL_B1L, tx_pkt->data_len);
@@ -992,7 +1053,7 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
rte_wmb();
- /* Set OWN bit */
+ /* Set OWN bit */
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
rte_wmb();
@@ -1000,7 +1061,6 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
txq->sw_ring[idx] = tx_pkt;
/* Update current index*/
txq->cur++;
-
tx_pkt = tx_pkt->next;
}
@@ -1015,7 +1075,6 @@ axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
desc = &txq->desc[start_index];
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
rte_wmb();
-
return 0;
}
@@ -1061,6 +1120,7 @@ axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
idx * sizeof(struct axgbe_tx_desc));
/* Update tail reg with next immediate address to kick Tx DMA channel*/
AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+
txq->pkts += nb_pkt_sent;
return nb_pkt_sent;
}
--
2.34.1