[dpdk-dev] [PATCH v2 25/56] net/txgbe: fill transmit function with hardware offload

Jiawen Wu jiawenwu at trustnetic.com
Mon Oct 5 14:08:39 CEST 2020


From: jiawenwu <jiawenwu at trustnetic.com>

Fill in the transmit function with hardware offload support: checksum
insertion, VLAN insertion, TSO and tunnel offloads.

Signed-off-by: jiawenwu <jiawenwu at trustnetic.com>
---
 doc/guides/nics/features/txgbe.ini |   7 +
 doc/guides/nics/txgbe.rst          |   3 +
 drivers/net/txgbe/txgbe_ethdev.c   |  25 ++
 drivers/net/txgbe/txgbe_ethdev.h   |   6 +
 drivers/net/txgbe/txgbe_rxtx.c     | 675 +++++++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_rxtx.h     |  45 ++
 6 files changed, 761 insertions(+)
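
As background for reviewers, here is a minimal, hypothetical sketch (not part
of the patch) of what an application must put into an mbuf for this transmit
path to build a context descriptor for it. Flag and struct names follow the
DPDK 20.11-era mbuf API that this series targets:

    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_tcp.h>
    #include <rte_mbuf.h>

    /* Hypothetical helper: request IPv4 + TCP checksum offload on a plain
     * (non-tunnelled) packet, plus TSO when mss != 0. The header lengths
     * must be filled in, since the context descriptor is built from them. */
    static void
    mark_for_tx_offload(struct rte_mbuf *m, uint16_t mss)
    {
        m->l2_len = sizeof(struct rte_ether_hdr);
        m->l3_len = sizeof(struct rte_ipv4_hdr);
        m->l4_len = sizeof(struct rte_tcp_hdr);
        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
        if (mss != 0) {
            m->ol_flags |= PKT_TX_TCP_SEG;
            m->tso_segsz = mss;
        }
    }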

diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 247704572..f77afa002 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -10,11 +10,18 @@ Link status event    = Y
 Queue start/stop     = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
+TSO                  = Y
 Unicast MAC filter   = Y
 Multicast MAC filter = Y
 CRC offload          = P
 VLAN offload         = P
+QinQ offload         = P
+L3 checksum offload  = P
+L4 checksum offload  = P
+Inner L3 checksum    = P
+Inner L4 checksum    = P
 Packet type parsing  = Y
+Multiprocess aware   = Y
 Linux UIO            = Y
 Linux VFIO           = Y
 ARMv8                = Y
diff --git a/doc/guides/nics/txgbe.rst b/doc/guides/nics/txgbe.rst
index 3bee5d023..010444939 100644
--- a/doc/guides/nics/txgbe.rst
+++ b/doc/guides/nics/txgbe.rst
@@ -13,8 +13,11 @@ Features
 - Multiple queues for TX and RX
 - MAC filtering
 - Packet type information
+- Checksum offload
+- TSO offload
 - Jumbo frames
 - Link state information
+- Scatter-gather for TX
 
 Prerequisites
 -------------
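
For the checksum and TSO entries added above, a hedged configure-time sketch
(API names from the 20.11-era ethdev layer; the capability check is an
assumption of good practice, not something the PMD requires):

    #include <string.h>
    #include <rte_ethdev.h>

    /* Hypothetical port setup: request checksum/TSO/multi-segment TX
     * offloads, trimmed to what the PMD reports in tx_offload_capa. */
    static int
    configure_tx_offloads(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf conf;
        uint64_t wanted = DEV_TX_OFFLOAD_IPV4_CKSUM |
                          DEV_TX_OFFLOAD_TCP_CKSUM |
                          DEV_TX_OFFLOAD_UDP_CKSUM |
                          DEV_TX_OFFLOAD_TCP_TSO |
                          DEV_TX_OFFLOAD_MULTI_SEGS;
        int ret;

        memset(&conf, 0, sizeof(conf));
        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
            return ret;
        conf.txmode.offloads = wanted & dev_info.tx_offload_capa;
        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
    }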
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 34e2a05a3..83054b3a1 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -116,6 +116,31 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 	PMD_INIT_FUNC_TRACE();
 
 	eth_dev->dev_ops = &txgbe_eth_dev_ops;
+	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
+
+	/*
+	 * For secondary processes, we don't initialise any further because the
+	 * primary process has already done this work. We only need to check
+	 * whether a different RX and TX function should be selected.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		struct txgbe_tx_queue *txq;
+		/* The TX function in the primary process was chosen when the
+		 * last queue was initialized; TX queues may not have been
+		 * initialized by the primary process yet.
+		 */
+		if (eth_dev->data->tx_queues) {
+			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
+			txgbe_set_tx_function(eth_dev, txq);
+		} else {
+			/* Use default TX function if we get here */
+			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
+				     "Using default TX function.");
+		}
+
+		txgbe_set_rx_function(eth_dev);
+
+		return 0;
+	}
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
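The secondary-process branch above exists because burst handlers are plain
per-process function pointers: with ASLR, txgbe_xmit_pkts does not have the
same address in the primary and in a secondary process, so a pointer stored
in shared memory by the primary would dangle in the secondary. A compressed
sketch of the pattern, assuming the txgbe_* helpers declared in
txgbe_ethdev.h:

    #include <rte_eal.h>
    #include <rte_ethdev_driver.h>
    #include "txgbe_ethdev.h"

    /* Sketch only: each process assigns its own copies of the burst
     * function pointers; shared hardware state is left untouched. */
    static void
    select_burst_functions(struct rte_eth_dev *eth_dev)
    {
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
            eth_dev->tx_pkt_burst = txgbe_xmit_pkts; /* local address */
            txgbe_set_rx_function(eth_dev);          /* same idea for RX */
        }
    }
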
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 21b1699ea..834aae72b 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -113,6 +113,12 @@ int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
 int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
+uint16_t txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
+uint16_t txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
 void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
 			       uint8_t queue, uint8_t msix_vector);
 
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index b9a14a016..d51c62a18 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -25,6 +25,19 @@
 #include "txgbe_ethdev.h"
 #include "txgbe_rxtx.h"
 
+/* Bit mask of the mbuf ol_flags that require building a TX context descriptor */
+static const u64 TXGBE_TX_OFFLOAD_MASK = (
+		PKT_TX_OUTER_IPV6 |
+		PKT_TX_OUTER_IPV4 |
+		PKT_TX_IPV6 |
+		PKT_TX_IPV4 |
+		PKT_TX_VLAN_PKT |
+		PKT_TX_IP_CKSUM |
+		PKT_TX_L4_MASK |
+		PKT_TX_TCP_SEG |
+		PKT_TX_TUNNEL_MASK |
+		PKT_TX_OUTER_IP_CKSUM);
+
 static int
 txgbe_is_vf(struct rte_eth_dev *dev)
 {
@@ -267,10 +280,662 @@ txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
+static inline void
+txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
+		volatile struct txgbe_tx_ctx_desc *ctx_txd,
+		uint64_t ol_flags, union txgbe_tx_offload tx_offload,
+		__rte_unused uint64_t *mdata)
+{
+	union txgbe_tx_offload tx_offload_mask;
+	uint32_t type_tucmd_mlhl;
+	uint32_t mss_l4len_idx;
+	uint32_t ctx_idx;
+	uint32_t vlan_macip_lens;
+	uint32_t tunnel_seed;
+
+	ctx_idx = txq->ctx_curr;
+	tx_offload_mask.data[0] = 0;
+	tx_offload_mask.data[1] = 0;
+
+	/* Specify which HW CTX to upload. */
+	mss_l4len_idx = TXGBE_TXD_IDX(ctx_idx);
+	type_tucmd_mlhl = TXGBE_TXD_CTXT;
+
+	tx_offload_mask.ptid |= ~0;
+	type_tucmd_mlhl |= TXGBE_TXD_PTID(tx_offload.ptid);
+
+	/* check if TCP segmentation is required for this packet */
+	if (ol_flags & PKT_TX_TCP_SEG) {
+		tx_offload_mask.l2_len |= ~0;
+		tx_offload_mask.l3_len |= ~0;
+		tx_offload_mask.l4_len |= ~0;
+		tx_offload_mask.tso_segsz |= ~0;
+		mss_l4len_idx |= TXGBE_TXD_MSS(tx_offload.tso_segsz);
+		mss_l4len_idx |= TXGBE_TXD_L4LEN(tx_offload.l4_len);
+	} else { /* no TSO, check if hardware checksum is needed */
+		if (ol_flags & PKT_TX_IP_CKSUM) {
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+		}
+
+		switch (ol_flags & PKT_TX_L4_MASK) {
+		case PKT_TX_UDP_CKSUM:
+			mss_l4len_idx |=
+				TXGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+			break;
+		case PKT_TX_TCP_CKSUM:
+			mss_l4len_idx |=
+				TXGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+			break;
+		case PKT_TX_SCTP_CKSUM:
+			mss_l4len_idx |=
+				TXGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
+			tx_offload_mask.l2_len |= ~0;
+			tx_offload_mask.l3_len |= ~0;
+			break;
+		default:
+			break;
+		}
+	}
+
+	vlan_macip_lens = TXGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
+
+	if (ol_flags & PKT_TX_TUNNEL_MASK) {
+		tx_offload_mask.outer_tun_len |= ~0;
+		tx_offload_mask.outer_l2_len |= ~0;
+		tx_offload_mask.outer_l3_len |= ~0;
+		tx_offload_mask.l2_len |= ~0;
+		tunnel_seed = TXGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
+		tunnel_seed |= TXGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
+
+		switch (ol_flags & PKT_TX_TUNNEL_MASK) {
+		case PKT_TX_TUNNEL_IPIP:
+			/* for non-UDP / non-GRE tunneling, leave the type as 0b */
+			break;
+		case PKT_TX_TUNNEL_VXLAN:
+		case PKT_TX_TUNNEL_GENEVE:
+			tunnel_seed |= TXGBE_TXD_ETYPE_UDP;
+			break;
+		case PKT_TX_TUNNEL_GRE:
+			tunnel_seed |= TXGBE_TXD_ETYPE_GRE;
+			break;
+		default:
+			PMD_TX_LOG(ERR, "Tunnel type not supported");
+			return;
+		}
+		vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.outer_l2_len);
+	} else {
+		tunnel_seed = 0;
+		vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.l2_len);
+	}
+
+	if (ol_flags & PKT_TX_VLAN_PKT) {
+		tx_offload_mask.vlan_tci |= ~0;
+		vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);
+	}
+
+	txq->ctx_cache[ctx_idx].flags = ol_flags;
+	txq->ctx_cache[ctx_idx].tx_offload.data[0] =
+		tx_offload_mask.data[0] & tx_offload.data[0];
+	txq->ctx_cache[ctx_idx].tx_offload.data[1] =
+		tx_offload_mask.data[1] & tx_offload.data[1];
+	txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
+
+	ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens);
+	ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed);
+	ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl);
+	ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx);
+}
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ */
+static inline uint32_t
+what_ctx_update(struct txgbe_tx_queue *txq, uint64_t flags,
+		   union txgbe_tx_offload tx_offload)
+{
+	/* If match with the current used context */
+	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+		     & tx_offload.data[0])) &&
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+		     & tx_offload.data[1]))))
+		return txq->ctx_curr;
+
+	/* Otherwise, check whether the other context matches */
+	txq->ctx_curr ^= 1;
+	if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+		     & tx_offload.data[0])) &&
+		   (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+		    (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+		     & tx_offload.data[1]))))
+		return txq->ctx_curr;
+
+	/* Neither context matches: a new context descriptor is required */
+	return TXGBE_CTX_NUM;
+}
+
+static inline uint32_t
+tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
+{
+	uint32_t tmp = 0;
+
+	if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) {
+		tmp |= TXGBE_TXD_CC;
+		tmp |= TXGBE_TXD_L4CS;
+	}
+	if (ol_flags & PKT_TX_IP_CKSUM) {
+		tmp |= TXGBE_TXD_CC;
+		tmp |= TXGBE_TXD_IPCS;
+	}
+	if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+		tmp |= TXGBE_TXD_CC;
+		tmp |= TXGBE_TXD_EIPCS;
+	}
+	if (ol_flags & PKT_TX_TCP_SEG) {
+		tmp |= TXGBE_TXD_CC;
+		/* implies IPv4 cksum */
+		if (ol_flags & PKT_TX_IPV4)
+			tmp |= TXGBE_TXD_IPCS;
+		tmp |= TXGBE_TXD_L4CS;
+	}
+	if (ol_flags & PKT_TX_VLAN_PKT)
+		tmp |= TXGBE_TXD_CC;
+
+	return tmp;
+}
+
+static inline uint32_t
+tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
+{
+	uint32_t cmdtype = 0;
+
+	if (ol_flags & PKT_TX_VLAN_PKT)
+		cmdtype |= TXGBE_TXD_VLE;
+	if (ol_flags & PKT_TX_TCP_SEG)
+		cmdtype |= TXGBE_TXD_TSE;
+	if (ol_flags & PKT_TX_MACSEC)
+		cmdtype |= TXGBE_TXD_LINKSEC;
+	return cmdtype;
+}
+
+static inline uint8_t
+tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
+{
+	bool tun;
+
+	if (ptype)
+		return txgbe_encode_ptype(ptype);
+
+	/* Only flags in TXGBE_TX_OFFLOAD_MASK are supported */
+	tun = !!(oflags & PKT_TX_TUNNEL_MASK);
+
+	/* L2 level */
+	ptype = RTE_PTYPE_L2_ETHER;
+	if (oflags & PKT_TX_VLAN)
+		ptype |= RTE_PTYPE_L2_ETHER_VLAN;
+
+	/* L3 level */
+	if (oflags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM))
+		ptype |= RTE_PTYPE_L3_IPV4;
+	else if (oflags & PKT_TX_OUTER_IPV6)
+		ptype |= RTE_PTYPE_L3_IPV6;
+
+	if (oflags & (PKT_TX_IPV4 | PKT_TX_IP_CKSUM))
+		ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
+	else if (oflags & PKT_TX_IPV6)
+		ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
+
+	/* L4 level */
+	switch (oflags & (PKT_TX_L4_MASK)) {
+	case PKT_TX_TCP_CKSUM:
+		ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
+		break;
+	case PKT_TX_UDP_CKSUM:
+		ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
+		break;
+	case PKT_TX_SCTP_CKSUM:
+		ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
+		break;
+	}
+
+	if (oflags & PKT_TX_TCP_SEG)
+		ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
+
+	/* Tunnel */
+	switch (oflags & PKT_TX_TUNNEL_MASK) {
+	case PKT_TX_TUNNEL_VXLAN:
+		ptype |= RTE_PTYPE_L2_ETHER |
+			 RTE_PTYPE_L3_IPV4 |
+			 RTE_PTYPE_TUNNEL_VXLAN;
+		ptype |= RTE_PTYPE_INNER_L2_ETHER;
+		break;
+	case PKT_TX_TUNNEL_GRE:
+		ptype |= RTE_PTYPE_L2_ETHER |
+			 RTE_PTYPE_L3_IPV4 |
+			 RTE_PTYPE_TUNNEL_GRE;
+		ptype |= RTE_PTYPE_INNER_L2_ETHER;
+		break;
+	case PKT_TX_TUNNEL_GENEVE:
+		ptype |= RTE_PTYPE_L2_ETHER |
+			 RTE_PTYPE_L3_IPV4 |
+			 RTE_PTYPE_TUNNEL_GENEVE;
+		ptype |= RTE_PTYPE_INNER_L2_ETHER;
+		break;
+	case PKT_TX_TUNNEL_VXLAN_GPE:
+		ptype |= RTE_PTYPE_L2_ETHER |
+			 RTE_PTYPE_L3_IPV4 |
+			 RTE_PTYPE_TUNNEL_VXLAN_GPE;
+		ptype |= RTE_PTYPE_INNER_L2_ETHER;
+		break;
+	case PKT_TX_TUNNEL_IPIP:
+	case PKT_TX_TUNNEL_IP:
+		ptype |= RTE_PTYPE_L2_ETHER |
+			 RTE_PTYPE_L3_IPV4 |
+			 RTE_PTYPE_TUNNEL_IP;
+		break;
+	}
+
+	return txgbe_encode_ptype(ptype);
+}
+
 #ifndef DEFAULT_TX_FREE_THRESH
 #define DEFAULT_TX_FREE_THRESH 32
 #endif
 
+/* Reset transmit descriptors after they have been used */
+static inline int
+txgbe_xmit_cleanup(struct txgbe_tx_queue *txq)
+{
+	struct txgbe_tx_entry *sw_ring = txq->sw_ring;
+	volatile struct txgbe_tx_desc *txr = txq->tx_ring;
+	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+	uint16_t nb_tx_desc = txq->nb_tx_desc;
+	uint16_t desc_to_clean_to;
+	uint16_t nb_tx_to_clean;
+	uint32_t status;
+
+	/* Determine the last descriptor needing to be cleaned */
+	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh);
+	if (desc_to_clean_to >= nb_tx_desc)
+		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+	/* Check to make sure the last descriptor to clean is done */
+	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+	status = txr[desc_to_clean_to].dw3;
+	if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
+		PMD_TX_FREE_LOG(DEBUG,
+				"TX descriptor %4u is not done "
+				"(port=%d queue=%d)",
+				desc_to_clean_to,
+				txq->port_id, txq->queue_id);
+		if ((txq->nb_tx_free >> 1) < txq->tx_free_thresh)
+			txgbe_set32_masked(txq->tdc_reg_addr,
+				TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
+		/* Failed to clean any descriptors, better luck next time */
+		return -(1);
+	}
+
+	/* Figure out how many descriptors will be cleaned */
+	if (last_desc_cleaned > desc_to_clean_to)
+		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+							desc_to_clean_to);
+	else
+		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+						last_desc_cleaned);
+
+	PMD_TX_FREE_LOG(DEBUG,
+			"Cleaning %4u TX descriptors: %4u to %4u "
+			"(port=%d queue=%d)",
+			nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
+			txq->port_id, txq->queue_id);
+
+	/*
+	 * The last descriptor to clean is done, so that means all the
+	 * descriptors from the last descriptor that was cleaned
+	 * up to the last descriptor with the RS bit set
+	 * are done. Only reset the threshold descriptor.
+	 */
+	txr[desc_to_clean_to].dw3 = 0;
+
+	/* Update the txq to reflect the last descriptor that was cleaned */
+	txq->last_desc_cleaned = desc_to_clean_to;
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
+
+	/* No Error */
+	return 0;
+}
+
+static inline uint8_t
+txgbe_get_tun_len(struct rte_mbuf *mbuf)
+{
+	struct txgbe_genevehdr genevehdr;
+	const struct txgbe_genevehdr *gh;
+	uint8_t tun_len;
+
+	switch (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) {
+	case PKT_TX_TUNNEL_IPIP:
+		tun_len = 0;
+		break;
+	case PKT_TX_TUNNEL_VXLAN:
+	case PKT_TX_TUNNEL_VXLAN_GPE:
+		tun_len = sizeof(struct txgbe_udphdr)
+			+ sizeof(struct txgbe_vxlanhdr);
+		break;
+	case PKT_TX_TUNNEL_GRE:
+		tun_len = sizeof(struct txgbe_nvgrehdr);
+		break;
+	case PKT_TX_TUNNEL_GENEVE:
+		gh = rte_pktmbuf_read(mbuf,
+			mbuf->outer_l2_len + mbuf->outer_l3_len,
+			sizeof(genevehdr), &genevehdr);
+		tun_len = sizeof(struct txgbe_udphdr)
+			+ sizeof(struct txgbe_genevehdr)
+			+ (gh->opt_len << 2);
+		break;
+	default:
+		tun_len = 0;
+	}
+
+	return tun_len;
+}
+
+uint16_t
+txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	struct txgbe_tx_queue *txq;
+	struct txgbe_tx_entry *sw_ring;
+	struct txgbe_tx_entry *txe, *txn;
+	volatile struct txgbe_tx_desc *txr;
+	volatile struct txgbe_tx_desc *txd;
+	struct rte_mbuf     *tx_pkt;
+	struct rte_mbuf     *m_seg;
+	uint64_t buf_dma_addr;
+	uint32_t olinfo_status;
+	uint32_t cmd_type_len;
+	uint32_t pkt_len;
+	uint16_t slen;
+	uint64_t ol_flags;
+	uint16_t tx_id;
+	uint16_t tx_last;
+	uint16_t nb_tx;
+	uint16_t nb_used;
+	uint64_t tx_ol_req;
+	uint32_t ctx = 0;
+	uint32_t new_ctx;
+	union txgbe_tx_offload tx_offload;
+
+	tx_offload.data[0] = 0;
+	tx_offload.data[1] = 0;
+	txq = tx_queue;
+	sw_ring = txq->sw_ring;
+	txr     = txq->tx_ring;
+	tx_id   = txq->tx_tail;
+	txe = &sw_ring[tx_id];
+
+	/* Determine if the descriptor ring needs to be cleaned. */
+	if (txq->nb_tx_free < txq->tx_free_thresh)
+		txgbe_xmit_cleanup(txq);
+
+	rte_prefetch0(&txe->mbuf->pool);
+
+	/* TX loop */
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		new_ctx = 0;
+		tx_pkt = *tx_pkts++;
+		pkt_len = tx_pkt->pkt_len;
+
+		/*
+		 * Determine how many (if any) context descriptors
+		 * are needed for offload functionality.
+		 */
+		ol_flags = tx_pkt->ol_flags;
+
+		/* If hardware offload required */
+		tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
+		if (tx_ol_req) {
+			tx_offload.ptid = tx_desc_ol_flags_to_ptid(
+					tx_ol_req, tx_pkt->packet_type);
+			tx_offload.l2_len = tx_pkt->l2_len;
+			tx_offload.l3_len = tx_pkt->l3_len;
+			tx_offload.l4_len = tx_pkt->l4_len;
+			tx_offload.vlan_tci = tx_pkt->vlan_tci;
+			tx_offload.tso_segsz = tx_pkt->tso_segsz;
+			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+			tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);
+
+			/* Build a new context descriptor or reuse an existing one */
+			ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
+			/* Only allocate a context descriptor if required */
+			new_ctx = (ctx == TXGBE_CTX_NUM);
+			ctx = txq->ctx_curr;
+		}
+
+		/*
+		 * Keep track of how many descriptors are used this loop.
+		 * This will always be the number of segments plus the number
+		 * of context descriptors required to transmit the packet.
+		 */
+		nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
+
+		/*
+		 * The number of descriptors that must be allocated for a
+		 * packet is the number of segments of that packet, plus 1
+		 * Context Descriptor for the hardware offload, if any.
+		 * Determine the last TX descriptor to allocate in the TX ring
+		 * for the packet, starting from the current position (tx_id)
+		 * in the ring.
+		 */
+		tx_last = (uint16_t) (tx_id + nb_used - 1);
+
+		/* Circular ring */
+		if (tx_last >= txq->nb_tx_desc)
+			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
+
+		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+			   " tx_first=%u tx_last=%u",
+			   (unsigned) txq->port_id,
+			   (unsigned) txq->queue_id,
+			   (unsigned) pkt_len,
+			   (unsigned) tx_id,
+			   (unsigned) tx_last);
+
+		/*
+		 * Make sure there are enough TX descriptors available to
+		 * transmit the entire packet.
+		 * nb_used better be less than or equal to txq->tx_free_thresh
+		 */
+		if (nb_used > txq->nb_tx_free) {
+			PMD_TX_FREE_LOG(DEBUG,
+					"Not enough free TX descriptors "
+					"nb_used=%4u nb_free=%4u "
+					"(port=%d queue=%d)",
+					nb_used, txq->nb_tx_free,
+					txq->port_id, txq->queue_id);
+
+			if (txgbe_xmit_cleanup(txq) != 0) {
+				/* Could not clean any descriptors */
+				if (nb_tx == 0)
+					return 0;
+				goto end_of_tx;
+			}
+
+			/* nb_used better be <= txq->tx_free_thresh */
+			if (unlikely(nb_used > txq->tx_free_thresh)) {
+				PMD_TX_FREE_LOG(DEBUG,
+					"The number of descriptors needed to "
+					"transmit the packet exceeds the "
+					"RS bit threshold. This will impact "
+					"performance. "
+					"nb_used=%4u nb_free=%4u "
+					"tx_free_thresh=%4u. "
+					"(port=%d queue=%d)",
+					nb_used, txq->nb_tx_free,
+					txq->tx_free_thresh,
+					txq->port_id, txq->queue_id);
+				/*
+				 * Loop here until there are enough TX
+				 * descriptors or until the ring cannot be
+				 * cleaned.
+				 */
+				while (nb_used > txq->nb_tx_free) {
+					if (txgbe_xmit_cleanup(txq) != 0) {
+						/*
+						 * Could not clean any
+						 * descriptors
+						 */
+						if (nb_tx == 0)
+							return 0;
+						goto end_of_tx;
+					}
+				}
+			}
+		}
+
+		/*
+		 * By now there are enough free TX descriptors to transmit
+		 * the packet.
+		 */
+
+		/*
+		 * Set common flags of all TX Data Descriptors.
+		 *
+		 * The following bits must be set in all Data Descriptors:
+		 *   - TXGBE_TXD_DTYP_DATA
+		 *   - TXGBE_TXD_DCMD_DEXT
+		 *
+		 * The following bits must be set in the first Data Descriptor
+		 * and are ignored in the other ones:
+		 *   - TXGBE_TXD_DCMD_IFCS
+		 *   - TXGBE_TXD_MAC_1588
+		 *   - TXGBE_TXD_DCMD_VLE
+		 *
+		 * The following bits must only be set in the last Data
+		 * Descriptor:
+		 *   - TXGBE_TXD_CMD_EOP
+		 *
+		 * The following bits can be set in any Data Descriptor, but
+		 * are only set in the last Data Descriptor:
+		 *   - TXGBE_TXD_CMD_RS
+		 */
+		cmd_type_len = TXGBE_TXD_FCS;
+
+		olinfo_status = 0;
+		if (tx_ol_req) {
+			if (ol_flags & PKT_TX_TCP_SEG) {
+				/* when TSO is on, the paylen in the descriptor
+				 * is not the packet length but the TCP payload
+				 * length */
+				pkt_len -= (tx_offload.l2_len +
+					tx_offload.l3_len + tx_offload.l4_len);
+				pkt_len -=
+					(tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK)
+					? tx_offload.outer_l2_len +
+					  tx_offload.outer_l3_len : 0;
+			}
+
+			/*
+			 * Setup the TX Advanced Context Descriptor if required
+			 */
+			if (new_ctx) {
+				volatile struct txgbe_tx_ctx_desc *ctx_txd;
+
+				ctx_txd = (volatile struct txgbe_tx_ctx_desc *)
+				    &txr[tx_id];
+
+				txn = &sw_ring[txe->next_id];
+				rte_prefetch0(&txn->mbuf->pool);
+
+				if (txe->mbuf != NULL) {
+					rte_pktmbuf_free_seg(txe->mbuf);
+					txe->mbuf = NULL;
+				}
+
+				txgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+					tx_offload, &tx_pkt->udata64);
+
+				txe->last_id = tx_last;
+				tx_id = txe->next_id;
+				txe = txn;
+			}
+
+			/*
+			 * Set up the TX Advanced Data Descriptor.
+			 * This path is taken whether a new context
+			 * descriptor was built or an existing one is reused.
+			 */
+			cmd_type_len  |= tx_desc_ol_flags_to_cmdtype(ol_flags);
+			olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
+			olinfo_status |= TXGBE_TXD_IDX(ctx);
+		}
+
+		olinfo_status |= TXGBE_TXD_PAYLEN(pkt_len);
+
+		m_seg = tx_pkt;
+		do {
+			txd = &txr[tx_id];
+			txn = &sw_ring[txe->next_id];
+			rte_prefetch0(&txn->mbuf->pool);
+
+			if (txe->mbuf != NULL)
+				rte_pktmbuf_free_seg(txe->mbuf);
+			txe->mbuf = m_seg;
+
+			/*
+			 * Set up Transmit Data Descriptor.
+			 */
+			slen = m_seg->data_len;
+			buf_dma_addr = rte_mbuf_data_iova(m_seg);
+			txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
+			txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
+			txd->dw3 = rte_cpu_to_le_32(olinfo_status);
+			txe->last_id = tx_last;
+			tx_id = txe->next_id;
+			txe = txn;
+			m_seg = m_seg->next;
+		} while (m_seg != NULL);
+
+		/*
+		 * The last packet data descriptor needs End Of Packet (EOP)
+		 */
+		cmd_type_len |= TXGBE_TXD_EOP;
+		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
+
+		txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
+	}
+
+end_of_tx:
+
+	rte_wmb();
+
+	/*
+	 * Set the Transmit Descriptor Tail (TDT)
+	 */
+	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
+		   (unsigned) tx_id, (unsigned) nb_tx);
+	txgbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
+	txq->tx_tail = tx_id;
+
+	return nb_tx;
+}
+
 uint64_t
 txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
 {
@@ -368,6 +1033,16 @@ txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
 		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
 		dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
 		dev->tx_pkt_prepare = NULL;
+	} else {
+		PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
+		PMD_INIT_LOG(DEBUG,
+				" - offloads = 0x%" PRIx64,
+				txq->offloads);
+		PMD_INIT_LOG(DEBUG,
+				" - tx_free_thresh = %lu [RTE_PMD_TXGBE_TX_MAX_BURST=%lu]",
+				(unsigned long)txq->tx_free_thresh,
+				(unsigned long)RTE_PMD_TXGBE_TX_MAX_BURST);
+		dev->tx_pkt_burst = txgbe_xmit_pkts;
 	}
 }
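
The two-slot context cache that what_ctx_update() probes can be modelled in
isolation. A standalone sketch, simplified so that one 64-bit key stands in
for the flags and masked offload words the real code compares:

    #include <stdbool.h>
    #include <stdint.h>

    struct ctx_cache { uint64_t key; bool valid; };

    /* Return the matching slot (0 or 1), or -1 when neither slot matches,
     * in which case the toggled slot is refilled and the caller must emit
     * a fresh context descriptor, as txgbe_xmit_pkts() does. */
    static int
    ctx_lookup(struct ctx_cache cache[2], uint32_t *curr, uint64_t key)
    {
        if (cache[*curr].valid && cache[*curr].key == key)
            return (int)*curr;         /* hit on the current slot */
        *curr ^= 1;                    /* try the other slot */
        if (cache[*curr].valid && cache[*curr].key == key)
            return (int)*curr;         /* hit on the alternate slot */
        cache[*curr].key = key;        /* miss: refill the toggled slot */
        cache[*curr].valid = true;
        return -1;
    }

A miss costs one extra descriptor per packet (the context descriptor), which
is why nb_used is computed as nb_segs + new_ctx in txgbe_xmit_pkts().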
 
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index 05d131298..82cb9a0eb 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -211,6 +211,45 @@ struct txgbe_rx_queue {
 	struct rte_mbuf *rx_stage[RTE_PMD_TXGBE_RX_MAX_BURST*2];
 };
 
+/**
+ * TXGBE CTX Constants
+ */
+enum txgbe_ctx_num {
+	TXGBE_CTX_0    = 0, /**< CTX0 */
+	TXGBE_CTX_1    = 1, /**< CTX1  */
+	TXGBE_CTX_NUM  = 2, /**< CTX NUMBER  */
+};
+
+/** Offload features */
+union txgbe_tx_offload {
+	uint64_t data[2];
+	struct {
+		uint64_t ptid:8; /**< Packet Type Identifier. */
+		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+		uint64_t tso_segsz:16; /**< TCP TSO segment size */
+		uint64_t vlan_tci:16;
+		/**< VLAN Tag Control Identifier (CPU order). */
+
+		/* fields for TX offloading of tunnels */
+		uint64_t outer_tun_len:8; /**< Outer TUN (Tunnel) Hdr Length. */
+		uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
+		uint64_t outer_l3_len:16; /**< Outer L3 (IP) Hdr Length. */
+	};
+};
+
+/**
+ * Structure used to check whether a new context descriptor needs to be built
+ */
+struct txgbe_ctx_info {
+	uint64_t flags;           /**< ol_flags for context build. */
+	/** TX offload: VLAN, TSO, L2/L3/L4 lengths. */
+	union txgbe_tx_offload tx_offload;
+	/** compare mask for tx offload. */
+	union txgbe_tx_offload tx_offload_mask;
+};
+
 /**
  * Structure associated with each TX queue.
  */
@@ -229,6 +268,9 @@ struct txgbe_tx_queue {
 	/**< Start freeing TX buffers if there are less free descriptors than
 	     this value. */
 	uint16_t            tx_free_thresh;
+	/** Index of the last TX descriptor that has been cleaned. */
+	uint16_t            last_desc_cleaned;
+	/** Total number of TX descriptors available for use. */
 	uint16_t            nb_tx_free;
 	uint16_t tx_next_dd; /**< next desc to scan for DD bit */
 	uint16_t            queue_id;      /**< TX queue index. */
@@ -238,6 +280,9 @@ struct txgbe_tx_queue {
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
 	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+	uint32_t            ctx_curr;      /**< Current hardware context index. */
+	/** Hardware context history. */
+	struct txgbe_ctx_info ctx_cache[TXGBE_CTX_NUM];
 	const struct txgbe_txq_ops *ops;       /**< txq ops */
 	uint8_t             tx_deferred_start; /**< not in global dev start. */
 };
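
The bit widths in union txgbe_tx_offload bound what the hardware can
describe: l2_len is 7 bits, for instance, so a MAC header longer than 127
bytes cannot be expressed. A hypothetical helper showing how the union maps
from mbuf metadata (values wider than a field would be silently truncated):

    #include <rte_mbuf.h>

    static union txgbe_tx_offload
    tx_offload_from_mbuf(const struct rte_mbuf *m)
    {
        union txgbe_tx_offload off = { .data = { 0, 0 } };

        off.l2_len = m->l2_len;          /* 7-bit field */
        off.l3_len = m->l3_len;          /* 9-bit field */
        off.l4_len = m->l4_len;          /* 8-bit field */
        off.tso_segsz = m->tso_segsz;    /* 16-bit field */
        off.vlan_tci = m->vlan_tci;
        off.outer_l2_len = m->outer_l2_len;
        off.outer_l3_len = m->outer_l3_len;
        return off;
    }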
-- 
2.18.4




