[dpdk-dev] [PATCH 18/36] net/sfc: support multi-segment transmit for EF100 datapath

Andrew Rybchenko arybchenko at solarflare.com
Tue Oct 13 15:45:35 CEST 2020

Handle mbufs with multiple segments on transmit: the first segment of
a packet goes out as a SEND descriptor and every subsequent segment as
a SEG descriptor. Advertise the DEV_TX_OFFLOAD_MULTI_SEGS queue offload
and provide a Tx prepare callback which rejects packets with more
segments than a SEND descriptor can carry.

Signed-off-by: Andrew Rybchenko <arybchenko at solarflare.com>
---
 doc/guides/nics/sfc_efx.rst    |  4 +-
 drivers/net/sfc/sfc_ef100_tx.c | 69 ++++++++++++++++++++++++++++++++--
 2 files changed, 67 insertions(+), 6 deletions(-)

diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
index 726d653fa8..17e9461bea 100644
--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst
@@ -328,8 +328,8 @@ boolean parameters value.
   **ef10_simple** chooses EF10 (SFN7xxx, SFN8xxx, X2xxx) native datapath which
  is even faster than **ef10** but does not support multi-segment
   mbufs, disallows multiple mempools and neglects mbuf reference counters.
-  **ef100** chooses EF100 native datapath which does not support multi-segment
-  mbufs and any offloads.
+  **ef100** chooses EF100 native datapath which does not support
+  any offloads except multi-segment mbufs.
 
 - ``perf_profile`` [auto|throughput|low-latency] (default **throughput**)
 
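For context, the multi-segment capability added here is consumed through
the standard ethdev offload flags. A minimal caller-side sketch (not part
of this patch; the port ID and queue counts are placeholders, and the
datapath itself is selected via the ``tx_datapath=ef100`` device argument
described in the section above):

#include <rte_ethdev.h>

static int
configure_multiseg_tx(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		/* Request the Tx offload the EF100 datapath now advertises. */
		.txmode.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
	};

	/* One Rx and one Tx queue is enough for the illustration. */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
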
diff --git a/drivers/net/sfc/sfc_ef100_tx.c b/drivers/net/sfc/sfc_ef100_tx.c
index 20b7c786cc..0a7bd74651 100644
--- a/drivers/net/sfc/sfc_ef100_tx.c
+++ b/drivers/net/sfc/sfc_ef100_tx.c
@@ -36,6 +36,10 @@
 #define SFC_EF100_TX_SEND_DESC_LEN_MAX \
 	((1u << ESF_GZ_TX_SEND_LEN_WIDTH) - 1)
 
+/** Maximum length of the segment descriptor data */
+#define SFC_EF100_TX_SEG_DESC_LEN_MAX \
+	((1u << ESF_GZ_TX_SEG_LEN_WIDTH) - 1)
+
 /**
  * Maximum number of descriptors/buffers in the Tx ring.
 * It should guarantee that the corresponding event queue never overfills.
@@ -82,6 +86,32 @@ sfc_ef100_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
 	return container_of(dp_txq, struct sfc_ef100_txq, dp);
 }
 
+static uint16_t
+sfc_ef100_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+			  uint16_t nb_pkts)
+{
+	struct sfc_ef100_txq * const txq = sfc_ef100_txq_by_dp_txq(tx_queue);
+	uint16_t i;
+
+	for (i = 0; i < nb_pkts; i++) {
+		struct rte_mbuf *m = tx_pkts[i];
+		int ret;
+
+		ret = sfc_dp_tx_prepare_pkt(m, 0, txq->max_fill_level, 0, 0);
+		if (unlikely(ret != 0)) {
+			rte_errno = ret;
+			break;
+		}
+
+		if (m->nb_segs > EFX_MASK32(ESF_GZ_TX_SEND_NUM_SEGS)) {
+			rte_errno = EINVAL;
+			break;
+		}
+	}
+
+	return i;
+}
+
 static bool
 sfc_ef100_tx_get_event(struct sfc_ef100_txq *txq, efx_qword_t *ev)
 {
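The new prepare callback is reached through rte_eth_tx_prepare(), so
applications that run the prepare stage get multi-segment packets
validated before they hit the ring. A caller-side sketch (assumed usage,
not from this patch):

#include <stdio.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep;

	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	if (nb_prep < nb_pkts) {
		/* pkts[nb_prep] failed a check; rte_errno says why. */
		printf("tx_prepare stopped at %u: %s\n",
		       nb_prep, rte_strerror(rte_errno));
	}

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}
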
@@ -189,10 +219,20 @@ sfc_ef100_tx_qdesc_send_create(const struct rte_mbuf *m, efx_oword_t *tx_desc)
 	EFX_POPULATE_OWORD_4(*tx_desc,
 			ESF_GZ_TX_SEND_ADDR, rte_mbuf_data_iova(m),
 			ESF_GZ_TX_SEND_LEN, rte_pktmbuf_data_len(m),
-			ESF_GZ_TX_SEND_NUM_SEGS, 1,
+			ESF_GZ_TX_SEND_NUM_SEGS, m->nb_segs,
 			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEND);
 }
 
+static void
+sfc_ef100_tx_qdesc_seg_create(rte_iova_t addr, uint16_t len,
+			      efx_oword_t *tx_desc)
+{
+	EFX_POPULATE_OWORD_3(*tx_desc,
+			ESF_GZ_TX_SEG_ADDR, addr,
+			ESF_GZ_TX_SEG_LEN, len,
+			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG);
+}
+
 static inline void
 sfc_ef100_tx_qpush(struct sfc_ef100_txq *txq, unsigned int added)
 {
@@ -231,8 +271,17 @@ sfc_ef100_tx_pkt_descs_max(const struct rte_mbuf *m)
 	RTE_BUILD_BUG_ON(SFC_EF100_TX_SEND_DESC_LEN_MAX <
 		RTE_MIN((unsigned int)EFX_MAC_PDU_MAX, SFC_MBUF_SEG_LEN_MAX));
 
-	SFC_ASSERT(m->nb_segs == 1);
-	return 1;
+	/*
+	 * Any segment of a scattered packet cannot be bigger than the
+	 * maximum segment length or the maximum packet length since TSO
+	 * is not supported yet.
+	 * Make sure that subsequent segments do not need fragmentation
+	 * (i.e. splitting into many Tx descriptors).
+	 */
+	RTE_BUILD_BUG_ON(SFC_EF100_TX_SEG_DESC_LEN_MAX <
+		RTE_MIN((unsigned int)EFX_MAC_PDU_MAX, SFC_MBUF_SEG_LEN_MAX));
+
+	return m->nb_segs;
 }
 
 static uint16_t
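With the build-time checks above guaranteeing that no segment ever needs
to be split across descriptors, the descriptor budget of a packet is
exactly its segment count. An illustrative restatement (hypothetical
helper, not in the driver):

#include <rte_mbuf.h>

static unsigned int
descs_for_pkt(const struct rte_mbuf *m)
{
	/* One SEND for the head segment plus one SEG per extra segment. */
	return 1u + (unsigned int)(m->nb_segs - 1); /* == m->nb_segs */
}
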
@@ -306,6 +355,17 @@ sfc_ef100_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 */
 		txq->sw_ring[id].mbuf = m_seg;
 
+		while ((m_seg = m_seg->next) != NULL) {
+			RTE_BUILD_BUG_ON(SFC_MBUF_SEG_LEN_MAX >
+					 SFC_EF100_TX_SEG_DESC_LEN_MAX);
+
+			id = added++ & txq->ptr_mask;
+			sfc_ef100_tx_qdesc_seg_create(rte_mbuf_data_iova(m_seg),
+					rte_pktmbuf_data_len(m_seg),
+					&txq->txq_hw_ring[id]);
+			txq->sw_ring[id].mbuf = m_seg;
+		}
+
 		dma_desc_space -= (added - pkt_start);
 	}
 
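To exercise the new SEND+SEG path, a two-segment packet can be built with
the regular mbuf chaining API. A test-side sketch (assumptions: mempool
creation is omitted, and rte_pktmbuf_chain() fails if the resulting chain
would be too long):

#include <rte_mbuf.h>

static struct rte_mbuf *
make_two_seg_pkt(struct rte_mempool *mp, uint16_t seg_len)
{
	struct rte_mbuf *head = rte_pktmbuf_alloc(mp);
	struct rte_mbuf *tail = rte_pktmbuf_alloc(mp);

	if (head == NULL || tail == NULL)
		goto fail;

	/* Reserve seg_len bytes of payload in each segment. */
	if (rte_pktmbuf_append(head, seg_len) == NULL ||
	    rte_pktmbuf_append(tail, seg_len) == NULL)
		goto fail;

	/* Link the segments: head->nb_segs becomes 2, so the burst
	 * loop above emits one SEND plus one SEG descriptor for it. */
	if (rte_pktmbuf_chain(head, tail) != 0)
		goto fail;

	return head;

fail:
	rte_pktmbuf_free(head);
	rte_pktmbuf_free(tail);
	return NULL;
}
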
@@ -532,7 +592,7 @@ struct sfc_dp_tx sfc_ef100_tx = {
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
 	.dev_offload_capa	= 0,
-	.queue_offload_capa	= 0,
+	.queue_offload_capa	= DEV_TX_OFFLOAD_MULTI_SEGS,
 	.get_dev_info		= sfc_ef100_get_dev_info,
 	.qsize_up_rings		= sfc_ef100_tx_qsize_up_rings,
 	.qcreate		= sfc_ef100_tx_qcreate,
@@ -542,5 +602,6 @@ struct sfc_dp_tx sfc_ef100_tx = {
 	.qstop			= sfc_ef100_tx_qstop,
 	.qreap			= sfc_ef100_tx_qreap,
 	.qdesc_status		= sfc_ef100_tx_qdesc_status,
+	.pkt_prepare		= sfc_ef100_tx_prepare_pkts,
 	.pkt_burst		= sfc_ef100_xmit_pkts,
 };
-- 
2.17.1


