[dpdk-dev] [RFC PATCH 1/2] app/testpmd: add packet template

Xueming Li xuemingl at mellanox.com
Thu Oct 19 16:06:48 CEST 2017


Txonly forwarding mode will check packet templates and, if available, use the
template registered for each queue.

Signed-off-by: Xueming Li <xuemingl at mellanox.com>
---
 app/test-pmd/testpmd.c |  3 +++
 app/test-pmd/testpmd.h |  1 +
 app/test-pmd/txonly.c  | 42 ++++++++++++++++++++++++++----------------
 3 files changed, 30 insertions(+), 16 deletions(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index a4d4a866b..dacb97888 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -128,6 +128,9 @@ uint8_t mp_anon = 0;
 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
 portid_t nb_peer_eth_addrs = 0;
 
+/* Template of packet to tx */
+struct rte_mbuf *pkt_templ[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
+
 /*
  * Probed Target Environment.
  */
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 265b75f9f..4090d1e87 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -480,6 +480,7 @@ extern struct fwd_stream **fwd_streams;
 
 extern portid_t nb_peer_eth_addrs; /**< Number of peer ethernet addresses. */
 extern struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
+extern struct rte_mbuf *pkt_templ[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
 
 extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */
 extern uint32_t burst_tx_retry_num;  /**< Burst tx retry number for mac-retry. */
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 7070ddc3b..e2b189c60 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -186,6 +186,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	struct rte_port *txp;
 	struct rte_mbuf *pkt;
 	struct rte_mbuf *pkt_seg;
+	struct rte_mbuf *tmpl;
 	struct rte_mempool *mbp;
 	struct ether_hdr eth_hdr;
 	uint16_t nb_tx;
@@ -206,6 +207,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
 #endif
 
 	mbp = current_fwd_lcore()->mbp;
+	tmpl = pkt_templ[fs->tx_port][fs->tx_queue];
 	txp = &ports[fs->tx_port];
 	vlan_tci = txp->tx_vlan_id;
 	vlan_tci_outer = txp->tx_vlan_id_outer;
@@ -231,7 +233,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		 * reset to default value.
 		 */
 		rte_pktmbuf_reset_headroom(pkt);
-		pkt->data_len = tx_pkt_seg_lengths[0];
+		pkt->data_len = tmpl ? tmpl->data_len : tx_pkt_seg_lengths[0];
 		pkt_seg = pkt;
 		if (tx_pkt_split == TX_PKT_SPLIT_RND)
 			nb_segs = random() % tx_pkt_nb_segs + 1;
@@ -251,22 +253,30 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		}
 		pkt_seg->next = NULL; /* Last segment of packet. */
 
-		/*
-		 * Initialize Ethernet header.
-		 */
-		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],&eth_hdr.d_addr);
-		ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
-		eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+		if (tmpl) {
+			copy_buf_to_pkt(
+				rte_pktmbuf_mtod_offset(tmpl, void *, 0),
+				tmpl->data_len, pkt, 0);
+		} else {
+			/*
+			 * Initialize Ethernet header.
+			 */
+			ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
+					&eth_hdr.d_addr);
+			ether_addr_copy(&ports[fs->tx_port].eth_addr,
+					&eth_hdr.s_addr);
+			eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
 
-		/*
-		 * Copy headers in first packet segment(s).
-		 */
-		copy_buf_to_pkt(&eth_hdr, sizeof(eth_hdr), pkt, 0);
-		copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
-				sizeof(struct ether_hdr));
-		copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
-				sizeof(struct ether_hdr) +
-				sizeof(struct ipv4_hdr));
+			/*
+			 * Copy headers in first packet segment(s).
+			 */
+			copy_buf_to_pkt(&eth_hdr, sizeof(eth_hdr), pkt, 0);
+			copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
+					sizeof(struct ether_hdr));
+			copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
+					sizeof(struct ether_hdr) +
+					sizeof(struct ipv4_hdr));
+		}
 
 		/*
 		 * Complete first mbuf of packet and append it to the
-- 
2.13.3



More information about the dev mailing list