[EXT] [PATCH v5 2/5] examples/l3fwd: split processing and send stages
Shijith Thotton
sthotton at marvell.com
Mon Oct 17 14:06:16 CEST 2022
>
>Split the packet processing stage from the packet send stage, as the
>send stage is not common to poll and event mode.
>
>Signed-off-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
Acked-by: Shijith Thotton <sthotton at marvell.com>
>---
> examples/l3fwd/l3fwd_em_hlm.h | 39 +++++++++++++++++++-----------
> examples/l3fwd/l3fwd_lpm_altivec.h | 25 ++++++++++++++++---
> examples/l3fwd/l3fwd_lpm_neon.h | 35 ++++++++++++++++++++-------
> examples/l3fwd/l3fwd_lpm_sse.h | 25 ++++++++++++++++---
> 4 files changed, 95 insertions(+), 29 deletions(-)
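
Looks good. One note for readers following the series: poll mode defers the
packet rewrite and batches TX through send_packets_multi(), while event mode
has to finish the rewrite (step3) during processing, since each event is
enqueued on its own. Because do_step3 is a const argument that call sites pass
as a literal, the compiler can fold the branches away after inlining, so the
poll path should pay nothing for the extra parameter. A minimal standalone
sketch of that pattern, with illustrative names only (not taken from the
patch):

  #include <stdint.h>

  /* Core worker. The branch on the constant flag vanishes once this is
   * inlined into a caller that passes a literal 0 or 1. */
  static inline void
  process_burst(uint16_t *dst, int n, const uint8_t do_rewrite)
  {
          int i;

          for (i = 0; i < n; i++) {
                  dst[i] = (uint16_t)(i & 1);     /* stand-in for a lookup */
                  if (do_rewrite)
                          dst[i] ^= 0x8000;       /* stand-in for step3 */
          }
  }

  /* Poll-mode flavour: rewrite deferred to the send stage. */
  static inline void
  process_only(uint16_t *dst, int n)
  {
          process_burst(dst, n, 0);
  }

  /* Event-mode flavour: rewrite done inline during processing. */
  static inline void
  process_and_rewrite(uint16_t *dst, int n)
  {
          process_burst(dst, n, 1);
  }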
>
>diff --git a/examples/l3fwd/l3fwd_em_hlm.h b/examples/l3fwd/l3fwd_em_hlm.h
>index e76f2760b0..12b997e477 100644
>--- a/examples/l3fwd/l3fwd_em_hlm.h
>+++ b/examples/l3fwd/l3fwd_em_hlm.h
>@@ -177,16 +177,12 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
> return portid;
> }
>
>-/*
>- * Buffer optimized handling of packets, invoked
>- * from main_loop.
>- */
> static inline void
>-l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
>- uint16_t portid, struct lcore_conf *qconf)
>+l3fwd_em_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
>+ uint16_t *dst_port, uint16_t portid,
>+ struct lcore_conf *qconf, const uint8_t do_step3)
> {
> int32_t i, j, pos;
>- uint16_t dst_port[MAX_PKT_BURST];
>
> /*
> * Send nb_rx - nb_rx % EM_HASH_LOOKUP_COUNT packets
>@@ -233,13 +229,30 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
> dst_port[j + i] = em_get_dst_port(qconf,
> pkts_burst[j + i], portid);
> }
>+
>+ for (i = 0; i < EM_HASH_LOOKUP_COUNT && do_step3; i += FWDSTEP)
>+ processx4_step3(&pkts_burst[j + i], &dst_port[j + i]);
> }
>
>- for (; j < nb_rx; j++)
>+ for (; j < nb_rx; j++) {
> dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);
>+ if (do_step3)
>+ process_packet(pkts_burst[j], &pkts_burst[j]->port);
>+ }
>+}
>
>- send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
>+/*
>+ * Buffer optimized handling of packets, invoked
>+ * from main_loop.
>+ */
>+static inline void
>+l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid,
>+ struct lcore_conf *qconf)
>+{
>+ uint16_t dst_port[MAX_PKT_BURST];
>
>+ l3fwd_em_process_packets(nb_rx, pkts_burst, dst_port, portid, qconf, 0);
>+ send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
> }
>
> /*
>@@ -260,11 +273,8 @@ l3fwd_em_process_events(int nb_rx, struct rte_event **ev,
> */
> int32_t n = RTE_ALIGN_FLOOR(nb_rx, EM_HASH_LOOKUP_COUNT);
>
>- for (j = 0; j < EM_HASH_LOOKUP_COUNT && j < nb_rx; j++) {
>+ for (j = 0; j < nb_rx; j++)
> pkts_burst[j] = ev[j]->mbuf;
>- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
>- struct rte_ether_hdr *) + 1);
>- }
>
> for (j = 0; j < n; j += EM_HASH_LOOKUP_COUNT) {
>
>@@ -305,7 +315,8 @@ l3fwd_em_process_events(int nb_rx, struct rte_event **ev,
> }
> continue;
> }
>- processx4_step3(&pkts_burst[j], &dst_port[j]);
>+ for (i = 0; i < EM_HASH_LOOKUP_COUNT; i += FWDSTEP)
>+ processx4_step3(&pkts_burst[j + i], &dst_port[j + i]);
>
> for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
> pkts_burst[j + i]->port = dst_port[j + i];
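
The change in the last hunk is easy to miss: processx4_step3() rewrites
exactly FWDSTEP packets per call, while the lookup group is
EM_HASH_LOOKUP_COUNT wide, so stepping i by FWDSTEP now covers the whole
group whenever EM_HASH_LOOKUP_COUNT is a multiple of FWDSTEP. A sketch of the
shape this assumes, constants illustrative:

  #include <stdint.h>

  #define FWDSTEP 4
  #define EM_HASH_LOOKUP_COUNT 8  /* illustrative; assume a multiple of FWDSTEP */

  /* Stand-in for processx4_step3(): rewrites exactly FWDSTEP entries. */
  static void
  step3_x4(uint16_t *dst)
  {
          int k;

          for (k = 0; k < FWDSTEP; k++)
                  dst[k] |= 0x100;
  }

  /* Cover one whole lookup group, FWDSTEP entries at a time, as the
   * patched loops now do. */
  static void
  rewrite_group(uint16_t *dst_port, int j)
  {
          int i;

          for (i = 0; i < EM_HASH_LOOKUP_COUNT; i += FWDSTEP)
                  step3_x4(&dst_port[j + i]);
  }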
>diff --git a/examples/l3fwd/l3fwd_lpm_altivec.h b/examples/l3fwd/l3fwd_lpm_altivec.h
>index 0c6852a7bb..adb82f1478 100644
>--- a/examples/l3fwd/l3fwd_lpm_altivec.h
>+++ b/examples/l3fwd/l3fwd_lpm_altivec.h
>@@ -96,11 +96,11 @@ processx4_step2(const struct lcore_conf *qconf,
> * from main_loop.
> */
> static inline void
>-l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
>- uint8_t portid, struct lcore_conf *qconf)
>+l3fwd_lpm_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
>+ uint8_t portid, uint16_t *dst_port,
>+ struct lcore_conf *qconf, const uint8_t do_step3)
> {
> int32_t j;
>- uint16_t dst_port[MAX_PKT_BURST];
> __vector unsigned int dip[MAX_PKT_BURST / FWDSTEP];
> uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
> const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
>@@ -114,22 +114,41 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
> ipv4_flag[j / FWDSTEP],
> portid, &pkts_burst[j], &dst_port[j]);
>
>+ if (do_step3)
>+ for (j = 0; j != k; j += FWDSTEP)
>+ processx4_step3(&pkts_burst[j], &dst_port[j]);
>+
> /* Classify last up to 3 packets one by one */
> switch (nb_rx % FWDSTEP) {
> case 3:
> dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
>+ if (do_step3)
>+ process_packet(pkts_burst[j], &dst_port[j]);
> j++;
> /* fall-through */
> case 2:
> dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
>+ if (do_step3)
>+ process_packet(pkts_burst[j], &dst_port[j]);
> j++;
> /* fall-through */
> case 1:
> dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
>+ if (do_step3)
>+ process_packet(pkts_burst[j], &dst_port[j]);
> j++;
> /* fall-through */
> }
>+}
>+
>+static inline void
>+l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint8_t portid,
>+ struct lcore_conf *qconf)
>+{
>+ uint16_t dst_port[MAX_PKT_BURST];
>
>+ l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf,
>+ 0);
> send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
> }
>
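
The up-to-3-packet tails above use the classic switch fall-through idiom; for
anyone new to it, a self-contained illustration (stand-in body, not the
patch's classify/rewrite calls):

  #include <stdint.h>

  #define FWDSTEP 4

  /* Handle the n % FWDSTEP trailing items one by one; each case falls
   * through to the next, so exactly (n % FWDSTEP) items are touched. */
  static void
  handle_tail(uint16_t *dst, int j, int n)
  {
          switch (n % FWDSTEP) {
          case 3:
                  dst[j++] = 0;   /* stand-in for lpm_get_dst_port() */
                  /* fall-through */
          case 2:
                  dst[j++] = 0;
                  /* fall-through */
          case 1:
                  dst[j++] = 0;
          }
  }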
>diff --git a/examples/l3fwd/l3fwd_lpm_neon.h b/examples/l3fwd/l3fwd_lpm_neon.h
>index 78ee83b76c..2a68c4c15e 100644
>--- a/examples/l3fwd/l3fwd_lpm_neon.h
>+++ b/examples/l3fwd/l3fwd_lpm_neon.h
>@@ -80,16 +80,12 @@ processx4_step2(const struct lcore_conf *qconf,
> }
> }
>
>-/*
>- * Buffer optimized handling of packets, invoked
>- * from main_loop.
>- */
> static inline void
>-l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
>- uint16_t portid, struct lcore_conf *qconf)
>+l3fwd_lpm_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
>+ uint16_t portid, uint16_t *dst_port,
>+ struct lcore_conf *qconf, const uint8_t do_step3)
> {
> int32_t i = 0, j = 0;
>- uint16_t dst_port[MAX_PKT_BURST];
> int32x4_t dip;
> uint32_t ipv4_flag;
> const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
>@@ -100,7 +96,6 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
> rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i],
> void *));
> }
>-
> for (j = 0; j != k - FWDSTEP; j += FWDSTEP) {
> for (i = 0; i < FWDSTEP; i++) {
> rte_prefetch0(rte_pktmbuf_mtod(
>@@ -111,11 +106,15 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
> processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
> processx4_step2(qconf, dip, ipv4_flag, portid,
> &pkts_burst[j], &dst_port[j]);
>+ if (do_step3)
>+ processx4_step3(&pkts_burst[j], &dst_port[j]);
> }
>
> processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
> processx4_step2(qconf, dip, ipv4_flag, portid, &pkts_burst[j],
> &dst_port[j]);
>+ if (do_step3)
>+ processx4_step3(&pkts_burst[j], &dst_port[j]);
>
> j += FWDSTEP;
> }
>@@ -138,26 +137,44 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
> void *));
> j++;
> }
>-
> j -= m;
> /* Classify last up to 3 packets one by one */
> switch (m) {
> case 3:
> dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
> portid);
>+ if (do_step3)
>+ process_packet(pkts_burst[j], &dst_port[j]);
> j++;
> /* fallthrough */
> case 2:
> dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
> portid);
>+ if (do_step3)
>+ process_packet(pkts_burst[j], &dst_port[j]);
> j++;
> /* fallthrough */
> case 1:
> dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
> portid);
>+ if (do_step3)
>+ process_packet(pkts_burst[j], &dst_port[j]);
> }
> }
>+}
>+
>+/*
>+ * Buffer optimized handling of packets, invoked
>+ * from main_loop.
>+ */
>+static inline void
>+l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid,
>+ struct lcore_conf *qconf)
>+{
>+ uint16_t dst_port[MAX_PKT_BURST];
>
>+ l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf,
>+ 0);
> send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
> }
>
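
Also worth noting: unlike the SSE/Altivec versions, which do step3 in a
second pass over the FWDSTEP-aligned part of the burst, the NEON version
folds the rewrite into the existing prefetch-pipelined loop, so packets are
rewritten while their data is still hot. A generic sketch of that
software-pipelining idea (plain C, not the patch's code):

  #include <stddef.h>
  #include <stdint.h>

  /* Prefetch item j + 1 while processing item j: a minimal software
   * pipeline, analogous to the NEON loop prefetching the next FWDSTEP
   * packets while classifying and rewriting the current ones. */
  static void
  pipelined(uint16_t *items, size_t n)
  {
          size_t j;

          if (n == 0)
                  return;
          for (j = 0; j + 1 < n; j++) {
                  __builtin_prefetch(&items[j + 1], 0, 3);
                  items[j] ^= 1;          /* "process + rewrite" */
          }
          items[n - 1] ^= 1;              /* drain the pipeline tail */
  }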
>diff --git a/examples/l3fwd/l3fwd_lpm_sse.h b/examples/l3fwd/l3fwd_lpm_sse.h
>index 3f637a23d1..db15030320 100644
>--- a/examples/l3fwd/l3fwd_lpm_sse.h
>+++ b/examples/l3fwd/l3fwd_lpm_sse.h
>@@ -82,11 +82,11 @@ processx4_step2(const struct lcore_conf *qconf,
> * from main_loop.
> */
> static inline void
>-l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
>- uint16_t portid, struct lcore_conf *qconf)
>+l3fwd_lpm_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
>+ uint16_t portid, uint16_t *dst_port,
>+ struct lcore_conf *qconf, const uint8_t do_step3)
> {
> int32_t j;
>- uint16_t dst_port[MAX_PKT_BURST];
> __m128i dip[MAX_PKT_BURST / FWDSTEP];
> uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
> const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
>@@ -99,21 +99,40 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
> processx4_step2(qconf, dip[j / FWDSTEP],
> ipv4_flag[j / FWDSTEP], portid, &pkts_burst[j], &dst_port[j]);
>
>+ if (do_step3)
>+ for (j = 0; j != k; j += FWDSTEP)
>+ processx4_step3(&pkts_burst[j], &dst_port[j]);
>+
> /* Classify last up to 3 packets one by one */
> switch (nb_rx % FWDSTEP) {
> case 3:
> dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
>+ if (do_step3)
>+ process_packet(pkts_burst[j], &dst_port[j]);
> j++;
> /* fall-through */
> case 2:
> dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
>+ if (do_step3)
>+ process_packet(pkts_burst[j], &dst_port[j]);
> j++;
> /* fall-through */
> case 1:
> dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
>+ if (do_step3)
>+ process_packet(pkts_burst[j], &dst_port[j]);
> j++;
> }
>+}
>+
>+static inline void
>+l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid,
>+ struct lcore_conf *qconf)
>+{
>+ uint16_t dst_port[MAX_PKT_BURST];
>
>+ l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf,
>+ 0);
> send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
> }
>
>--
>2.25.1
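
And to make the intent of do_step3 concrete: the poll-mode wrappers above
pass 0 and keep send_packets_multi(), while the event path (added later in
this series, if I read the split right) can pass 1 and hand the
already-rewritten mbufs back to the event device. A hypothetical caller,
purely to show the shape; none of this is in the patch:

  /* Hypothetical event-mode use (illustrative only; not in this patch).
   * The rewrite happens during processing (do_step3 = 1), so there is
   * no separate send stage. */
  uint16_t dst_port[MAX_PKT_BURST];
  struct rte_event ev[MAX_PKT_BURST];
  int i;

  l3fwd_em_process_packets(nb_deq, pkts_burst, dst_port, portid, qconf, 1);
  for (i = 0; i < nb_deq; i++) {
          pkts_burst[i]->port = dst_port[i];
          ev[i].mbuf = pkts_burst[i];
  }
  /* ... rte_event_enqueue_burst(event_dev_id, event_port_id, ev, nb_deq); */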