[dpdk-dev] [PATCH v2 09/12] net/cxgbe: add devarg to control Tx coalescing

Rahul Lakkireddy rahul.lakkireddy at chelsio.com
Fri Sep 27 22:30:09 CEST 2019


Remove the compile-time option used to select between Tx coalescing
latency and throughput behavior. Add a tx_mode_latency devarg instead,
to control the Tx coalescing behavior dynamically.

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy at chelsio.com>
---
v2:
- No changes.

 config/common_base                 |  1 -
 doc/guides/nics/cxgbe.rst          | 13 +++++++++----
 drivers/net/cxgbe/base/adapter.h   |  1 +
 drivers/net/cxgbe/cxgbe.h          |  1 +
 drivers/net/cxgbe/cxgbe_ethdev.c   |  3 ++-
 drivers/net/cxgbe/cxgbe_main.c     |  3 +++
 drivers/net/cxgbe/cxgbevf_ethdev.c |  1 +
 drivers/net/cxgbe/sge.c            | 18 ++++++++----------
 8 files changed, 25 insertions(+), 16 deletions(-)
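
Note for reviewers (not part of the commit itself): with the compile-time
knob gone, the behavior is selected per device at startup. Below is a
minimal sketch of how an application might request the low-latency mode;
the application name and PCI address are placeholders, and the -w device
whitelist syntax is the EAL option used in this release.

#include <rte_eal.h>

int main(void)
{
	char *argv[] = {
		"app",
		/* placeholder device; tx_mode_latency=1 prefers latency */
		"-w", "0000:02:00.0,tx_mode_latency=1",
		NULL
	};

	/* EAL forwards the per-device key=value list to the PMD, which
	 * reads it in cxgbe_process_devargs() below.
	 */
	if (rte_eal_init(3, argv) < 0)
		return -1;

	return 0;
}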

diff --git a/config/common_base b/config/common_base
index eb4d86065..6a7e00d75 100644
--- a/config/common_base
+++ b/config/common_base
@@ -217,7 +217,6 @@ CONFIG_RTE_LIBRTE_BNXT_PMD=y
 # Compile burst-oriented Chelsio Terminator (CXGBE) PMD
 #
 CONFIG_RTE_LIBRTE_CXGBE_PMD=y
-CONFIG_RTE_LIBRTE_CXGBE_TPUT=y
 
 # NXP DPAA Bus
 CONFIG_RTE_LIBRTE_DPAA_BUS=n
diff --git a/doc/guides/nics/cxgbe.rst b/doc/guides/nics/cxgbe.rst
index f94b8371e..76b1a2ac7 100644
--- a/doc/guides/nics/cxgbe.rst
+++ b/doc/guides/nics/cxgbe.rst
@@ -104,10 +104,6 @@ enabling debugging options may affect system performance.
 
      This controls compilation of both CXGBE and CXGBEVF PMD.
 
-- ``CONFIG_RTE_LIBRTE_CXGBE_TPUT`` (default **y**)
-
-  Toggle behavior to prefer Throughput or Latency.
-
 Runtime Options
 ~~~~~~~~~~~~~~~
 
@@ -127,6 +123,15 @@ Common Runtime Options
   enabled, the outer VLAN tag is preserved in Q-in-Q packets. Otherwise,
   the outer VLAN tag is stripped in Q-in-Q packets.
 
+- ``tx_mode_latency`` (default **0**)
+
+  When set to 1, Tx doesn't wait for the maximum number of packets to
+  be coalesced and instead sends them immediately at the end of the
+  current Tx burst. When set to 0, Tx waits across multiple Tx bursts
+  until the maximum number of packets has been coalesced. In this
+  case, Tx only sends the coalesced packets to hardware once the
+  maximum coalesce limit has been reached.
+
 CXGBE VF Only Runtime Options
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index fcc84e4e9..6758364c7 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -302,6 +302,7 @@ TAILQ_HEAD(mbox_list, mbox_entry);
 struct adapter_devargs {
 	bool keep_ovlan;
 	bool force_link_up;
+	bool tx_mode_latency;
 };
 
 struct adapter {
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index 3a50502b7..ed1be3559 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -52,6 +52,7 @@
 
 /* Common PF and VF devargs */
 #define CXGBE_DEVARG_CMN_KEEP_OVLAN "keep_ovlan"
+#define CXGBE_DEVARG_CMN_TX_MODE_LATENCY "tx_mode_latency"
 
 /* VF only devargs */
 #define CXGBE_DEVARG_VF_FORCE_LINK_UP "force_link_up"
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 2a2875f89..615dda607 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -1239,7 +1239,8 @@ RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
 RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
-			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> ");
+			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
+			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> ");
 
 RTE_INIT(cxgbe_init_log)
 {
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 6a6137f06..23b74c754 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -672,6 +672,7 @@ void cxgbe_print_port_info(struct adapter *adap)
 static int check_devargs_handler(const char *key, const char *value, void *p)
 {
 	if (!strncmp(key, CXGBE_DEVARG_CMN_KEEP_OVLAN, strlen(key)) ||
+	    !strncmp(key, CXGBE_DEVARG_CMN_TX_MODE_LATENCY, strlen(key)) ||
 	    !strncmp(key, CXGBE_DEVARG_VF_FORCE_LINK_UP, strlen(key))) {
 		if (!strncmp(value, "1", 1)) {
 			bool *dst_val = (bool *)p;
@@ -728,6 +729,8 @@ void cxgbe_process_devargs(struct adapter *adap)
 {
 	cxgbe_get_devargs_int(adap, &adap->devargs.keep_ovlan,
 			      CXGBE_DEVARG_CMN_KEEP_OVLAN, 0);
+	cxgbe_get_devargs_int(adap, &adap->devargs.tx_mode_latency,
+			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY, 0);
 	cxgbe_get_devargs_int(adap, &adap->devargs.force_link_up,
 			      CXGBE_DEVARG_VF_FORCE_LINK_UP, 0);
 }
diff --git a/drivers/net/cxgbe/cxgbevf_ethdev.c b/drivers/net/cxgbe/cxgbevf_ethdev.c
index cc0938b43..4165ba986 100644
--- a/drivers/net/cxgbe/cxgbevf_ethdev.c
+++ b/drivers/net/cxgbe/cxgbevf_ethdev.c
@@ -213,4 +213,5 @@ RTE_PMD_REGISTER_PCI_TABLE(net_cxgbevf, cxgb4vf_pci_tbl);
 RTE_PMD_REGISTER_KMOD_DEP(net_cxgbevf, "* igb_uio | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_cxgbevf,
 			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
+			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
 			      CXGBE_DEVARG_VF_FORCE_LINK_UP "=<0|1> ");
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index bf3190211..0df870a41 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -1007,10 +1007,6 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
 	unsigned int max_coal_pkt_num = is_pf4(adap) ? ETH_COALESCE_PKT_NUM :
 						       ETH_COALESCE_VF_PKT_NUM;
 
-#ifdef RTE_LIBRTE_CXGBE_TPUT
-	RTE_SET_USED(nb_pkts);
-#endif
-
 	if (q->coalesce.type == 0) {
 		mc = (struct ulp_txpkt *)q->coalesce.ptr;
 		mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |
@@ -1082,13 +1078,15 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
 	sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1);
 	sd->coalesce.idx = (idx & 1) + 1;
 
-	/* send the coaelsced work request if max reached */
-	if (++q->coalesce.idx == max_coal_pkt_num
-#ifndef RTE_LIBRTE_CXGBE_TPUT
-	    || q->coalesce.idx >= nb_pkts
-#endif
-	    )
+	/* Send the coalesced work request only when the max is reached.
+	 * However, if lower latency is preferred over throughput, don't
+	 * wait to coalesce across Tx bursts and send the packets now.
+	 */
+	q->coalesce.idx++;
+	if (q->coalesce.idx == max_coal_pkt_num ||
+	    (adap->devargs.tx_mode_latency && q->coalesce.idx >= nb_pkts))
 		ship_tx_pkt_coalesce_wr(adap, txq);
+
 	return 0;
 }
 
-- 
2.18.0
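
To summarize the sge.c change: a condensed, stand-alone sketch of the new
flush condition in tx_do_packet_coalesce(). The struct and helper names
below are illustrative only and do not exist in the driver.

#include <stdbool.h>

struct coalesce_state {
	unsigned int idx; /* packets coalesced so far */
};

/* Decide whether the coalesced work request should be sent now. */
static bool should_ship(struct coalesce_state *c, unsigned int nb_pkts,
			unsigned int max_coal_pkt_num, bool tx_mode_latency)
{
	c->idx++;

	/* Always flush once the hardware coalesce limit is reached. */
	if (c->idx == max_coal_pkt_num)
		return true;

	/* In latency mode, also flush at the end of the current Tx burst
	 * rather than waiting for later bursts to fill the work request.
	 */
	if (tx_mode_latency && c->idx >= nb_pkts)
		return true;

	return false;
}

With tx_mode_latency=0 (the default), only the first condition applies,
matching the previous CONFIG_RTE_LIBRTE_CXGBE_TPUT=y behavior.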


