<div dir="ltr">+class Config(BaseConfig):<br>+ """Performance test metrics."""<br>+<br>+ test_parameters: list[dict[str, int | float]] = [<br>+ {"frame_size": 64, "num_descriptors": 1024, "expected_mpps": 1.00},<br>+ {"frame_size": 128, "num_descriptors": 1024, "expected_mpps": 1.00},<br>+ {"frame_size": 256, "num_descriptors": 1024, "expected_mpps": 1.00},<br>+ {"frame_size": 512, "num_descriptors": 1024, "expected_mpps": 1.00},<br>+ {"frame_size": 1024, "num_descriptors": 1024, "expected_mpps": 1.00},<br>+ {"frame_size": 1518, "num_descriptors": 1024, "expected_mpps": 1.00},<br>+ ]<br>+ delta_tolerance: float = 0.05<br><br>Disregard the last comment, I had assumed that the delta_tolerance was in mpps<br>not percentage of expected_mpps.<div> <br>nit: It may be helpful to declare this distinction in a comment above the delta_tolerance<div>variable or perhaps use percent_tolerance instead. <div><br>Reviewed-by: Andrew Bailey <<a href="mailto:abailey@iol.unh.edu">abailey@iol.unh.edu</a>></div></div></div></div><br><div class="gmail_quote gmail_quote_container"><div dir="ltr" class="gmail_attr">On Thu, Nov 6, 2025 at 8:30 AM Andrew Bailey <<a href="mailto:abailey@iol.unh.edu">abailey@iol.unh.edu</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex"><div dir="ltr"><div><div style="color:inherit">+ params["measured_mpps"] = self._transmit(testpmd, frame_size)<br>+ params["performance_delta"] = (<br>+ float(params["measured_mpps"]) - float(params["expected_mpps"])<br></div></div>+ ) / float(params["expected_mpps"])<br>+ params["pass"] = float(params["performance_delta"]) >= -self.delta_tolerance<br><br>This code seems like it can produce false positives. If we are checking if a measured mpps is within dela_tolerance of the expected, (pass = measured_mpps >= expected_mpps - delta_tolerance) may be more effective. 
As an example of a false positive, use measured_mpps = 1.0, expected_mpps = 2.0, and delta_tolerance = 0.51. This should be a fail since 1.0 is not within 0.51 of 2.0 but equates to (-.50) >= (-.51) == true. </div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Wed, Nov 5, 2025 at 5:37 PM Patrick Robb <<a href="mailto:probb@iol.unh.edu" target="_blank">probb@iol.unh.edu</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">From: Nicholas Pratte <<a href="mailto:npratte@iol.unh.edu" target="_blank">npratte@iol.unh.edu</a>><br>
<br>
Provide packet transmission function to support performance tests using a<br>
user-supplied performance traffic generator. The single core performance<br>
test is included. It allows the user to define a matrix of frame size,<br>
descriptor count, and expected mpps, and fails if any combination does<br>
not forward an mpps count within 5% of the given baseline.<br>
<br>
Bugzilla ID: 1697<br>
Signed-off-by: Nicholas Pratte <<a href="mailto:npratte@iol.unh.edu" target="_blank">npratte@iol.unh.edu</a>><br>
Signed-off-by: Patrick Robb <<a href="mailto:probb@iol.unh.edu" target="_blank">probb@iol.unh.edu</a>><br>
Reviewed-by: Dean Marx <<a href="mailto:dmarx@iol.unh.edu" target="_blank">dmarx@iol.unh.edu</a>><br>
Reviewed-by: Andrew Bailey <<a href="mailto:abailey@iol.unh.edu" target="_blank">abailey@iol.unh.edu</a>><br>
---<br>
...sts.TestSuite_single_core_forward_perf.rst | 8 +<br>
dts/api/packet.py | 35 +++-<br>
dts/api/test.py | 32 ++++<br>
dts/configurations/tests_config.example.yaml | 12 ++<br>
.../TestSuite_single_core_forward_perf.py | 149 ++++++++++++++++++<br>
5 files changed, 235 insertions(+), 1 deletion(-)<br>
create mode 100644 doc/api/dts/tests.TestSuite_single_core_forward_perf.rst<br>
create mode 100644 dts/tests/TestSuite_single_core_forward_perf.py<br>
<br>
diff --git a/doc/api/dts/tests.TestSuite_single_core_forward_perf.rst b/doc/api/dts/tests.TestSuite_single_core_forward_perf.rst<br>
new file mode 100644<br>
index 0000000000..3651b0b041<br>
--- /dev/null<br>
+++ b/doc/api/dts/tests.TestSuite_single_core_forward_perf.rst<br>
@@ -0,0 +1,8 @@<br>
+.. SPDX-License-Identifier: BSD-3-Clause<br>
+<br>
+single_core_forward_perf Test Suite<br>
+===================================<br>
+<br>
+.. automodule:: tests.TestSuite_single_core_forward_perf<br>
+ :members:<br>
+ :show-inheritance:<br>
diff --git a/dts/api/packet.py b/dts/api/packet.py<br>
index ac7f64dd17..094a1b7a9d 100644<br>
--- a/dts/api/packet.py<br>
+++ b/dts/api/packet.py<br>
@@ -33,6 +33,9 @@<br>
from framework.testbed_model.traffic_generator.capturing_traffic_generator import (<br>
PacketFilteringConfig,<br>
)<br>
+from framework.testbed_model.traffic_generator.performance_traffic_generator import (<br>
+ PerformanceTrafficStats,<br>
+)<br>
from framework.utils import get_packet_summaries<br>
<br>
<br>
@@ -108,7 +111,9 @@ def send_packets(<br>
packets: Packets to send.<br>
"""<br>
packets = adjust_addresses(packets)<br>
- get_ctx().func_tg.send_packets(packets, get_ctx().topology.tg_port_egress)<br>
+ tg = get_ctx().func_tg<br>
+ if tg:<br>
+ tg.send_packets(packets, get_ctx().topology.tg_port_egress)<br>
<br>
<br>
def get_expected_packets(<br>
@@ -317,3 +322,31 @@ def _verify_l3_packet(received_packet: IP, expected_packet: IP) -> bool:<br>
if received_packet.src != expected_packet.src or received_packet.dst != expected_packet.dst:<br>
return False<br>
return True<br>
+<br>
+<br>
def assess_performance_by_packet(
    packet: Packet, duration: float, send_mpps: int | None = None
) -> PerformanceTrafficStats:
    """Generate traffic from `packet` for `duration` seconds and collect statistics.

    The packet is transmitted over the test suite's topology by the context's
    performance traffic generator, which must be a performance-capable generator.

    Args:
        packet: The packet to send.
        duration: Performance test duration (in seconds).
        send_mpps: The millions packets per second send rate.

    Returns:
        Performance statistics of the generated test.
    """
    # Imported locally rather than at module scope — presumably to avoid an
    # import cycle; confirm before hoisting to the top of the file.
    from framework.testbed_model.traffic_generator.performance_traffic_generator import (
        PerformanceTrafficGenerator,
    )

    perf_tg = get_ctx().perf_tg
    # isinstance() narrows the static type here, so no typing.cast is needed.
    assert isinstance(
        perf_tg, PerformanceTrafficGenerator
    ), "Cannot send performance traffic with non-performance traffic generator"
    # TODO: implement @requires for types of traffic generator
    return perf_tg.calculate_traffic_and_stats(packet, duration, send_mpps)
diff --git a/dts/api/test.py b/dts/api/test.py<br>
index f58c82715d..11265ee2c1 100644<br>
--- a/dts/api/test.py<br>
+++ b/dts/api/test.py<br>
@@ -6,9 +6,13 @@<br>
This module provides utility functions for test cases, including logging, verification.<br>
"""<br>
<br>
+import json<br>
+from datetime import datetime<br>
+<br>
from framework.context import get_ctx<br>
from framework.exception import InternalError, SkippedTestException, TestCaseVerifyError<br>
from framework.logger import DTSLogger<br>
+from framework.testbed_model.artifact import Artifact<br>
<br>
<br>
def get_current_test_case_name() -> str:<br>
@@ -124,3 +128,31 @@ def get_logger() -> DTSLogger:<br>
if current_test_suite is None:<br>
raise InternalError("No current test suite")<br>
return current_test_suite._logger<br>
+<br>
+<br>
def write_performance_json(
    performance_data: dict, filename: str = "performance_metrics.json"
) -> None:
    """Write performance test results to a JSON file in the test suite's output directory.

    A "timestamp" key (current local time, ISO 8601) is placed first in the
    serialized output; beyond that, no particular structure is imposed on the
    caller's dictionary.

    Args:
        performance_data: Dictionary containing performance metrics and results.
        filename: Name of the JSON file to create.

    Raises:
        InternalError: If performance data is not provided.
    """
    if not performance_data:
        raise InternalError("No performance data to write")

    # Build the payload with the timestamp first so it leads the JSON file;
    # a caller-supplied "timestamp" key deliberately overrides it.
    payload = {"timestamp": datetime.now().isoformat()}
    payload.update(performance_data)

    artifact = Artifact("local", filename)
    with artifact.open("w") as out:
        json.dump(payload, out, indent=2)

    get_logger().info(f"Performance results written to: {artifact.local_path}")
diff --git a/dts/configurations/tests_config.example.yaml b/dts/configurations/tests_config.example.yaml<br>
index c011ac0588..167bc91a35 100644<br>
--- a/dts/configurations/tests_config.example.yaml<br>
+++ b/dts/configurations/tests_config.example.yaml<br>
@@ -3,3 +3,15 @@<br>
# Define the custom test suite configurations<br>
hello_world:<br>
msg: A custom hello world to you!<br>
+single_core_forward_perf:<br>
+ test_parameters: # Add frame size / descriptor count combinations as needed<br>
+ - frame_size: 64<br>
+ num_descriptors: 512<br>
+ expected_mpps: 1.0 # Set millions of packets per second according to your device's expected throughput for this given frame size / descriptor count<br>
+ - frame_size: 64<br>
+ num_descriptors: 1024<br>
+ expected_mpps: 1.0<br>
+ - frame_size: 512<br>
+ num_descriptors: 1024<br>
+ expected_mpps: 1.0<br>
+ delta_tolerance: 0.05<br>
\ No newline at end of file<br>
diff --git a/dts/tests/TestSuite_single_core_forward_perf.py b/dts/tests/TestSuite_single_core_forward_perf.py<br>
new file mode 100644<br>
index 0000000000..8a92ba39b5<br>
--- /dev/null<br>
+++ b/dts/tests/TestSuite_single_core_forward_perf.py<br>
@@ -0,0 +1,149 @@<br>
+# SPDX-License-Identifier: BSD-3-Clause<br>
+# Copyright(c) 2025 University of New Hampshire<br>
+<br>
+"""Single core forwarding performance test suite.<br>
+<br>
+This suite measures the number of packets which can be forwarded by DPDK using a single core.<br>
+The test suite takes as input a set of parameters, each consisting of a frame size,<br>
+Tx/Rx descriptor count, and the expected MPPS to be forwarded by the DPDK application. The<br>
+test leverages a performance traffic generator to send traffic at two paired TestPMD interfaces<br>
+on the SUT system, which forward to one another and then back to the traffic generator's ports.<br>
+The aggregate packets forwarded by the two TestPMD ports are compared against the expected MPPS<br>
+baseline which is given in the test config, in order to determine the test result.<br>
+"""<br>
+<br>
+from scapy.layers.inet import IP<br>
+from scapy.layers.l2 import Ether<br>
+from scapy.packet import Raw<br>
+<br>
+from api.capabilities import (<br>
+ LinkTopology,<br>
+ requires_link_topology,<br>
+)<br>
+from api.packet import assess_performance_by_packet<br>
+from api.test import verify, write_performance_json<br>
+from api.testpmd import TestPmd<br>
+from api.testpmd.config import RXRingParams, TXRingParams<br>
+from framework.params.types import TestPmdParamsDict<br>
+from framework.test_suite import BaseConfig, TestSuite, perf_test<br>
+<br>
+<br>
class Config(BaseConfig):
    """Performance test metrics.

    Attributes:
        test_parameters: Matrix of test combinations; each entry pairs a frame
            size (bytes) and a Tx/Rx descriptor count with the forwarding rate
            (millions of packets per second) expected for that combination.
        delta_tolerance: Permitted shortfall from ``expected_mpps``, expressed
            as a fraction of ``expected_mpps`` — NOT an absolute mpps delta. A
            combination passes when
            ``measured_mpps >= expected_mpps * (1 - delta_tolerance)``.
    """

    test_parameters: list[dict[str, int | float]] = [
        {"frame_size": 64, "num_descriptors": 1024, "expected_mpps": 1.00},
        {"frame_size": 128, "num_descriptors": 1024, "expected_mpps": 1.00},
        {"frame_size": 256, "num_descriptors": 1024, "expected_mpps": 1.00},
        {"frame_size": 512, "num_descriptors": 1024, "expected_mpps": 1.00},
        {"frame_size": 1024, "num_descriptors": 1024, "expected_mpps": 1.00},
        {"frame_size": 1518, "num_descriptors": 1024, "expected_mpps": 1.00},
    ]
    # Fractional tolerance: 0.05 means "within 5% of expected_mpps", not 0.05 mpps.
    delta_tolerance: float = 0.05
+<br>
+<br>
@requires_link_topology(LinkTopology.TWO_LINKS)
class TestSingleCoreForwardPerf(TestSuite):
    """Single core forwarding performance test suite."""

    # Suite configuration; defaults come from `Config`, overridable via the tests config file.
    config: Config

    def set_up_suite(self):
        """Set up the test suite.

        Copies the test parameter matrix and the pass/fail tolerance out of the
        suite configuration.
        """
        self.test_parameters = self.config.test_parameters
        self.delta_tolerance = self.config.delta_tolerance

    def _transmit(self, testpmd: TestPmd, frame_size: int) -> float:
        """Forward traffic of the given frame size through testpmd and measure the rate.

        Starts forwarding in `testpmd`, transmits a fixed dummy packet from the
        performance traffic generator for 30 seconds, and reports the received rate.

        Args:
            testpmd: The testpmd shell to use for forwarding packets.
            frame_size: The size of the frame to transmit.

        Returns:
            The MPPS (millions of packets per second) forwarded by the SUT.
        """
        # Build packet with dummy values, and account for the 14B and 20B Ether and IP headers
        packet = (
            Ether(src="52:00:00:00:00:00")
            / IP(src="1.2.3.4", dst="192.18.1.0")
            / Raw(load="x" * (frame_size - 14 - 20))
        )

        testpmd.start()

        # Transmit for 30 seconds.
        stats = assess_performance_by_packet(packet=packet, duration=30)

        # The traffic generator reports packets per second; convert to millions.
        rx_mpps = stats.rx_pps / 1_000_000

        return rx_mpps

    def _produce_stats_table(self, test_parameters: list[dict[str, int | float]]) -> None:
        """Display performance results in table format and write to structured JSON file.

        Args:
            test_parameters: The expected and real stats per set of test parameters.
        """
        # NOTE(review): results go to stdout via print rather than the suite
        # logger — confirm this is the intended reporting channel.
        header = f"{'Frame Size':>12} | {'TXD/RXD':>12} | {'Real MPPS':>12} | {'Expected MPPS':>14}"
        print("-" * len(header))
        print(header)
        print("-" * len(header))
        for params in test_parameters:
            print(f"{params['frame_size']:>12} | {params['num_descriptors']:>12} | ", end="")
            print(f"{params['measured_mpps']:>12.2f} | {params['expected_mpps']:>14.2f}")
        print("-" * len(header))

        write_performance_json({"results": test_parameters})

    @perf_test
    def single_core_forward_perf(self) -> None:
        """Validate expected single core forwarding performance.

        Steps:
            * Create a packet according to the frame size specified in the test config.
            * Transmit from the traffic generator's ports 0 and 1 at above the expected rate.
            * Forward on TestPMD's interfaces 0 and 1 with 1 core.

        Verify:
            * The resulting MPPS forwarded is greater than expected_mpps*(1-delta_tolerance).
        """
        # Find SUT DPDK driver to determine driver specific performance optimization flags
        sut_dpdk_driver = self._ctx.sut_node.config.ports[0].os_driver_for_dpdk

        for params in self.test_parameters:
            frame_size = params["frame_size"]
            num_descriptors = params["num_descriptors"]

            # Single forwarding core with the configured Tx/Rx ring sizes.
            driver_specific_testpmd_args: TestPmdParamsDict = {
                "tx_ring": TXRingParams(descriptors=num_descriptors),
                "rx_ring": RXRingParams(descriptors=num_descriptors),
                "nb_cores": 1,
            }

            if sut_dpdk_driver == "mlx5_core":
                driver_specific_testpmd_args["burst"] = 64
                driver_specific_testpmd_args["mbcache"] = 512
            elif sut_dpdk_driver == "i40e":
                driver_specific_testpmd_args["rx_queues"] = 2
                driver_specific_testpmd_args["tx_queues"] = 2

            with TestPmd(
                **driver_specific_testpmd_args,
            ) as testpmd:
                params["measured_mpps"] = self._transmit(testpmd, frame_size)
                # Relative delta: (measured - expected) / expected; negative means shortfall.
                params["performance_delta"] = (
                    float(params["measured_mpps"]) - float(params["expected_mpps"])
                ) / float(params["expected_mpps"])
                # delta_tolerance is a fraction of expected_mpps, so this test is
                # equivalent to: measured_mpps >= expected_mpps * (1 - delta_tolerance).
                params["pass"] = float(params["performance_delta"]) >= -self.delta_tolerance

        # Report the full table for every combination first, then fail on any miss.
        self._produce_stats_table(self.test_parameters)

        for params in self.test_parameters:
            verify(
                params["pass"] is True,
                f"""Packets forwarded is less than {(1 -self.delta_tolerance)*100}%
                of the expected baseline.
                Measured MPPS = {params["measured_mpps"]}
                Expected MPPS = {params["expected_mpps"]}""",
            )
-- <br>
2.49.0<br>
<br>
</blockquote></div>
</blockquote></div>