[PATCH v3 1/4] dts: add virtual functions to framework
Patrick Robb
probb at iol.unh.edu
Fri Jul 4 06:22:28 CEST 2025
On Wed, Jul 2, 2025 at 12:23 PM Dean Marx <dmarx at iol.unh.edu> wrote:
> Add virtual functions to the DTS framework, along with
> a field for specifying VF test runs in the config file.
>
> Signed-off-by: Patrick Robb <probb at iol.unh.edu>
> Signed-off-by: Dean Marx <dmarx at iol.unh.edu>
> ---
> dts/framework/config/test_run.py | 2 +
> dts/framework/test_run.py | 7 +++
> dts/framework/testbed_model/linux_session.py | 53 +++++++++++++++++++-
> dts/framework/testbed_model/os_session.py | 42 ++++++++++++++++
> dts/framework/testbed_model/topology.py | 53 +++++++++++++++++++-
> 5 files changed, 154 insertions(+), 3 deletions(-)
>
> diff --git a/dts/framework/config/test_run.py b/dts/framework/config/test_run.py
> index b6e4099eeb..eefa32c3cb 100644
> --- a/dts/framework/config/test_run.py
> +++ b/dts/framework/config/test_run.py
> @@ -467,6 +467,8 @@ class TestRunConfiguration(FrozenModel):
> perf: bool
> #: Whether to run functional tests.
> func: bool
> + #: Whether to run the tests with virtual functions instead of physical functions.
> + virtual_functions_testrun: bool
> #: Whether to skip smoke tests.
> skip_smoke_tests: bool = False
> #: The names of test suites and/or test cases to execute.
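
One small note on the config side: unlike skip_smoke_tests just below it, the new
field has no default, so every existing test run config will now have to set it
explicitly. If that is not the intent, giving it a default along these lines (just a
sketch, assuming the same pydantic FrozenModel setup as the other fields) would keep
old configs working unchanged:

    #: Whether to run the tests with virtual functions instead of physical functions.
    virtual_functions_testrun: bool = False

Fine to keep it required if that is deliberate, of course.
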
> diff --git a/dts/framework/test_run.py b/dts/framework/test_run.py
> index fd49a7dc74..ee919e30d9 100644
> --- a/dts/framework/test_run.py
> +++ b/dts/framework/test_run.py
> @@ -346,6 +346,10 @@ def next(self) -> State | None:
> test_run.ctx.tg_node.setup()
> test_run.ctx.dpdk.setup()
> test_run.ctx.topology.setup()
> +
> + if test_run.config.virtual_functions_testrun:
> + test_run.ctx.topology.instantiate_vf_ports()
> +
> test_run.ctx.topology.configure_ports("sut", "dpdk")
> test_run.ctx.tg.setup(test_run.ctx.topology)
>
> @@ -432,6 +436,9 @@ def description(self) -> str:
>
> def next(self) -> State | None:
> """Next state."""
> + if self.test_run.config.virtual_functions_testrun:
> + self.test_run.ctx.topology.delete_vf_ports()
> +
> self.test_run.ctx.shell_pool.terminate_current_pool()
> self.test_run.ctx.tg.teardown()
> self.test_run.ctx.topology.teardown()
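
Also, just to spell the flow out for other reviewers: if I am reading the state
machine right, a VF test run ends up doing roughly the following (a sketch using the
names from this patch, not literal framework code):

    topology.instantiate_vf_ports()           # sut_ports now holds the new VF ports
    topology.configure_ports("sut", "dpdk")   # binds those VF ports to the DPDK driver
    # ... test suites run against the VFs ...
    topology.delete_vf_ports()                # sut_ports points back at the PFs
    topology.teardown()

so calling instantiate_vf_ports() before configure_ports() is what gets the VFs,
rather than the PFs, bound to the DPDK driver.
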
> diff --git a/dts/framework/testbed_model/linux_session.py b/dts/framework/testbed_model/linux_session.py
> index e01c2dd712..604245d855 100644
> --- a/dts/framework/testbed_model/linux_session.py
> +++ b/dts/framework/testbed_model/linux_session.py
> @@ -17,7 +17,11 @@
>
> from typing_extensions import NotRequired
>
> -from framework.exception import ConfigurationError, InternalError, RemoteCommandExecutionError
> +from framework.exception import (
> + ConfigurationError,
> + InternalError,
> + RemoteCommandExecutionError,
> +)
> from framework.testbed_model.os_session import PortInfo
> from framework.utils import expand_range
>
> @@ -211,11 +215,58 @@ def devbind_script_path(self) -> PurePath:
> """
> raise InternalError("Accessed devbind script path before setup.")
>
> + def create_vfs(self, pf_port: Port) -> None:
> + """Overrides :meth:`~.os_session.OSSession.create_vfs`.
> +
> + Raises:
> + InternalError: If there are existing VFs which have to be deleted.
> + """
> + sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}".replace(":", "\\:")
> + curr_num_vfs = int(
> + self.send_command(f"cat {sys_bus_path}/sriov_numvfs",
> privileged=True).stdout
> + )
> + if 0 < curr_num_vfs:
> + raise InternalError("There are existing VFs on the port which must be deleted.")
> + if curr_num_vfs == 0:
> + self.send_command(f"echo 1 | sudo tee
> {sys_bus_path}/sriov_numvfs", privileged=True)
> + self.refresh_lshw()
> +
> + def delete_vfs(self, pf_port: Port) -> None:
> + """Overrides :meth:`~.os_session.OSSession.delete_vfs`."""
> + sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}".replace(":", "\\:")
> + curr_num_vfs = int(
> + self.send_command(f"cat {sys_bus_path}/sriov_numvfs",
> privileged=True).stdout
> + )
> + if curr_num_vfs == 0:
> + self._logger.debug(f"No VFs found on port {pf_port.pci}, skipping deletion")
> + else:
> + self.send_command(f"echo 0 | sudo tee
> {sys_bus_path}/sriov_numvfs", privileged=True)
> +
> + def get_pci_addr_of_vfs(self, pf_port: Port) -> list[str]:
> + """Overrides
> :meth:`~.os_session.OSSession.get_pci_addr_of_vfs`."""
> + sys_bus_path = f"/sys/bus/pci/devices/{pf_port.pci}".replace(":", "\\:")
> + curr_num_vfs = int(self.send_command(f"cat {sys_bus_path}/sriov_numvfs").stdout)
> + if curr_num_vfs > 0:
> + pci_addrs = self.send_command(
> + 'awk -F "PCI_SLOT_NAME=" "/PCI_SLOT_NAME=/ {print \\$2}" '
> + + f"{sys_bus_path}/virtfn*/uevent",
> + privileged=True,
> + )
> + return pci_addrs.stdout.splitlines()
> + else:
> + return []
> +
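
The awk over virtfn*/uevent works. For what it is worth, the same addresses can also
be read from the virtfn symlinks themselves, since each one points at the VF's PCI
device directory. A local-only sketch (it does not go through the session API, so it
is only for illustration):

    import os
    from glob import glob

    def local_vf_addrs(pf_pci: str) -> list[str]:
        # Each virtfn* entry under the PF's sysfs node is a symlink to the VF's
        # device directory, whose basename is the VF's PCI address.
        links = glob(f"/sys/bus/pci/devices/{pf_pci}/virtfn*")
        return sorted(os.path.basename(os.readlink(link)) for link in links)

Not asking for a change here, just noting it in case the quoting in the awk command
ever becomes a hassle.
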
> @cached_property
> def _lshw_net_info(self) -> list[LshwOutput]:
> output = self.send_command("lshw -quiet -json -C network", verify=True)
> return json.loads(output.stdout)
>
> + def refresh_lshw(self) -> None:
> + """Force refresh of cached lshw network info."""
> + if "_lshw_net_info" in self.__dict__:
> + del self.__dict__["_lshw_net_info"]
> + _ = self._lshw_net_info
> +
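
For anyone wondering about the __dict__ deletion above: functools.cached_property
stores the computed value in the instance's __dict__ under the attribute name, so
deleting that key is the standard way to force a recompute. A standalone sketch:

    from functools import cached_property

    class Example:
        @cached_property
        def data(self) -> int:
            print("computing")
            return 42

    e = Example()
    e.data                  # prints "computing" and caches 42
    e.data                  # served from e.__dict__, nothing printed
    del e.__dict__["data"]  # drop the cached value
    e.data                  # prints "computing" again

refresh_lshw() does exactly that for _lshw_net_info and then touches the property to
repopulate it.
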
> def _update_port_attr(self, port: Port, attr_value: str | None, attr_name: str) -> None:
> if attr_value:
> setattr(port, attr_name, attr_value)
> diff --git a/dts/framework/testbed_model/os_session.py b/dts/framework/testbed_model/os_session.py
> index d7a09a0d5d..b6e03aa83d 100644
> --- a/dts/framework/testbed_model/os_session.py
> +++ b/dts/framework/testbed_model/os_session.py
> @@ -603,3 +603,45 @@ def configure_port_mtu(self, mtu: int, port: Port) -> None:
> mtu: Desired MTU value.
> port: Port to set `mtu` on.
> """
> +
> + @abstractmethod
> + def create_vfs(self, pf_port: Port) -> None:
> + """Creates virtual functions for `pf_port`.
> +
> + Checks whether `pf_port` already has virtual functions (VFs) and, if it has
> + none, creates VFs on the port.
> +
> + Args:
> + pf_port: The port to create virtual functions on.
> +
> + Raises:
> + InternalError: If there are existing VFs on the port which have to be
> + deleted first.
> + """
> +
> + @abstractmethod
> + def delete_vfs(self, pf_port: Port) -> None:
> + """Deletes virtual functions for `pf_port`.
> +
> + Checks whether `pf_port` has any virtual functions (VFs) and, if so, removes
> + all of them from the port.
> +
> + Args:
> + pf_port: The port to delete virtual functions on.
> + """
> +
> + @abstractmethod
> + def get_pci_addr_of_vfs(self, pf_port: Port) -> list[str]:
> + """Find the PCI addresses of all virtual functions (VFs) on the
> port `pf_port`.
> +
> + Args:
> + pf_port: The port to find the VFs on.
> +
> + Returns:
> + A list containing all of the PCI addresses of the VFs on the port. If the
> + port has no VFs then the list will be empty.
> + """
> diff --git a/dts/framework/testbed_model/topology.py b/dts/framework/testbed_model/topology.py
> index fb45969136..2bc69d46a9 100644
> --- a/dts/framework/testbed_model/topology.py
> +++ b/dts/framework/testbed_model/topology.py
> @@ -19,7 +19,7 @@
> from framework.exception import ConfigurationError, InternalError
> from framework.testbed_model.node import Node
>
> -from .port import DriverKind, Port
> +from .port import DriverKind, Port, PortConfig
>
>
> class TopologyType(int, Enum):
> @@ -74,6 +74,8 @@ class Topology:
> type: TopologyType
> sut_ports: list[Port]
> tg_ports: list[Port]
> + pf_ports: list[Port]
> + vf_ports: list[Port]
>
> @classmethod
> def from_port_links(cls, port_links: Iterator[PortLink]) -> Self:
> @@ -101,7 +103,7 @@ def from_port_links(cls, port_links: Iterator[PortLink]) -> Self:
> msg = "More than two links in a topology are not
> supported."
> raise ConfigurationError(msg)
>
> - return cls(type, sut_ports, tg_ports)
> + return cls(type, sut_ports, tg_ports, [], [])
>
> def node_and_ports_from_id(self, node_identifier: NodeIdentifier) -> tuple[Node, list[Port]]:
> """Retrieve node and its ports for the current topology.
> @@ -160,6 +162,53 @@ def _setup_ports(self, node_identifier: NodeIdentifier) -> None:
> f"for port {port.name} in node {node.name}."
> )
>
> + def instantiate_vf_ports(self) -> None:
> + """Create, setup, and add virtual functions to the list of ports
> on the SUT node.
> +
> + Raises:
> + InternalError: If virtual function creation fails.
> + """
> + from framework.context import get_ctx
> +
> + ctx = get_ctx()
> +
> + for port in self.sut_ports:
> + self.pf_ports.append(port)
> +
> + for port in self.pf_ports:
> + ctx.sut_node.main_session.create_vfs(port)
> + addr_list = ctx.sut_node.main_session.get_pci_addr_of_vfs(port)
> + if addr_list == []:
> + raise InternalError(f"Failed to create virtual function
> on port {port.pci}")
> + for addr in addr_list:
> + vf_config = PortConfig(
> + name=f"{port.name}-vf-{addr}",
> + pci=addr,
> + os_driver_for_dpdk=port.config.os_driver_for_dpdk,
> + os_driver=port.config.os_driver,
> + )
> + self.vf_ports.append(Port(node=port.node, config=vf_config))
> + ctx.sut_node.main_session.send_command(f"ip link set
> {port.logical_name} vf 0 trust on")
> +
> + self.sut_ports.clear()
> + self.sut_ports.extend(self.vf_ports)
> +
> + for port in self.pf_ports:
> + ctx.sut_node.main_session.send_command(
> + f"ip link set dev {port.logical_name} up", privileged=True
> + )
>
I think this can become:

    ctx.sut_node.main_session.bring_up_link(self.pf_ports)

which should do the same thing but go through the method we already have
implemented.
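
And one more illustration for readers following along: with a PF named "Port 0" at,
say, 0000:3b:00.0 and a single VF created at 0000:3b:02.0 (all made-up values), the
generated VF port config comes out roughly as:

    PortConfig(
        name="Port 0-vf-0000:3b:02.0",
        pci="0000:3b:02.0",
        os_driver_for_dpdk=port.config.os_driver_for_dpdk,  # inherited from the PF
        os_driver=port.config.os_driver,                    # inherited from the PF
    )

which is then wrapped in a Port on the same node and appended to vf_ports.
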
> +
> + def delete_vf_ports(self) -> None:
> + """Delete virtual functions from the SUT node during test run
> teardown."""
> + from framework.context import get_ctx
> +
> + ctx = get_ctx()
> +
> + for port in self.pf_ports:
> + ctx.sut_node.main_session.delete_vfs(port)
> + self.sut_ports.clear()
> + self.sut_ports.extend(self.pf_ports)
> +
> def configure_ports(
> self, node_identifier: NodeIdentifier, drivers: DriverKind | tuple[DriverKind, ...]
> ) -> None:
> --
> 2.49.0
>
>
Reviewed-by: Patrick Robb <probb at iol.unh.edu>
Tested-by: Patrick Robb <probb at iol.unh.edu>