[RFC 1/2] net/null: add stub flow ops for rte_flow API testing
Stephen Hemminger
stephen at networkplumber.org
Mon Feb 9 21:05:42 CET 2026
Add flow_ops to the null PMD that validate input structure and reject
all rules with properly typed rte_flow_error responses. Pattern items
and actions are walked individually, with cause pointers set to the
offending element. flush() succeeds as a no-op; all other operations
return appropriate error codes.
This enables testing the full rte_flow code path without hardware.
Signed-off-by: Stephen Hemminger <stephen at networkplumber.org>
---
drivers/net/null/meson.build | 6 +-
drivers/net/null/null_flow.c | 237 ++++++++++++++++++++++++++++++++
drivers/net/null/null_flow.h | 12 ++
drivers/net/null/rte_eth_null.c | 12 ++
4 files changed, 266 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/null/null_flow.c
create mode 100644 drivers/net/null/null_flow.h
diff --git a/drivers/net/null/meson.build b/drivers/net/null/meson.build
index bad7dc1af7..810715de52 100644
--- a/drivers/net/null/meson.build
+++ b/drivers/net/null/meson.build
@@ -1,5 +1,9 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-sources = files('rte_eth_null.c')
+# Source files, alphabetically ordered per DPDK meson convention.
+sources = files(
+        'null_flow.c',
+        'rte_eth_null.c',
+)
+
require_iova_in_mbuf = false
diff --git a/drivers/net/null/null_flow.c b/drivers/net/null/null_flow.c
new file mode 100644
index 0000000000..57de18f455
--- /dev/null
+++ b/drivers/net/null/null_flow.c
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2026 Stephen Hemminger
+ */
+
+/*
+ * Stub flow operations for the net_null PMD.
+ *
+ * These ops provide a realistic-but-minimal implementation of
+ * rte_flow_ops that can be used for API-layer testing. Every
+ * operation walks its input, performs basic structural validation,
+ * and then rejects the request with the most specific error type
+ * and message it can produce. This exercises the full flow-API
+ * code path (port lookup → ops dispatch → PMD callback → error
+ * propagation) without requiring any hardware.
+ *
+ * Summary of behaviour:
+ *
+ * validate – walks pattern + actions; rejects each unsupported
+ * item/action with RTE_FLOW_ERROR_TYPE_ITEM or
+ * _ACTION, pointing `cause` at the offending element.
+ * A structurally valid rule whose items are all VOID/END
+ * and whose actions are all VOID/END is still rejected:
+ * at the attribute level (no direction set, or transfer/
+ * group/priority requested), otherwise with a generic
+ * "cannot offload any flow rules" error.
+ *
+ * create – calls validate, then returns NULL (never creates).
+ *
+ * destroy – returns -ENOENT (no flows exist).
+ *
+ * flush – succeeds (there are no flows to flush).
+ *
+ * query – returns -ENOTSUP (no queryable actions).
+ *
+ * isolate – returns -ENOTSUP (isolation not supported).
+ */
+
+#include <errno.h>
+#include <string.h>
+
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+/* --------------------------------------------------------------------------
+ * Helpers
+ * -------------------------------------------------------------------------- */
+
+/*
+ * Scan a pattern list. Only VOID placeholders and the END terminator
+ * are tolerated; the first concrete match item is rejected with an
+ * ITEM-typed error whose cause pointer refers to that item.
+ * Returns 0 if the list contains nothing but VOID/END, otherwise
+ * -rte_errno as set by rte_flow_error_set().
+ */
+static int
+null_flow_validate_pattern(const struct rte_flow_item pattern[],
+			    struct rte_flow_error *error)
+{
+	const struct rte_flow_item *cur = pattern;
+
+	if (cur == NULL)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+					  NULL, "NULL pattern");
+
+	while (cur->type != RTE_FLOW_ITEM_TYPE_END) {
+		/* Any real match item is unsupported. */
+		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  cur,
+						  "null PMD does not support pattern items");
+		cur++;
+	}
+
+	return 0;
+}
+
+/*
+ * Scan an action list. Only VOID placeholders and the END terminator
+ * are tolerated; the first concrete action is rejected with an
+ * ACTION-typed error whose cause pointer refers to that action.
+ * Returns 0 if the list contains nothing but VOID/END, otherwise
+ * -rte_errno as set by rte_flow_error_set().
+ */
+static int
+null_flow_validate_actions(const struct rte_flow_action actions[],
+			    struct rte_flow_error *error)
+{
+	const struct rte_flow_action *cur = actions;
+
+	if (cur == NULL)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+					  NULL, "NULL action list");
+
+	while (cur->type != RTE_FLOW_ACTION_TYPE_END) {
+		/* Any real action is unsupported. */
+		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  cur,
+						  "null PMD does not support flow actions");
+		cur++;
+	}
+
+	return 0;
+}
+
+/* --------------------------------------------------------------------------
+ * Flow ops callbacks
+ * -------------------------------------------------------------------------- */
+
+/*
+ * .validate callback. Checks run in a fixed order — attributes first,
+ * then pattern items, then actions — so the error reported is always
+ * the most specific one for the first problem found; do not reorder.
+ * Never returns 0: even an all-VOID rule is rejected by the final
+ * fallback. rte_flow_error_set() fills *error, sets rte_errno, and
+ * returns the negated errno, which is propagated to the caller.
+ */
+static int
+null_flow_validate(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ /* ---- attribute checks ---- */
+ if (attr == NULL)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attributes");
+
+ /* A rule must apply to at least one direction (or transfer). */
+ if (!attr->ingress && !attr->egress && !attr->transfer)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ attr,
+ "at least one of ingress/egress/transfer "
+ "must be set");
+
+ if (attr->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr,
+ "transfer attribute not supported");
+
+ if (attr->group > 0)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr,
+ "only group 0 is supported");
+
+ if (attr->priority > 0)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr,
+ "only priority 0 is supported");
+
+ /* ---- pattern checks ---- */
+ ret = null_flow_validate_pattern(pattern, error);
+ if (ret)
+ return ret;
+
+ /* ---- action checks ---- */
+ ret = null_flow_validate_actions(actions, error);
+ if (ret)
+ return ret;
+
+ /*
+ * If we get here, the rule is structurally valid but contains
+ * nothing but VOID items and VOID actions — reject generically.
+ */
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "null PMD cannot offload any flow rules");
+}
+
+/*
+ * .create callback: validates and then refuses to create. Returns
+ * NULL with *error (and rte_errno) populated. Today validation
+ * always fails, but guard the success path anyway so a NULL return
+ * can never be paired with an unset error structure.
+ */
+static struct rte_flow *
+null_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	if (null_flow_validate(dev, attr, pattern, actions, error) == 0)
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "null PMD cannot offload any flow rules");
+	return NULL;
+}
+
+/*
+ * .destroy callback. No flow can ever have been created, so any
+ * handle passed in is stale by definition: fail with -ENOENT and
+ * point the error cause at the handle. Note `flow` is NOT tagged
+ * __rte_unused — it is used as the error cause below.
+ */
+static int
+null_flow_destroy(struct rte_eth_dev *dev __rte_unused,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOENT,
+				  RTE_FLOW_ERROR_TYPE_HANDLE,
+				  flow,
+				  "no flow rules exist on null PMD");
+}
+
+/*
+ * .flush callback: no flows can exist on this PMD, so flushing is
+ * trivially successful. This is the only flow op here that succeeds.
+ */
+static int
+null_flow_flush(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ /* Nothing to flush — success. */
+ return 0;
+}
+
+/*
+ * .query callback: no queryable actions (e.g. COUNT) are supported,
+ * so every query fails with -ENOTSUP and an UNSPECIFIED error type.
+ */
+static int
+null_flow_query(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ const struct rte_flow_action *action __rte_unused,
+ void *data __rte_unused,
+ struct rte_flow_error *error)
+{
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "null PMD does not support flow queries");
+}
+
+/*
+ * .isolate callback: isolated mode (flow rules as the only traffic
+ * filter) is not supported; always fails with -ENOTSUP, for both
+ * enable and disable requests.
+ */
+static int
+null_flow_isolate(struct rte_eth_dev *dev __rte_unused,
+ int set __rte_unused,
+ struct rte_flow_error *error)
+{
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "null PMD does not support flow isolation");
+}
+
+/* --------------------------------------------------------------------------
+ * Public ops structure — referenced by rte_eth_null.c
+ * -------------------------------------------------------------------------- */
+
+/* Callbacks not set here are left NULL; the rte_flow API layer is
+ * expected to report those operations as unsupported to callers. */
+const struct rte_flow_ops null_flow_ops = {
+ .validate = null_flow_validate,
+ .create = null_flow_create,
+ .destroy = null_flow_destroy,
+ .flush = null_flow_flush,
+ .query = null_flow_query,
+ .isolate = null_flow_isolate,
+};
diff --git a/drivers/net/null/null_flow.h b/drivers/net/null/null_flow.h
new file mode 100644
index 0000000000..f589533079
--- /dev/null
+++ b/drivers/net/null/null_flow.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2026 Stephen Hemminger
+ */
+
+#ifndef NULL_FLOW_H
+#define NULL_FLOW_H
+
+#include <rte_flow_driver.h>
+
+extern const struct rte_flow_ops null_flow_ops;
+
+#endif /* NULL_FLOW_H */
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 46e7e7bd8c..b8dbef35e1 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -8,12 +8,15 @@
#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
+#include <rte_flow.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <bus_vdev_driver.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
+#include "null_flow.h"
+
#define ETH_NULL_PACKET_SIZE_ARG "size"
#define ETH_NULL_PACKET_COPY_ARG "copy"
#define ETH_NULL_PACKET_NO_RX_ARG "no-rx"
@@ -511,12 +514,21 @@ eth_dev_close(struct rte_eth_dev *dev)
return 0;
}
+/*
+ * eth_dev_ops.flow_ops_get callback: hand the rte_flow layer the
+ * stub ops table defined in null_flow.c. Always succeeds.
+ */
+static int
+null_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
+{
+ *ops = &null_flow_ops;
+ return 0;
+}
+
static const struct eth_dev_ops ops = {
.dev_close = eth_dev_close,
.dev_start = eth_dev_start,
.dev_stop = eth_dev_stop,
.dev_configure = eth_dev_configure,
.dev_infos_get = eth_dev_info,
+ .flow_ops_get = null_dev_flow_ops_get,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
.rx_queue_release = eth_rx_queue_release,
--
2.51.0
More information about the dev
mailing list