[dpdk-dev] [PATCH v2 5/5] net/mlx5: fix non-dv compilation errors

Gregory Etelson getelson at nvidia.com
Fri Nov 13 15:52:30 CET 2020


ipools used for tunnel offload are restricted to dv enabled code only
since 1d1248d452ff, while the rest of the tunnel offload API did not
have compilation limitations. As a result, PMD compilation failed
on non-dv setups.

Current patch groups tunnel offload code in the dv-enabled code area and
provides stubs for calls that have to be visible in non-dv
environment. These stubs will notify the caller that tunnel offload
functionality is not supported on that setup.

Fixes: 1d1248d452ff ("net/mlx5: fix offloaded tunnel allocation")
Acked-by: Viacheslav Ovsiienko <viacheslavo at nvidia.com>

Signed-off-by: Gregory Etelson <getelson at nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c |   16 +-
 drivers/net/mlx5/mlx5_flow.c     | 1104 +++++++++++++++++-------------
 drivers/net/mlx5/mlx5_flow.h     |   17 +-
 3 files changed, 638 insertions(+), 499 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index c78d56fae3..1aee481bd7 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -301,6 +301,12 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 		goto error;
 	}
 	sh->encaps_decaps->ctx = sh;
+	if (!sh->tunnel_hub)
+		err = mlx5_alloc_tunnel_hub(sh);
+	if (err) {
+		DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
+		goto error;
+	}
 #endif
 #ifdef HAVE_MLX5DV_DR
 	void *domain;
@@ -335,12 +341,6 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 		sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
 	}
 #endif
-	if (!sh->tunnel_hub)
-		err = mlx5_alloc_tunnel_hub(sh);
-	if (err) {
-		DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
-		goto error;
-	}
 	if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
 		mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
 		mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
@@ -389,10 +389,12 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 		mlx5_hlist_destroy(sh->tag_table);
 		sh->tag_table = NULL;
 	}
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	if (sh->tunnel_hub) {
 		mlx5_release_tunnel_hub(sh, priv->dev_port);
 		sh->tunnel_hub = NULL;
 	}
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 	mlx5_free_table_hash_list(priv);
 	return err;
 }
@@ -451,10 +453,12 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)
 		mlx5_hlist_destroy(sh->tag_table);
 		sh->tag_table = NULL;
 	}
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	if (sh->tunnel_hub) {
 		mlx5_release_tunnel_hub(sh, priv->dev_port);
 		sh->tunnel_hub = NULL;
 	}
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 	mlx5_cache_list_destroy(&sh->port_id_action_list);
 	mlx5_cache_list_destroy(&sh->push_vlan_action_list);
 	mlx5_free_table_hash_list(priv);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 358a5f4e72..11bc8e9dde 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -33,25 +33,34 @@
 #include "mlx5_common_os.h"
 #include "rte_pmd_mlx5.h"
 
-static bool
-mlx5_access_tunnel_offload_db
-	(struct rte_eth_dev *dev,
-	 bool (*match)(struct rte_eth_dev *,
-		       struct mlx5_flow_tunnel *, const void *),
-	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
-	 void (*miss)(struct rte_eth_dev *, void *),
-	 void *ctx, bool lock_op);
+struct tunnel_default_miss_ctx {
+	uint16_t *queue;
+	__extension__
+	union {
+		struct rte_flow_action_rss action_rss;
+		struct rte_flow_action_queue miss_queue;
+		struct rte_flow_action_jump miss_jump;
+		uint8_t raw[0];
+	};
+};
+
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+			     struct rte_flow *flow,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_action *app_actions,
+			     uint32_t flow_idx,
+			     struct tunnel_default_miss_ctx *ctx,
+			     struct rte_flow_error *error);
 static struct mlx5_flow_tunnel *
 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
 static void
 mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
-static const struct mlx5_flow_tbl_data_entry  *
-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
-static int
-mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
-		     const struct rte_flow_tunnel *app_tunnel,
-		     struct mlx5_flow_tunnel **tunnel);
-
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+				const struct mlx5_flow_tunnel *tunnel,
+				uint32_t group, uint32_t *table,
+				struct rte_flow_error *error);
 
 /** Device flow drivers. */
 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
@@ -588,203 +597,32 @@ static int mlx5_shared_action_query
 				 const struct rte_flow_shared_action *action,
 				 void *data,
 				 struct rte_flow_error *error);
-static inline bool
-mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
-			  struct rte_flow_tunnel *tunnel,
-			  const char *err_msg)
-{
-	err_msg = NULL;
-	if (!is_tunnel_offload_active(dev)) {
-		err_msg = "tunnel offload was not activated";
-		goto out;
-	} else if (!tunnel) {
-		err_msg = "no application tunnel";
-		goto out;
-	}
-
-	switch (tunnel->type) {
-	default:
-		err_msg = "unsupported tunnel type";
-		goto out;
-	case RTE_FLOW_ITEM_TYPE_VXLAN:
-		break;
-	}
-
-out:
-	return !err_msg;
-}
-
-
 static int
 mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
-		    struct rte_flow_tunnel *app_tunnel,
-		    struct rte_flow_action **actions,
-		    uint32_t *num_of_actions,
-		    struct rte_flow_error *error)
-{
-	int ret;
-	struct mlx5_flow_tunnel *tunnel;
-	const char *err_msg = NULL;
-	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
-
-	if (!verdict)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-					  err_msg);
-	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
-	if (ret < 0) {
-		return rte_flow_error_set(error, ret,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-					  "failed to initialize pmd tunnel");
-	}
-	*actions = &tunnel->action;
-	*num_of_actions = 1;
-	return 0;
-}
-
+			   struct rte_flow_tunnel *app_tunnel,
+			   struct rte_flow_action **actions,
+			   uint32_t *num_of_actions,
+			   struct rte_flow_error *error);
 static int
 mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 		       struct rte_flow_tunnel *app_tunnel,
 		       struct rte_flow_item **items,
 		       uint32_t *num_of_items,
-		       struct rte_flow_error *error)
-{
-	int ret;
-	struct mlx5_flow_tunnel *tunnel;
-	const char *err_msg = NULL;
-	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
-
-	if (!verdict)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  err_msg);
-	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
-	if (ret < 0) {
-		return rte_flow_error_set(error, ret,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "failed to initialize pmd tunnel");
-	}
-	*items = &tunnel->item;
-	*num_of_items = 1;
-	return 0;
-}
-
-struct tunnel_db_element_release_ctx {
-	struct rte_flow_item *items;
-	struct rte_flow_action *actions;
-	uint32_t num_elements;
-	struct rte_flow_error *error;
-	int ret;
-};
-
-static bool
-tunnel_element_release_match(struct rte_eth_dev *dev,
-			     struct mlx5_flow_tunnel *tunnel, const void *x)
-{
-	const struct tunnel_db_element_release_ctx *ctx = x;
-
-	RTE_SET_USED(dev);
-	if (ctx->num_elements != 1)
-		return false;
-	else if (ctx->items)
-		return ctx->items == &tunnel->item;
-	else if (ctx->actions)
-		return ctx->actions == &tunnel->action;
-
-	return false;
-}
-
-static void
-tunnel_element_release_hit(struct rte_eth_dev *dev,
-			   struct mlx5_flow_tunnel *tunnel, void *x)
-{
-	struct tunnel_db_element_release_ctx *ctx = x;
-	ctx->ret = 0;
-	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tunnel);
-}
-
-static void
-tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
-{
-	struct tunnel_db_element_release_ctx *ctx = x;
-	RTE_SET_USED(dev);
-	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
-				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-				      "invalid argument");
-}
-
+		       struct rte_flow_error *error);
 static int
 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
 			      struct rte_flow_item *pmd_items,
-			      uint32_t num_items, struct rte_flow_error *err)
-{
-	struct tunnel_db_element_release_ctx ctx = {
-		.items = pmd_items,
-		.actions = NULL,
-		.num_elements = num_items,
-		.error = err,
-	};
-
-	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
-				      tunnel_element_release_hit,
-				      tunnel_element_release_miss, &ctx, false);
-
-	return ctx.ret;
-}
-
+			      uint32_t num_items, struct rte_flow_error *err);
 static int
 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
 				struct rte_flow_action *pmd_actions,
 				uint32_t num_actions,
-				struct rte_flow_error *err)
-{
-	struct tunnel_db_element_release_ctx ctx = {
-		.items = NULL,
-		.actions = pmd_actions,
-		.num_elements = num_actions,
-		.error = err,
-	};
-
-	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
-				      tunnel_element_release_hit,
-				      tunnel_element_release_miss, &ctx, false);
-
-	return ctx.ret;
-}
-
+				struct rte_flow_error *err);
 static int
 mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
 				  struct rte_mbuf *m,
 				  struct rte_flow_restore_info *info,
-				  struct rte_flow_error *err)
-{
-	uint64_t ol_flags = m->ol_flags;
-	const struct mlx5_flow_tbl_data_entry *tble;
-	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
-
-	if ((ol_flags & mask) != mask)
-		goto err;
-	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
-	if (!tble) {
-		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
-			dev->data->port_id, m->hash.fdir.hi);
-		goto err;
-	}
-	MLX5_ASSERT(tble->tunnel);
-	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
-	info->group_id = tble->group_id;
-	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
-		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
-		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
-
-	return 0;
-
-err:
-	return rte_flow_error_set(err, EINVAL,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				  "failed to get restore info");
-}
+				  struct rte_flow_error *err);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
 	.validate = mlx5_flow_validate,
@@ -4206,142 +4044,6 @@ flow_hairpin_split(struct rte_eth_dev *dev,
 	return 0;
 }
 
-__extension__
-union tunnel_offload_mark {
-	uint32_t val;
-	struct {
-		uint32_t app_reserve:8;
-		uint32_t table_id:15;
-		uint32_t transfer:1;
-		uint32_t _unused_:8;
-	};
-};
-
-struct tunnel_default_miss_ctx {
-	uint16_t *queue;
-	__extension__
-	union {
-		struct rte_flow_action_rss action_rss;
-		struct rte_flow_action_queue miss_queue;
-		struct rte_flow_action_jump miss_jump;
-		uint8_t raw[0];
-	};
-};
-
-static int
-flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
-			     struct rte_flow *flow,
-			     const struct rte_flow_attr *attr,
-			     const struct rte_flow_action *app_actions,
-			     uint32_t flow_idx,
-			     struct tunnel_default_miss_ctx *ctx,
-			     struct rte_flow_error *error)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow *dev_flow;
-	struct rte_flow_attr miss_attr = *attr;
-	const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
-	const struct rte_flow_item miss_items[2] = {
-		{
-			.type = RTE_FLOW_ITEM_TYPE_ETH,
-			.spec = NULL,
-			.last = NULL,
-			.mask = NULL
-		},
-		{
-			.type = RTE_FLOW_ITEM_TYPE_END,
-			.spec = NULL,
-			.last = NULL,
-			.mask = NULL
-		}
-	};
-	union tunnel_offload_mark mark_id;
-	struct rte_flow_action_mark miss_mark;
-	struct rte_flow_action miss_actions[3] = {
-		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
-		[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
-	};
-	const struct rte_flow_action_jump *jump_data;
-	uint32_t i, flow_table = 0; /* prevent compilation warning */
-	struct flow_grp_info grp_info = {
-		.external = 1,
-		.transfer = attr->transfer,
-		.fdb_def_rule = !!priv->fdb_def_rule,
-		.std_tbl_fix = 0,
-	};
-	int ret;
-
-	if (!attr->transfer) {
-		uint32_t q_size;
-
-		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
-		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
-		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
-					 0, SOCKET_ID_ANY);
-		if (!ctx->queue)
-			return rte_flow_error_set
-				(error, ENOMEM,
-				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-				NULL, "invalid default miss RSS");
-		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
-		ctx->action_rss.level = 0,
-		ctx->action_rss.types = priv->rss_conf.rss_hf,
-		ctx->action_rss.key_len = priv->rss_conf.rss_key_len,
-		ctx->action_rss.queue_num = priv->reta_idx_n,
-		ctx->action_rss.key = priv->rss_conf.rss_key,
-		ctx->action_rss.queue = ctx->queue;
-		if (!priv->reta_idx_n || !priv->rxqs_n)
-			return rte_flow_error_set
-				(error, EINVAL,
-				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-				NULL, "invalid port configuration");
-		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
-			ctx->action_rss.types = 0;
-		for (i = 0; i != priv->reta_idx_n; ++i)
-			ctx->queue[i] = (*priv->reta_idx)[i];
-	} else {
-		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
-		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
-	}
-	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
-	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
-	jump_data = app_actions->conf;
-	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
-	miss_attr.group = jump_data->group;
-	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
-				       &flow_table, grp_info, error);
-	if (ret)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-					  NULL, "invalid tunnel id");
-	mark_id.app_reserve = 0;
-	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
-	mark_id.transfer = !!attr->transfer;
-	mark_id._unused_ = 0;
-	miss_mark.id = mark_id.val;
-	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
-				    miss_items, miss_actions, flow_idx, error);
-	if (!dev_flow)
-		return -rte_errno;
-	dev_flow->flow = flow;
-	dev_flow->external = true;
-	dev_flow->tunnel = tunnel;
-	/* Subflow object was created, we must include one in the list. */
-	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
-		      dev_flow->handle, next);
-	DRV_LOG(DEBUG,
-		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
-		dev->data->port_id, tunnel->app_tunnel.type,
-		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
-	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
-				  miss_actions, error);
-	if (!ret)
-		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
-						  error);
-
-	return ret;
-}
-
 /**
  * The last stage of splitting chain, just creates the subflow
  * without any modification.
@@ -5672,7 +5374,8 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 					      error);
 		if (ret < 0)
 			goto error;
-		if (is_flow_tunnel_steer_rule(dev, attr,
+		if (is_tunnel_offload_active(dev) &&
+		    is_flow_tunnel_steer_rule(dev, attr,
 					      buf->entry[i].pattern,
 					      p_actions_rx)) {
 			ret = flow_tunnel_add_default_miss(dev, flow, attr,
@@ -5743,12 +5446,15 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 	wks->flow_idx = wks->flow_nested_idx;
 	if (wks->flow_nested_idx)
 		wks->flow_nested_idx = 0;
-	tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
-	if (tunnel) {
-		flow->tunnel = 1;
-		flow->tunnel_id = tunnel->tunnel_id;
-		__atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
-		mlx5_free(default_miss_ctx.queue);
+	if (is_tunnel_offload_active(dev)) {
+		tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
+		if (tunnel) {
+			flow->tunnel = 1;
+			flow->tunnel_id = tunnel->tunnel_id;
+			__atomic_add_fetch(&tunnel->refctn, 1,
+					   __ATOMIC_RELAXED);
+			mlx5_free(default_miss_ctx.queue);
+		}
 	}
 	return idx;
 error:
@@ -6945,107 +6651,6 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
 	sh->cmng.pending_queries--;
 }
 
-static const struct mlx5_flow_tbl_data_entry  *
-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_hlist_entry *he;
-	union tunnel_offload_mark mbits = { .val = mark };
-	union mlx5_flow_tbl_key table_key = {
-		{
-			.table_id = tunnel_id_to_flow_tbl(mbits.table_id),
-			.dummy = 0,
-			.domain = !!mbits.transfer,
-			.direction = 0,
-		}
-	};
-	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
-	return he ?
-	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
-}
-
-static void
-mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
-				   struct mlx5_hlist_entry *entry)
-{
-	struct mlx5_dev_ctx_shared *sh = list->ctx;
-	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
-
-	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
-			tunnel_flow_tbl_to_id(tte->flow_table));
-	mlx5_free(tte);
-}
-
-static struct mlx5_hlist_entry *
-mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
-				   uint64_t key __rte_unused,
-				   void *ctx __rte_unused)
-{
-	struct mlx5_dev_ctx_shared *sh = list->ctx;
-	struct tunnel_tbl_entry *tte;
-
-	tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
-			  sizeof(*tte), 0,
-			  SOCKET_ID_ANY);
-	if (!tte)
-		goto err;
-	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
-			  &tte->flow_table);
-	if (tte->flow_table >= MLX5_MAX_TABLES) {
-		DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
-			tte->flow_table);
-		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
-				tte->flow_table);
-		goto err;
-	} else if (!tte->flow_table) {
-		goto err;
-	}
-	tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
-	return &tte->hash;
-err:
-	if (tte)
-		mlx5_free(tte);
-	return NULL;
-}
-
-static uint32_t
-tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
-				const struct mlx5_flow_tunnel *tunnel,
-				uint32_t group, uint32_t *table,
-				struct rte_flow_error *error)
-{
-	struct mlx5_hlist_entry *he;
-	struct tunnel_tbl_entry *tte;
-	union tunnel_tbl_key key = {
-		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
-		.group = group
-	};
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_hlist *group_hash;
-
-	group_hash = tunnel ? tunnel->groups : thub->groups;
-	he = mlx5_hlist_lookup(group_hash, key.val, NULL);
-	if (!he) {
-		DRV_LOG(DEBUG, "port %u tunnel %u group=%u - generate table id",
-		dev->data->port_id, key.tunnel_id, group);
-		he = mlx5_hlist_register(group_hash, key.val, NULL);
-	} else {
-		DRV_LOG(DEBUG, "port %u tunnel %u group=%u - skip table id",
-		dev->data->port_id, key.tunnel_id, group);
-	}
-	if (!he)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
-					  NULL,
-					  "tunnel group index not supported");
-	tte = container_of(he, typeof(*tte), hash);
-	*table = tte->flow_table;
-	DRV_LOG(DEBUG, "port %u tunnel %u group=%u table=%u",
-	dev->data->port_id, key.tunnel_id, group, *table);
-	return 0;
-}
-
 static int
 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
 		    struct flow_grp_info grp_info, struct rte_flow_error *error)
@@ -7506,23 +7111,38 @@ mlx5_shared_action_flush(struct rte_eth_dev *dev)
 	return ret;
 }
 
-static void
-mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
-		      struct mlx5_flow_tunnel *tunnel)
+#ifndef HAVE_MLX5DV_DR
+#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
+#else
+#define MLX5_DOMAIN_SYNC_FLOW \
+	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
+#endif
+
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
 {
-	/* no tunnel hub spinlock protection */
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_indexed_pool *ipool;
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	const struct mlx5_flow_driver_ops *fops;
+	int ret;
+	struct rte_flow_attr attr = { .transfer = 0 };
 
-	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
-		dev->data->port_id, tunnel->tunnel_id);
-	rte_spinlock_lock(&thub->sl);
-	LIST_REMOVE(tunnel, chain);
-	rte_spinlock_unlock(&thub->sl);
-	mlx5_hlist_destroy(tunnel->groups);
-	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
-	mlx5_ipool_free(ipool, tunnel->tunnel_id);
+	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
+	if (ret > 0)
+		ret = -ret;
+	return ret;
+}
+
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+static inline uint32_t
+tunnel_id_to_flow_tbl(uint32_t id)
+{
+	return id | (1u << 16);
+}
+
+static inline uint32_t
+tunnel_flow_tbl_to_id(uint32_t flow_tbl)
+{
+	return flow_tbl & ~(1u << 16);
 }
 
 static bool
@@ -7532,19 +7152,235 @@ mlx5_access_tunnel_offload_db
 		       struct mlx5_flow_tunnel *, const void *),
 	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
 	 void (*miss)(struct rte_eth_dev *, void *),
-	 void *ctx, bool lock_op)
-{
-	bool verdict = false;
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tunnel;
+	 void *ctx, bool lock_op);
+static const struct mlx5_flow_tbl_data_entry  *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
+static int
+mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
+		     const struct rte_flow_tunnel *app_tunnel,
+		     struct mlx5_flow_tunnel **tunnel);
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
-		verdict = match(dev, tunnel, (const void *)ctx);
-		if (verdict)
-			break;
-	}
-	if (!lock_op)
+__extension__
+union tunnel_offload_mark {
+	uint32_t val;
+	struct {
+		uint32_t app_reserve:8;
+		uint32_t table_id:15;
+		uint32_t transfer:1;
+		uint32_t _unused_:8;
+	};
+};
+
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+			     struct rte_flow *flow,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_action *app_actions,
+			     uint32_t flow_idx,
+			     struct tunnel_default_miss_ctx *ctx,
+			     struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow *dev_flow;
+	struct rte_flow_attr miss_attr = *attr;
+	const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
+	const struct rte_flow_item miss_items[2] = {
+		{
+			.type = RTE_FLOW_ITEM_TYPE_ETH,
+			.spec = NULL,
+			.last = NULL,
+			.mask = NULL
+		},
+		{
+			.type = RTE_FLOW_ITEM_TYPE_END,
+			.spec = NULL,
+			.last = NULL,
+			.mask = NULL
+		}
+	};
+	union tunnel_offload_mark mark_id;
+	struct rte_flow_action_mark miss_mark;
+	struct rte_flow_action miss_actions[3] = {
+		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
+		[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
+	};
+	const struct rte_flow_action_jump *jump_data;
+	uint32_t i, flow_table = 0; /* prevent compilation warning */
+	struct flow_grp_info grp_info = {
+		.external = 1,
+		.transfer = attr->transfer,
+		.fdb_def_rule = !!priv->fdb_def_rule,
+		.std_tbl_fix = 0,
+	};
+	int ret;
+
+	if (!attr->transfer) {
+		uint32_t q_size;
+
+		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
+		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
+		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
+					 0, SOCKET_ID_ANY);
+		if (!ctx->queue)
+			return rte_flow_error_set
+				(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				NULL, "invalid default miss RSS");
+		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+		ctx->action_rss.level = 0,
+		ctx->action_rss.types = priv->rss_conf.rss_hf,
+		ctx->action_rss.key_len = priv->rss_conf.rss_key_len,
+		ctx->action_rss.queue_num = priv->reta_idx_n,
+		ctx->action_rss.key = priv->rss_conf.rss_key,
+		ctx->action_rss.queue = ctx->queue;
+		if (!priv->reta_idx_n || !priv->rxqs_n)
+			return rte_flow_error_set
+				(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				NULL, "invalid port configuration");
+		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+			ctx->action_rss.types = 0;
+		for (i = 0; i != priv->reta_idx_n; ++i)
+			ctx->queue[i] = (*priv->reta_idx)[i];
+	} else {
+		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
+		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
+	}
+	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
+	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
+	jump_data = app_actions->conf;
+	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
+	miss_attr.group = jump_data->group;
+	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
+				       &flow_table, grp_info, error);
+	if (ret)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  NULL, "invalid tunnel id");
+	mark_id.app_reserve = 0;
+	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
+	mark_id.transfer = !!attr->transfer;
+	mark_id._unused_ = 0;
+	miss_mark.id = mark_id.val;
+	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
+				    miss_items, miss_actions, flow_idx, error);
+	if (!dev_flow)
+		return -rte_errno;
+	dev_flow->flow = flow;
+	dev_flow->external = true;
+	dev_flow->tunnel = tunnel;
+	/* Subflow object was created, we must include one in the list. */
+	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+		      dev_flow->handle, next);
+	DRV_LOG(DEBUG,
+		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
+		dev->data->port_id, tunnel->app_tunnel.type,
+		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
+	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
+				  miss_actions, error);
+	if (!ret)
+		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
+						  error);
+
+	return ret;
+}
+
+static const struct mlx5_flow_tbl_data_entry  *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_hlist_entry *he;
+	union tunnel_offload_mark mbits = { .val = mark };
+	union mlx5_flow_tbl_key table_key = {
+		{
+			.table_id = tunnel_id_to_flow_tbl(mbits.table_id),
+			.dummy = 0,
+			.domain = !!mbits.transfer,
+			.direction = 0,
+		}
+	};
+	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
+	return he ?
+	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+				const struct mlx5_flow_tunnel *tunnel,
+				uint32_t group, uint32_t *table,
+				struct rte_flow_error *error)
+{
+	struct mlx5_hlist_entry *he;
+	struct tunnel_tbl_entry *tte;
+	union tunnel_tbl_key key = {
+		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
+		.group = group
+	};
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_hlist *group_hash;
+
+	group_hash = tunnel ? tunnel->groups : thub->groups;
+	he = mlx5_hlist_lookup(group_hash, key.val, NULL);
+	if (!he) {
+		DRV_LOG(DEBUG, "port %u tunnel %u group=%u - generate table id",
+		dev->data->port_id, key.tunnel_id, group);
+		he = mlx5_hlist_register(group_hash, key.val, NULL);
+	} else {
+		DRV_LOG(DEBUG, "port %u tunnel %u group=%u - skip table id",
+		dev->data->port_id, key.tunnel_id, group);
+	}
+	if (!he)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+					  NULL,
+					  "tunnel group index not supported");
+	tte = container_of(he, typeof(*tte), hash);
+	*table = tte->flow_table;
+	DRV_LOG(DEBUG, "port %u tunnel %u group=%u table=%u",
+	dev->data->port_id, key.tunnel_id, group, *table);
+	return 0;
+}
+
+static void
+mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
+		      struct mlx5_flow_tunnel *tunnel)
+{
+	/* no tunnel hub spinlock protection */
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_indexed_pool *ipool;
+
+	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
+		dev->data->port_id, tunnel->tunnel_id);
+	rte_spinlock_lock(&thub->sl);
+	LIST_REMOVE(tunnel, chain);
+	rte_spinlock_unlock(&thub->sl);
+	mlx5_hlist_destroy(tunnel->groups);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
+	mlx5_ipool_free(ipool, tunnel->tunnel_id);
+}
+
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op)
+{
+	bool verdict = false;
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_flow_tunnel *tunnel;
+
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
+		verdict = match(dev, tunnel, (const void *)ctx);
+		if (verdict)
+			break;
+	}
+	if (!lock_op)
 		rte_spinlock_unlock(&thub->sl);
 	if (verdict && hit)
 		hit(dev, tunnel, ctx);
@@ -7593,6 +7429,50 @@ mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
 	return ctx.tunnel;
 }
 
+static void
+mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
+				   struct mlx5_hlist_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
+			tunnel_flow_tbl_to_id(tte->flow_table));
+	mlx5_free(tte);
+}
+
+static struct mlx5_hlist_entry *
+mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
+				   uint64_t key __rte_unused,
+				   void *ctx __rte_unused)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct tunnel_tbl_entry *tte;
+
+	tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
+			  sizeof(*tte), 0,
+			  SOCKET_ID_ANY);
+	if (!tte)
+		goto err;
+	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
+			  &tte->flow_table);
+	if (tte->flow_table >= MLX5_MAX_TABLES) {
+		DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
+			tte->flow_table);
+		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
+				tte->flow_table);
+		goto err;
+	} else if (!tte->flow_table) {
+		goto err;
+	}
+	tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
+	return &tte->hash;
+err:
+	if (tte)
+		mlx5_free(tte);
+	return NULL;
+}
+
 static struct mlx5_flow_tunnel *
 mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 			  const struct rte_flow_tunnel *app_tunnel)
@@ -7678,7 +7558,6 @@ static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
 		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
 }
 
-
 static int
 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
 		     const struct rte_flow_tunnel *app_tunnel,
@@ -7738,23 +7617,286 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
 	return err;
 }
 
-#ifndef HAVE_MLX5DV_DR
-#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
-#else
-#define MLX5_DOMAIN_SYNC_FLOW \
-	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
-#endif
+static inline bool
+mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
+			  struct rte_flow_tunnel *tunnel,
+			  const char **err_msg)
+{
+	*err_msg = NULL;
+	if (!is_tunnel_offload_active(dev)) {
+		*err_msg = "tunnel offload was not activated";
+		goto out;
+	} else if (!tunnel) {
+		*err_msg = "no application tunnel";
+		goto out;
+	}

-int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
+	switch (tunnel->type) {
+	default:
+		*err_msg = "unsupported tunnel type";
+		goto out;
+	case RTE_FLOW_ITEM_TYPE_VXLAN:
+		break;
+	}
+
+out:
+	return !*err_msg;
+}
+
+static int
+mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
+			   struct rte_flow_tunnel *app_tunnel,
+			   struct rte_flow_action **actions,
+			   uint32_t *num_of_actions,
+			   struct rte_flow_error *error)
 {
-	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-	const struct mlx5_flow_driver_ops *fops;
 	int ret;
-	struct rte_flow_attr attr = { .transfer = 0 };
+	struct mlx5_flow_tunnel *tunnel;
+	const char *err_msg = NULL;
+	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);

-	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
-	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
-	if (ret > 0)
-		ret = -ret;
-	return ret;
+	if (!verdict)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  err_msg);
+	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+	if (ret < 0) {
+		return rte_flow_error_set(error, ret,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "failed to initialize pmd tunnel");
+	}
+	*actions = &tunnel->action;
+	*num_of_actions = 1;
+	return 0;
+}
+
+static int
+mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
+		       struct rte_flow_tunnel *app_tunnel,
+		       struct rte_flow_item **items,
+		       uint32_t *num_of_items,
+		       struct rte_flow_error *error)
+{
+	int ret;
+	struct mlx5_flow_tunnel *tunnel;
+	const char *err_msg = NULL;
+	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
+
+	if (!verdict)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  err_msg);
+	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+	if (ret < 0) {
+		return rte_flow_error_set(error, ret,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "failed to initialize pmd tunnel");
+	}
+	*items = &tunnel->item;
+	*num_of_items = 1;
+	return 0;
+}
+
+struct tunnel_db_element_release_ctx {
+	struct rte_flow_item *items;
+	struct rte_flow_action *actions;
+	uint32_t num_elements;
+	struct rte_flow_error *error;
+	int ret;
+};
+
+static bool
+tunnel_element_release_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	if (ctx->num_elements != 1)
+		return false;
+	else if (ctx->items)
+		return ctx->items == &tunnel->item;
+	else if (ctx->actions)
+		return ctx->actions == &tunnel->action;
+
+	return false;
+}
+
+static void
+tunnel_element_release_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+
+	ctx->ret = 0;
+	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tunnel);
+}
+
+static void
+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
+				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				      "invalid argument");
+}
+
+static int
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+			      struct rte_flow_item *pmd_items,
+			      uint32_t num_items, struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = pmd_items,
+		.actions = NULL,
+		.num_elements = num_items,
+		.error = err,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
+
+	return ctx.ret;
+}
+
+static int
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+				struct rte_flow_action *pmd_actions,
+				uint32_t num_actions,
+				struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = NULL,
+		.actions = pmd_actions,
+		.num_elements = num_actions,
+		.error = err,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
+
+	return ctx.ret;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err)
+{
+	uint64_t ol_flags = m->ol_flags;
+	const struct mlx5_flow_tbl_data_entry *tble;
+	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+
+	if ((ol_flags & mask) != mask)
+		goto err;
+	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
+	if (!tble) {
+		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
+			dev->data->port_id, m->hash.fdir.hi);
+		goto err;
+	}
+	MLX5_ASSERT(tble->tunnel);
+	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
+	info->group_id = tble->group_id;
+	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
+		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
+		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
+
+	return 0;
+
+err:
+	return rte_flow_error_set(err, EINVAL,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "failed to get restore info");
+}
+#else /* HAVE_IBV_FLOW_DV_SUPPORT */
+static int
+mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
+			   __rte_unused struct rte_flow_tunnel *app_tunnel,
+			   __rte_unused struct rte_flow_action **actions,
+			   __rte_unused uint32_t *num_of_actions,
+			   __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
+		       __rte_unused struct rte_flow_tunnel *app_tunnel,
+		       __rte_unused struct rte_flow_item **items,
+		       __rte_unused uint32_t *num_of_items,
+		       __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
+			      __rte_unused struct rte_flow_item *pmd_items,
+			      __rte_unused uint32_t num_items,
+			      __rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
+				__rte_unused struct rte_flow_action *pmd_action,
+				__rte_unused uint32_t num_actions,
+				__rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
+				  __rte_unused struct rte_mbuf *m,
+				  __rte_unused struct rte_flow_restore_info *i,
+				  __rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
+			     __rte_unused struct rte_flow *flow,
+			     __rte_unused const struct rte_flow_attr *attr,
+			     __rte_unused const struct rte_flow_action *actions,
+			     __rte_unused uint32_t flow_idx,
+			     __rte_unused struct tunnel_default_miss_ctx *ctx,
+			     __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
+		    __rte_unused uint32_t id)
+{
+	return NULL;
+}
+
+static void
+mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
+		      __rte_unused struct mlx5_flow_tunnel *tunnel)
+{
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
+				__rte_unused const struct mlx5_flow_tunnel *t,
+				__rte_unused uint32_t group,
+				__rte_unused uint32_t *table,
+				struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOTSUP,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "tunnel offload requires DV support");
 }
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index bdf2c50090..672c27ecb9 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -965,18 +965,6 @@ struct tunnel_tbl_entry {
 	uint32_t flow_table;
 };
 
-static inline uint32_t
-tunnel_id_to_flow_tbl(uint32_t id)
-{
-	return id | (1u << 16);
-}
-
-static inline uint32_t
-tunnel_flow_tbl_to_id(uint32_t flow_tbl)
-{
-	return flow_tbl & ~(1u << 16);
-}
-
 union tunnel_tbl_key {
 	uint64_t val;
 	struct {
@@ -995,8 +983,13 @@ mlx5_tunnel_hub(struct rte_eth_dev *dev)
 static inline bool
 is_tunnel_offload_active(struct rte_eth_dev *dev)
 {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	struct mlx5_priv *priv = dev->data->dev_private;
 	return !!priv->config.dv_miss_info;
+#else
+	RTE_SET_USED(dev);
+	return false;
+#endif
 }
 
 static inline bool
-- 
2.29.2



More information about the dev mailing list