[dpdk-dev] [PATCH 06/11] net/mlx5: add Direct Verbs prepare function

Yongseok Koh yskoh at mellanox.com
Wed Sep 19 08:48:40 CEST 2018


From: Ori Kam <orika at mellanox.com>

This function allocates the Direct Verbs device flow, and
introduces the relevant PRM structures.

This commit also adds the matcher object. The matcher object acts as a
mask and should be shared between flows. For example all rules that
should match source IP with full mask should use the same matcher. A
flow that should match dest IP or source IP but without full mask should
have a new matcher allocated.

Signed-off-by: Ori Kam <orika at mellanox.com>
Acked-by: Yongseok Koh <yskoh at mellanox.com>
---
 drivers/net/mlx5/mlx5.h         |   1 +
 drivers/net/mlx5/mlx5_flow.h    |  31 +++++-
 drivers/net/mlx5/mlx5_flow_dv.c |  45 ++++++++-
 drivers/net/mlx5/mlx5_prm.h     | 213 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_rxtx.h    |   7 ++
 5 files changed, 295 insertions(+), 2 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 35a196e76..1207edf91 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -212,6 +212,7 @@ struct priv {
 	LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
 	/* Verbs Indirection tables. */
 	LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
+	LIST_HEAD(matcher, mlx5_cache) matchers;
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
 	int primary_socket; /* Unix socket for primary process. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 9b0cd28ae..0cf496db3 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -106,6 +106,34 @@
 /* Max number of actions per DV flow. */
 #define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
 
+/* Matcher PRM representation */
+struct mlx5_flow_dv_match_params {
+	size_t size;
+	/**< Size of match value. Do NOT split size and key! */
+	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
+	/**< Matcher value. This value is used as the mask or as a key. */
+};
+
+/* Matcher structure. */
+struct mlx5_flow_dv_matcher {
+	struct mlx5_cache cache; /**< Cache to struct mlx5dv_flow_matcher. */
+	uint16_t crc; /**< CRC of key. */
+	uint16_t priority; /**< Priority of matcher. */
+	uint8_t egress; /**< Egress matcher. */
+	struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
+};
+
+/* DV flows structure. */
+struct mlx5_flow_dv {
+	uint64_t hash_fields; /**< Fields that participate in the hash. */
+	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
+	/* Flow DV api: */
+	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
+	struct mlx5_flow_dv_match_params value;
+	/**< Holds the value that the packet is compared to. */
+	struct ibv_flow *flow; /**< Installed flow. */
+};
+
 /* Verbs specification header. */
 struct ibv_spec_header {
 	enum ibv_flow_spec_type type;
@@ -132,7 +160,8 @@ struct mlx5_flow {
 	struct rte_flow *flow; /**< Pointer to the main flow. */
 	uint32_t layers; /**< Bit-fields that holds the detected layers. */
 	union {
-		struct mlx5_flow_verbs verbs; /**< Holds the verbs dev-flow. */
+		struct mlx5_flow_dv dv;
+		struct mlx5_flow_verbs verbs;
 	};
 };
 
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 86a8b3cd0..30d501a61 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -291,6 +291,49 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 }
 
 /**
+ * Internal preparation function. Allocates the DV flow size,
+ * this size is constant.
+ *
+ * @param[in] attr
+ *   Pointer to the flow attributes.
+ * @param[in] items
+ *   Pointer to the list of items.
+ * @param[in] actions
+ *   Pointer to the list of actions.
+ * @param[out] item_flags
+ *   Pointer to bit mask of all items detected.
+ * @param[out] action_flags
+ *   Pointer to bit mask of all actions detected.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   Pointer to mlx5_flow object on success,
+ *   otherwise NULL and rte_errno is set.
+ */
+static struct mlx5_flow *
+flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
+		const struct rte_flow_item items[] __rte_unused,
+		const struct rte_flow_action actions[] __rte_unused,
+		uint64_t *item_flags __rte_unused,
+		uint64_t *action_flags __rte_unused,
+		struct rte_flow_error *error)
+{
+	uint32_t size = sizeof(struct mlx5_flow);
+	struct mlx5_flow *flow;
+
+	flow = rte_calloc(__func__, 1, size, 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "not enough memory to create flow");
+		return NULL;
+	}
+	flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
+	return flow;
+}
+
+/**
  * Fills the flow_ops with the function pointers.
  *
  * @param[out] flow_ops
@@ -301,7 +344,7 @@ mlx5_flow_dv_get_driver_ops(struct mlx5_flow_driver_ops *flow_ops)
 {
 	*flow_ops = (struct mlx5_flow_driver_ops) {
 		.validate = flow_dv_validate,
-		.prepare = NULL,
+		.prepare = flow_dv_prepare,
 		.translate = NULL,
 		.apply = NULL,
 		.remove = NULL,
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 0870d32fd..2222e7fbd 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -280,6 +280,219 @@ struct mlx5_cqe {
 /* CQE format value. */
 #define MLX5_COMPRESSED 0x3
 
+/* The field of packet to be modified. */
+enum mlx5_modification_field {
+	MLX5_MODI_OUT_SMAC_47_16 = 1,
+	MLX5_MODI_OUT_SMAC_15_0,
+	MLX5_MODI_OUT_ETHERTYPE,
+	MLX5_MODI_OUT_DMAC_47_16,
+	MLX5_MODI_OUT_DMAC_15_0,
+	MLX5_MODI_OUT_IP_DSCP,
+	MLX5_MODI_OUT_TCP_FLAGS,
+	MLX5_MODI_OUT_TCP_SPORT,
+	MLX5_MODI_OUT_TCP_DPORT,
+	MLX5_MODI_OUT_IPV4_TTL,
+	MLX5_MODI_OUT_UDP_SPORT,
+	MLX5_MODI_OUT_UDP_DPORT,
+	MLX5_MODI_OUT_SIPV6_127_96,
+	MLX5_MODI_OUT_SIPV6_95_64,
+	MLX5_MODI_OUT_SIPV6_63_32,
+	MLX5_MODI_OUT_SIPV6_31_0,
+	MLX5_MODI_OUT_DIPV6_127_96,
+	MLX5_MODI_OUT_DIPV6_95_64,
+	MLX5_MODI_OUT_DIPV6_63_32,
+	MLX5_MODI_OUT_DIPV6_31_0,
+	MLX5_MODI_OUT_SIPV4,
+	MLX5_MODI_OUT_DIPV4,
+	MLX5_MODI_IN_SMAC_47_16 = 0x31,
+	MLX5_MODI_IN_SMAC_15_0,
+	MLX5_MODI_IN_ETHERTYPE,
+	MLX5_MODI_IN_DMAC_47_16,
+	MLX5_MODI_IN_DMAC_15_0,
+	MLX5_MODI_IN_IP_DSCP,
+	MLX5_MODI_IN_TCP_FLAGS,
+	MLX5_MODI_IN_TCP_SPORT,
+	MLX5_MODI_IN_TCP_DPORT,
+	MLX5_MODI_IN_IPV4_TTL,
+	MLX5_MODI_IN_UDP_SPORT,
+	MLX5_MODI_IN_UDP_DPORT,
+	MLX5_MODI_IN_SIPV6_127_96,
+	MLX5_MODI_IN_SIPV6_95_64,
+	MLX5_MODI_IN_SIPV6_63_32,
+	MLX5_MODI_IN_SIPV6_31_0,
+	MLX5_MODI_IN_DIPV6_127_96,
+	MLX5_MODI_IN_DIPV6_95_64,
+	MLX5_MODI_IN_DIPV6_63_32,
+	MLX5_MODI_IN_DIPV6_31_0,
+	MLX5_MODI_IN_SIPV4,
+	MLX5_MODI_IN_DIPV4,
+	MLX5_MODI_OUT_IPV6_HOPLIMIT,
+	MLX5_MODI_IN_IPV6_HOPLIMIT,
+	MLX5_MODI_META_DATA_REG_A,
+	MLX5_MODI_META_DATA_REG_B = 0x50,
+};
+
+/* Modification sub command. */
+struct mlx5_modification_cmd {
+	union {
+		uint32_t data0;
+		struct {
+			unsigned int bits:5;
+			unsigned int rsvd0:3;
+			unsigned int src_offset:5; /* Start bit offset. */
+			unsigned int rsvd1:3;
+			unsigned int src_field:12;
+			unsigned int type:4;
+		};
+	};
+	union {
+		uint32_t data1;
+		uint8_t data[4];
+		struct {
+			unsigned int rsvd2:8;
+			unsigned int dst_offset:8;
+			unsigned int dst_field:12;
+			unsigned int rsvd3:4;
+		};
+	};
+};
+
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+
+#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
+#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
+#define __mlx5_bit_off(typ, fld) ((unsigned int)(unsigned long) \
+				  (&(__mlx5_nullp(typ)->fld)))
+#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - \
+				    (__mlx5_bit_off(typ, fld) & 0x1f))
+#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
+#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << \
+				  __mlx5_dw_bit_off(typ, fld))
+#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
+#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - \
+				    (__mlx5_bit_off(typ, fld) & 0xf))
+#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_ST_SZ_DB(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
+#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
+
+/* insert a value into a struct */
+#define MLX5_SET(typ, p, fld, v) \
+	do { \
+		u32 _v = (v); \
+		*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+		rte_cpu_to_be_32((rte_be_to_cpu_32(*((u32 *)(p) + \
+				  __mlx5_dw_off(typ, fld))) & \
+				  (~__mlx5_dw_mask(typ, fld))) | \
+				 (((_v) & __mlx5_mask(typ, fld)) << \
+				   __mlx5_dw_bit_off(typ, fld))); \
+	} while (0)
+#define MLX5_GET16(typ, p, fld) \
+	((rte_be_to_cpu_16(*((__be16 *)(p) + \
+	  __mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
+	 __mlx5_mask16(typ, fld))
+#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
+
+struct mlx5_ifc_fte_match_set_misc_bits {
+	u8 reserved_at_0[0x8];
+	u8 source_sqn[0x18];
+	u8 reserved_at_20[0x10];
+	u8 source_port[0x10];
+	u8 outer_second_prio[0x3];
+	u8 outer_second_cfi[0x1];
+	u8 outer_second_vid[0xc];
+	u8 inner_second_prio[0x3];
+	u8 inner_second_cfi[0x1];
+	u8 inner_second_vid[0xc];
+	u8 outer_second_cvlan_tag[0x1];
+	u8 inner_second_cvlan_tag[0x1];
+	u8 outer_second_svlan_tag[0x1];
+	u8 inner_second_svlan_tag[0x1];
+	u8 reserved_at_64[0xc];
+	u8 gre_protocol[0x10];
+	u8 gre_key_h[0x18];
+	u8 gre_key_l[0x8];
+	u8 vxlan_vni[0x18];
+	u8 reserved_at_b8[0x8];
+	u8 reserved_at_c0[0x20];
+	u8 reserved_at_e0[0xc];
+	u8 outer_ipv6_flow_label[0x14];
+	u8 reserved_at_100[0xc];
+	u8 inner_ipv6_flow_label[0x14];
+	u8 reserved_at_120[0xe0];
+};
+
+struct mlx5_ifc_ipv4_layout_bits {
+	u8 reserved_at_0[0x60];
+	u8 ipv4[0x20];
+};
+
+struct mlx5_ifc_ipv6_layout_bits {
+	u8 ipv6[16][0x8];
+};
+
+union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+	struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
+	struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
+	u8 reserved_at_0[0x80];
+};
+
+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
+	u8 smac_47_16[0x20];
+	u8 smac_15_0[0x10];
+	u8 ethertype[0x10];
+	u8 dmac_47_16[0x20];
+	u8 dmac_15_0[0x10];
+	u8 first_prio[0x3];
+	u8 first_cfi[0x1];
+	u8 first_vid[0xc];
+	u8 ip_protocol[0x8];
+	u8 ip_dscp[0x6];
+	u8 ip_ecn[0x2];
+	u8 cvlan_tag[0x1];
+	u8 svlan_tag[0x1];
+	u8 frag[0x1];
+	u8 ip_version[0x4];
+	u8 tcp_flags[0x9];
+	u8 tcp_sport[0x10];
+	u8 tcp_dport[0x10];
+	u8 reserved_at_c0[0x20];
+	u8 udp_sport[0x10];
+	u8 udp_dport[0x10];
+	union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
+	union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
+};
+
+struct mlx5_ifc_fte_match_mpls_bits {
+	u8 mpls_label[0x14];
+	u8 mpls_exp[0x3];
+	u8 mpls_s_bos[0x1];
+	u8 mpls_ttl[0x8];
+};
+
+struct mlx5_ifc_fte_match_set_misc2_bits {
+	struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;
+	struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;
+	struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;
+	struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
+	u8 reserved_at_80[0x100];
+	u8 metadata_reg_a[0x20];
+	u8 reserved_at_1a0[0x60];
+};
+
+/* Flow matcher. */
+struct mlx5_ifc_fte_match_param_bits {
+	struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+	struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+	struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
+	struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
+	u8 reserved_at_800[0x800];
+};
+
 /* CQE format mask. */
 #define MLX5E_CQE_FORMAT_MASK 0xc
 
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index d225b9c27..02034a4f4 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -171,6 +171,13 @@ struct mlx5_hrxq {
 	uint8_t rss_key[]; /* Hash key. */
 };
 
+/* List of cached objects. */
+struct mlx5_cache {
+	LIST_ENTRY(mlx5_cache) next; /* Pointer to the next element. */
+	rte_atomic32_t refcnt; /* Reference counter. */
+	void *resource; /* Cached resource */
+};
+
 /* TX queue descriptor. */
 __extension__
 struct mlx5_txq_data {
-- 
2.11.0



More information about the dev mailing list