[dpdk-dev] [PATCH] mbuf: implement generic format for sched field

Jasvinder Singh jasvinder.singh at intel.com
Fri Nov 23 17:54:23 CET 2018


This patch implements the changes proposed in the deprecation
notes [1][2].

The opaque mbuf->hash.sched field is updated to a generic definition
in line with the ethdev TM and MTR APIs. The new generic format
contains: queue ID, traffic class and color.

In addition, the following API functions of the sched library have
been modified to take an additional parameter of type struct
rte_sched_port * in order to accommodate the changes made to the
mbuf sched field:
(i)  rte_sched_port_pkt_write()
(ii) rte_sched_port_pkt_read_tree_path()

The other libraries, sample applications and tests that use the mbuf
sched field have been updated as well.

[1] http://mails.dpdk.org/archives/dev/2018-February/090651.html
[2] https://mails.dpdk.org/archives/dev/2018-November/119051.html
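
For illustration, the classification and reader stages would use the
updated API roughly as below (a minimal sketch; port, pkt and the
subport/pipe/tc/queue/color variables are placeholders, not code from
this patch):

    /* classifier: encode the hierarchy path into mbuf->hash.sched */
    rte_sched_port_pkt_write(port, pkt, subport, pipe, tc, queue,
                             e_RTE_METER_GREEN);

    /* reader (e.g. stats collection): decode the path and color back */
    rte_sched_port_pkt_read_tree_path(port, pkt,
                                      &subport, &pipe, &tc, &queue);
    color = rte_sched_port_pkt_read_color(pkt);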

Signed-off-by: Jasvinder Singh <jasvinder.singh at intel.com>
Signed-off-by: Reshma Pattan <reshma.pattan at intel.com>
---
 examples/qos_sched/app_thread.c               |  6 +-
 examples/qos_sched/main.c                     |  1 +
 .../rte_event_eth_tx_adapter.h                |  4 +-
 lib/librte_mbuf/Makefile                      |  2 +-
 lib/librte_mbuf/rte_mbuf.h                    | 10 +--
 lib/librte_pipeline/rte_table_action.c        | 60 ++++++++-----
 lib/librte_sched/Makefile                     |  2 +-
 lib/librte_sched/rte_sched.c                  | 89 +++++++------------
 lib/librte_sched/rte_sched.h                  | 10 ++-
 test/test/test_sched.c                        |  8 +-
 10 files changed, 92 insertions(+), 100 deletions(-)

diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index a59274236..92da85039 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -73,8 +73,10 @@ app_rx_thread(struct thread_conf **confs)
 			for(i = 0; i < nb_rx; i++) {
 				get_pkt_sched(rx_mbufs[i],
 						&subport, &pipe, &traffic_class, &queue, &color);
-				rte_sched_port_pkt_write(rx_mbufs[i], subport, pipe,
-						traffic_class, queue, (enum rte_meter_color) color);
+				rte_sched_port_pkt_write(conf->sched_port, rx_mbufs[i],
+							subport, pipe,
+							traffic_class, queue,
+							(enum rte_meter_color) color);
 			}
 
 			if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
diff --git a/examples/qos_sched/main.c b/examples/qos_sched/main.c
index e7c97bd64..c0ed16b68 100644
--- a/examples/qos_sched/main.c
+++ b/examples/qos_sched/main.c
@@ -55,6 +55,7 @@ app_main_loop(__attribute__((unused))void *dummy)
 			flow->rx_thread.rx_port = flow->rx_port;
 			flow->rx_thread.rx_ring =  flow->rx_ring;
 			flow->rx_thread.rx_queue = flow->rx_queue;
+			flow->rx_thread.sched_port = flow->sched_port;
 
 			rx_confs[rx_idx++] = &flow->rx_thread;
 
diff --git a/lib/librte_eventdev/rte_event_eth_tx_adapter.h b/lib/librte_eventdev/rte_event_eth_tx_adapter.h
index 81456d4a9..8d4946ede 100644
--- a/lib/librte_eventdev/rte_event_eth_tx_adapter.h
+++ b/lib/librte_eventdev/rte_event_eth_tx_adapter.h
@@ -300,7 +300,7 @@ rte_event_eth_tx_adapter_queue_del(uint8_t id,
 static __rte_always_inline void __rte_experimental
 rte_event_eth_tx_adapter_txq_set(struct rte_mbuf *pkt, uint16_t queue)
 {
-	uint16_t *p = (uint16_t *)&pkt->hash.sched.hi;
+	uint16_t *p = (uint16_t *)&pkt->hash.sched.queue_id;
 	p[1] = queue;
 }
 
@@ -320,7 +320,7 @@ rte_event_eth_tx_adapter_txq_set(struct rte_mbuf *pkt, uint16_t queue)
 static __rte_always_inline uint16_t __rte_experimental
 rte_event_eth_tx_adapter_txq_get(struct rte_mbuf *pkt)
 {
-	uint16_t *p = (uint16_t *)&pkt->hash.sched.hi;
+	uint16_t *p = (uint16_t *)&pkt->hash.sched.queue_id;
 	return p[1];
 }
 
diff --git a/lib/librte_mbuf/Makefile b/lib/librte_mbuf/Makefile
index e2b98a254..8c4c7d790 100644
--- a/lib/librte_mbuf/Makefile
+++ b/lib/librte_mbuf/Makefile
@@ -11,7 +11,7 @@ LDLIBS += -lrte_eal -lrte_mempool
 
 EXPORT_MAP := rte_mbuf_version.map
 
-LIBABIVER := 4
+LIBABIVER := 5
 
 # all source are stored in SRCS-y
 SRCS-$(CONFIG_RTE_LIBRTE_MBUF) := rte_mbuf.c rte_mbuf_ptype.c rte_mbuf_pool_ops.c
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 3dbc6695e..98428bd21 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -575,12 +575,10 @@ struct rte_mbuf {
 				 */
 			} fdir;	/**< Filter identifier if FDIR enabled */
 			struct {
-				uint32_t lo;
-				uint32_t hi;
-				/**< The event eth Tx adapter uses this field
-				 * to store Tx queue id.
-				 * @see rte_event_eth_tx_adapter_txq_set()
-				 */
+				uint32_t queue_id;   /**< Queue ID. */
+				uint8_t traffic_class;   /**< Traffic class ID. */
+				uint8_t color;   /**< Color. */
+				uint16_t reserved;   /**< Reserved. */
 			} sched;          /**< Hierarchical scheduler */
 			/**< User defined tags. See rte_distributor_process() */
 			uint32_t usr;
diff --git a/lib/librte_pipeline/rte_table_action.c b/lib/librte_pipeline/rte_table_action.c
index 7c7c8dd82..99f2d779b 100644
--- a/lib/librte_pipeline/rte_table_action.c
+++ b/lib/librte_pipeline/rte_table_action.c
@@ -108,12 +108,12 @@ mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
 }
 
 #define MBUF_SCHED_QUEUE_TC_COLOR(queue, tc, color)        \
-	((uint16_t)((((uint64_t)(queue)) & 0x3) |          \
-	((((uint64_t)(tc)) & 0x3) << 2) |                  \
-	((((uint64_t)(color)) & 0x3) << 4)))
+	((uint64_t)((((uint64_t)(queue)) & 0xffffffff) |          \
+	((((uint64_t)(tc)) & 0xff) << 32) |                  \
+	((((uint64_t)(color)) & 0xff) << 40)))
 
 #define MBUF_SCHED_COLOR(sched, color)                     \
-	(((sched) & (~0x30LLU)) | ((color) << 4))
+	((((uint64_t)(sched)) & (~0xff0000000000LLU)) | (((uint64_t)(color)) << 40))
 
 struct mtr_trtcm_data {
 	struct rte_meter_trtcm trtcm;
@@ -176,7 +176,7 @@ mtr_data_size(struct rte_table_action_mtr_config *mtr)
 struct dscp_table_entry_data {
 	enum rte_meter_color color;
 	uint16_t tc;
-	uint16_t queue_tc_color;
+	uint32_t queue;
 };
 
 struct dscp_table_data {
@@ -368,7 +368,6 @@ tm_cfg_check(struct rte_table_action_tm_config *tm)
 }
 
 struct tm_data {
-	uint16_t queue_tc_color;
 	uint16_t subport;
 	uint32_t pipe;
 } __attribute__((__packed__));
@@ -397,26 +396,40 @@ tm_apply(struct tm_data *data,
 		return status;
 
 	/* Apply */
-	data->queue_tc_color = 0;
 	data->subport = (uint16_t) p->subport_id;
 	data->pipe = p->pipe_id;
 
 	return 0;
 }
 
+static uint32_t
+tm_sched_qindex(struct tm_data *data,
+	struct dscp_table_entry_data *dscp,
+	struct rte_table_action_tm_config *cfg)
+{
+	uint32_t result;
+
+	result = data->subport * cfg->n_pipes_per_subport + data->pipe;
+	result = result * RTE_TABLE_ACTION_TC_MAX + dscp->tc;
+	result = result * RTE_TABLE_ACTION_TC_QUEUE_MAX + dscp->queue;
+
+	return result;
+}
+
 static __rte_always_inline void
 pkt_work_tm(struct rte_mbuf *mbuf,
 	struct tm_data *data,
 	struct dscp_table_data *dscp_table,
-	uint32_t dscp)
+	uint32_t dscp,
+	struct rte_table_action_tm_config *cfg)
 {
 	struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
-	struct tm_data *sched_ptr = (struct tm_data *) &mbuf->hash.sched;
-	struct tm_data sched;
+	uint64_t *sched_ptr = (uint64_t *) &mbuf->hash.sched;
+	uint32_t queue = tm_sched_qindex(data, dscp_entry, cfg);
 
-	sched = *data;
-	sched.queue_tc_color = dscp_entry->queue_tc_color;
-	*sched_ptr = sched;
+	*sched_ptr = MBUF_SCHED_QUEUE_TC_COLOR(queue,
+			dscp_entry->tc,
+			dscp_entry->color);
 }
 
 /**
@@ -2580,17 +2593,13 @@ rte_table_action_dscp_table_update(struct rte_table_action *action,
 			&action->dscp_table.entry[i];
 		struct rte_table_action_dscp_table_entry *entry =
 			&table->entry[i];
-		uint16_t queue_tc_color =
-			MBUF_SCHED_QUEUE_TC_COLOR(entry->tc_queue_id,
-				entry->tc_id,
-				entry->color);
 
 		if ((dscp_mask & (1LLU << i)) == 0)
 			continue;
 
 		data->color = entry->color;
 		data->tc = entry->tc_id;
-		data->queue_tc_color = queue_tc_color;
+		data->queue = entry->tc_queue_id;
 	}
 
 	return 0;
@@ -2882,7 +2891,8 @@ pkt_work(struct rte_mbuf *mbuf,
 		pkt_work_tm(mbuf,
 			data,
 			&action->dscp_table,
-			dscp);
+			dscp,
+			&cfg->tm);
 	}
 
 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
@@ -3108,22 +3118,26 @@ pkt4_work(struct rte_mbuf **mbufs,
 		pkt_work_tm(mbuf0,
 			data0,
 			&action->dscp_table,
-			dscp0);
+			dscp0,
+			&cfg->tm);
 
 		pkt_work_tm(mbuf1,
 			data1,
 			&action->dscp_table,
-			dscp1);
+			dscp1,
+			&cfg->tm);
 
 		pkt_work_tm(mbuf2,
 			data2,
 			&action->dscp_table,
-			dscp2);
+			dscp2,
+			&cfg->tm);
 
 		pkt_work_tm(mbuf3,
 			data3,
 			&action->dscp_table,
-			dscp3);
+			dscp3,
+			&cfg->tm);
 	}
 
 	if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
diff --git a/lib/librte_sched/Makefile b/lib/librte_sched/Makefile
index 46c53ed71..644fd9d15 100644
--- a/lib/librte_sched/Makefile
+++ b/lib/librte_sched/Makefile
@@ -18,7 +18,7 @@ LDLIBS += -lrte_timer
 
 EXPORT_MAP := rte_sched_version.map
 
-LIBABIVER := 1
+LIBABIVER := 2
 
 #
 # all source are stored in SRCS-y
diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 587d5e602..7bf4d6400 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -128,22 +128,6 @@ enum grinder_state {
 	e_GRINDER_READ_MBUF
 };
 
-/*
- * Path through the scheduler hierarchy used by the scheduler enqueue
- * operation to identify the destination queue for the current
- * packet. Stored in the field pkt.hash.sched of struct rte_mbuf of
- * each packet, typically written by the classification stage and read
- * by scheduler enqueue.
- */
-struct rte_sched_port_hierarchy {
-	uint16_t queue:2;                /**< Queue ID (0 .. 3) */
-	uint16_t traffic_class:2;        /**< Traffic class ID (0 .. 3)*/
-	uint32_t color:2;                /**< Color */
-	uint16_t unused:10;
-	uint16_t subport;                /**< Subport ID */
-	uint32_t pipe;		         /**< Pipe ID */
-};
-
 struct rte_sched_grinder {
 	/* Pipe cache */
 	uint16_t pcache_qmask[RTE_SCHED_GRINDER_PCACHE_SIZE];
@@ -241,16 +225,12 @@ enum rte_sched_port_array {
 	e_RTE_SCHED_PORT_ARRAY_TOTAL,
 };
 
-#ifdef RTE_SCHED_COLLECT_STATS
-
 static inline uint32_t
 rte_sched_port_queues_per_subport(struct rte_sched_port *port)
 {
 	return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport;
 }
 
-#endif
-
 static inline uint32_t
 rte_sched_port_queues_per_port(struct rte_sched_port *port)
 {
@@ -1006,44 +986,50 @@ rte_sched_port_pipe_profile_add(struct rte_sched_port *port,
 	return 0;
 }
 
+static inline uint32_t
+rte_sched_port_qindex(struct rte_sched_port *port,
+	uint32_t subport,
+	uint32_t pipe,
+	uint32_t traffic_class,
+	uint32_t queue)
+{
+	uint32_t result;
+
+	result = subport * port->n_pipes_per_subport + pipe;
+	result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + traffic_class;
+	result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
+
+	return result;
+}
+
 void
-rte_sched_port_pkt_write(struct rte_mbuf *pkt,
+rte_sched_port_pkt_write(struct rte_sched_port *port,
+			 struct rte_mbuf *pkt,
 			 uint32_t subport, uint32_t pipe, uint32_t traffic_class,
 			 uint32_t queue, enum rte_meter_color color)
 {
-	struct rte_sched_port_hierarchy *sched
-		= (struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
-	RTE_BUILD_BUG_ON(sizeof(*sched) > sizeof(pkt->hash.sched));
-
-	sched->color = (uint32_t) color;
-	sched->subport = subport;
-	sched->pipe = pipe;
-	sched->traffic_class = traffic_class;
-	sched->queue = queue;
+	pkt->hash.sched.traffic_class = traffic_class;
+	pkt->hash.sched.queue_id = rte_sched_port_qindex(port, subport, pipe,
+			traffic_class, queue);
+	pkt->hash.sched.color = (uint8_t) color;
 }
 
 void
-rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
+rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
+				  const struct rte_mbuf *pkt,
 				  uint32_t *subport, uint32_t *pipe,
 				  uint32_t *traffic_class, uint32_t *queue)
 {
-	const struct rte_sched_port_hierarchy *sched
-		= (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
-	*subport = sched->subport;
-	*pipe = sched->pipe;
-	*traffic_class = sched->traffic_class;
-	*queue = sched->queue;
+	*subport = pkt->hash.sched.queue_id / rte_sched_port_queues_per_subport(port);
+	*pipe = (pkt->hash.sched.queue_id / RTE_SCHED_QUEUES_PER_PIPE) % port->n_pipes_per_subport;
+	*traffic_class = pkt->hash.sched.traffic_class;
+	*queue = pkt->hash.sched.queue_id % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
 }
 
 enum rte_meter_color
 rte_sched_port_pkt_read_color(const struct rte_mbuf *pkt)
 {
-	const struct rte_sched_port_hierarchy *sched
-		= (const struct rte_sched_port_hierarchy *) &pkt->hash.sched;
-
-	return (enum rte_meter_color) sched->color;
+	return (enum rte_meter_color) pkt->hash.sched.color;
 }
 
 int
@@ -1100,18 +1086,6 @@ rte_sched_queue_read_stats(struct rte_sched_port *port,
 	return 0;
 }
 
-static inline uint32_t
-rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue)
-{
-	uint32_t result;
-
-	result = subport * port->n_pipes_per_subport + pipe;
-	result = result * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + traffic_class;
-	result = result * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue;
-
-	return result;
-}
-
 #ifdef RTE_SCHED_DEBUG
 
 static inline int
@@ -1272,11 +1246,8 @@ rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_port *port,
 #ifdef RTE_SCHED_COLLECT_STATS
 	struct rte_sched_queue_extra *qe;
 #endif
-	uint32_t subport, pipe, traffic_class, queue, qindex;
-
-	rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe, &traffic_class, &queue);
+	uint32_t qindex = pkt->hash.sched.queue_id;
 
-	qindex = rte_sched_port_qindex(port, subport, pipe, traffic_class, queue);
 	q = port->queue + qindex;
 	rte_prefetch0(q);
 #ifdef RTE_SCHED_COLLECT_STATS
diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
index 84fa896de..4d9f869eb 100644
--- a/lib/librte_sched/rte_sched.h
+++ b/lib/librte_sched/rte_sched.h
@@ -355,6 +355,8 @@ rte_sched_queue_read_stats(struct rte_sched_port *port,
  * Scheduler hierarchy path write to packet descriptor. Typically
  * called by the packet classification stage.
  *
+ * @param port
+ *   Handle to port scheduler instance
  * @param pkt
  *   Packet descriptor handle
  * @param subport
@@ -369,7 +371,8 @@ rte_sched_queue_read_stats(struct rte_sched_port *port,
  *   Packet color set
  */
 void
-rte_sched_port_pkt_write(struct rte_mbuf *pkt,
+rte_sched_port_pkt_write(struct rte_sched_port *port,
+			 struct rte_mbuf *pkt,
 			 uint32_t subport, uint32_t pipe, uint32_t traffic_class,
 			 uint32_t queue, enum rte_meter_color color);
 
@@ -379,6 +382,8 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
  * enqueue operation. The subport, pipe, traffic class and queue
  * parameters need to be pre-allocated by the caller.
  *
+ * @param port
+ *   Handle to port scheduler instance
  * @param pkt
  *   Packet descriptor handle
  * @param subport
@@ -392,7 +397,8 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
  *
  */
 void
-rte_sched_port_pkt_read_tree_path(const struct rte_mbuf *pkt,
+rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
+				  const struct rte_mbuf *pkt,
 				  uint32_t *subport, uint32_t *pipe,
 				  uint32_t *traffic_class, uint32_t *queue);
 
diff --git a/test/test/test_sched.c b/test/test/test_sched.c
index 32e500ba9..55f37c1e2 100644
--- a/test/test/test_sched.c
+++ b/test/test/test_sched.c
@@ -76,7 +76,7 @@ create_mempool(void)
 }
 
 static void
-prepare_pkt(struct rte_mbuf *mbuf)
+prepare_pkt(struct rte_sched_port *port, struct rte_mbuf *mbuf)
 {
 	struct ether_hdr *eth_hdr;
 	struct vlan_hdr *vlan1, *vlan2;
@@ -95,7 +95,7 @@ prepare_pkt(struct rte_mbuf *mbuf)
 	ip_hdr->dst_addr = IPv4(0,0,TC,QUEUE);
 
 
-	rte_sched_port_pkt_write(mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);
+	rte_sched_port_pkt_write(port, mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);
 
 	/* 64 byte packet */
 	mbuf->pkt_len  = 60;
@@ -138,7 +138,7 @@ test_sched(void)
 	for (i = 0; i < 10; i++) {
 		in_mbufs[i] = rte_pktmbuf_alloc(mp);
 		TEST_ASSERT_NOT_NULL(in_mbufs[i], "Packet allocation failed\n");
-		prepare_pkt(in_mbufs[i]);
+		prepare_pkt(port, in_mbufs[i]);
 	}
 
 
@@ -155,7 +155,7 @@ test_sched(void)
 		color = rte_sched_port_pkt_read_color(out_mbufs[i]);
 		TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");
 
-		rte_sched_port_pkt_read_tree_path(out_mbufs[i],
+		rte_sched_port_pkt_read_tree_path(port, out_mbufs[i],
 				&subport, &pipe, &traffic_class, &queue);
 
 		TEST_ASSERT_EQUAL(subport, SUBPORT, "Wrong subport\n");
-- 
2.17.1
