[dpdk-dev] [PATCH 06/27] sched: update subport config api

Lukasz Krakowiak lukaszx.krakowiak at intel.com
Tue May 28 14:05:32 CEST 2019


From: Jasvinder Singh <jasvinder.singh at intel.com>

Update the subport configuration API implementation of the scheduler to
allow configuration flexibility for pipe traffic classes and queues, and
subport-level configuration of the pipe parameters.

Signed-off-by: Jasvinder Singh <jasvinder.singh at intel.com>
Signed-off-by: Abraham Tovar <abrahamx.tovar at intel.com>
Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak at intel.com>
---
 lib/librte_sched/rte_sched.c | 325 ++++++++++++++++++++++++++++++-----
 1 file changed, 283 insertions(+), 42 deletions(-)

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 39a6165e3..020c028fd 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -495,24 +495,72 @@ rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sch
 	return base;
 }
 
-uint32_t
-rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
+static uint32_t
+rte_sched_subport_get_array_base(struct rte_sched_subport_params *params,
+	enum rte_sched_subport_array array)
 {
-	uint32_t size0, size1;
-	int status;
+	uint32_t n_subport_pipes = params->n_subport_pipes;
+	uint32_t n_subport_queues = RTE_SCHED_QUEUES_PER_PIPE * n_subport_pipes;
 
-	status = rte_sched_port_check_params(params);
-	if (status != 0) {
-		RTE_LOG(NOTICE, SCHED,
-			"Port scheduler params check failed (%d)\n", status);
+	uint32_t size_pipe = n_subport_pipes * sizeof(struct rte_sched_pipe);
+	uint32_t size_queue = n_subport_queues * sizeof(struct rte_sched_queue);
+	uint32_t size_queue_extra
+		= n_subport_queues * sizeof(struct rte_sched_queue_extra);
+	uint32_t size_pipe_profiles = RTE_SCHED_PIPE_PROFILES_PER_SUBPORT *
+		sizeof(struct rte_sched_pipe_profile);
+	uint32_t size_bmp_array =
+		rte_bitmap_get_memory_footprint(n_subport_queues);
+	uint32_t size_per_pipe_queue_array, size_queue_array;
 
-		return 0;
+	uint32_t base, i;
+
+	size_per_pipe_queue_array = 0;
+	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
+		size_per_pipe_queue_array += params->qsize[i] * sizeof(struct rte_mbuf *);
 	}
+	size_queue_array = n_subport_pipes * size_per_pipe_queue_array;
 
-	size0 = sizeof(struct rte_sched_port);
-	size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);
+	base = 0;
 
-	return size0 + size1;
+	if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE)
+		return base;
+	base += RTE_CACHE_LINE_ROUNDUP(size_pipe);
+
+	if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE)
+		return base;
+	base += RTE_CACHE_LINE_ROUNDUP(size_queue);
+
+	if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA)
+		return base;
+	base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);
+
+	if (array == e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES)
+		return base;
+	base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);
+
+	if (array == e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY)
+		return base;
+	base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);
+
+	if (array == e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY)
+		return base;
+	base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);
+
+	return base;
+}
+
+static void
+rte_sched_subport_config_qsize(struct rte_sched_subport *subport)
+{
+	uint32_t i;
+
+	subport->qsize_add[0] = 0; /* first queue starts at offset 0 */
+
+	for (i = 1; i < RTE_SCHED_QUEUES_PER_PIPE; i++) /* prefix sums of qsize */
+		subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1];
+
+	subport->qsize_sum = subport->qsize_add[RTE_SCHED_QUEUES_PER_PIPE - 1] +
+		subport->qsize[RTE_SCHED_QUEUES_PER_PIPE - 1];
 }
 
 static void
@@ -621,6 +669,120 @@ rte_sched_pipe_profile_convert(struct rte_sched_pipe_params *src,
 	}
 }
 
+static int
+rte_sched_subport_check_params(struct rte_sched_subport_params *params,
+	uint32_t rate)
+{
+	uint32_t i, j;
+
+	/* Check user parameters */
+	if (params == NULL)
+		return -1;
+
+	if (params->tb_rate == 0 || params->tb_rate > rate)
+		return -2;
+
+	if (params->tb_size == 0)
+		return -3;
+
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+		if (params->tc_rate[i] > params->tb_rate) {
+			RTE_LOG(ERR, SCHED, "traffic class %u, tc_rate %u, tb_rate %u\n",
+				i, params->tc_rate[i], params->tb_rate);
+			return -4;
+		}
+	if (params->tc_period == 0)
+		return -6;
+
+	/* n_subport_pipes: non-zero, power of 2 */
+	if (params->n_subport_pipes == 0 ||
+	    !rte_is_power_of_2(params->n_subport_pipes))
+		return -7;
+
+	/* qsize: power of 2, if non-zero
+	 * no bigger than 32K (due to 16-bit read/write pointers)
+	 */
+	for (i = 0, j = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
+		uint32_t tc_rate = params->tc_rate[j];
+		uint16_t qsize = params->qsize[i];
+
+		if (((qsize == 0) &&
+			((tc_rate != 0) &&
+			(j != RTE_SCHED_TRAFFIC_CLASS_BE))) ||
+			((qsize != 0) && !rte_is_power_of_2(qsize)))
+			return -8;
+
+		if (j < RTE_SCHED_TRAFFIC_CLASS_BE)
+			j++;
+	}
+
+	/* Active best-effort (WRR) queues: 1, 2, 4 or 8 allowed */
+	uint32_t wrr_queues = 0;
+	for (i = 0; i < RTE_SCHED_WRR_QUEUES_PER_PIPE; i++) {
+		if (params->qsize[RTE_SCHED_TRAFFIC_CLASS_BE + i])
+			wrr_queues++;
+	}
+	if (params->tc_rate[RTE_SCHED_TRAFFIC_CLASS_BE] &&
+		(wrr_queues != 1 && wrr_queues != 2 &&
+		wrr_queues != 4 && wrr_queues != 8))
+		return -9;
+
+	/* pipe_profiles and n_pipe_profiles */
+	if (params->pipe_profiles == NULL ||
+	    params->n_pipe_profiles == 0 ||
+	    params->n_pipe_profiles > RTE_SCHED_PIPE_PROFILES_PER_SUBPORT)
+		return -10;
+
+	return 0;
+}
+
+static uint32_t
+rte_sched_subport_get_memory_footprint(struct rte_sched_port *port,
+	uint32_t subport_id, struct rte_sched_subport_params *params)
+{
+	uint32_t size0, size1;
+	int status;
+
+	if (port == NULL ||
+	    subport_id >= port->n_subports_per_port)
+		return 0;
+
+	status = rte_sched_subport_check_params(params, port->rate);
+	if (status != 0) {
+		RTE_LOG(NOTICE, SCHED,
+			"Subport scheduler params check failed (%d)\n", status);
+
+		return 0;
+	}
+
+	size0 = sizeof(struct rte_sched_subport); /* fixed-size subport struct */
+	size1 = rte_sched_subport_get_array_base(params,
+			e_RTE_SCHED_SUBPORT_ARRAY_TOTAL); /* trailing arrays */
+
+	return size0 + size1;
+}
+
+uint32_t
+rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
+{
+	uint32_t size0, size1;
+	int status;
+
+	status = rte_sched_port_check_params(params); /* validate before sizing */
+	if (status != 0) {
+		RTE_LOG(NOTICE, SCHED,
+			"Port scheduler params check failed (%d)\n", status);
+
+		return 0;
+	}
+
+	size0 = sizeof(struct rte_sched_port); /* fixed-size port struct */
+	size1 = rte_sched_port_get_array_base(params,
+			e_RTE_SCHED_PORT_ARRAY_TOTAL); /* trailing per-port arrays */
+
+	return size0 + size1;
+}
+
 struct rte_sched_port *
 rte_sched_port_config(struct rte_sched_port_params *params)
 {
@@ -710,12 +872,12 @@ rte_sched_port_free(struct rte_sched_port *port)
 static void
 rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
 {
-	struct rte_sched_subport *s = port->subport + i;
+	struct rte_sched_subport *s = port->subports[i];
 
 	RTE_LOG(DEBUG, SCHED, "Low level config for subport %u:\n"
 		"    Token bucket: period = %u, credits per period = %u, size = %u\n"
-		"    Traffic classes: period = %u, credits per period = [%u, %u, %u, %u]\n"
-		"    Traffic class 3 oversubscription: wm min = %u, wm max = %u\n",
+		"    Traffic classes: period = %u, credits per period = [%u, %u, %u, %u, %u, %u, %u, %u, %u]\n"
+		"    Traffic class BE oversubscription: wm min = %u, wm max = %u\n",
 		i,
 
 		/* Token bucket */
@@ -729,8 +891,13 @@ rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
 		s->tc_credits_per_period[1],
 		s->tc_credits_per_period[2],
 		s->tc_credits_per_period[3],
+		s->tc_credits_per_period[4],
+		s->tc_credits_per_period[5],
+		s->tc_credits_per_period[6],
+		s->tc_credits_per_period[7],
+		s->tc_credits_per_period[8],
 
-		/* Traffic class 3 oversubscription */
+		/* Traffic class BE oversubscription */
 		s->tc_ov_wm_min,
 		s->tc_ov_wm_max);
 }
@@ -740,32 +907,21 @@ rte_sched_subport_config(struct rte_sched_port *port,
 	uint32_t subport_id,
 	struct rte_sched_subport_params *params)
 {
-	struct rte_sched_subport *s;
-	uint32_t i;
+	struct rte_sched_subport *s = NULL;
+	uint32_t mem_size, bmp_mem_size, n_subport_queues, n_subport_pipes_log2, i;
 
-	/* Check user parameters */
-	if (port == NULL ||
-	    subport_id >= port->n_subports_per_port ||
-	    params == NULL)
+	/* Check user parameters. Determine the amount of memory to allocate */
+	mem_size = rte_sched_subport_get_memory_footprint(port,
+		subport_id, params);
+	if (mem_size == 0)
 		return -1;
 
-	if (params->tb_rate == 0 || params->tb_rate > port->rate)
+	/* Allocate memory to store the data structures */
+	s = rte_zmalloc_socket("subport_params", mem_size, RTE_CACHE_LINE_SIZE,
+		port->socket);
+	if (s == NULL)
 		return -2;
 
-	if (params->tb_size == 0)
-		return -3;
-
-	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
-		if (params->tc_rate[i] == 0 ||
-		    params->tc_rate[i] > params->tb_rate)
-			return -4;
-	}
-
-	if (params->tc_period == 0)
-		return -5;
-
-	s = port->subport + subport_id;
-
 	/* Token Bucket (TB) */
 	if (params->tb_rate == port->rate) {
 		s->tb_credits_per_period = 1;
@@ -784,19 +940,104 @@ rte_sched_subport_config(struct rte_sched_port *port,
 	/* Traffic Classes (TCs) */
 	s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
-		s->tc_credits_per_period[i]
-			= rte_sched_time_ms_to_bytes(params->tc_period,
-						     params->tc_rate[i]);
+		if (params->qsize[i])
+			s->tc_credits_per_period[i]
+				= rte_sched_time_ms_to_bytes(params->tc_period,
+					params->tc_rate[i]);
 	}
 	s->tc_time = port->time + s->tc_period;
 	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
-		s->tc_credits[i] = s->tc_credits_per_period[i];
+		if (params->qsize[i])
+			s->tc_credits[i] = s->tc_credits_per_period[i];
+
+	/* compile time checks */
+	RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS == 0);
+	RTE_BUILD_BUG_ON(RTE_SCHED_PORT_N_GRINDERS &
+		(RTE_SCHED_PORT_N_GRINDERS - 1));
+
+	/* User parameters */
+	s->n_subport_pipes = params->n_subport_pipes;
+	n_subport_pipes_log2 = __builtin_ctz(params->n_subport_pipes);
+	memcpy(s->qsize, params->qsize, sizeof(params->qsize));
+	s->n_pipe_profiles = params->n_pipe_profiles;
+
+#ifdef RTE_SCHED_RED
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+		uint32_t j;
+
+		for (j = 0; j < RTE_COLORS; j++) {
+			/* if min/max are both zero, then RED is disabled */
+			if ((params->red_params[i][j].min_th |
+			     params->red_params[i][j].max_th) == 0) {
+				continue;
+			}
+
+			if (rte_red_config_init(&s->red_config[i][j],
+				params->red_params[i][j].wq_log2,
+				params->red_params[i][j].min_th,
+				params->red_params[i][j].max_th,
+				params->red_params[i][j].maxp_inv) != 0) {
+				rte_free(s);
+				return -3;
+			}
+		}
+	}
+#endif
+
+	/* Scheduling loop detection */
+	s->pipe_loop = RTE_SCHED_PIPE_INVALID;
+	s->pipe_exhaustion = 0;
+
+	/* Grinders */
+	s->busy_grinders = 0;
+
+	/* Queue base calculation */
+	rte_sched_subport_config_qsize(s);
+
+	/* Large data structures */
+	s->pipe = (struct rte_sched_pipe *)
+		(s->memory + rte_sched_subport_get_array_base(params,
+						e_RTE_SCHED_SUBPORT_ARRAY_PIPE));
+	s->queue = (struct rte_sched_queue *)
+		(s->memory + rte_sched_subport_get_array_base(params,
+						e_RTE_SCHED_SUBPORT_ARRAY_QUEUE));
+	s->queue_extra = (struct rte_sched_queue_extra *)
+		(s->memory + rte_sched_subport_get_array_base(params,
+						e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_EXTRA));
+	s->pipe_profiles = (struct rte_sched_pipe_profile *)
+		(s->memory + rte_sched_subport_get_array_base(params,
+						e_RTE_SCHED_SUBPORT_ARRAY_PIPE_PROFILES));
+	s->bmp_array =  s->memory + rte_sched_subport_get_array_base(params,
+						e_RTE_SCHED_SUBPORT_ARRAY_BMP_ARRAY);
+	s->queue_array = (struct rte_mbuf **)
+		(s->memory + rte_sched_subport_get_array_base(params,
+						e_RTE_SCHED_SUBPORT_ARRAY_QUEUE_ARRAY));
+
+	/* Bitmap */
+	n_subport_queues = rte_sched_subport_queues(s);
+	bmp_mem_size = rte_bitmap_get_memory_footprint(n_subport_queues);
+	s->bmp = rte_bitmap_init(n_subport_queues, s->bmp_array,
+				bmp_mem_size);
+	if (s->bmp == NULL) {
+		RTE_LOG(ERR, SCHED, "Subport bitmap init error\n");
+		rte_free(s); /* free the subport allocated above, not the port */
+		return -4;
+	}
+
+	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
+		s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
+
+	/* Port */
+	port->subports[subport_id] = s;
+
+	if (n_subport_pipes_log2 > port->n_max_subport_pipes_log2)
+		port->n_max_subport_pipes_log2 = n_subport_pipes_log2;
 
 #ifdef RTE_SCHED_SUBPORT_TC_OV
 	/* TC oversubscription */
 	s->tc_ov_wm_min = port->mtu;
 	s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params->tc_period,
-						     port->pipe_tc3_rate_max);
+						     s->pipe_tc_be_rate_max);
 	s->tc_ov_wm = s->tc_ov_wm_max;
 	s->tc_ov_period_id = 0;
 	s->tc_ov = 0;
-- 
2.20.1



More information about the dev mailing list