[dpdk-dev] [PATCH v2 02/28] sched: update subport and pipe data structures

Dumitrescu, Cristian cristian.dumitrescu at intel.com
Mon Jul 1 20:58:55 CEST 2019



> -----Original Message-----
> From: Singh, Jasvinder
> Sent: Tuesday, June 25, 2019 4:32 PM
> To: dev at dpdk.org
> Cc: Dumitrescu, Cristian <cristian.dumitrescu at intel.com>; Tovar, AbrahamX
> <abrahamx.tovar at intel.com>; Krakowiak, LukaszX
> <lukaszx.krakowiak at intel.com>
> Subject: [PATCH v2 02/28] sched: update subport and pipe data structures
> 
> Update subport and pipe data structures to allow configuration
> flexibility for pipe traffic classes and queues, and subport-level
> configuration of the pipe parameters.
> 
> Signed-off-by: Jasvinder Singh <jasvinder.singh at intel.com>
> Signed-off-by: Abraham Tovar <abrahamx.tovar at intel.com>
> Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak at intel.com>
> ---
>  app/test/test_sched.c        |   2 +-
>  examples/qos_sched/init.c    |   2 +-
>  lib/librte_sched/rte_sched.h | 126 +++++++++++++++++++++++------------
>  3 files changed, 85 insertions(+), 45 deletions(-)
> 
> diff --git a/app/test/test_sched.c b/app/test/test_sched.c
> index 49bb9ea6f..d6651d490 100644
> --- a/app/test/test_sched.c
> +++ b/app/test/test_sched.c
> @@ -40,7 +40,7 @@ static struct rte_sched_pipe_params pipe_profile[] = {
>  		.tc_rate = {305175, 305175, 305175, 305175},
>  		.tc_period = 40,
> 
> -		.wrr_weights = {1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1},
> +		.wrr_weights = {1, 1, 1, 1,  1, 1, 1, 1},
>  	},
>  };
> 
> diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
> index 1209bd7ce..f6e9af16b 100644
> --- a/examples/qos_sched/init.c
> +++ b/examples/qos_sched/init.c
> @@ -186,7 +186,7 @@ static struct rte_sched_pipe_params pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PO
>  		.tc_ov_weight = 1,
>  #endif
> 
> -		.wrr_weights = {1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1},
> +		.wrr_weights = {1, 1, 1, 1,  1, 1, 1, 1},
>  	},
>  };
> 
> diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
> index 470a0036a..ebde07669 100644
> --- a/lib/librte_sched/rte_sched.h
> +++ b/lib/librte_sched/rte_sched.h
> @@ -114,6 +114,35 @@ extern "C" {
>  #define RTE_SCHED_FRAME_OVERHEAD_DEFAULT      24
>  #endif
> 
> +/*
> + * Pipe configuration parameters. The period and credits_per_period
> + * parameters are measured in bytes, with one byte meaning the time
> + * duration associated with the transmission of one byte on the
> + * physical medium of the output port, with pipe or pipe traffic class
> + * rate (measured as percentage of output port rate) determined as
> + * credits_per_period divided by period. One credit represents one
> + * byte.
> + */
> +struct rte_sched_pipe_params {
> +	/** Token bucket rate (measured in bytes per second) */
> +	uint32_t tb_rate;
> +	/** Token bucket size (measured in credits) */
> +	uint32_t tb_size;
> +
> +	/** Traffic class rates (measured in bytes per second) */
> +	uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
> +
> +	/** Enforcement period (measured in milliseconds) */
> +	uint32_t tc_period;
> +#ifdef RTE_SCHED_SUBPORT_TC_OV
> +	/** Best-effort traffic class oversubscription weight */
> +	uint8_t tc_ov_weight;
> +#endif

We should always enable the Best Effort traffic class oversubscription feature on the API side, at least. If the feature is disabled through the build-time option (RTE_SCHED_SUBPORT_TC_OV), the values of these parameters can simply be ignored.

We should also consider always enabling the run-time part of this feature, as oversubscription is the typical configuration used by service providers. Do you see a significant performance drop when this feature is enabled?
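
Something along these lines on the API side, just as a sketch (same fields and types as in this patch, only with the #ifdef dropped; the run-time handling can stay under RTE_SCHED_SUBPORT_TC_OV for now):

struct rte_sched_pipe_params {
	/** Token bucket rate (measured in bytes per second) */
	uint32_t tb_rate;
	/** Token bucket size (measured in credits) */
	uint32_t tb_size;

	/** Traffic class rates (measured in bytes per second) */
	uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];

	/** Enforcement period (measured in milliseconds) */
	uint32_t tc_period;

	/** Best-effort traffic class oversubscription weight.
	 * Always part of the API; the value is ignored when
	 * RTE_SCHED_SUBPORT_TC_OV is not defined at build time.
	 */
	uint8_t tc_ov_weight;

	/** WRR weights of best-effort traffic class queues */
	uint8_t wrr_weights[RTE_SCHED_BE_QUEUES_PER_PIPE];
};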

> +
> +	/** WRR weights of best-effort traffic class queues */
> +	uint8_t wrr_weights[RTE_SCHED_BE_QUEUES_PER_PIPE];
> +};
> +
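
One more nit on the comment block above: a concrete example might help readers. Reusing the numbers from the test profile quoted earlier, tc_rate = 305175 bytes/sec and tc_period = 40 ms give roughly 305175 * 40 / 1000 = 12207 credits (bytes) per enforcement period. Not a blocker, just a readability suggestion.
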
>  /*
>   * Subport configuration parameters. The period and credits_per_period
>   * parameters are measured in bytes, with one byte meaning the time
> @@ -124,15 +153,44 @@ extern "C" {
>   * byte.
>   */
>  struct rte_sched_subport_params {
> -	/* Subport token bucket */
> -	uint32_t tb_rate;                /**< Rate (measured in bytes per second) */
> -	uint32_t tb_size;                /**< Size (measured in credits) */
> +	/** Token bucket rate (measured in bytes per second) */
> +	uint32_t tb_rate;
> +
> +	/** Token bucket size (measured in credits) */
> +	uint32_t tb_size;
> 
> -	/* Subport traffic classes */
> +	/** Traffic class rates (measured in bytes per second) */
>  	uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
> -	/**< Traffic class rates (measured in bytes per second) */
> +
> +	/** Enforcement period for rates (measured in milliseconds) */
>  	uint32_t tc_period;
> -	/**< Enforcement period for rates (measured in milliseconds) */
> +
> +	/** Number of subport_pipes */
> +	uint32_t n_subport_pipes;

Minor issue: any reason for not keeping the initial name, n_pipes_per_subport? The initial name looks more intuitive to me, and I vote to keep it; it is also in line with the other naming conventions in this library.

> +
> +	/** Packet queue size for each traffic class.
> +	 * All the pipes within the same subport share the similar
> +	 * configuration for the queues. Queues which are not needed, have
> +	 * zero size.
> +	 */
> +	uint16_t qsize[RTE_SCHED_QUEUES_PER_PIPE];
> +
> +	/** Pipe profile table.
> +	 * Every pipe is configured using one of the profiles from this table.
> +	 */
> +	struct rte_sched_pipe_params *pipe_profiles;
> +
> +	/** Profiles in the pipe profile table */
> +	uint32_t n_pipe_profiles;
> +
> +	/** Max profiles allowed in the pipe profile table */
> +	uint32_t n_max_pipe_profiles;
> +#ifdef RTE_SCHED_RED
> +	/** RED parameters */
> +	struct rte_red_params
> +		red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
> +#endif
>  };
> 
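
Just to double-check my understanding of the new API: with this change, the application side would set up a subport roughly as below. The values are hypothetical and only meant to illustrate the new qsize/pipe_profiles fields; they are not taken from the patch:

static struct rte_sched_pipe_params pipe_profiles[] = {
	{
		.tb_rate = 305175,
		.tb_size = 1000000,
		.tc_rate = {305175, 305175, 305175, 305175},
		.tc_period = 40,
		.wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1},
	},
};

static struct rte_sched_subport_params subport_params = {
	.tb_rate = 1250000000,
	.tb_size = 1000000,
	.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
	.tc_period = 10,
	.n_subport_pipes = 4096,
	/* Queues that are not needed keep size 0 */
	.qsize = {64, 64, 64, 64, 64, 64, 64, 64},
	.pipe_profiles = pipe_profiles,
	.n_pipe_profiles = 1,
	.n_max_pipe_profiles = 16,
};

If this matches the intent, a snippet like this in the API doc or the qos_sched guide would be useful.
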
>  /** Subport statistics */
> @@ -155,33 +213,6 @@ struct rte_sched_subport_stats {
>  #endif
>  };
> 
> -/*
> - * Pipe configuration parameters. The period and credits_per_period
> - * parameters are measured in bytes, with one byte meaning the time
> - * duration associated with the transmission of one byte on the
> - * physical medium of the output port, with pipe or pipe traffic class
> - * rate (measured as percentage of output port rate) determined as
> - * credits_per_period divided by period. One credit represents one
> - * byte.
> - */
> -struct rte_sched_pipe_params {
> -	/* Pipe token bucket */
> -	uint32_t tb_rate;                /**< Rate (measured in bytes per second) */
> -	uint32_t tb_size;                /**< Size (measured in credits) */
> -
> -	/* Pipe traffic classes */
> -	uint32_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
> -	/**< Traffic class rates (measured in bytes per second) */
> -	uint32_t tc_period;
> -	/**< Enforcement period (measured in milliseconds) */
> -#ifdef RTE_SCHED_SUBPORT_TC_OV
> -	uint8_t tc_ov_weight;		 /**< Weight Traffic class 3 oversubscription */
> -#endif
> -
> -	/* Pipe queues */
> -	uint8_t  wrr_weights[RTE_SCHED_QUEUES_PER_PIPE]; /**< WRR weights */
> -};
> -
>  /** Queue statistics */
>  struct rte_sched_queue_stats {
>  	/* Packets */
> @@ -198,16 +229,25 @@ struct rte_sched_queue_stats {
> 
>  /** Port configuration parameters. */
>  struct rte_sched_port_params {
> -	const char *name;                /**< String to be associated */
> -	int socket;                      /**< CPU socket ID */
> -	uint32_t rate;                   /**< Output port rate
> -					  * (measured in bytes per second) */
> -	uint32_t mtu;                    /**< Maximum Ethernet frame size
> -					  * (measured in bytes).
> -					  * Should not include the framing overhead. */
> -	uint32_t frame_overhead;         /**< Framing overhead per packet
> -					  * (measured in bytes) */
> -	uint32_t n_subports_per_port;    /**< Number of subports */
> +	/** Name of the port to be associated */
> +	const char *name;
> +
> +	/** CPU socket ID */
> +	int socket;
> +
> +	/** Output port rate (measured in bytes per second) */
> +	uint32_t rate;
> +
> +	/** Maximum Ethernet frame size (measured in bytes).
> +	 * Should not include the framing overhead.
> +	 */
> +	uint32_t mtu;
> +
> +	/** Framing overhead per packet (measured in bytes) */
> +	uint32_t frame_overhead;
> +
> +	/** Number of subports */
> +	uint32_t n_subports_per_port;
>  	uint32_t n_pipes_per_subport;    /**< Number of pipes per subport */
>  	uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
>  	/**< Packet queue size for each traffic class.
> --
> 2.21.0


