[dpdk-dev] [PATCH v3] examples/ipsec-secgw: add per core packet stats

Ananyev, Konstantin konstantin.ananyev at intel.com
Thu May 7 18:12:15 CEST 2020


> @@ -1099,6 +1151,10 @@ ipsec_poll_mode_worker(void)
>  	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
>  			/ US_PER_S * BURST_TX_DRAIN_US;
>  	struct lcore_rx_queue *rxql;
> +#if (STATS_INTERVAL > 0)
> +	const uint64_t timer_period = STATS_INTERVAL * rte_get_timer_hz();
> +	uint64_t timer_tsc = 0;
> +#endif /* STATS_INTERVAL */
> 
>  	prev_tsc = 0;
>  	lcore_id = rte_lcore_id();
> @@ -1159,6 +1215,19 @@ ipsec_poll_mode_worker(void)
>  			drain_tx_buffers(qconf);
>  			drain_crypto_buffers(qconf);
>  			prev_tsc = cur_tsc;
> +#if (STATS_INTERVAL > 0)
> +			if (lcore_id == rte_get_master_lcore()) {
> +				/* advance the timer */
> +				timer_tsc += diff_tsc;
> +
> +				/* if timer has reached its timeout */
> +				if (unlikely(timer_tsc >= timer_period)) {
> +					print_stats();
> +					/* reset the timer */
> +					timer_tsc = 0;
> +				}
> +			}
> +#endif /* STATS_INTERVAL */

I still don't understand why to do this in the data-path thread.
As I said in previous comments, DPDK has a control thread that
can be used for such house-keeping tasks.
Why not use it (via rte_alarm or so) and keep the data-path
threads unaffected?
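
Something along these lines (just a rough, untested sketch; it assumes
print_stats() and STATS_INTERVAL from this patch, and the callback name
stats_alarm_cb is made up here):

#include <rte_alarm.h>

static void
stats_alarm_cb(void *arg __rte_unused)
{
	print_stats();
	/* re-arm so stats keep printing every STATS_INTERVAL seconds */
	rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, stats_alarm_cb, NULL);
}

/* in main(), after rte_eal_init() and before launching the workers: */
rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, stats_alarm_cb, NULL);

That way the printing happens in the EAL interrupt/control thread and the
poll-mode workers don't need the extra timer check in their loop.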

>  		}
> 
>  		for (i = 0; i < qconf->nb_rx_queue; ++i) {
> @@ -1169,8 +1238,10 @@ ipsec_poll_mode_worker(void)
>  			nb_rx = rte_eth_rx_burst(portid, queueid,
>  					pkts, MAX_PKT_BURST);
> 
> -			if (nb_rx > 0)
> +			if (nb_rx > 0) {
> +				core_stats_update_rx(nb_rx);
>  				process_pkts(qconf, pkts, nb_rx, portid);
> +			}
> 
>  			/* dequeue and process completed crypto-ops */
>  			if (is_unprotected_port(portid))
> diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
> index 4b53cb5..5b3561f 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.h
> +++ b/examples/ipsec-secgw/ipsec-secgw.h
> @@ -6,6 +6,8 @@
> 
>  #include <stdbool.h>
> 
> +#define STATS_INTERVAL 0

Shouldn't it be:
#ifndef STATS_INTERVAL
#define STATS_INTERVAL	0
#endif
?

To allow the user to specify the stats interval via EXTRA_CFLAGS='-DSTATS_INTERVAL=10'
or so.
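E.g. (assuming the legacy make build of the example):

make -C examples/ipsec-secgw EXTRA_CFLAGS='-DSTATS_INTERVAL=10'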

> +
>  #define NB_SOCKETS 4
> 
>  #define MAX_PKT_BURST 32
> @@ -69,6 +71,17 @@ struct ethaddr_info {
>  	uint64_t src, dst;
>  };
> 
> +#if (STATS_INTERVAL > 0)
> +struct ipsec_core_statistics {
> +	uint64_t tx;
> +	uint64_t rx;
> +	uint64_t dropped;
> +	uint64_t burst_rx;
> +} __rte_cache_aligned;
> +
> +struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
> +#endif /* STATS_INTERVAL */
> +
>  extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
> 
>  /* Port mask to identify the unprotected ports */
> @@ -85,4 +98,59 @@ is_unprotected_port(uint16_t port_id)
>  	return unprotected_port_mask & (1 << port_id);
>  }
> 
> +static inline void
> +core_stats_update_rx(int n)
> +{
> +#if (STATS_INTERVAL > 0)
> +	int lcore_id = rte_lcore_id();
> +	core_statistics[lcore_id].rx += n;
> +	if (n == MAX_PKT_BURST)
> +		core_statistics[lcore_id].burst_rx += n;
> +#else
> +	RTE_SET_USED(n);
> +#endif /* STATS_INTERVAL */
> +}
> +
> +static inline void
> +core_stats_update_tx(int n)
> +{
> +#if (STATS_INTERVAL > 0)
> +	int lcore_id = rte_lcore_id();
> +	core_statistics[lcore_id].tx += n;
> +#else
> +	RTE_SET_USED(n);
> +#endif /* STATS_INTERVAL */
> +}
> +
> +static inline void
> +core_stats_update_drop(int n)
> +{
> +#if (STATS_INTERVAL > 0)
> +	int lcore_id = rte_lcore_id();
> +	core_statistics[lcore_id].dropped += n;
> +#else
> +	RTE_SET_USED(n);
> +#endif /* STATS_INTERVAL */
> +}
> +
> +/* helper routine to free bulk of packets */
> +static inline void
> +free_pkts(struct rte_mbuf *mb[], uint32_t n)
> +{
> +	uint32_t i;
> +
> +	for (i = 0; i != n; i++)
> +		rte_pktmbuf_free(mb[i]);
> +
> +	core_stats_update_drop(n);
> +}
> +
> +/* helper routine to free single packet */
> +static inline void
> +free_pkt(struct rte_mbuf *mb)
> +{
> +	rte_pktmbuf_free(mb);
> +	core_stats_update_drop(1);

Probably just:
free_pkts(&mb, 1);
?

> +}
> +
>  #endif /* _IPSEC_SECGW_H_ */

