[dpdk-dev] [PATCH v3] net/bonding: reduce slave starvation on rx poll

Ferruh Yigit ferruh.yigit at intel.com
Tue Mar 21 14:53:11 CET 2017


On 3/7/2017 10:39 PM, Keith Wiles wrote:
> When polling the bonded ports for RX packets, the old driver would
> always start with the first slave in the list. If the requested
> number of packets is filled on the first port in a two-port config,
> then the second port could be starved or see a larger number of
> missed-packet errors.
> 
> The code now attempts to start with a different slave each time an
> RX poll is done, to help eliminate starvation of slave ports. The
> effect of the previous code was much lower performance for two
> slaves in the bond than for just one slave.
> 
> The performance drop was detected when the application could not
> poll the RX rings fast enough and the packets per second for two or
> more ports was at the application's throughput threshold. At this
> threshold, a single slave would see very few or no drops. Enabling
> the second slave would then produce a large drop rate on the
> two-slave bond and a reduction in throughput.
> 
> Signed-off-by: Keith Wiles <keith.wiles at intel.com>
> ---
> v3 - remove more checkpatch errors
> v2 - remove checkpatch errors
> 
>  drivers/net/bonding/rte_eth_bond_pmd.c     | 21 +++++++++++++++------
>  drivers/net/bonding/rte_eth_bond_private.h |  3 ++-
>  2 files changed, 17 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
> index f3ac9e273..18c7b2e85 100644
> --- a/drivers/net/bonding/rte_eth_bond_pmd.c
> +++ b/drivers/net/bonding/rte_eth_bond_pmd.c
> @@ -1,7 +1,7 @@
>  /*-
>   *   BSD LICENSE
>   *
> - *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
> + *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
>   *   All rights reserved.
>   *
>   *   Redistribution and use in source and binary forms, with or without
> @@ -145,7 +145,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
>  	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
>  	uint16_t num_rx_total = 0;	/* Total number of received packets */
>  	uint8_t slaves[RTE_MAX_ETHPORTS];
> -	uint8_t slave_count;
> +	uint8_t slave_count, idx;
>  
>  	uint8_t collecting;  /* current slave collecting status */
>  	const uint8_t promisc = internals->promiscuous_en;
> @@ -159,12 +159,18 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
>  	memcpy(slaves, internals->active_slaves,
>  			sizeof(internals->active_slaves[0]) * slave_count);
>  
> +	idx = internals->active_slave;
> +	if (idx >= slave_count) {
> +		internals->active_slave = 0;
> +		idx = 0;
> +	}
>  	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
>  		j = num_rx_total;
> -		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);
> +		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[idx]],
> +					 COLLECTING);
>  
>  		/* Read packets from this slave */
> -		num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
> +		num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
>  				&bufs[num_rx_total], nb_pkts - num_rx_total);
>  
>  		for (k = j; k < 2 && k < num_rx_total; k++)
> @@ -187,8 +193,8 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
>  					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
>  
>  				if (hdr->ether_type == ether_type_slow_be) {
> -					bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
> -						bufs[j]);
> +					bond_mode_8023ad_handle_slow_pkt(
> +					    internals, slaves[idx], bufs[j]);
>  				} else
>  					rte_pktmbuf_free(bufs[j]);
>  
> @@ -201,8 +207,11 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
>  			} else
>  				j++;
>  		}
> +		if (unlikely(++idx == slave_count))
> +			idx = 0;
>  	}
>  
> +	internals->active_slave = idx + 1;

idx is already incremented above; won't using "idx + 1" here skip some
slaves under certain conditions, depending on num_rx_total?

Like,

slave_count = 2
internals->active_slave = 0;

idx = 0; receive from slave[0]
idx = 1;
num_rx_total >= nb_pkts
internals->active_slave = 2;

idx = 2;
internals->active_slave = 0;
idx = 0; receive from slave[0]
idx = 1;
num_rx_total >= nb_pkts
internals->active_slave = 2;

....

It always reads from slave[0] as long as it fills the num_rx_total
threshold, same as the previous behavior.

OR

slave_count = 3
internals->active_slave = 0;

idx = 0; receive from slave[0]
idx = 1; receive from slave[1]
idx = 2;
num_rx_total >= nb_pkts
internals->active_slave = 3;

idx = 3;
internals->active_slave = 0;
idx = 0; receive from slave[0]
idx = 1; receive from slave[1]
idx = 2;
num_rx_total >= nb_pkts
internals->active_slave = 3;

....

slave[2] is never read as long as slave[0] and slave[1] fill the
num_rx_total threshold.
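
A minimal sketch of one possible fix (hypothetical, not tested):
since idx is already advanced, with wrap-around, at the bottom of the
loop body, storing it as-is resumes polling at the first slave that
was not serviced on this call:

	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
		/* ... read from slaves[idx] as in the patch ... */
		if (unlikely(++idx == slave_count))
			idx = 0;
	}

	/* idx now points at the first slave not serviced on this
	 * call, so store it directly instead of "idx + 1".
	 */
	internals->active_slave = idx;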

>  	return num_rx_total;
>  }
>  
> diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
> index 5a411e22b..c8db09005 100644
> --- a/drivers/net/bonding/rte_eth_bond_private.h
> +++ b/drivers/net/bonding/rte_eth_bond_private.h
> @@ -1,7 +1,7 @@
>  /*-
>   *   BSD LICENSE
>   *
> - *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
> + *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
>   *   All rights reserved.
>   *
>   *   Redistribution and use in source and binary forms, with or without
> @@ -144,6 +144,7 @@ struct bond_dev_private {
>  	uint16_t nb_rx_queues;			/**< Total number of rx queues */
>  	uint16_t nb_tx_queues;			/**< Total number of tx queues*/
>  
> +	uint8_t active_slave;		/**< Next active_slave to poll */
>  	uint8_t active_slave_count;		/**< Number of active slaves */
>  	uint8_t active_slaves[RTE_MAX_ETHPORTS];	/**< Active slave list */
>  
> 


