[dpdk-dev] [PATCH v5 2/6] examples/ioat: add software copy support

Baran, MarcinX marcinx.baran at intel.com
Fri Sep 27 16:01:25 CEST 2019


-----Original Message-----
From: Bruce Richardson <bruce.richardson at intel.com> 
Sent: Friday, September 27, 2019 12:01 PM
To: Baran, MarcinX <marcinx.baran at intel.com>
Cc: dev at dpdk.org; Modrak, PawelX <pawelx.modrak at intel.com>
Subject: Re: [dpdk-dev] [PATCH v5 2/6] examples/ioat: add software copy support

On Fri, Sep 20, 2019 at 09:37:10AM +0200, Marcin Baran wrote:
> Added support for copying packets
> using software copy mode and MAC address changing. The copies are 
> processed using one lcore.
> 
> Signed-off-by: Marcin Baran <marcinx.baran at intel.com>
> Signed-off-by: Pawel Modrak <pawelx.modrak at intel.com>
> ---
>  examples/ioat/ioatfwd.c | 211 +++++++++++++++++++++++++++++++++++-----
>  1 file changed, 188 insertions(+), 23 deletions(-)
> 
> diff --git a/examples/ioat/ioatfwd.c b/examples/ioat/ioatfwd.c
> index 977ea6a61..3a092c6cf 100644
> --- a/examples/ioat/ioatfwd.c
> +++ b/examples/ioat/ioatfwd.c
> @@ -13,7 +13,7 @@
>  #include <rte_rawdev.h>
>  #include <rte_ioat_rawdev.h>
>  
> -/* size of ring used for software copying between rx and tx. */
> + /* size of ring used for software copying between rx and tx. */
>  #define RTE_LOGTYPE_IOAT RTE_LOGTYPE_USER1
>  #define MAX_PKT_BURST 32
>  #define MEMPOOL_CACHE_SIZE 512
> @@ -89,6 +89,142 @@ static struct rte_ether_addr ioat_ports_eth_addr[RTE_MAX_ETHPORTS];
>  static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
>  struct rte_mempool *ioat_pktmbuf_pool;
>  
> +static void
> +update_mac_addrs(struct rte_mbuf *m, uint32_t dest_portid)
> +{
> +	struct rte_ether_hdr *eth;
> +	void *tmp;
> +
> +	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
> +
> +	/* 02:00:00:00:00:xx - overwriting 2 bytes of source address but
> +	 * it's acceptable because it gets overwritten by rte_ether_addr_copy
> +	 */
> +	tmp = &eth->d_addr.addr_bytes[0];
> +	*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);
> +
> +	/* src addr */
> +	rte_ether_addr_copy(&ioat_ports_eth_addr[dest_portid], &eth->s_addr);
> +}
> +
> +static inline void
> +pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
> +{
> +	/* Copy packet metadata */
> +	rte_memcpy(&dst->rearm_data,
> +		&src->rearm_data,
> +		offsetof(struct rte_mbuf, cacheline1)
> +		- offsetof(struct rte_mbuf, rearm_data));
> +
> +	/* Copy packet data */
> +	rte_memcpy(rte_pktmbuf_mtod(dst, char *),
> +		rte_pktmbuf_mtod(src, char *), src->data_len);
> +}
> +
> +/* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. */
> +static void
> +ioat_rx_port(struct rxtx_port_config *rx_config)
> +{
> +	uint32_t nb_rx, nb_enq, i, j;
> +	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
> +
> +	for (i = 0; i < rx_config->nb_queues; i++) {
> +
> +		nb_rx = rte_eth_rx_burst(rx_config->rxtx_port, i,
> +			pkts_burst, MAX_PKT_BURST);
> +
> +		if (nb_rx == 0)
> +			continue;
> +
> +		/* Perform packet software copy, free source packets */
> +		int ret;
> +		struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
> +
> +		ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
> +			(void *)pkts_burst_copy, nb_rx);
> +
> +		if (unlikely(ret < 0))
> +			rte_exit(EXIT_FAILURE,
> +				"Unable to allocate memory.\n");
> +
> +		for (j = 0; j < nb_rx; j++)
> +			pktmbuf_sw_copy(pkts_burst[j],
> +				pkts_burst_copy[j]);
> +
> +		rte_mempool_put_bulk(ioat_pktmbuf_pool,
> +			(void *)pkts_burst, nb_rx);
> +
> +		nb_enq = rte_ring_enqueue_burst(
> +			rx_config->rx_to_tx_ring,
> +			(void *)pkts_burst_copy, nb_rx, NULL);
> +
> +		/* Free any not enqueued packets. */
> +		rte_mempool_put_bulk(ioat_pktmbuf_pool,
> +			(void *)&pkts_burst_copy[nb_enq],
> +			nb_rx - nb_enq);
> +	}
> +}
> +
> +/* Transmit packets from IOAT rawdev/rte_ring for one port. */
> +static void
> +ioat_tx_port(struct rxtx_port_config *tx_config)
> +{
> +	uint32_t i, nb_dq = 0;
> +	struct rte_mbuf *mbufs_dst[MAX_PKT_BURST];
> +
> +	/* Dequeue the mbufs from rx_to_tx_ring. */
> +	nb_dq = rte_ring_dequeue_burst(tx_config->rx_to_tx_ring,
> +		(void *)mbufs_dst, MAX_PKT_BURST, NULL);
> +
> +	if (nb_dq == 0)
> +		return;
> +
> +	/* Update macs if enabled */
> +	if (mac_updating) {
> +		for (i = 0; i < nb_dq; i++)
> +			update_mac_addrs(mbufs_dst[i],
> +				tx_config->rxtx_port);
> +	}
> +
> +	const uint16_t nb_tx = rte_eth_tx_burst(tx_config->rxtx_port,
> +		0, (void *)mbufs_dst, nb_dq);
> +
> +	/* Free any unsent packets. */
> +	if (unlikely(nb_tx < nb_dq))
> +		rte_mempool_put_bulk(ioat_pktmbuf_pool,
> +		(void *)&mbufs_dst[nb_tx],
> +			nb_dq - nb_tx);
> +}
> +
> +/* Main rx and tx loop if only one slave lcore available */
> +static void
> +rxtx_main_loop(void)
> +{
> +	uint16_t i;
> +	uint16_t nb_ports = cfg.nb_ports;
> +
> +	RTE_LOG(INFO, IOAT, "Entering main rx and tx loop for copy on"
> +		" lcore %u\n", rte_lcore_id());
> +
> +	while (!force_quit)
> +		for (i = 0; i < nb_ports; i++) {
> +			ioat_rx_port(&cfg.ports[i]);
> +			ioat_tx_port(&cfg.ports[i]);
> +		}
> +}
> +
> +static void start_forwarding_cores(void)
> +{
> +	uint32_t lcore_id = rte_lcore_id();
> +
> +	RTE_LOG(INFO, IOAT, "Entering %s on lcore %u\n",
> +		__func__, rte_lcore_id());
> +
> +	lcore_id = rte_get_next_lcore(lcore_id, true, true);
> +	rte_eal_remote_launch((lcore_function_t *)rxtx_main_loop,
> +		NULL, lcore_id);
> +}
> +
>  /* Display usage */
>  static void
>  ioat_usage(const char *prgname)
> @@ -102,7 +238,7 @@ ioat_usage(const char *prgname)
>  		"       - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n"
>  		"  -c --copy-type CT: type of copy: sw|rawdev\n"
>  		"  -s --ring-size RS: size of IOAT rawdev ring for hardware copy mode or rte_ring for software copy mode\n",
> -	       prgname);
> +		prgname);
>  }
>  
>  static int
> @@ -161,16 +297,16 @@ ioat_parse_args(int argc, char **argv, unsigned int nb_ports)
>  	argvopt = argv;
>  
>  	while ((opt = getopt_long(argc, argvopt, short_options,
> -				  lgopts, &option_index)) != EOF) {
> +		lgopts, &option_index)) != EOF) {

These and the other whitespace changes in this patch should be fixed in patch 1 rather than appearing as modifications here.

With these whitespace changes moved into the original patch:

Acked-by: Bruce Richardson <bruce.richardson at intel.com>

[Marcin]: Whitespace changes moved to previous commit for v6.


