[PATCH v2] pcap: support MTU set

Ido Goshen ido at cgstowernetworks.com
Mon May 23 16:27:05 CEST 2022


Support rte_eth_dev_set_mtu() for pcap vdevs.
Enforce the configured MTU on the Rx and Tx paths.

Bugzilla ID: 961
Signed-off-by: Ido Goshen <ido at cgstowernetworks.com>

---
v2:
preserve the existing pcap behavior of accepting maximum-size packets by default
---
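
Usage sketch (not part of the patch, for review context only): how an
application could exercise this on a pcap vdev. The vdev arguments, app
name and port id below are hypothetical and error handling is trimmed;
only the rte_eal/rte_ethdev calls are existing API.

  ./app --vdev=net_pcap0,rx_pcap=in.pcap,tx_pcap=out.pcap ...

#include <rte_eal.h>
#include <rte_ethdev.h>

int
main(int argc, char **argv)
{
	struct rte_eth_conf conf = { 0 };  /* rxmode.mtu left at 0 */
	uint16_t port_id = 0;              /* assume the pcap vdev is port 0 */

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* With rxmode.mtu == 0, ethdev now falls back to
	 * dev_info.default_mtu, which net_pcap reports equal to max_mtu,
	 * so unconfigured ports keep accepting maximum-size packets. */
	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
		return -1;

	/* Lower the MTU; with this patch the pcap PMD drops Rx/Tx frames
	 * whose total length exceeds the configured value. */
	if (rte_eth_dev_set_mtu(port_id, 1500) < 0)
		return -1;

	return 0;
}
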
 drivers/net/pcap/pcap_ethdev.c | 35 +++++++++++++++++++++++++++++++---
 lib/ethdev/rte_ethdev.c        |  3 ++-
 lib/ethdev/rte_ethdev.h        |  1 +
 3 files changed, 35 insertions(+), 4 deletions(-)
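
Reviewer note (not part of the patch): a minimal sketch of reading the new
default_mtu field next to the existing min/max limits. The helper name is
made up; rte_eth_dev_info_get() is the only API used.

#include <stdio.h>
#include <rte_ethdev.h>

static int
print_mtu_limits(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* For net_pcap, default_mtu is reported as max_mtu so that
	 * untouched ports keep the old behavior (see the v2 note above). */
	printf("port %u: mtu min %u default %u max %u\n", port_id,
	       dev_info.min_mtu, dev_info.default_mtu, dev_info.max_mtu);
	return 0;
}
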

diff --git a/drivers/net/pcap/pcap_ethdev.c b/drivers/net/pcap/pcap_ethdev.c
index ec29fd6bc5..7c5fd6dd98 100644
--- a/drivers/net/pcap/pcap_ethdev.c
+++ b/drivers/net/pcap/pcap_ethdev.c
@@ -278,11 +278,12 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	const u_char *packet;
 	struct rte_mbuf *mbuf;
 	struct pcap_rx_queue *pcap_q = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[pcap_q->port_id];
 	uint16_t num_rx = 0;
 	uint32_t rx_bytes = 0;
 	pcap_t *pcap;
 
-	pp = rte_eth_devices[pcap_q->port_id].process_private;
+	pp = dev->process_private;
 	pcap = pp->rx_pcap[pcap_q->queue_id];
 
 	if (unlikely(pcap == NULL || nb_pkts == 0))
@@ -303,6 +304,12 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			break;
 		}
 
+		if (unlikely(header.caplen > dev->data->mtu)) {
+			pcap_q->rx_stat.err_pkts++;
+			rte_pktmbuf_free(mbuf);
+			break;
+		}
+
 		if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) {
 			/* pcap packet will fit in the mbuf, can copy it */
 			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
@@ -378,6 +385,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct rte_mbuf *mbuf;
 	struct pmd_process_private *pp;
 	struct pcap_tx_queue *dumper_q = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[dumper_q->port_id];
 	uint16_t num_tx = 0;
 	uint32_t tx_bytes = 0;
 	struct pcap_pkthdr header;
@@ -385,7 +393,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
 	size_t len, caplen;
 
-	pp = rte_eth_devices[dumper_q->port_id].process_private;
+	pp = dev->process_private;
 	dumper = pp->tx_dumper[dumper_q->queue_id];
 
 	if (dumper == NULL || nb_pkts == 0)
@@ -396,6 +404,12 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
 		len = caplen = rte_pktmbuf_pkt_len(mbuf);
+
+		if (unlikely(len > dev->data->mtu)) {
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
 		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
 				len > sizeof(temp_data))) {
 			caplen = sizeof(temp_data);
@@ -464,13 +478,14 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct rte_mbuf *mbuf;
 	struct pmd_process_private *pp;
 	struct pcap_tx_queue *tx_queue = queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[tx_queue->port_id];
 	uint16_t num_tx = 0;
 	uint32_t tx_bytes = 0;
 	pcap_t *pcap;
 	unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN];
 	size_t len;
 
-	pp = rte_eth_devices[tx_queue->port_id].process_private;
+	pp = dev->process_private;
 	pcap = pp->tx_pcap[tx_queue->queue_id];
 
 	if (unlikely(nb_pkts == 0 || pcap == NULL))
@@ -479,6 +494,12 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	for (i = 0; i < nb_pkts; i++) {
 		mbuf = bufs[i];
 		len = rte_pktmbuf_pkt_len(mbuf);
+
+		if (unlikely(len > dev->data->mtu)) {
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
 		if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) &&
 				len > sizeof(temp_data))) {
 			PMD_LOG(ERR,
@@ -737,6 +758,7 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = dev->data->nb_rx_queues;
 	dev_info->max_tx_queues = dev->data->nb_tx_queues;
 	dev_info->min_rx_bufsize = 0;
+	dev_info->default_mtu = dev_info->max_mtu;
 
 	return 0;
 }
@@ -807,6 +829,12 @@ eth_stats_reset(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int eth_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	PMD_LOG(INFO, "mtu set %s %u", dev->device->name, mtu);
+	return 0;
+}
+
 static inline void
 infinite_rx_ring_free(struct rte_ring *pkts)
 {
@@ -1004,6 +1032,7 @@ static const struct eth_dev_ops ops = {
 	.link_update = eth_link_update,
 	.stats_get = eth_stats_get,
 	.stats_reset = eth_stats_reset,
+	.mtu_set = eth_mtu_set,
 };
 
 static int
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 29a3d80466..75cb38a4b2 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -1186,7 +1186,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 
 	if (dev_conf->rxmode.mtu == 0)
-		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
+		dev->data->dev_conf.rxmode.mtu = dev_info.default_mtu;
 
 	ret = eth_dev_validate_mtu(port_id, &dev_info,
 			dev->data->dev_conf.rxmode.mtu);
@@ -3126,6 +3126,7 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
 	dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
 		RTE_ETHER_CRC_LEN;
 	dev_info->max_mtu = UINT16_MAX;
+	dev_info->default_mtu = RTE_ETHER_MTU;
 
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
 	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 04cff8ee10..8a2fd2f4ff 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -1861,6 +1861,7 @@ struct rte_eth_dev_info {
 		Use if_indextoname() to translate into an interface name. */
 	uint16_t min_mtu;	/**< Minimum MTU allowed */
 	uint16_t max_mtu;	/**< Maximum MTU allowed */
+	uint16_t default_mtu;	/**< Default MTU if none is configured */
 	const uint32_t *dev_flags; /**< Device flags */
 	uint32_t min_rx_bufsize; /**< Minimum size of Rx buffer. */
 	uint32_t max_rx_pktlen; /**< Maximum configurable length of Rx pkt. */
-- 
2.17.1
