On Mon, Jun 12, 2023 at 5:01 PM Kumara Parameshwaran <kumaraparamesh92@gmail.com> wrote:

The patch adds GRO support for TCP/IPv6 packets. This does not
include support for VxLAN or UDP/IPv6 packets.
<br>
Signed-off-by: Kumara Parameshwaran <kumaraparamesh92@gmail.com>
---<br>
v1:
* Changes to support GRO for TCP/IPv6 packets. This does not include
VxLAN changes.
* GRO is performed only for IPv6 packets that do not contain
extension headers.
* The logic for TCP coalescing remains the same; in the IPv6 header,
the source address, destination address, flow label and version
fields are expected to be the same.
* Re-organised the code to reuse certain TCP functions for both IPv4
and IPv6 flows.
v2:<br>
* Fix comments in gro_tcp6.h header file. <br>
<br>
v3:<br>
* Address review comments to fix code duplication between the IPv4 and IPv6 paths
<br>
v4:<br>
* Address review comments for v3; do not use callbacks
<br>
v5:<br>
* Address review comments<br>
<br>
v6:<br>
* Fix warning and coding style issues<br>
<br>
v7:<br>
* Fix build compilation issue<br>
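
For reviewers, below is a minimal lightweight-mode usage sketch (not part
of this patch) showing how an application would enable the new
RTE_GRO_TCP_IPV6 type alongside TCP/IPv4. The helper name, burst size and
flow limits are illustrative placeholders; the rte_gro calls are the
existing public API:

  #include <rte_ethdev.h>
  #include <rte_gro.h>

  #define MAX_PKT_BURST 32

  /* Merge one Rx burst in place; returns the packet count after GRO. */
  static uint16_t
  rx_burst_with_gro(uint16_t port_id, struct rte_mbuf **pkts)
  {
          struct rte_gro_param param = {
                  .gro_types = RTE_GRO_TCP_IPV4 | RTE_GRO_TCP_IPV6,
                  .max_flow_num = 64,
                  .max_item_per_flow = 32,
                  /* socket_id is only used by the heavyweight (context) mode */
          };
          uint16_t nb_rx;

          nb_rx = rte_eth_rx_burst(port_id, 0, pkts, MAX_PKT_BURST);
          return rte_gro_reassemble_burst(pkts, nb_rx, &param);
  }

As with the IPv4 path, only pure ACK data segments are merged, and IPv6
packets with extension headers are not coalesced.
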
lib/gro/gro_tcp4.c | 178 ++++++-------------------<br>
lib/gro/gro_tcp4.h | 170 +----------------------<br>
lib/gro/gro_tcp6.c | 267 +++++++++++++++++++++++++++++++++++++<br>
lib/gro/gro_tcp6.h | 161 ++++++++++++++++++++++<br>
lib/gro/gro_tcp_internal.c | 128 ++++++++++++++++++<br>
lib/gro/gro_tcp_internal.h | 212 +++++++++++++++++++++++++++++<br>
lib/gro/gro_vxlan_tcp4.c | 23 ++--<br>
lib/gro/gro_vxlan_tcp4.h | 3 +-<br>
lib/gro/meson.build | 2 +<br>
lib/gro/rte_gro.c | 83 ++++++++++--<br>
lib/gro/rte_gro.h | 3 +<br>
11 files changed, 897 insertions(+), 333 deletions(-)<br>
create mode 100644 lib/gro/gro_tcp6.c<br>
create mode 100644 lib/gro/gro_tcp6.h<br>
create mode 100644 lib/gro/gro_tcp_internal.c<br>
create mode 100644 lib/gro/gro_tcp_internal.h<br>
<br>
diff --git a/lib/gro/gro_tcp4.c b/lib/gro/gro_tcp4.c<br>
index 0014096e63..42fee78f30 100644<br>
--- a/lib/gro/gro_tcp4.c<br>
+++ b/lib/gro/gro_tcp4.c<br>
@@ -30,7 +30,7 @@ gro_tcp4_tbl_create(uint16_t socket_id,<br>
if (tbl == NULL)<br>
return NULL;<br>
<br>
- size = sizeof(struct gro_tcp4_item) * entries_num;<br>
+ size = sizeof(struct gro_tcp_item) * entries_num;<br>
tbl->items = rte_zmalloc_socket(__func__,<br>
size,<br>
RTE_CACHE_LINE_SIZE,<br>
@@ -71,18 +71,6 @@ gro_tcp4_tbl_destroy(void *tbl)<br>
rte_free(tcp_tbl);<br>
}<br>
<br>
-static inline uint32_t<br>
-find_an_empty_item(struct gro_tcp4_tbl *tbl)<br>
-{<br>
- uint32_t i;<br>
- uint32_t max_item_num = tbl->max_item_num;<br>
-<br>
- for (i = 0; i < max_item_num; i++)<br>
- if (tbl->items[i].firstseg == NULL)<br>
- return i;<br>
- return INVALID_ARRAY_INDEX;<br>
-}<br>
-<br>
static inline uint32_t<br>
find_an_empty_flow(struct gro_tcp4_tbl *tbl)<br>
{<br>
@@ -95,56 +83,6 @@ find_an_empty_flow(struct gro_tcp4_tbl *tbl)<br>
return INVALID_ARRAY_INDEX;<br>
}<br>
<br>
-static inline uint32_t<br>
-insert_new_item(struct gro_tcp4_tbl *tbl,<br>
- struct rte_mbuf *pkt,<br>
- uint64_t start_time,<br>
- uint32_t prev_idx,<br>
- uint32_t sent_seq,<br>
- uint16_t ip_id,<br>
- uint8_t is_atomic)<br>
-{<br>
- uint32_t item_idx;<br>
-<br>
- item_idx = find_an_empty_item(tbl);<br>
- if (item_idx == INVALID_ARRAY_INDEX)<br>
- return INVALID_ARRAY_INDEX;<br>
-<br>
- tbl->items[item_idx].firstseg = pkt;<br>
- tbl->items[item_idx].lastseg = rte_pktmbuf_lastseg(pkt);<br>
- tbl->items[item_idx].start_time = start_time;<br>
- tbl->items[item_idx].next_pkt_idx = INVALID_ARRAY_INDEX;<br>
- tbl->items[item_idx].sent_seq = sent_seq;<br>
- tbl->items[item_idx].ip_id = ip_id;<br>
- tbl->items[item_idx].nb_merged = 1;<br>
- tbl->items[item_idx].is_atomic = is_atomic;<br>
- tbl->item_num++;<br>
-<br>
- /* if the previous packet exists, chain them together. */<br>
- if (prev_idx != INVALID_ARRAY_INDEX) {<br>
- tbl->items[item_idx].next_pkt_idx =<br>
- tbl->items[prev_idx].next_pkt_idx;<br>
- tbl->items[prev_idx].next_pkt_idx = item_idx;<br>
- }<br>
-<br>
- return item_idx;<br>
-}<br>
-<br>
-static inline uint32_t<br>
-delete_item(struct gro_tcp4_tbl *tbl, uint32_t item_idx,<br>
- uint32_t prev_item_idx)<br>
-{<br>
- uint32_t next_idx = tbl->items[item_idx].next_pkt_idx;<br>
-<br>
- /* NULL indicates an empty item */<br>
- tbl->items[item_idx].firstseg = NULL;<br>
- tbl->item_num--;<br>
- if (prev_item_idx != INVALID_ARRAY_INDEX)<br>
- tbl->items[prev_item_idx].next_pkt_idx = next_idx;<br>
-<br>
- return next_idx;<br>
-}<br>
-<br>
static inline uint32_t<br>
insert_new_flow(struct gro_tcp4_tbl *tbl,<br>
struct tcp4_flow_key *src,<br>
@@ -159,13 +97,10 @@ insert_new_flow(struct gro_tcp4_tbl *tbl,<br>
<br>
dst = &(tbl->flows[flow_idx].key);<br>
<br>
- rte_ether_addr_copy(&(src->eth_saddr), &(dst->eth_saddr));<br>
- rte_ether_addr_copy(&(src->eth_daddr), &(dst->eth_daddr));<br>
+ ASSIGN_COMMON_TCP_KEY((&src->cmn_key), (&dst->cmn_key));<br>
+<br>
dst->ip_src_addr = src->ip_src_addr;<br>
dst->ip_dst_addr = src->ip_dst_addr;<br>
- dst->recv_ack = src->recv_ack;<br>
- dst->src_port = src->src_port;<br>
- dst->dst_port = src->dst_port;<br>
<br>
tbl->flows[flow_idx].start_index = item_idx;<br>
tbl->flow_num++;<br>
@@ -173,21 +108,6 @@ insert_new_flow(struct gro_tcp4_tbl *tbl,<br>
return flow_idx;<br>
}<br>
<br>
-/*<br>
- * update the packet length for the flushed packet.<br>
- */<br>
-static inline void<br>
-update_header(struct gro_tcp4_item *item)<br>
-{<br>
- struct rte_ipv4_hdr *ipv4_hdr;<br>
- struct rte_mbuf *pkt = item->firstseg;<br>
-<br>
- ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +<br>
- pkt->l2_len);<br>
- ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -<br>
- pkt->l2_len);<br>
-}<br>
-<br>
int32_t<br>
gro_tcp4_reassemble(struct rte_mbuf *pkt,<br>
struct gro_tcp4_tbl *tbl,<br>
@@ -202,9 +122,8 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,<br>
uint8_t is_atomic;<br>
<br>
struct tcp4_flow_key key;<br>
- uint32_t cur_idx, prev_idx, item_idx;<br>
+ uint32_t item_idx;<br>
uint32_t i, max_flow_num, remaining_flow_num;<br>
- int cmp;<br>
uint8_t find;<br>
<br>
/*<br>
@@ -216,7 +135,7 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,<br>
<br>
eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);<br>
ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);<br>
- tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);<br>
+ tcp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_tcp_hdr *, pkt->l2_len + pkt->l3_len);<br>
hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;<br>
<br>
/*<br>
@@ -230,7 +149,6 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,<br>
ip_tlen = rte_be_to_cpu_16(ipv4_hdr->total_length);<br>
if (pkt->pkt_len > (uint32_t)(ip_tlen + pkt->l2_len))<br>
rte_pktmbuf_trim(pkt, pkt->pkt_len - ip_tlen - pkt->l2_len);<br>
-<br>
/*<br>
* Don't process the packet whose payload length is less than or<br>
* equal to 0.<br>
@@ -239,6 +157,13 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,<br>
if (tcp_dl <= 0)<br>
return -1;<br>
<br>
+ rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.cmn_key.eth_saddr));<br>
+ rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.cmn_key.eth_daddr));<br>
+ key.ip_src_addr = ipv4_hdr->src_addr;<br>
+ key.ip_dst_addr = ipv4_hdr->dst_addr;<br>
+ key.cmn_key.src_port = tcp_hdr->src_port;<br>
+ key.cmn_key.dst_port = tcp_hdr->dst_port;<br>
+ key.cmn_key.recv_ack = tcp_hdr->recv_ack;<br>
/*<br>
* Save IPv4 ID for the packet whose DF bit is 0. For the packet<br>
* whose DF bit is 1, IPv4 ID is ignored.<br>
@@ -246,15 +171,6 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,<br>
frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);<br>
is_atomic = (frag_off & RTE_IPV4_HDR_DF_FLAG) == RTE_IPV4_HDR_DF_FLAG;<br>
ip_id = is_atomic ? 0 : rte_be_to_cpu_16(ipv4_hdr->packet_id);<br>
- sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);<br>
-<br>
- rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.eth_saddr));<br>
- rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.eth_daddr));<br>
- key.ip_src_addr = ipv4_hdr->src_addr;<br>
- key.ip_dst_addr = ipv4_hdr->dst_addr;<br>
- key.src_port = tcp_hdr->src_port;<br>
- key.dst_port = tcp_hdr->dst_port;<br>
- key.recv_ack = tcp_hdr->recv_ack;<br>
<br>
/* Search for a matched flow. */<br>
max_flow_num = tbl->max_flow_num;<br>
@@ -270,63 +186,44 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,<br>
}<br>
}<br>
<br>
- /*<br>
- * Fail to find a matched flow. Insert a new flow and store the<br>
- * packet into the flow.<br>
- */<br>
if (find == 0) {<br>
- item_idx = insert_new_item(tbl, pkt, start_time,<br>
- INVALID_ARRAY_INDEX, sent_seq, ip_id,<br>
- is_atomic);<br>
+ sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);<br>
+ item_idx = insert_new_tcp_item(pkt, tbl->items, &tbl->item_num,<br>
+ tbl->max_item_num, start_time,<br>
+ INVALID_ARRAY_INDEX, sent_seq, ip_id,<br>
+ is_atomic);<br>
if (item_idx == INVALID_ARRAY_INDEX)<br>
return -1;<br>
if (insert_new_flow(tbl, &key, item_idx) ==<br>
- INVALID_ARRAY_INDEX) {<br>
+ INVALID_ARRAY_INDEX) {<br>
/*<br>
* Fail to insert a new flow, so delete the<br>
* stored packet.<br>
- */<br>
- delete_item(tbl, item_idx, INVALID_ARRAY_INDEX);<br>
+ */<br>
+ delete_tcp_item(tbl->items, item_idx, &tbl->item_num, INVALID_ARRAY_INDEX);<br>
return -1;<br>
}<br>
return 0;<br>
}<br>
<br>
- /*<br>
- * Check all packets in the flow and try to find a neighbor for<br>
- * the input packet.<br>
- */<br>
- cur_idx = tbl->flows[i].start_index;<br>
- prev_idx = cur_idx;<br>
- do {<br>
- cmp = check_seq_option(&(tbl->items[cur_idx]), tcp_hdr,<br>
- sent_seq, ip_id, pkt->l4_len, tcp_dl, 0,<br>
- is_atomic);<br>
- if (cmp) {<br>
- if (merge_two_tcp4_packets(&(tbl->items[cur_idx]),<br>
- pkt, cmp, sent_seq, ip_id, 0))<br>
- return 1;<br>
- /*<br>
- * Fail to merge the two packets, as the packet<br>
- * length is greater than the max value. Store<br>
- * the packet into the flow.<br>
- */<br>
- if (insert_new_item(tbl, pkt, start_time, cur_idx,<br>
- sent_seq, ip_id, is_atomic) ==<br>
- INVALID_ARRAY_INDEX)<br>
- return -1;<br>
- return 0;<br>
- }<br>
- prev_idx = cur_idx;<br>
- cur_idx = tbl->items[cur_idx].next_pkt_idx;<br>
- } while (cur_idx != INVALID_ARRAY_INDEX);<br>
+ return process_tcp_item(pkt, tcp_hdr, tcp_dl, tbl->items, tbl->flows[i].start_index,<br>
+ &tbl->item_num, tbl->max_item_num,<br>
+ ip_id, is_atomic, start_time);<br>
+}<br>
<br>
- /* Fail to find a neighbor, so store the packet into the flow. */<br>
- if (insert_new_item(tbl, pkt, start_time, prev_idx, sent_seq,<br>
- ip_id, is_atomic) == INVALID_ARRAY_INDEX)<br>
- return -1;<br>
+/*<br>
+ * update the packet length for the flushed packet.<br>
+ */<br>
+static inline void<br>
+update_header(struct gro_tcp_item *item)<br>
+{<br>
+ struct rte_ipv4_hdr *ipv4_hdr;<br>
+ struct rte_mbuf *pkt = item->firstseg;<br>
<br>
- return 0;<br>
+ ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +<br>
+ pkt->l2_len);<br>
+ ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -<br>
+ pkt->l2_len);<br>
}<br>
<br>
uint16_t<br>
@@ -353,7 +250,8 @@ gro_tcp4_tbl_timeout_flush(struct gro_tcp4_tbl *tbl,<br>
* Delete the packet and get the next<br>
* packet in the flow.<br>
*/<br>
- j = delete_item(tbl, j, INVALID_ARRAY_INDEX);<br>
+ j = delete_tcp_item(tbl->items, j,<br>
+ &tbl->item_num, INVALID_ARRAY_INDEX);<br>
tbl->flows[i].start_index = j;<br>
if (j == INVALID_ARRAY_INDEX)<br>
tbl->flow_num--;<br>
diff --git a/lib/gro/gro_tcp4.h b/lib/gro/gro_tcp4.h<br>
index 212f97a042..c0154afa24 100644<br>
--- a/lib/gro/gro_tcp4.h<br>
+++ b/lib/gro/gro_tcp4.h<br>
@@ -5,32 +5,15 @@<br>
#ifndef _GRO_TCP4_H_<br>
#define _GRO_TCP4_H_<br>
<br>
-#include <rte_tcp.h><br>
+#include <gro_tcp_internal.h><br>
<br>
-#define INVALID_ARRAY_INDEX 0xffffffffUL<br>
#define GRO_TCP4_TBL_MAX_ITEM_NUM (1024UL * 1024UL)<br>
<br>
-/*<br>
- * The max length of a IPv4 packet, which includes the length of the L3<br>
- * header, the L4 header and the data payload.<br>
- */<br>
-#define MAX_IPV4_PKT_LENGTH UINT16_MAX<br>
-<br>
-/* The maximum TCP header length */<br>
-#define MAX_TCP_HLEN 60<br>
-#define INVALID_TCP_HDRLEN(len) \<br>
- (((len) < sizeof(struct rte_tcp_hdr)) || ((len) > MAX_TCP_HLEN))<br>
-<br>
-/* Header fields representing a TCP/IPv4 flow */<br>
+/* Header fields representing common fields in TCP flow */<br>
struct tcp4_flow_key {<br>
- struct rte_ether_addr eth_saddr;<br>
- struct rte_ether_addr eth_daddr;<br>
+ struct cmn_tcp_key cmn_key;<br>
uint32_t ip_src_addr;<br>
uint32_t ip_dst_addr;<br>
-<br>
- uint32_t recv_ack;<br>
- uint16_t src_port;<br>
- uint16_t dst_port;<br>
};<br>
<br>
struct gro_tcp4_flow {<br>
@@ -42,42 +25,12 @@ struct gro_tcp4_flow {<br>
uint32_t start_index;<br>
};<br>
<br>
-struct gro_tcp4_item {<br>
- /*<br>
- * The first MBUF segment of the packet. If the value<br>
- * is NULL, it means the item is empty.<br>
- */<br>
- struct rte_mbuf *firstseg;<br>
- /* The last MBUF segment of the packet */<br>
- struct rte_mbuf *lastseg;<br>
- /*<br>
- * The time when the first packet is inserted into the table.<br>
- * This value won't be updated, even if the packet is merged<br>
- * with other packets.<br>
- */<br>
- uint64_t start_time;<br>
- /*<br>
- * next_pkt_idx is used to chain the packets that<br>
- * are in the same flow but can't be merged together<br>
- * (e.g. caused by packet reordering).<br>
- */<br>
- uint32_t next_pkt_idx;<br>
- /* TCP sequence number of the packet */<br>
- uint32_t sent_seq;<br>
- /* IPv4 ID of the packet */<br>
- uint16_t ip_id;<br>
- /* the number of merged packets */<br>
- uint16_t nb_merged;<br>
- /* Indicate if IPv4 ID can be ignored */<br>
- uint8_t is_atomic;<br>
-};<br>
-<br>
/*<br>
* TCP/IPv4 reassembly table structure.<br>
*/<br>
struct gro_tcp4_tbl {<br>
/* item array */<br>
- struct gro_tcp4_item *items;<br>
+ struct gro_tcp_item *items;<br>
/* flow array */<br>
struct gro_tcp4_flow *flows;<br>
/* current item number */<br>
@@ -186,120 +139,9 @@ uint32_t gro_tcp4_tbl_pkt_count(void *tbl);<br>
static inline int<br>
is_same_tcp4_flow(struct tcp4_flow_key k1, struct tcp4_flow_key k2)<br>
{<br>
- return (rte_is_same_ether_addr(&k1.eth_saddr, &k2.eth_saddr) &&<br>
- rte_is_same_ether_addr(&k1.eth_daddr, &k2.eth_daddr) &&<br>
- (k1.ip_src_addr == k2.ip_src_addr) &&<br>
+ return ((k1.ip_src_addr == k2.ip_src_addr) &&<br>
(k1.ip_dst_addr == k2.ip_dst_addr) &&<br>
- (k1.recv_ack == k2.recv_ack) &&<br>
- (k1.src_port == k2.src_port) &&<br>
- (k1.dst_port == k2.dst_port));<br>
+ is_common_tcp_key(&k1.cmn_key, &k2.cmn_key));<br>
}<br>
<br>
-/*<br>
- * Merge two TCP/IPv4 packets without updating checksums.<br>
- * If cmp is larger than 0, append the new packet to the<br>
- * original packet. Otherwise, pre-pend the new packet to<br>
- * the original packet.<br>
- */<br>
-static inline int<br>
-merge_two_tcp4_packets(struct gro_tcp4_item *item,<br>
- struct rte_mbuf *pkt,<br>
- int cmp,<br>
- uint32_t sent_seq,<br>
- uint16_t ip_id,<br>
- uint16_t l2_offset)<br>
-{<br>
- struct rte_mbuf *pkt_head, *pkt_tail, *lastseg;<br>
- uint16_t hdr_len, l2_len;<br>
-<br>
- if (cmp > 0) {<br>
- pkt_head = item->firstseg;<br>
- pkt_tail = pkt;<br>
- } else {<br>
- pkt_head = pkt;<br>
- pkt_tail = item->firstseg;<br>
- }<br>
-<br>
- /* check if the IPv4 packet length is greater than the max value */<br>
- hdr_len = l2_offset + pkt_head->l2_len + pkt_head->l3_len +<br>
- pkt_head->l4_len;<br>
- l2_len = l2_offset > 0 ? pkt_head->outer_l2_len : pkt_head->l2_len;<br>
- if (unlikely(pkt_head->pkt_len - l2_len + pkt_tail->pkt_len -<br>
- hdr_len > MAX_IPV4_PKT_LENGTH))<br>
- return 0;<br>
-<br>
- /* remove the packet header for the tail packet */<br>
- rte_pktmbuf_adj(pkt_tail, hdr_len);<br>
-<br>
- /* chain two packets together */<br>
- if (cmp > 0) {<br>
- item->lastseg->next = pkt;<br>
- item->lastseg = rte_pktmbuf_lastseg(pkt);<br>
- /* update IP ID to the larger value */<br>
- item->ip_id = ip_id;<br>
- } else {<br>
- lastseg = rte_pktmbuf_lastseg(pkt);<br>
- lastseg->next = item->firstseg;<br>
- item->firstseg = pkt;<br>
- /* update sent_seq to the smaller value */<br>
- item->sent_seq = sent_seq;<br>
- item->ip_id = ip_id;<br>
- }<br>
- item->nb_merged++;<br>
-<br>
- /* update MBUF metadata for the merged packet */<br>
- pkt_head->nb_segs += pkt_tail->nb_segs;<br>
- pkt_head->pkt_len += pkt_tail->pkt_len;<br>
-<br>
- return 1;<br>
-}<br>
-<br>
-/*<br>
- * Check if two TCP/IPv4 packets are neighbors.<br>
- */<br>
-static inline int<br>
-check_seq_option(struct gro_tcp4_item *item,<br>
- struct rte_tcp_hdr *tcph,<br>
- uint32_t sent_seq,<br>
- uint16_t ip_id,<br>
- uint16_t tcp_hl,<br>
- uint16_t tcp_dl,<br>
- uint16_t l2_offset,<br>
- uint8_t is_atomic)<br>
-{<br>
- struct rte_mbuf *pkt_orig = item->firstseg;<br>
- struct rte_ipv4_hdr *iph_orig;<br>
- struct rte_tcp_hdr *tcph_orig;<br>
- uint16_t len, tcp_hl_orig;<br>
-<br>
- iph_orig = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt_orig, char *) +<br>
- l2_offset + pkt_orig->l2_len);<br>
- tcph_orig = (struct rte_tcp_hdr *)((char *)iph_orig + pkt_orig->l3_len);<br>
- tcp_hl_orig = pkt_orig->l4_len;<br>
-<br>
- /* Check if TCP option fields equal */<br>
- len = RTE_MAX(tcp_hl, tcp_hl_orig) - sizeof(struct rte_tcp_hdr);<br>
- if ((tcp_hl != tcp_hl_orig) || ((len > 0) &&<br>
- (memcmp(tcph + 1, tcph_orig + 1,<br>
- len) != 0)))<br>
- return 0;<br>
-<br>
- /* Don't merge packets whose DF bits are different */<br>
- if (unlikely(item->is_atomic ^ is_atomic))<br>
- return 0;<br>
-<br>
- /* check if the two packets are neighbors */<br>
- len = pkt_orig->pkt_len - l2_offset - pkt_orig->l2_len -<br>
- pkt_orig->l3_len - tcp_hl_orig;<br>
- if ((sent_seq == item->sent_seq + len) && (is_atomic ||<br>
- (ip_id == item->ip_id + 1)))<br>
- /* append the new packet */<br>
- return 1;<br>
- else if ((sent_seq + tcp_dl == item->sent_seq) && (is_atomic ||<br>
- (ip_id + item->nb_merged == item->ip_id)))<br>
- /* pre-pend the new packet */<br>
- return -1;<br>
-<br>
- return 0;<br>
-}<br>
#endif<br>
diff --git a/lib/gro/gro_tcp6.c b/lib/gro/gro_tcp6.c<br>
new file mode 100644<br>
index 0000000000..0ea73741c1<br>
--- /dev/null<br>
+++ b/lib/gro/gro_tcp6.c<br>
@@ -0,0 +1,267 @@<br>
+/* SPDX-License-Identifier: BSD-3-Clause<br>
+ * Copyright(c) 2017 Intel Corporation<br>
+ */<br>
+<br>
+#include <rte_malloc.h><br>
+#include <rte_mbuf.h><br>
+#include <rte_ethdev.h><br>
+<br>
+#include "gro_tcp6.h"<br>
+<br>
+void *<br>
+gro_tcp6_tbl_create(uint16_t socket_id,<br>
+ uint16_t max_flow_num,<br>
+ uint16_t max_item_per_flow)<br>
+{<br>
+ struct gro_tcp6_tbl *tbl;<br>
+ size_t size;<br>
+ uint32_t entries_num, i;<br>
+<br>
+ entries_num = max_flow_num * max_item_per_flow;<br>
+ entries_num = RTE_MIN(entries_num, GRO_TCP6_TBL_MAX_ITEM_NUM);<br>
+<br>
+ if (entries_num == 0)<br>
+ return NULL;<br>
+<br>
+ tbl = rte_zmalloc_socket(__func__,<br>
+ sizeof(struct gro_tcp6_tbl),<br>
+ RTE_CACHE_LINE_SIZE,<br>
+ socket_id);<br>
+ if (tbl == NULL)<br>
+ return NULL;<br>
+<br>
+ size = sizeof(struct gro_tcp_item) * entries_num;<br>
+ tbl->items = rte_zmalloc_socket(__func__,<br>
+ size,<br>
+ RTE_CACHE_LINE_SIZE,<br>
+ socket_id);<br>
+ if (tbl->items == NULL) {<br>
+ rte_free(tbl);<br>
+ return NULL;<br>
+ }<br>
+ tbl->max_item_num = entries_num;<br>
+<br>
+ size = sizeof(struct gro_tcp6_flow) * entries_num;<br>
+ tbl->flows = rte_zmalloc_socket(__func__,<br>
+ size,<br>
+ RTE_CACHE_LINE_SIZE,<br>
+ socket_id);<br>
+ if (tbl->flows == NULL) {<br>
+ rte_free(tbl->items);<br>
+ rte_free(tbl);<br>
+ return NULL;<br>
+ }<br>
+ /* INVALID_ARRAY_INDEX indicates an empty flow */<br>
+ for (i = 0; i < entries_num; i++)<br>
+ tbl->flows[i].start_index = INVALID_ARRAY_INDEX;<br>
+ tbl->max_flow_num = entries_num;<br>
+<br>
+ return tbl;<br>
+}<br>
+<br>
+void<br>
+gro_tcp6_tbl_destroy(void *tbl)<br>
+{<br>
+ struct gro_tcp6_tbl *tcp_tbl = tbl;<br>
+<br>
+ if (tcp_tbl) {<br>
+ rte_free(tcp_tbl->items);<br>
+ rte_free(tcp_tbl->flows);<br>
+ }<br>
+ rte_free(tcp_tbl);<br>
+}<br>
+<br>
+static inline uint32_t<br>
+find_an_empty_flow(struct gro_tcp6_tbl *tbl)<br>
+{<br>
+ uint32_t i;<br>
+ uint32_t max_flow_num = tbl->max_flow_num;<br>
+<br>
+ for (i = 0; i < max_flow_num; i++)<br>
+ if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)<br>
+ return i;<br>
+ return INVALID_ARRAY_INDEX;<br>
+}<br>
+<br>
+static inline uint32_t<br>
+insert_new_flow(struct gro_tcp6_tbl *tbl,<br>
+ struct tcp6_flow_key *src,<br>
+ uint32_t item_idx)<br>
+{<br>
+ struct tcp6_flow_key *dst;<br>
+ uint32_t flow_idx;<br>
+<br>
+ flow_idx = find_an_empty_flow(tbl);<br>
+ if (unlikely(flow_idx == INVALID_ARRAY_INDEX))<br>
+ return INVALID_ARRAY_INDEX;<br>
+<br>
+ dst = &(tbl->flows[flow_idx].key);<br>
+<br>
+ ASSIGN_COMMON_TCP_KEY((&src->cmn_key), (&dst->cmn_key));<br>
+ memcpy(&dst->src_addr[0], &src->src_addr[0], sizeof(dst->src_addr));<br>
+ memcpy(&dst->dst_addr[0], &src->dst_addr[0], sizeof(dst->dst_addr));<br>
+ dst->vtc_flow = src->vtc_flow;<br>
+<br>
+ tbl->flows[flow_idx].start_index = item_idx;<br>
+ tbl->flow_num++;<br>
+<br>
+ return flow_idx;<br>
+}<br>
+<br>
+/*<br>
+ * update the packet length for the flushed packet.<br>
+ */<br>
+static inline void<br>
+update_header(struct gro_tcp_item *item)<br>
+{<br>
+ struct rte_ipv6_hdr *ipv6_hdr;<br>
+ struct rte_mbuf *pkt = item->firstseg;<br>
+<br>
+ ipv6_hdr = (struct rte_ipv6_hdr *)(rte_pktmbuf_mtod(pkt, char *) +<br>
+ pkt->l2_len);<br>
+ ipv6_hdr->payload_len = rte_cpu_to_be_16(pkt->pkt_len -<br>
+ pkt->l2_len - pkt->l3_len);<br>
+}<br>
+<br>
+int32_t<br>
+gro_tcp6_reassemble(struct rte_mbuf *pkt,<br>
+ struct gro_tcp6_tbl *tbl,<br>
+ uint64_t start_time)<br>
+{<br>
+ struct rte_ether_hdr *eth_hdr;<br>
+ struct rte_ipv6_hdr *ipv6_hdr;<br>
+ int32_t tcp_dl;<br>
+ uint16_t ip_tlen;<br>
+ struct tcp6_flow_key key;<br>
+ uint32_t i, max_flow_num, remaining_flow_num;<br>
+ uint32_t sent_seq;<br>
+ struct rte_tcp_hdr *tcp_hdr;<br>
+ uint8_t find;<br>
+ uint32_t item_idx;<br>
+ /*<br>
+ * Don't process the packet whose TCP header length is greater<br>
+ * than 60 bytes or less than 20 bytes.<br>
+ */<br>
+ if (unlikely(INVALID_TCP_HDRLEN(pkt->l4_len)))<br>
+ return -1;<br>
+<br>
+ eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);<br>
+ ipv6_hdr = (struct rte_ipv6_hdr *)((char *)eth_hdr + pkt->l2_len);<br>
+ tcp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_tcp_hdr *, pkt->l2_len + pkt->l3_len);<br>
+<br>
+ /*<br>
+ * Don't process the packet which has FIN, SYN, RST, PSH, URG, ECE<br>
+ * or CWR set.<br>
+ */<br>
+ if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)<br>
+ return -1;<br>
+<br>
+ ip_tlen = rte_be_to_cpu_16(ipv6_hdr->payload_len);<br>
+ /*<br>
+ * Don't process the packet whose payload length is less than or<br>
+ * equal to 0.<br>
+ */<br>
+ tcp_dl = ip_tlen - pkt->l4_len;<br>
+ if (tcp_dl <= 0)<br>
+ return -1;<br>
+<br>
+ rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.cmn_key.eth_saddr));<br>
+ rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.cmn_key.eth_daddr));<br>
+ memcpy(&key.src_addr[0], &ipv6_hdr->src_addr, sizeof(key.src_addr));<br>
+ memcpy(&key.dst_addr[0], &ipv6_hdr->dst_addr, sizeof(key.dst_addr));<br>
+ key.cmn_key.src_port = tcp_hdr->src_port;<br>
+ key.cmn_key.dst_port = tcp_hdr->dst_port;<br>
+ key.cmn_key.recv_ack = tcp_hdr->recv_ack;<br>
+ key.vtc_flow = ipv6_hdr->vtc_flow;<br>
+<br>
+ /* Search for a matched flow. */<br>
+ max_flow_num = tbl->max_flow_num;<br>
+ remaining_flow_num = tbl->flow_num;<br>
+ find = 0;<br>
+ for (i = 0; i < max_flow_num && remaining_flow_num; i++) {<br>
+ if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {<br>
+ if (is_same_tcp6_flow(&tbl->flows[i].key, &key)) {<br>
+ find = 1;<br>
+ break;<br>
+ }<br>
+ remaining_flow_num--;<br>
+ }<br>
+ }<br>
+<br>
+ if (find == 0) {<br>
+ sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);<br>
+ item_idx = insert_new_tcp_item(pkt, tbl->items, &tbl->item_num,<br>
+ tbl->max_item_num, start_time,<br>
+ INVALID_ARRAY_INDEX, sent_seq, 0, true);<br>
+ if (item_idx == INVALID_ARRAY_INDEX)<br>
+ return -1;<br>
+ if (insert_new_flow(tbl, &key, item_idx) ==<br>
+ INVALID_ARRAY_INDEX) {<br>
+ /*<br>
+ * Fail to insert a new flow, so delete the<br>
+ * stored packet.<br>
+ */<br>
+ delete_tcp_item(tbl->items, item_idx, &tbl->item_num, INVALID_ARRAY_INDEX);<br>
+ return -1;<br>
+ }<br>
+ return 0;<br>
+ }<br>
+<br>
+ return process_tcp_item(pkt, tcp_hdr, tcp_dl, tbl->items, tbl->flows[i].start_index,<br>
+ &tbl->item_num, tbl->max_item_num,<br>
+ 0, true, start_time);<br>
+}<br>
+<br>
+uint16_t<br>
+gro_tcp6_tbl_timeout_flush(struct gro_tcp6_tbl *tbl,<br>
+ uint64_t flush_timestamp,<br>
+ struct rte_mbuf **out,<br>
+ uint16_t nb_out)<br>
+{<br>
+ uint16_t k = 0;<br>
+ uint32_t i, j;<br>
+ uint32_t max_flow_num = tbl->max_flow_num;<br>
+<br>
+ for (i = 0; i < max_flow_num; i++) {<br>
+ if (unlikely(tbl->flow_num == 0))<br>
+ return k;<br>
+<br>
+ j = tbl->flows[i].start_index;<br>
+ while (j != INVALID_ARRAY_INDEX) {<br>
+ if (tbl->items[j].start_time <= flush_timestamp) {<br>
+ out[k++] = tbl->items[j].firstseg;<br>
+ if (tbl->items[j].nb_merged > 1)<br>
+ update_header(&(tbl->items[j]));<br>
+ /*<br>
+ * Delete the packet and get the next<br>
+ * packet in the flow.<br>
+ */<br>
+ j = delete_tcp_item(tbl->items, j,<br>
+ &tbl->item_num, INVALID_ARRAY_INDEX);<br>
+ tbl->flows[i].start_index = j;<br>
+ if (j == INVALID_ARRAY_INDEX)<br>
+ tbl->flow_num--;<br>
+<br>
+ if (unlikely(k == nb_out))<br>
+ return k;<br>
+ } else<br>
+ /*<br>
+ * The left packets in this flow won't be<br>
+ * timeout. Go to check other flows.<br>
+ */<br>
+ break;<br>
+ }<br>
+ }<br>
+ return k;<br>
+}<br>
+<br>
+uint32_t<br>
+gro_tcp6_tbl_pkt_count(void *tbl)<br>
+{<br>
+ struct gro_tcp6_tbl *gro_tbl = tbl;<br>
+<br>
+ if (gro_tbl)<br>
+ return gro_tbl->item_num;<br>
+<br>
+ return 0;<br>
+}<br>
diff --git a/lib/gro/gro_tcp6.h b/lib/gro/gro_tcp6.h<br>
new file mode 100644<br>
index 0000000000..cc02b0004a<br>
--- /dev/null<br>
+++ b/lib/gro/gro_tcp6.h<br>
@@ -0,0 +1,161 @@<br>
+/* SPDX-License-Identifier: BSD-3-Clause<br>
+ * Copyright(c) 2017 Intel Corporation<br>
+ */<br>
+<br>
+#ifndef _GRO_TCP6_H_<br>
+#define _GRO_TCP6_H_<br>
+<br>
+#include <gro_tcp_internal.h><br>
+<br>
+#define INVALID_ARRAY_INDEX 0xffffffffUL<br>
+#define GRO_TCP6_TBL_MAX_ITEM_NUM (1024UL * 1024UL)<br>
+<br>
+/* Header fields representing a TCP/IPv6 flow */<br>
+struct tcp6_flow_key {<br>
+ struct cmn_tcp_key cmn_key;<br>
+ uint8_t src_addr[16];<br>
+ uint8_t dst_addr[16];<br>
+ rte_be32_t vtc_flow;<br>
+};<br>
+<br>
+struct gro_tcp6_flow {<br>
+ struct tcp6_flow_key key;<br>
+ /*<br>
+ * The index of the first packet in the flow.<br>
+ * INVALID_ARRAY_INDEX indicates an empty flow.<br>
+ */<br>
+ uint32_t start_index;<br>
+};<br>
+<br>
+/*<br>
+ * TCP/IPv6 reassembly table structure.<br>
+ */<br>
+struct gro_tcp6_tbl {<br>
+ /* item array */<br>
+ struct gro_tcp_item *items;<br>
+ /* flow array */<br>
+ struct gro_tcp6_flow *flows;<br>
+ /* current item number */<br>
+ uint32_t item_num;<br>
+ /* current flow num */<br>
+ uint32_t flow_num;<br>
+ /* item array size */<br>
+ uint32_t max_item_num;<br>
+ /* flow array size */<br>
+ uint32_t max_flow_num;<br>
+};<br>
+<br>
+/**<br>
+ * This function creates a TCP/IPv6 reassembly table.<br>
+ *<br>
+ * @param socket_id<br>
+ * Socket index for allocating the TCP/IPv6 reassemble table<br>
+ * @param max_flow_num<br>
+ * The maximum number of flows in the TCP/IPv6 GRO table<br>
+ * @param max_item_per_flow<br>
+ * The maximum number of packets per flow<br>
+ *<br>
+ * @return<br>
+ * - Return the table pointer on success.<br>
+ * - Return NULL on failure.<br>
+ */<br>
+void *gro_tcp6_tbl_create(uint16_t socket_id,<br>
+ uint16_t max_flow_num,<br>
+ uint16_t max_item_per_flow);<br>
+<br>
+/**<br>
+ * This function destroys a TCP/IPv6 reassembly table.<br>
+ *<br>
+ * @param tbl<br>
+ * Pointer pointing to the TCP/IPv6 reassembly table.<br>
+ */<br>
+void gro_tcp6_tbl_destroy(void *tbl);<br>
+<br>
+/**<br>
+ * This function merges a TCP/IPv6 packet. It doesn't process the packet,<br>
+ * which has SYN, FIN, RST, PSH, CWR, ECE or URG set, or doesn't have<br>
+ * payload.<br>
+ *<br>
+ * This function doesn't check if the packet has correct checksums and<br>
+ * doesn't re-calculate checksums for the merged packet. Additionally,<br>
+ * it assumes the packets are complete (i.e., MF==0 && frag_off==0),<br>
+ * when IP fragmentation is possible (i.e., DF==0). It returns the<br>
+ * packet, if the packet has invalid parameters (e.g. SYN bit is set)<br>
+ * or there is no available space in the table.<br>
+ *<br>
+ * @param pkt<br>
+ * Packet to reassemble<br>
+ * @param tbl<br>
+ * Pointer pointing to the TCP/IPv6 reassembly table<br>
+ * @start_time<br>
+ * The time when the packet is inserted into the table<br>
+ *<br>
+ * @return<br>
+ * - Return a positive value if the packet is merged.<br>
+ * - Return zero if the packet isn't merged but stored in the table.<br>
+ * - Return a negative value for invalid parameters or no available<br>
+ * space in the table.<br>
+ */<br>
+int32_t gro_tcp6_reassemble(struct rte_mbuf *pkt,<br>
+ struct gro_tcp6_tbl *tbl,<br>
+ uint64_t start_time);<br>
+<br>
+/**<br>
+ * This function flushes timeout packets in a TCP/IPv6 reassembly table,<br>
+ * and without updating checksums.<br>
+ *<br>
+ * @param tbl<br>
+ * TCP/IPv6 reassembly table pointer<br>
+ * @param flush_timestamp<br>
+ * Flush packets which are inserted into the table before or at the<br>
+ * flush_timestamp.<br>
+ * @param out<br>
+ * Pointer array used to keep flushed packets<br>
+ * @param nb_out<br>
+ * The element number in 'out'. It also determines the maximum number of<br>
+ * packets that can be flushed finally.<br>
+ *<br>
+ * @return<br>
+ * The number of flushed packets<br>
+ */<br>
+uint16_t gro_tcp6_tbl_timeout_flush(struct gro_tcp6_tbl *tbl,<br>
+ uint64_t flush_timestamp,<br>
+ struct rte_mbuf **out,<br>
+ uint16_t nb_out);<br>
+<br>
+/**<br>
+ * This function returns the number of the packets in a TCP/IPv6<br>
+ * reassembly table.<br>
+ *<br>
+ * @param tbl<br>
+ * TCP/IPv6 reassembly table pointer<br>
+ *<br>
+ * @return<br>
+ * The number of packets in the table<br>
+ */<br>
+uint32_t gro_tcp6_tbl_pkt_count(void *tbl);<br>
+<br>
+/*<br>
+ * Check if two TCP/IPv6 packets belong to the same flow.<br>
+ */<br>
+static inline int<br>
+is_same_tcp6_flow(struct tcp6_flow_key *k1, struct tcp6_flow_key *k2)<br>
+{<br>
+ rte_be32_t vtc_flow_diff;<br>
+<br>
+ if (memcmp(&k1->src_addr, &k2->src_addr, 16))<br>
+ return 0;<br>
+ if (memcmp(&k1->dst_addr, &k2->dst_addr, 16))<br>
+ return 0;<br>
+ /*<br>
+ * IP version (4) Traffic Class (8) Flow Label (20)<br>
+ * All fields except Traffic class should be same<br>
+ */<br>
+ vtc_flow_diff = (k1->vtc_flow ^ k2->vtc_flow);<br>
+ if (vtc_flow_diff & htonl(0xF00FFFFF))<br>
+ return 0;<br>
+<br>
+ return is_common_tcp_key(&k1->cmn_key, &k2->cmn_key);<br>
+}<br>
+<br>
+#endif<br>
diff --git a/lib/gro/gro_tcp_internal.c b/lib/gro/gro_tcp_internal.c<br>
new file mode 100644<br>
index 0000000000..5a21bca7f8<br>
--- /dev/null<br>
+++ b/lib/gro/gro_tcp_internal.c<br>
@@ -0,0 +1,128 @@<br>
+/* SPDX-License-Identifier: BSD-3-Clause<br>
+ * Copyright(c) 2017 Intel Corporation<br>
+ */<br>
+#include <rte_malloc.h><br>
+#include <rte_mbuf.h><br>
+#include <rte_ethdev.h><br>
+<br>
+#include "gro_tcp_internal.h"<br>
+<br>
+static inline uint32_t<br>
+find_an_empty_item(struct gro_tcp_item *items,<br>
+ uint32_t max_item_num)<br>
+{<br>
+ uint32_t i;<br>
+<br>
+ for (i = 0; i < max_item_num; i++)<br>
+ if (items[i].firstseg == NULL)<br>
+ return i;<br>
+ return INVALID_ARRAY_INDEX;<br>
+}<br>
+<br>
+inline uint32_t<br>
+insert_new_tcp_item(struct rte_mbuf *pkt,<br>
+ struct gro_tcp_item *items,<br>
+ uint32_t *item_num,<br>
+ uint32_t max_item_num,<br>
+ uint64_t start_time,<br>
+ uint32_t prev_idx,<br>
+ uint32_t sent_seq,<br>
+ uint16_t ip_id,<br>
+ uint8_t is_atomic)<br>
+{<br>
+ uint32_t item_idx;<br>
+<br>
+ item_idx = find_an_empty_item(items, max_item_num);<br>
+ if (item_idx == INVALID_ARRAY_INDEX)<br>
+ return INVALID_ARRAY_INDEX;<br>
+<br>
+ items[item_idx].firstseg = pkt;<br>
+ items[item_idx].lastseg = rte_pktmbuf_lastseg(pkt);<br>
+ items[item_idx].start_time = start_time;<br>
+ items[item_idx].next_pkt_idx = INVALID_ARRAY_INDEX;<br>
+ items[item_idx].sent_seq = sent_seq;<br>
+ items[item_idx].l3.ip_id = ip_id;<br>
+ items[item_idx].nb_merged = 1;<br>
+ items[item_idx].is_atomic = is_atomic;<br>
+ (*item_num) += 1;<br>
+<br>
+ /* if the previous packet exists, chain them together. */<br>
+ if (prev_idx != INVALID_ARRAY_INDEX) {<br>
+ items[item_idx].next_pkt_idx =<br>
+ items[prev_idx].next_pkt_idx;<br>
+ items[prev_idx].next_pkt_idx = item_idx;<br>
+ }<br>
+<br>
+ return item_idx;<br>
+}<br>
+<br>
+inline uint32_t<br>
+delete_tcp_item(struct gro_tcp_item *items, uint32_t item_idx,<br>
+ uint32_t *item_num,<br>
+ uint32_t prev_item_idx)<br>
+{<br>
+ uint32_t next_idx = items[item_idx].next_pkt_idx;<br>
+<br>
+ /* NULL indicates an empty item */<br>
+ items[item_idx].firstseg = NULL;<br>
+ (*item_num) -= 1;<br>
+ if (prev_item_idx != INVALID_ARRAY_INDEX)<br>
+ items[prev_item_idx].next_pkt_idx = next_idx;<br>
+<br>
+ return next_idx;<br>
+}<br>
+<br>
+int32_t<br>
+process_tcp_item(struct rte_mbuf *pkt,<br>
+ struct rte_tcp_hdr *tcp_hdr,<br>
+ int32_t tcp_dl,<br>
+ struct gro_tcp_item *items,<br>
+ uint32_t item_idx,<br>
+ uint32_t *item_num,<br>
+ uint32_t max_item_num,<br>
+ uint16_t ip_id,<br>
+ uint8_t is_atomic,<br>
+ uint64_t start_time)<br>
+{<br>
+ uint32_t cur_idx;<br>
+ uint32_t prev_idx;<br>
+ int cmp;<br>
+ uint32_t sent_seq;<br>
+<br>
+ sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);<br>
+ /*<br>
+ * Check all packets in the flow and try to find a neighbor for<br>
+ * the input packet.<br>
+ */<br>
+ cur_idx = item_idx;<br>
+ prev_idx = cur_idx;<br>
+ do {<br>
+ cmp = check_seq_option(&items[cur_idx], tcp_hdr,<br>
+ sent_seq, ip_id, pkt->l4_len, tcp_dl, 0,<br>
+ is_atomic);<br>
+ if (cmp) {<br>
+ if (merge_two_tcp_packets(&items[cur_idx],<br>
+ pkt, cmp, sent_seq, ip_id, 0))<br>
+ return 1;<br>
+ /*<br>
+ * Fail to merge the two packets, as the packet<br>
+ * length is greater than the max value. Store<br>
+ * the packet into the flow.<br>
+ */<br>
+ if (insert_new_tcp_item(pkt, items, item_num, max_item_num,<br>
+ start_time, cur_idx, sent_seq, ip_id, is_atomic) ==<br>
+ INVALID_ARRAY_INDEX)<br>
+ return -1;<br>
+ return 0;<br>
+ }<br>
+ prev_idx = cur_idx;<br>
+ cur_idx = items[cur_idx].next_pkt_idx;<br>
+ } while (cur_idx != INVALID_ARRAY_INDEX);<br>
+<br>
+ /* Fail to find a neighbor, so store the packet into the flow. */<br>
+ if (insert_new_tcp_item(pkt, items, item_num, max_item_num, start_time, prev_idx, sent_seq,<br>
+ ip_id, is_atomic) == INVALID_ARRAY_INDEX)<br>
+ return -1;<br>
+<br>
+ return 0;<br>
+}<br>
diff --git a/lib/gro/gro_tcp_internal.h b/lib/gro/gro_tcp_internal.h<br>
new file mode 100644<br>
index 0000000000..072b7aea13<br>
--- /dev/null<br>
+++ b/lib/gro/gro_tcp_internal.h<br>
@@ -0,0 +1,212 @@<br>
+#ifndef _GRO_TCP_H_<br>
+#define _GRO_TCP_H_<br>
+<br>
+#define INVALID_ARRAY_INDEX 0xffffffffUL<br>
+<br>
+#include <rte_tcp.h><br>
+<br>
+/*<br>
+ * The max length of a IPv4 packet, which includes the length of the L3<br>
+ * header, the L4 header and the data payload.<br>
+ */<br>
+#define MAX_IP_PKT_LENGTH UINT16_MAX<br>
+<br>
+/* The maximum TCP header length */<br>
+#define MAX_TCP_HLEN 60<br>
+#define INVALID_TCP_HDRLEN(len) \<br>
+ (((len) < sizeof(struct rte_tcp_hdr)) || ((len) > MAX_TCP_HLEN))<br>
+<br>
+struct cmn_tcp_key {<br>
+ struct rte_ether_addr eth_saddr;<br>
+ struct rte_ether_addr eth_daddr;<br>
+ uint32_t recv_ack;<br>
+ uint16_t src_port;<br>
+ uint16_t dst_port;<br>
+};<br>
+<br>
+#define ASSIGN_COMMON_TCP_KEY(k1, k2) \<br>
+ do {\<br>
+ rte_ether_addr_copy(&(k1->eth_saddr), &(k2->eth_saddr)); \<br>
+ rte_ether_addr_copy(&(k1->eth_daddr), &(k2->eth_daddr)); \<br>
+ k2->recv_ack = k1->recv_ack; \<br>
+ k2->src_port = k1->src_port; \<br>
+ k2->dst_port = k1->dst_port; \<br>
+ } while (0)<br>
+<br>
+struct gro_tcp_item {<br>
+ /*<br>
+ * The first MBUF segment of the packet. If the value<br>
+ * is NULL, it means the item is empty.<br>
+ */<br>
+ struct rte_mbuf *firstseg;<br>
+ /* The last MBUF segment of the packet */<br>
+ struct rte_mbuf *lastseg;<br>
+ /*<br>
+ * The time when the first packet is inserted into the table.<br>
+ * This value won't be updated, even if the packet is merged<br>
+ * with other packets.<br>
+ */<br>
+ uint64_t start_time;<br>
+ /*<br>
+ * next_pkt_idx is used to chain the packets that<br>
+ * are in the same flow but can't be merged together<br>
+ * (e.g. caused by packet reordering).<br>
+ */<br>
+ uint32_t next_pkt_idx;<br>
+ /* TCP sequence number of the packet */<br>
+ uint32_t sent_seq;<br>
+ union {<br>
+ /* IPv4 ID of the packet */<br>
+ uint16_t ip_id;<br>
+ /* Unused field for IPv6 */<br>
+ uint16_t unused;<br>
+ } l3;<br>
+ /* the number of merged packets */<br>
+ uint16_t nb_merged;<br>
+ /* Indicate if IPv4 ID can be ignored */<br>
+ uint8_t is_atomic;<br>
+};<br>
+<br>
+uint32_t<br>
+insert_new_tcp_item(struct rte_mbuf *pkt,<br>
+ struct gro_tcp_item *items,<br>
+ uint32_t *item_num,<br>
+ uint32_t max_item_num,<br>
+ uint64_t start_time,<br>
+ uint32_t prev_idx,<br>
+ uint32_t sent_seq,<br>
+ uint16_t ip_id,<br>
+ uint8_t is_atomic);<br>
+<br>
+uint32_t<br>
+delete_tcp_item(struct gro_tcp_item *items, uint32_t item_idx,<br>
+ uint32_t *item_num,<br>
+ uint32_t prev_item_idx);<br>
+<br>
+int32_t<br>
+process_tcp_item(struct rte_mbuf *pkt,<br>
+ struct rte_tcp_hdr *tcp_hdr,<br>
+ int32_t tcp_dl,<br>
+ struct gro_tcp_item *items,<br>
+ uint32_t item_idx,<br>
+ uint32_t *item_num,<br>
+ uint32_t max_item_num,<br>
+ uint16_t ip_id,<br>
+ uint8_t is_atomic,<br>
+ uint64_t start_time);<br>
+<br>
+/*<br>
+ * Merge two TCP packets without updating checksums.<br>
+ * If cmp is larger than 0, append the new packet to the<br>
+ * original packet. Otherwise, pre-pend the new packet to<br>
+ * the original packet.<br>
+ */<br>
+static inline int<br>
+merge_two_tcp_packets(struct gro_tcp_item *item,<br>
+ struct rte_mbuf *pkt,<br>
+ int cmp,<br>
+ uint32_t sent_seq,<br>
+ uint16_t ip_id,<br>
+ uint16_t l2_offset)<br>
+{<br>
+ struct rte_mbuf *pkt_head, *pkt_tail, *lastseg;<br>
+ uint16_t hdr_len, l2_len;<br>
+<br>
+ if (cmp > 0) {<br>
+ pkt_head = item->firstseg;<br>
+ pkt_tail = pkt;<br>
+ } else {<br>
+ pkt_head = pkt;<br>
+ pkt_tail = item->firstseg;<br>
+ }<br>
+<br>
+ /* check if the IPv4 packet length is greater than the max value */<br>
+ hdr_len = l2_offset + pkt_head->l2_len + pkt_head->l3_len +<br>
+ pkt_head->l4_len;<br>
+ l2_len = l2_offset > 0 ? pkt_head->outer_l2_len : pkt_head->l2_len;<br>
+ if (unlikely(pkt_head->pkt_len - l2_len + pkt_tail->pkt_len -<br>
+ hdr_len > MAX_IP_PKT_LENGTH))<br>
+ return 0;<br>
+<br>
+ /* remove the packet header for the tail packet */<br>
+ rte_pktmbuf_adj(pkt_tail, hdr_len);<br>
+<br>
+ /* chain two packets together */<br>
+ if (cmp > 0) {<br>
+ item->lastseg->next = pkt;<br>
+ item->lastseg = rte_pktmbuf_lastseg(pkt);<br>
+ /* update IP ID to the larger value */<br>
+ item->l3.ip_id = ip_id;<br>
+ } else {<br>
+ lastseg = rte_pktmbuf_lastseg(pkt);<br>
+ lastseg->next = item->firstseg;<br>
+ item->firstseg = pkt;<br>
+ /* update sent_seq to the smaller value */<br>
+ item->sent_seq = sent_seq;<br>
+ item->l3.ip_id = ip_id;<br>
+ }<br>
+ item->nb_merged++;<br>
+<br>
+ /* update MBUF metadata for the merged packet */<br>
+ pkt_head->nb_segs += pkt_tail->nb_segs;<br>
+ pkt_head->pkt_len += pkt_tail->pkt_len;<br>
+<br>
+ return 1;<br>
+}<br>
+<br>
+/*<br>
+ * Check if two TCP/IPv4 packets are neighbors.<br>
+ */<br>
+static inline int<br>
+check_seq_option(struct gro_tcp_item *item,<br>
+ struct rte_tcp_hdr *tcph,<br>
+ uint32_t sent_seq,<br>
+ uint16_t ip_id,<br>
+ uint16_t tcp_hl,<br>
+ uint16_t tcp_dl,<br>
+ uint16_t l2_offset,<br>
+ uint8_t is_atomic)<br>
+{<br>
+ struct rte_mbuf *pkt_orig = item->firstseg;<br>
+ char *iph_orig;<br>
+ struct rte_tcp_hdr *tcph_orig;<br>
+ uint16_t len, tcp_hl_orig;<br>
+<br>
+ iph_orig = (char *)(rte_pktmbuf_mtod(pkt_orig, char *) +<br>
+ l2_offset + pkt_orig->l2_len);<br>
+ tcph_orig = (struct rte_tcp_hdr *)(iph_orig + pkt_orig->l3_len);<br>
+ tcp_hl_orig = pkt_orig->l4_len;<br>
+<br>
+ /* Check if TCP option fields equal */<br>
+ len = RTE_MAX(tcp_hl, tcp_hl_orig) - sizeof(struct rte_tcp_hdr);<br>
+ if ((tcp_hl != tcp_hl_orig) || ((len > 0) &&<br>
+ (memcmp(tcph + 1, tcph_orig + 1,<br>
+ len) != 0)))<br>
+ return 0;<br>
+<br>
+ /* Don't merge packets whose DF bits are different */<br>
+ if (unlikely(item->is_atomic ^ is_atomic))<br>
+ return 0;<br>
+<br>
+ /* check if the two packets are neighbors */<br>
+ len = pkt_orig->pkt_len - l2_offset - pkt_orig->l2_len -<br>
+ pkt_orig->l3_len - tcp_hl_orig;<br>
+ if ((sent_seq == item->sent_seq + len) && (is_atomic ||<br>
+ (ip_id == item->l3.ip_id + 1)))<br>
+ /* append the new packet */<br>
+ return 1;<br>
+ else if ((sent_seq + tcp_dl == item->sent_seq) && (is_atomic ||<br>
+ (ip_id + item->nb_merged == item->l3.ip_id)))<br>
+ /* pre-pend the new packet */<br>
+ return -1;<br>
+<br>
+ return 0;<br>
+}<br>
+<br>
+static inline int<br>
+is_common_tcp_key(struct cmn_tcp_key *k1, struct cmn_tcp_key *k2)<br>
+{<br>
+ return (!memcmp(k1, k2, sizeof(struct cmn_tcp_key)));<br>
+}<br>
+<br>
+#endif<br>
diff --git a/lib/gro/gro_vxlan_tcp4.c b/lib/gro/gro_vxlan_tcp4.c<br>
index 3be4deb7c7..81eebf0d2d 100644<br>
--- a/lib/gro/gro_vxlan_tcp4.c<br>
+++ b/lib/gro/gro_vxlan_tcp4.c<br>
@@ -7,6 +7,7 @@<br>
#include <rte_ethdev.h><br>
#include <rte_udp.h><br>
<br>
+#include "gro_tcp_internal.h"<br>
#include "gro_vxlan_tcp4.h"<br>
<br>
void *<br>
@@ -116,7 +117,7 @@ insert_new_item(struct gro_vxlan_tcp4_tbl *tbl,<br>
tbl->items[item_idx].inner_item.start_time = start_time;<br>
tbl->items[item_idx].inner_item.next_pkt_idx = INVALID_ARRAY_INDEX;<br>
tbl->items[item_idx].inner_item.sent_seq = sent_seq;<br>
- tbl->items[item_idx].inner_item.ip_id = ip_id;<br>
+ tbl->items[item_idx].inner_item.l3.ip_id = ip_id;<br>
tbl->items[item_idx].inner_item.nb_merged = 1;<br>
tbl->items[item_idx].inner_item.is_atomic = is_atomic;<br>
tbl->items[item_idx].outer_ip_id = outer_ip_id;<br>
@@ -163,15 +164,9 @@ insert_new_flow(struct gro_vxlan_tcp4_tbl *tbl,<br>
<br>
dst = &(tbl->flows[flow_idx].key);<br>
<br>
- rte_ether_addr_copy(&(src->inner_key.eth_saddr),<br>
- &(dst->inner_key.eth_saddr));<br>
- rte_ether_addr_copy(&(src->inner_key.eth_daddr),<br>
- &(dst->inner_key.eth_daddr));<br>
+ ASSIGN_COMMON_TCP_KEY((&(src->inner_key.cmn_key)), (&(dst->inner_key.cmn_key)));<br>
dst->inner_key.ip_src_addr = src->inner_key.ip_src_addr;<br>
dst->inner_key.ip_dst_addr = src->inner_key.ip_dst_addr;<br>
- dst->inner_key.recv_ack = src->inner_key.recv_ack;<br>
- dst->inner_key.src_port = src->inner_key.src_port;<br>
- dst->inner_key.dst_port = src->inner_key.dst_port;<br>
<br>
dst->vxlan_hdr.vx_flags = src->vxlan_hdr.vx_flags;<br>
dst->vxlan_hdr.vx_vni = src->vxlan_hdr.vx_vni;<br>
@@ -248,7 +243,7 @@ merge_two_vxlan_tcp4_packets(struct gro_vxlan_tcp4_item *item,<br>
uint16_t outer_ip_id,<br>
uint16_t ip_id)<br>
{<br>
- if (merge_two_tcp4_packets(&item->inner_item, pkt, cmp, sent_seq,<br>
+ if (merge_two_tcp_packets(&item->inner_item, pkt, cmp, sent_seq,<br>
ip_id, pkt->outer_l2_len +<br>
pkt->outer_l3_len)) {<br>
/* Update the outer IPv4 ID to the large value. */<br>
@@ -357,13 +352,13 @@ gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt,<br>
<br>
sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);<br>
<br>
- rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.inner_key.eth_saddr));<br>
- rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.inner_key.eth_daddr));<br>
+ rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.inner_key.cmn_key.eth_saddr));<br>
+ rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.inner_key.cmn_key.eth_daddr));<br>
key.inner_key.ip_src_addr = ipv4_hdr->src_addr;<br>
key.inner_key.ip_dst_addr = ipv4_hdr->dst_addr;<br>
- key.inner_key.recv_ack = tcp_hdr->recv_ack;<br>
- key.inner_key.src_port = tcp_hdr->src_port;<br>
- key.inner_key.dst_port = tcp_hdr->dst_port;<br>
+ key.inner_key.cmn_key.recv_ack = tcp_hdr->recv_ack;<br>
+ key.inner_key.cmn_key.src_port = tcp_hdr->src_port;<br>
+ key.inner_key.cmn_key.dst_port = tcp_hdr->dst_port;<br>
<br>
key.vxlan_hdr.vx_flags = vxlan_hdr->vx_flags;<br>
key.vxlan_hdr.vx_vni = vxlan_hdr->vx_vni;<br>
diff --git a/lib/gro/gro_vxlan_tcp4.h b/lib/gro/gro_vxlan_tcp4.h<br>
index 7832942a68..82eaaee11e 100644<br>
--- a/lib/gro/gro_vxlan_tcp4.h<br>
+++ b/lib/gro/gro_vxlan_tcp4.h<br>
@@ -5,6 +5,7 @@<br>
#ifndef _GRO_VXLAN_TCP4_H_<br>
#define _GRO_VXLAN_TCP4_H_<br>
<br>
+#include "gro_tcp_internal.h"<br>
#include "gro_tcp4.h"<br>
<br>
#define GRO_VXLAN_TCP4_TBL_MAX_ITEM_NUM (1024UL * 1024UL)<br>
@@ -36,7 +37,7 @@ struct gro_vxlan_tcp4_flow {<br>
};<br>
<br>
struct gro_vxlan_tcp4_item {<br>
- struct gro_tcp4_item inner_item;<br>
+ struct gro_tcp_item inner_item;<br>
/* IPv4 ID in the outer IPv4 header */<br>
uint16_t outer_ip_id;<br>
/* Indicate if outer IPv4 ID can be ignored */<br>
diff --git a/lib/gro/meson.build b/lib/gro/meson.build<br>
index e4fa2958bd..1640317890 100644<br>
--- a/lib/gro/meson.build<br>
+++ b/lib/gro/meson.build<br>
@@ -3,7 +3,9 @@<br>
<br>
sources = files(<br>
'rte_gro.c',<br>
+ 'gro_tcp_internal.c',<br>
'gro_tcp4.c',<br>
+ 'gro_tcp6.c',<br>
'gro_udp4.c',<br>
'gro_vxlan_tcp4.c',<br>
'gro_vxlan_udp4.c',<br>
diff --git a/lib/gro/rte_gro.c b/lib/gro/rte_gro.c<br>
index e35399fd42..d824eebd93 100644<br>
--- a/lib/gro/rte_gro.c<br>
+++ b/lib/gro/rte_gro.c<br>
@@ -8,6 +8,7 @@<br>
<br>
#include "rte_gro.h"<br>
#include "gro_tcp4.h"<br>
+#include "gro_tcp6.h"<br>
#include "gro_udp4.h"<br>
#include "gro_vxlan_tcp4.h"<br>
#include "gro_vxlan_udp4.h"<br>
@@ -20,14 +21,16 @@ typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);<br>
<br>
static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {<br>
gro_tcp4_tbl_create, gro_vxlan_tcp4_tbl_create,<br>
- gro_udp4_tbl_create, gro_vxlan_udp4_tbl_create, NULL};<br>
+ gro_udp4_tbl_create, gro_vxlan_udp4_tbl_create, gro_tcp6_tbl_create, NULL};<br>
static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {<br>
gro_tcp4_tbl_destroy, gro_vxlan_tcp4_tbl_destroy,<br>
gro_udp4_tbl_destroy, gro_vxlan_udp4_tbl_destroy,<br>
+ gro_tcp6_tbl_destroy,<br>
NULL};<br>
static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {<br>
gro_tcp4_tbl_pkt_count, gro_vxlan_tcp4_tbl_pkt_count,<br>
gro_udp4_tbl_pkt_count, gro_vxlan_udp4_tbl_pkt_count,<br>
+ gro_tcp6_tbl_pkt_count,<br>
NULL};<br>
<br>
#define IS_IPV4_TCP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \<br>
@@ -35,6 +38,12 @@ static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {<br>
((ptype & RTE_PTYPE_L4_FRAG) != RTE_PTYPE_L4_FRAG) && \<br>
(RTE_ETH_IS_TUNNEL_PKT(ptype) == 0))<br>
<br>
+/* GRO with extension headers is not supported */<br>
+#define IS_IPV6_TCP_PKT(ptype) (RTE_ETH_IS_IPV6_HDR(ptype) && \<br>
+ ((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP) && \<br>
+ ((ptype & RTE_PTYPE_L4_FRAG) != RTE_PTYPE_L4_FRAG) && \<br>
+ (RTE_ETH_IS_TUNNEL_PKT(ptype) == 0))<br>
+<br>
#define IS_IPV4_UDP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \<br>
((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \<br>
(RTE_ETH_IS_TUNNEL_PKT(ptype) == 0))<br>
@@ -147,7 +156,11 @@ rte_gro_reassemble_burst(struct rte_mbuf **pkts,<br>
/* allocate a reassembly table for TCP/IPv4 GRO */<br>
struct gro_tcp4_tbl tcp_tbl;<br>
struct gro_tcp4_flow tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];<br>
- struct gro_tcp4_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };<br>
+ struct gro_tcp_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };<br>
+<br>
+ struct gro_tcp6_tbl tcp6_tbl;<br>
+ struct gro_tcp6_flow tcp6_flows[RTE_GRO_MAX_BURST_ITEM_NUM];<br>
+ struct gro_tcp_item tcp6_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };<br>
<br>
/* allocate a reassembly table for UDP/IPv4 GRO */<br>
struct gro_udp4_tbl udp_tbl;<br>
@@ -171,10 +184,10 @@ rte_gro_reassemble_burst(struct rte_mbuf **pkts,<br>
int32_t ret;<br>
uint16_t i, unprocess_num = 0, nb_after_gro = nb_pkts;<br>
uint8_t do_tcp4_gro = 0, do_vxlan_tcp_gro = 0, do_udp4_gro = 0,<br>
- do_vxlan_udp_gro = 0;<br>
+ do_vxlan_udp_gro = 0, do_tcp6_gro = 0;<br>
<br>
if (unlikely((param->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |<br>
- RTE_GRO_TCP_IPV4 |<br>
+ RTE_GRO_TCP_IPV4 | RTE_GRO_TCP_IPV6 |<br>
RTE_GRO_IPV4_VXLAN_UDP_IPV4 |<br>
RTE_GRO_UDP_IPV4)) == 0))<br>
return nb_pkts;<br>
@@ -236,6 +249,18 @@ rte_gro_reassemble_burst(struct rte_mbuf **pkts,<br>
do_udp4_gro = 1;<br>
}<br>
<br>
+ if (param->gro_types & RTE_GRO_TCP_IPV6) {<br>
+ for (i = 0; i < item_num; i++)<br>
+ tcp6_flows[i].start_index = INVALID_ARRAY_INDEX;<br>
+<br>
+ tcp6_tbl.flows = tcp6_flows;<br>
+ tcp6_tbl.items = tcp6_items;<br>
+ tcp6_tbl.flow_num = 0;<br>
+ tcp6_tbl.item_num = 0;<br>
+ tcp6_tbl.max_flow_num = item_num;<br>
+ tcp6_tbl.max_item_num = item_num;<br>
+ do_tcp6_gro = 1;<br>
+ }<br>
<br>
for (i = 0; i < nb_pkts; i++) {<br>
/*<br>
@@ -276,6 +301,14 @@ rte_gro_reassemble_burst(struct rte_mbuf **pkts,<br>
nb_after_gro--;<br>
else if (ret < 0)<br>
unprocess_pkts[unprocess_num++] = pkts[i];<br>
+ } else if (IS_IPV6_TCP_PKT(pkts[i]->packet_type) &&<br>
+ do_tcp6_gro) {<br>
+ ret = gro_tcp6_reassemble(pkts[i], &tcp6_tbl, 0);<br>
+ if (ret > 0)<br>
+ /* merge successfully */<br>
+ nb_after_gro--;<br>
+ else if (ret < 0)<br>
+ unprocess_pkts[unprocess_num++] = pkts[i];<br>
} else<br>
unprocess_pkts[unprocess_num++] = pkts[i];<br>
}<br>
@@ -283,9 +316,17 @@ rte_gro_reassemble_burst(struct rte_mbuf **pkts,<br>
if ((nb_after_gro < nb_pkts)<br>
|| (unprocess_num < nb_pkts)) {<br>
i = 0;<br>
+ /* Copy unprocessed packets */<br>
+ if (unprocess_num > 0) {<br>
+ memcpy(&pkts[i], unprocess_pkts,<br>
+ sizeof(struct rte_mbuf *) *<br>
+ unprocess_num);<br>
+ i = unprocess_num;<br>
+ }<br>
+<br>
/* Flush all packets from the tables */<br>
if (do_vxlan_tcp_gro) {<br>
- i = gro_vxlan_tcp4_tbl_timeout_flush(&vxlan_tcp_tbl,<br>
+ i += gro_vxlan_tcp4_tbl_timeout_flush(&vxlan_tcp_tbl,<br>
0, pkts, nb_pkts);<br>
}<br>
<br>
@@ -304,13 +345,11 @@ rte_gro_reassemble_burst(struct rte_mbuf **pkts,<br>
i += gro_udp4_tbl_timeout_flush(&udp_tbl, 0,<br>
&pkts[i], nb_pkts - i);<br>
}<br>
- /* Copy unprocessed packets */<br>
- if (unprocess_num > 0) {<br>
- memcpy(&pkts[i], unprocess_pkts,<br>
- sizeof(struct rte_mbuf *) *<br>
- unprocess_num);<br>
+<br>
+ if (do_tcp6_gro) {<br>
+ i += gro_tcp6_tbl_timeout_flush(&tcp6_tbl, 0,<br>
+ &pkts[i], nb_pkts - i);<br>
}<br>
- nb_after_gro = i + unprocess_num;<br>
}<br>
<br>
return nb_after_gro;<br>
@@ -323,13 +362,13 @@ rte_gro_reassemble(struct rte_mbuf **pkts,<br>
{<br>
struct rte_mbuf *unprocess_pkts[nb_pkts];<br>
struct gro_ctx *gro_ctx = ctx;<br>
- void *tcp_tbl, *udp_tbl, *vxlan_tcp_tbl, *vxlan_udp_tbl;<br>
+ void *tcp_tbl, *udp_tbl, *vxlan_tcp_tbl, *vxlan_udp_tbl, *tcp6_tbl;<br>
uint64_t current_time;<br>
uint16_t i, unprocess_num = 0;<br>
- uint8_t do_tcp4_gro, do_vxlan_tcp_gro, do_udp4_gro, do_vxlan_udp_gro;<br>
+ uint8_t do_tcp4_gro, do_vxlan_tcp_gro, do_udp4_gro, do_vxlan_udp_gro, do_tcp6_gro;<br>
<br>
if (unlikely((gro_ctx->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |<br>
- RTE_GRO_TCP_IPV4 |<br>
+ RTE_GRO_TCP_IPV4 | RTE_GRO_TCP_IPV6 |<br>
RTE_GRO_IPV4_VXLAN_UDP_IPV4 |<br>
RTE_GRO_UDP_IPV4)) == 0))<br>
return nb_pkts;<br>
@@ -338,6 +377,7 @@ rte_gro_reassemble(struct rte_mbuf **pkts,<br>
vxlan_tcp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX];<br>
udp_tbl = gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX];<br>
vxlan_udp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX];<br>
+ tcp6_tbl = gro_ctx->tbls[RTE_GRO_TCP_IPV6_INDEX];<br>
<br>
do_tcp4_gro = (gro_ctx->gro_types & RTE_GRO_TCP_IPV4) ==<br>
RTE_GRO_TCP_IPV4;<br>
@@ -347,6 +387,7 @@ rte_gro_reassemble(struct rte_mbuf **pkts,<br>
RTE_GRO_UDP_IPV4;<br>
do_vxlan_udp_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) ==<br>
RTE_GRO_IPV4_VXLAN_UDP_IPV4;<br>
+ do_tcp6_gro = (gro_ctx->gro_types & RTE_GRO_TCP_IPV6) == RTE_GRO_TCP_IPV6;<br>
<br>
current_time = rte_rdtsc();<br>
<br>
@@ -371,6 +412,11 @@ rte_gro_reassemble(struct rte_mbuf **pkts,<br>
if (gro_udp4_reassemble(pkts[i], udp_tbl,<br>
current_time) < 0)<br>
unprocess_pkts[unprocess_num++] = pkts[i];<br>
+ } else if (IS_IPV6_TCP_PKT(pkts[i]->packet_type) &&<br>
+ do_tcp6_gro) {<br>
+ if (gro_tcp6_reassemble(pkts[i], tcp6_tbl,<br>
+ current_time) < 0)<br>
+ unprocess_pkts[unprocess_num++] = pkts[i];<br>
} else<br>
unprocess_pkts[unprocess_num++] = pkts[i];<br>
}<br>
@@ -426,6 +472,15 @@ rte_gro_timeout_flush(void *ctx,<br>
gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX],<br>
flush_timestamp,<br>
&out[num], left_nb_out);<br>
+ left_nb_out = max_nb_out - num;<br>
+ }<br>
+<br>
+ if ((gro_types & RTE_GRO_TCP_IPV6) && left_nb_out > 0) {<br>
+ num += gro_tcp6_tbl_timeout_flush(<br>
+ gro_ctx->tbls[RTE_GRO_TCP_IPV6_INDEX],<br>
+ flush_timestamp,<br>
+ &out[num], left_nb_out);<br>
+<br>
}<br>
<br>
return num;<br>
diff --git a/lib/gro/rte_gro.h b/lib/gro/rte_gro.h<br>
index 9f9ed4935a..c83dfd9ad1 100644<br>
--- a/lib/gro/rte_gro.h<br>
+++ b/lib/gro/rte_gro.h<br>
@@ -38,6 +38,9 @@ extern "C" {<br>
#define RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX 3<br>
#define RTE_GRO_IPV4_VXLAN_UDP_IPV4 (1ULL << RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX)<br>
/**< VxLAN UDP/IPv4 GRO flag. */<br>
+#define RTE_GRO_TCP_IPV6_INDEX 4<br>
+#define RTE_GRO_TCP_IPV6 (1ULL << RTE_GRO_TCP_IPV6_INDEX)<br>
+/**< TCP/IPv6 GRO flag. */<br>
<br>
/**<br>
* Structure used to create GRO context objects or used to pass<br>
-- <br>
2.25.1

Hi Jiayu, I have addressed the review comments and tested GRO for both
IPv4 and IPv6. I will update generic_receive_offload_lib.rst and the
release notes in a subsequent change. It would be great if you could
review the current changes by then.
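
For reference, a sketch (not part of this patch) of driving the new
TCP/IPv6 table through the heavyweight (context) mode; the burst size,
flow limits and the ~100 us flush interval are illustrative assumptions
on top of the existing rte_gro context API:

  #include <rte_cycles.h>
  #include <rte_ethdev.h>
  #include <rte_gro.h>
  #include <rte_lcore.h>

  static void
  gro_ctx_example(uint16_t port_id)
  {
          struct rte_gro_param param = {
                  .gro_types = RTE_GRO_TCP_IPV4 | RTE_GRO_TCP_IPV6,
                  .max_flow_num = 1024,
                  .max_item_per_flow = 32,
                  .socket_id = rte_socket_id(),
          };
          struct rte_mbuf *pkts[32], *flushed[32];
          void *gro_ctx = rte_gro_ctx_create(&param);
          uint16_t nb_rx, nb_flushed;

          if (gro_ctx == NULL)
                  return;

          nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
          /* Packets not taken by GRO remain in pkts[0..nb_rx) for normal Tx. */
          nb_rx = rte_gro_reassemble(pkts, nb_rx, gro_ctx);

          /* Flush TCP/IPv6 flows that have been held for roughly 100 us. */
          nb_flushed = rte_gro_timeout_flush(gro_ctx,
                          rte_get_tsc_hz() / 10000, RTE_GRO_TCP_IPV6,
                          flushed, 32);
          (void)nb_rx;
          (void)nb_flushed;

          rte_gro_ctx_destroy(gro_ctx);
  }

Merged and stored packets are handed back by rte_gro_timeout_flush(), so
in a real application the flushed[] array would also be transmitted.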