[PATCH v2 8/8] common/cnxk: enable CPT CQ for inline IPsec inbound

Rahul Bhansali rbhansali at marvell.com
Thu Feb 26 14:17:15 CET 2026


From: Rakesh Kudurumalla <rkudurumalla at marvell.com>

Added support for CPT CQ configuration for inline inbound IPsec.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla at marvell.com>
---
Changes in v2: fixed resource cleanup in the configuration-failure path.

 drivers/common/cnxk/roc_nix_inl.c         |  8 +-
 drivers/common/cnxk/roc_nix_inl.h         |  3 +-
 drivers/common/cnxk/roc_nix_inl_dev.c     | 90 ++++++++++++++++++++---
 drivers/common/cnxk/roc_nix_inl_dev_irq.c | 19 +++--
 drivers/net/cnxk/cn20k_ethdev_sec.c       | 54 +++++++++-----
 5 files changed, 136 insertions(+), 38 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 911c349604..26be1adac9 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -486,6 +486,7 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
 	} else {
 		struct nix_rx_inl_lf_cfg_req *lf_cfg;
 		uint64_t def_cptq = 0;
+		uint64_t cpt_cq_ena = 0;

 		/* Setup device specific inb SA table */
 		lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
@@ -508,9 +509,10 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
 		if (res_addr_offset)
 			res_addr_offset |= (1UL << 56);

+		cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
 		lf_cfg->enable = 1;
 		lf_cfg->profile_id = profile_id; /* IPsec profile is 0th one */
-		lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id];
+		lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id] | cpt_cq_ena;
 		lf_cfg->rx_inline_cfg0 =
 			((def_cptq << 57) | res_addr_offset | ((uint64_t)SSO_TT_ORDERED << 44) |
 			 (sa_pow2_sz << 16) | lenm1_max);
@@ -588,6 +590,7 @@ nix_inl_reass_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 	uint64_t max_sa = 1, sa_pow2_sz;
 	uint64_t sa_idx_w, lenm1_max;
 	uint64_t res_addr_offset = 0;
+	uint64_t cpt_cq_ena = 0;
 	uint64_t def_cptq = 0;
 	size_t inb_sa_sz = 1;
 	uint8_t profile_id;
@@ -637,9 +640,10 @@ nix_inl_reass_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 			res_addr_offset |= (1UL << 56);
 	}

+	cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
 	lf_cfg->enable = 1;
 	lf_cfg->profile_id = profile_id;
-	lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id];
+	lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id] | cpt_cq_ena;
 	lf_cfg->rx_inline_cfg0 =
 		((def_cptq << 57) | res_addr_offset | ((uint64_t)SSO_TT_ORDERED << 44) |
 		 (sa_pow2_sz << 16) | lenm1_max);
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 596f12d1c7..d1a08a4495 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -44,7 +44,8 @@
 #define ROC_NIX_INL_RXC_QUE_BLK_THR 0x40UL

 enum nix_inl_event_type {
-	NIX_INL_CPT_CQ = 1,
+	NIX_INL_INB_CPT_CQ = 1,
+	NIX_INL_OUTB_CPT_CQ,
 	NIX_INL_SSO,
 	NIX_INL_SOFT_EXPIRY_THRD,
 };
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 35528efa46..246dd4612f 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -382,6 +382,7 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
 		struct nix_rx_inl_lf_cfg_req *lf_cfg;
 		uint64_t res_addr_offset;
 		uint64_t def_cptq;
+		uint64_t cpt_cq_ena;

 		lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
 		if (lf_cfg == NULL) {
@@ -401,7 +402,9 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
 		lf_cfg->profile_id = inl_dev->ipsec_prof_id;
 		if (ena) {
 			lf_cfg->enable = 1;
-			lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id];
+			cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
+			lf_cfg->rx_inline_sa_base =
+				(uintptr_t)inl_dev->inb_sa_base[profile_id] | (cpt_cq_ena);
 			lf_cfg->rx_inline_cfg0 =
 				((def_cptq << 57) | res_addr_offset |
 				 ((uint64_t)SSO_TT_ORDERED << 44) | (sa_pow2_sz << 16) | lenm1_max);
@@ -482,13 +485,33 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
 lf_fini:
 	for (i = 0; i < inl_dev->nb_cptlf; i++) {
 		struct roc_cpt_lf *lf = &inl_dev->cpt_lf[i];
-		cpt_lf_fini(lf, lf->cpt_cq_ena);
+		cpt_lf_fini(lf, false);
 	}
 lf_free:
 	rc |= cpt_lfs_free(dev);
 	return rc;
 }

+static int
+nix_inl_cpt_cq_inb_release(struct nix_inl_dev *inl_dev)
+{
+	int i;
+
+	if (!inl_dev || !inl_dev->cpt_cq_ena)
+		return 0;
+	for (i = 0; i < inl_dev->nb_inb_cptlfs; i++) {
+		uint8_t slot_id = inl_dev->inb_cpt_lf_id + i;
+		struct roc_cpt_lf *lf = &inl_dev->cpt_lf[slot_id];
+
+		if (lf->cpt_cq_ena) {
+			cpt_lf_cq_fini(lf);
+			cpt_lf_unregister_irqs(lf, cpt_lf_misc_irq, nix_inl_cpt_done_irq);
+		}
+	}
+
+	return 0;
+}
+
 static int
 nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
 {
@@ -625,6 +648,7 @@ nix_inl_nix_profile_config(struct nix_inl_dev *inl_dev, uint8_t profile_id)
 	uint64_t max_sa, sa_w, sa_pow2_sz, lenm1_max;
 	struct nix_rx_inl_lf_cfg_req *lf_cfg;
 	uint64_t res_addr_offset;
+	uint64_t cpt_cq_ena;
 	uint64_t def_cptq;
 	size_t inb_sa_sz;
 	void *sa;
@@ -665,7 +689,8 @@ nix_inl_nix_profile_config(struct nix_inl_dev *inl_dev, uint8_t profile_id)

 	lf_cfg->enable = 1;
 	lf_cfg->profile_id = profile_id;
-	lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id];
+	cpt_cq_ena = (uint64_t)inl_dev->cpt_cq_ena << 63;
+	lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id] | cpt_cq_ena;
 	lf_cfg->rx_inline_cfg0 =
 		((def_cptq << 57) | res_addr_offset | ((uint64_t)SSO_TT_ORDERED << 44) |
 		 (sa_pow2_sz << 16) | lenm1_max);
@@ -716,6 +741,42 @@ nix_inl_nix_profile_release(struct nix_inl_dev *inl_dev, uint8_t profile_id)
 	return rc;
 }

+static int
+nix_inl_cpt_cq_inb_setup(struct nix_inl_dev *inl_dev)
+{
+	int i, rc;
+
+	if (!inl_dev->cpt_cq_ena)
+		return 0;
+
+	for (i = 0; i < inl_dev->nb_inb_cptlfs; i++) {
+		uint8_t slot_id = inl_dev->inb_cpt_lf_id + i;
+		struct roc_cpt_lf *lf = &inl_dev->cpt_lf[slot_id];
+
+		lf->dq_ack_ena = true;
+		lf->cpt_cq_ena = true;
+		lf->cq_entry_size = 0;
+		lf->cq_all = 0;
+		lf->cq_size = lf->nb_desc;
+		lf->dev = &inl_dev->dev;
+		lf->cq_head = 1;
+
+		rc = cpt_lf_cq_init(lf);
+		if (rc)
+			return rc;
+
+		rc = cpt_lf_register_irqs(lf, cpt_lf_misc_irq, nix_inl_cpt_done_irq);
+		if (rc) {
+			cpt_lf_cq_fini(lf);
+			return rc;
+		}
+
+		roc_cpt_cq_enable(lf);
+	}
+
+	return 0;
+}
+
 static int
 nix_inl_nix_reass_setup(struct nix_inl_dev *inl_dev)
 {
@@ -1451,11 +1512,17 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	if (rc)
 		goto sso_release;

+	if (roc_feature_nix_has_cpt_cq_support()) {
+		rc = nix_inl_cpt_cq_inb_setup(inl_dev);
+		if (rc)
+			goto cpt_release;
+	}
+
 	/* Setup device specific inb SA table */
 	rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
 	if (rc) {
 		plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
-		goto cpt_release;
+		goto cpt_cq_inb_release;
 	}

 	/* Setup Reassembly */
@@ -1464,20 +1531,20 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)

 		rc = nix_inl_nix_reass_setup(inl_dev);
 		if (rc)
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 	}

 	if (inl_dev->set_soft_exp_poll) {
 		rc = nix_inl_outb_poll_thread_setup(inl_dev);
 		if (rc)
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 	}

 	/* Perform selftest if asked for */
 	if (inl_dev->selftest) {
 		rc = nix_inl_selftest();
 		if (rc)
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 	}
 	inl_dev->max_ipsec_rules = roc_inl_dev->max_ipsec_rules;

@@ -1486,14 +1553,14 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 			plt_zmalloc(sizeof(int) * inl_dev->max_ipsec_rules, PLT_CACHE_LINE_SIZE);
 		if (inl_dev->ipsec_index == NULL) {
 			rc = NPC_ERR_NO_MEM;
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 		}
 		rc = npc_mcam_alloc_entries(inl_dev->dev.mbox, inl_dev->max_ipsec_rules,
 					    inl_dev->ipsec_index, inl_dev->max_ipsec_rules,
 					    NPC_MCAM_HIGHER_PRIO, &resp_count, 1);
 		if (rc) {
 			plt_free(inl_dev->ipsec_index);
-			goto cpt_release;
+			goto cpt_cq_inb_release;
 		}

 		start_index = inl_dev->ipsec_index[0];
@@ -1507,6 +1574,8 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	idev->nix_inl_dev = inl_dev;

 	return 0;
+cpt_cq_inb_release:
+	rc |= nix_inl_cpt_cq_inb_release(inl_dev);
 cpt_release:
 	rc |= nix_inl_cpt_release(inl_dev);
 sso_release:
@@ -1558,8 +1627,9 @@ roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
 	/* Flush Inbound CTX cache entries */
 	nix_inl_cpt_ctx_cache_sync(inl_dev);

+	rc = nix_inl_cpt_cq_inb_release(inl_dev);
 	/* Release CPT */
-	rc = nix_inl_cpt_release(inl_dev);
+	rc |= nix_inl_cpt_release(inl_dev);

 	/* Release SSO */
 	rc |= nix_inl_sso_release(inl_dev);
diff --git a/drivers/common/cnxk/roc_nix_inl_dev_irq.c b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
index 89155a1f7d..30986e780a 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev_irq.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
@@ -49,10 +49,11 @@ static void
 nix_inl_cpt_cq_cb(struct roc_cpt_lf *lf)
 {
 	struct roc_nix *roc_nix = (struct roc_nix *)lf->dev->roc_nix;
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct idev_cfg *idev = idev_get_cfg();
 	uint32_t port_id = roc_nix->port_id;
 	struct nix_inl_dev *inl_dev = NULL;
-	struct roc_ow_ipsec_outb_sa *sa;
+	enum nix_inl_event_type cq_type;
 	union cpt_lf_cq_base cq_base;
 	union cpt_lf_cq_ptr cq_ptr;
 	struct cpt_cq_s *cq_s;
@@ -60,6 +61,7 @@ nix_inl_cpt_cq_cb(struct roc_cpt_lf *lf)
 	uint32_t count, head;
 	uint32_t nq_ptr;
 	uint64_t i;
+	void *sa;

 	if (idev)
 		inl_dev = idev->nix_inl_dev;
@@ -75,23 +77,30 @@ nix_inl_cpt_cq_cb(struct roc_cpt_lf *lf)
 	count = cq_ptr.s.count;
 	nq_ptr = cq_ptr.s.nq_ptr;

+	if (lf->dev == &inl_dev->dev)
+		cq_type = NIX_INL_INB_CPT_CQ;
+	else if (lf->dev == &nix->dev)
+		cq_type = NIX_INL_OUTB_CPT_CQ;
+	else
+		return;
+
 	for (i = 0; i < count; i++) {
 		cq_s = (struct cpt_cq_s *)(uintptr_t)(((cq_base.s.addr << 7)) + (head << 5));

 		if (cq_s->w0.s.uc_compcode && cq_s->w0.s.compcode) {
 			switch (cq_s->w2.s.fmt & fmt_msk) {
 			case WQE_PTR_CPTR:
-				sa = (struct roc_ow_ipsec_outb_sa *)cq_s->w1.esn;
+				sa = (void *)cq_s->w1.esn;
 				break;
 			case CPTR_WQE_PTR:
-				sa = (struct roc_ow_ipsec_outb_sa *)cq_s->w3.comp_ptr;
+				sa = (void *)cq_s->w3.comp_ptr;
 				break;
 			default:
 				plt_err("Invalid event Received ");
 				goto done;
 			}
 			uint64_t tmp = ~(uint32_t)0x0;
-			inl_dev->work_cb(&tmp, sa, NIX_INL_CPT_CQ, (void *)cq_s, port_id);
+			inl_dev->work_cb(&tmp, sa, cq_type, (void *)cq_s, port_id);
 		}
 done:
 		head = (head + 1) % lf->cq_size;
@@ -165,7 +174,7 @@ nix_inl_sso_hws_irq(void *param)
 void
 nix_inl_cpt_done_irq(void *param)
 {
-	struct roc_cpt_lf *lf = param;
+	struct roc_cpt_lf *lf = (struct roc_cpt_lf *)param;
 	uint64_t done_wait;
 	uint64_t intr;

diff --git a/drivers/net/cnxk/cn20k_ethdev_sec.c b/drivers/net/cnxk/cn20k_ethdev_sec.c
index eab06be68f..5d0debb81d 100644
--- a/drivers/net/cnxk/cn20k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn20k_ethdev_sec.c
@@ -439,18 +439,31 @@ cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
 }

 static void
-cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, struct roc_ow_ipsec_outb_sa *sa,
+cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, void *sa, enum nix_inl_event_type type,
 			 uint16_t uc_compcode, uint16_t compcode, struct rte_mbuf *mbuf)
 {
 	struct rte_eth_event_ipsec_desc desc;
 	struct cn20k_sec_sess_priv sess_priv;
-	struct cn20k_outb_priv_data *priv;
+	struct cn20k_outb_priv_data *outb_priv;
+	struct cn20k_inb_priv_data *inb_priv;
 	static uint64_t warn_cnt;
+	uint64_t life_unit;

 	memset(&desc, 0, sizeof(desc));
-	priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
 	sess_priv.u64 = 0;

+	if (type == NIX_INL_INB_CPT_CQ) {
+		struct roc_ow_ipsec_inb_sa *inb_sa = (struct roc_ow_ipsec_inb_sa *)sa;
+		inb_priv = roc_nix_inl_ow_ipsec_inb_sa_sw_rsvd(sa);
+		desc.metadata = (uint64_t)inb_priv->userdata;
+		life_unit = inb_sa->w2.s.life_unit;
+	} else {
+		struct roc_ow_ipsec_outb_sa *outb_sa = (struct roc_ow_ipsec_outb_sa *)sa;
+		outb_priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
+		desc.metadata = (uint64_t)outb_priv->userdata;
+		life_unit = outb_sa->w2.s.life_unit;
+	}
+
 	if (mbuf)
 		sess_priv.u64 = *rte_security_dynfield(mbuf);

@@ -459,14 +472,14 @@ cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, struct roc_ow_ipsec_outb_s
 		desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
 		break;
 	case ROC_IE_OW_UCC_ERR_SA_EXPIRED:
-		if (sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
+		if (life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
 		else
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
 		break;
 	case ROC_IE_OW_UCC_SUCCESS_SA_SOFTEXP_FIRST:
 	case ROC_IE_OW_UCC_SUCCESS_SA_SOFTEXP_AGAIN:
-		if (sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
+		if (life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
 		else
 			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
@@ -490,7 +503,6 @@ cn20k_eth_sec_post_event(struct rte_eth_dev *eth_dev, struct roc_ow_ipsec_outb_s
 		break;
 	}

-	desc.metadata = (uint64_t)priv->userdata;
 	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
 }

@@ -498,12 +510,15 @@ static const char *
 get_inl_event_type(enum nix_inl_event_type type)
 {
 	switch (type) {
-	case NIX_INL_CPT_CQ:
-		return "NIX_INL_CPT_CQ";
+	case NIX_INL_OUTB_CPT_CQ:
+		return "NIX_INL_OUTB_CPT_CQ";
+	case NIX_INL_INB_CPT_CQ:
+		return "NIX_INL_INB_CPT_CQ";
 	case NIX_INL_SSO:
 		return "NIX_INL_SSO";
 	case NIX_INL_SOFT_EXPIRY_THRD:
 		return "NIX_INL_SOFT_EXPIRY_THRD";
+
 	default:
 		return "Unknown event";
 	}
@@ -515,8 +530,8 @@ cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, enum nix_inl_event_type type
 {
 	struct rte_eth_event_ipsec_desc desc;
 	struct cn20k_sec_sess_priv sess_priv;
-	struct cn20k_outb_priv_data *priv;
-	struct roc_ow_ipsec_outb_sa *sa;
+	struct cn20k_outb_priv_data *outb_priv;
+	struct roc_ow_ipsec_outb_sa *outb_sa;
 	struct cpt_cn20k_res_s *res;
 	struct rte_eth_dev *eth_dev;
 	struct cnxk_eth_dev *dev;
@@ -546,20 +561,19 @@ cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, enum nix_inl_event_type type
 		/* Fall through */
 	default:
 		if (type) {
-			sa = (struct roc_ow_ipsec_outb_sa *)args;
-			priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
-			desc.metadata = (uint64_t)priv->userdata;
 			eth_dev = &rte_eth_devices[port_id];
-			if (type == NIX_INL_CPT_CQ) {
-				struct cpt_cq_s *cqs = (struct cpt_cq_s *)cq_s;
-
-				cn20k_eth_sec_post_event(eth_dev, sa,
+			struct cpt_cq_s *cqs = (struct cpt_cq_s *)cq_s;
+			if (type < NIX_INL_SSO) {
+				cn20k_eth_sec_post_event(eth_dev, args, type,
 							 (uint16_t)cqs->w0.s.uc_compcode,
 							 (uint16_t)cqs->w0.s.compcode, NULL);
 				return;
 			}
 			if (type == NIX_INL_SOFT_EXPIRY_THRD) {
-				if (sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
+				outb_sa = (struct roc_ow_ipsec_outb_sa *)args;
+				outb_priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(outb_sa);
+				desc.metadata = (uint64_t)outb_priv->userdata;
+				if (outb_sa->w2.s.life_unit == ROC_IE_OW_SA_LIFE_UNIT_PKTS)
 					desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
 				else
 					desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
@@ -596,9 +610,9 @@ cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, enum nix_inl_event_type type
 	sess_priv.u64 = *rte_security_dynfield(mbuf);

 	sa_base = dev->outb.sa_base;
-	sa = roc_nix_inl_ow_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
+	outb_sa = roc_nix_inl_ow_ipsec_outb_sa(sa_base, sess_priv.sa_idx);

-	cn20k_eth_sec_post_event(eth_dev, sa, res->uc_compcode, res->compcode, mbuf);
+	cn20k_eth_sec_post_event(eth_dev, outb_sa, type, res->uc_compcode, res->compcode, mbuf);

 	cnxk_pktmbuf_free_no_cache(mbuf);
 }
--
2.34.1



More information about the dev mailing list