[dpdk-dev] [PATCH] event/octeontx2: fix XAQ pool reconfigure
pbhagavatula at marvell.com
Fri Mar 19 22:08:15 CET 2021
From: Pavan Nikhilesh <pbhagavatula at marvell.com>
When the XAQ pool is reconfigured, the same memzone is reused for
fc_mem. If the old mempool is freed after the new one has been set up,
the free path updates fc_mem with the old pool's free count, corrupting
the new pool's flow-control state. Release the XAQ aura from the HWGRPs
and free the old mempool before allocating the new one, and clear
fc_mem when the pool is allocated.
Fixes: ffa4ec0b6063 ("event/octeontx2: allow adapters to resize inflight buffers")
Cc: stable at dpdk.org
Signed-off-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
---
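For reviewers, a minimal host-side sketch of the hazard this patch
fixes (all names below are illustrative stand-ins, not the driver's
actual API): the pool's flow-control counter lives in a memzone word
(fc_mem) that survives reconfiguration, so freeing the old mempool
after the new one is set up writes the old pool's free count over the
counter the new pool already owns.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t memzone_fc;	/* stands in for the reused memzone word */

/* models the pool/aura teardown writing its final free count back
 * through the counter address it was given at creation */
static void fake_pool_free(uint64_t *fc, uint64_t free_cnt)
{
	*fc = free_cnt;
}

int main(void)
{
	uint64_t *fc_mem = &memzone_fc;

	/* old order: the new pool starts counting, then the old
	 * pool's teardown clobbers the shared counter */
	*fc_mem = 0;
	fake_pool_free(fc_mem, 128);
	printf("old order: fc_mem = %" PRIu64 " (stale)\n", *fc_mem);

	/* patched order: release and free the old pool first, then
	 * zero the counter when the new pool is allocated */
	fake_pool_free(fc_mem, 128);
	*fc_mem = 0;
	printf("new order: fc_mem = %" PRIu64 " (clean)\n", *fc_mem);
	return 0;
}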
drivers/common/octeontx2/otx2_mbox.h | 7 +++++++
drivers/event/octeontx2/otx2_evdev.c | 31 ++++++++++++++++++++++------
2 files changed, 32 insertions(+), 6 deletions(-)
diff --git a/drivers/common/octeontx2/otx2_mbox.h b/drivers/common/octeontx2/otx2_mbox.h
index 7e7667bf0..ae6c661ab 100644
--- a/drivers/common/octeontx2/otx2_mbox.h
+++ b/drivers/common/octeontx2/otx2_mbox.h
@@ -177,6 +177,8 @@ M(SSO_GRP_GET_STATS, 0x609, sso_grp_get_stats, sso_info_req, \
sso_grp_stats) \
M(SSO_HWS_GET_STATS, 0x610, sso_hws_get_stats, sso_info_req, \
sso_hws_stats) \
+M(SSO_HW_RELEASE_XAQ, 0x611, sso_hw_release_xaq_aura, \
+ sso_release_xaq, msg_rsp) \
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
M(TIM_LF_ALLOC, 0x800, tim_lf_alloc, tim_lf_alloc_req, \
tim_lf_alloc_rsp) \
@@ -1190,6 +1192,11 @@ struct sso_hw_setconfig {
uint16_t __otx2_io hwgrps;
};
+struct sso_release_xaq {
+ struct mbox_msghdr hdr;
+ uint16_t __otx2_io hwgrps;
+};
+
struct sso_info_req {
struct mbox_msghdr hdr;
union {
diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index 7e2343599..770a801c4 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -986,7 +986,7 @@ sso_xaq_allocate(struct otx2_sso_evdev *dev)
dev->fc_iova = mz->iova;
dev->fc_mem = mz->addr;
-
+ *dev->fc_mem = 0;
aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem + OTX2_ALIGN);
memset(aura, 0, sizeof(struct npa_aura_s));
@@ -1062,6 +1062,19 @@ sso_ggrp_alloc_xaq(struct otx2_sso_evdev *dev)
return otx2_mbox_process(mbox);
}
+static int
+sso_ggrp_free_xaq(struct otx2_sso_evdev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct sso_release_xaq *req;
+
+ otx2_sso_dbg("Freeing XAQ for GGRPs");
+ req = otx2_mbox_alloc_msg_sso_hw_release_xaq_aura(mbox);
+ req->hwgrps = dev->nb_event_queues;
+
+ return otx2_mbox_process(mbox);
+}
+
static void
sso_lf_teardown(struct otx2_sso_evdev *dev,
enum otx2_sso_lf_type lf_type)
@@ -1452,12 +1465,16 @@ sso_cleanup(struct rte_eventdev *event_dev, uint8_t enable)
ssogws_reset((struct otx2_ssogws *)&ws->ws_state[1]);
ws->swtag_req = 0;
ws->vws = 0;
+ ws->fc_mem = dev->fc_mem;
+ ws->xaq_lmt = dev->xaq_lmt;
} else {
struct otx2_ssogws *ws;
ws = event_dev->data->ports[i];
ssogws_reset(ws);
ws->swtag_req = 0;
+ ws->fc_mem = dev->fc_mem;
+ ws->xaq_lmt = dev->xaq_lmt;
}
}
@@ -1498,28 +1515,30 @@ int
sso_xae_reconfigure(struct rte_eventdev *event_dev)
{
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
- struct rte_mempool *prev_xaq_pool;
int rc = 0;
if (event_dev->data->dev_started)
sso_cleanup(event_dev, 0);
- prev_xaq_pool = dev->xaq_pool;
+ rc = sso_ggrp_free_xaq(dev);
+ if (rc < 0) {
+ otx2_err("Failed to free XAQ\n");
+ return rc;
+ }
+
+ rte_mempool_free(dev->xaq_pool);
dev->xaq_pool = NULL;
rc = sso_xaq_allocate(dev);
if (rc < 0) {
otx2_err("Failed to alloc xaq pool %d", rc);
- rte_mempool_free(prev_xaq_pool);
return rc;
}
rc = sso_ggrp_alloc_xaq(dev);
if (rc < 0) {
otx2_err("Failed to alloc xaq to ggrp %d", rc);
- rte_mempool_free(prev_xaq_pool);
return rc;
}
- rte_mempool_free(prev_xaq_pool);
rte_mb();
if (event_dev->data->dev_started)
sso_cleanup(event_dev, 1);
--
2.17.1