[PATCH v2 01/37] baseband/acc100: add enqueue status

Hernan Vargas hernan.vargas at intel.com
Sat Aug 20 04:31:21 CEST 2022


Add enqueue status as part of rte_bbdev_queue_data.
This is a new feature to update the queue status and indicate why a
previous enqueue may not have consumed all requested operations.
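
For reference, a minimal caller-side sketch (not part of this patch) of how
an application could react to a partial enqueue based on the reported
status. get_queue_enqueue_status() below is a hypothetical placeholder for
however the application reads the queue's enqueue_status field; the enum
values are the ones set by the new acc100 helpers:

	#include <rte_bbdev.h>
	#include <rte_bbdev_op.h>

	/* Hypothetical accessor for rte_bbdev_queue_data.enqueue_status. */
	enum rte_bbdev_enqueue_status
	get_queue_enqueue_status(uint16_t dev_id, uint16_t queue_id);

	static void
	enqueue_with_status(uint16_t dev_id, uint16_t queue_id,
			struct rte_bbdev_enc_op **ops, uint16_t num_ops)
	{
		uint16_t sent = rte_bbdev_enqueue_ldpc_enc_ops(dev_id,
				queue_id, ops, num_ops);
		if (sent == num_ops)
			return;

		switch (get_queue_enqueue_status(dev_id, queue_id)) {
		case RTE_BBDEV_ENQ_STATUS_RING_FULL:
		case RTE_BBDEV_ENQ_STATUS_QUEUE_FULL:
			/* Transient back-pressure: retry ops[sent..] later. */
			break;
		case RTE_BBDEV_ENQ_STATUS_INVALID_OP:
			/* ops[sent] was rejected as invalid: drop or fix it. */
			break;
		default:
			break;
		}
	}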

Signed-off-by: Hernan Vargas <hernan.vargas at intel.com>
---
 drivers/baseband/acc100/rte_acc100_pmd.c | 85 ++++++++++++++++++++----
 1 file changed, 71 insertions(+), 14 deletions(-)

diff --git a/drivers/baseband/acc100/rte_acc100_pmd.c b/drivers/baseband/acc100/rte_acc100_pmd.c
index a68a9b0fd9..7f698ec3d2 100644
--- a/drivers/baseband/acc100/rte_acc100_pmd.c
+++ b/drivers/baseband/acc100/rte_acc100_pmd.c
@@ -3436,6 +3436,35 @@ get_num_cbs_in_tb_ldpc_dec(struct rte_bbdev_op_ldpc_dec *ldpc_dec)
 	return cbs_in_tb;
 }
 
+static inline void
+acc100_enqueue_status(struct rte_bbdev_queue_data *q_data,
+		enum rte_bbdev_enqueue_status status)
+{
+	q_data->enqueue_status = status;
+	q_data->queue_stats.enqueue_status_count[status]++;
+	rte_bbdev_log(WARNING, "Enqueue Status: %d %#"PRIx64"",
+			status,
+			q_data->queue_stats.enqueue_status_count[status]);
+}
+
+static inline void
+acc100_enqueue_invalid(struct rte_bbdev_queue_data *q_data)
+{
+	acc100_enqueue_status(q_data, RTE_BBDEV_ENQ_STATUS_INVALID_OP);
+}
+
+static inline void
+acc100_enqueue_ring_full(struct rte_bbdev_queue_data *q_data)
+{
+	acc100_enqueue_status(q_data, RTE_BBDEV_ENQ_STATUS_RING_FULL);
+}
+
+static inline void
+acc100_enqueue_queue_full(struct rte_bbdev_queue_data *q_data)
+{
+	acc100_enqueue_status(q_data, RTE_BBDEV_ENQ_STATUS_QUEUE_FULL);
+}
+
 /* Enqueue encode operations for ACC100 device in CB mode. */
 static uint16_t
 acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,
@@ -3449,13 +3478,17 @@ acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,
 
 	for (i = 0; i < num; ++i) {
 		/* Check if there are available space for further processing */
-		if (unlikely(avail - 1 < 0))
+		if (unlikely(avail - 1 < 0)) {
+			acc100_enqueue_ring_full(q_data);
 			break;
+		}
 		avail -= 1;
 
 		ret = enqueue_enc_one_op_cb(q, ops[i], i);
-		if (ret < 0)
+		if (ret < 0) {
+			acc100_enqueue_invalid(q_data);
 			break;
+		}
 	}
 
 	if (unlikely(i == 0))
@@ -3505,20 +3538,26 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
 	int16_t enq, left = num;
 
 	while (left > 0) {
-		if (unlikely(avail < 1))
+		if (unlikely(avail < 1)) {
+			acc100_enqueue_ring_full(q_data);
 			break;
+		}
 		avail--;
 		enq = RTE_MIN(left, ACC100_MUX_5GDL_DESC);
 		if (check_mux(&ops[i], enq)) {
 			ret = enqueue_ldpc_enc_n_op_cb(q, &ops[i],
 					desc_idx, enq);
-			if (ret < 0)
+			if (ret < 0) {
+				acc100_enqueue_invalid(q_data);
 				break;
+			}
 			i += enq;
 		} else {
 			ret = enqueue_ldpc_enc_one_op_cb(q, ops[i], desc_idx);
-			if (ret < 0)
+			if (ret < 0) {
+				acc100_enqueue_invalid(q_data);
 				break;
+			}
 			i++;
 		}
 		desc_idx++;
@@ -3557,13 +3596,17 @@ acc100_enqueue_enc_tb(struct rte_bbdev_queue_data *q_data,
 	for (i = 0; i < num; ++i) {
 		cbs_in_tb = get_num_cbs_in_tb_enc(&ops[i]->turbo_enc);
 		/* Check if there are available space for further processing */
-		if (unlikely(avail - cbs_in_tb < 0))
+		if (unlikely(avail - cbs_in_tb < 0)) {
+			acc100_enqueue_ring_full(q_data);
 			break;
+		}
 		avail -= cbs_in_tb;
 
 		ret = enqueue_enc_one_op_tb(q, ops[i], enqueued_cbs, cbs_in_tb);
-		if (ret < 0)
+		if (ret < 0) {
+			acc100_enqueue_invalid(q_data);
 			break;
+		}
 		enqueued_cbs += ret;
 	}
 	if (unlikely(enqueued_cbs == 0))
@@ -3618,13 +3661,17 @@ acc100_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,
 
 	for (i = 0; i < num; ++i) {
 		/* Check if there are available space for further processing */
-		if (unlikely(avail - 1 < 0))
+		if (unlikely(avail - 1 < 0)) {
+			acc100_enqueue_ring_full(q_data);
 			break;
+		}
 		avail -= 1;
 
 		ret = enqueue_dec_one_op_cb(q, ops[i], i);
-		if (ret < 0)
+		if (ret < 0) {
+			acc100_enqueue_invalid(q_data);
 			break;
+		}
 	}
 
 	if (unlikely(i == 0))
@@ -3678,8 +3725,10 @@ acc100_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data,
 
 		ret = enqueue_ldpc_dec_one_op_tb(q, ops[i],
 				enqueued_cbs, cbs_in_tb);
-		if (ret < 0)
+		if (ret < 0) {
+			acc100_enqueue_invalid(q_data);
 			break;
+		}
 		enqueued_cbs += ret;
 	}
 
@@ -3704,8 +3753,10 @@ acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
 	bool same_op = false;
 	for (i = 0; i < num; ++i) {
 		/* Check if there are available space for further processing */
-		if (unlikely(avail < 1))
+		if (unlikely(avail < 1)) {
+			acc100_enqueue_ring_full(q_data);
 			break;
+		}
 		avail -= 1;
 
 		if (i > 0)
@@ -3718,8 +3769,10 @@ acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
 			ops[i]->ldpc_dec.n_filler, ops[i]->ldpc_dec.cb_params.e,
 			same_op);
 		ret = enqueue_ldpc_dec_one_op_cb(q, ops[i], i, same_op);
-		if (ret < 0)
+		if (ret < 0) {
+			acc100_enqueue_invalid(q_data);
 			break;
+		}
 	}
 
 	if (unlikely(i == 0))
@@ -3755,13 +3808,17 @@ acc100_enqueue_dec_tb(struct rte_bbdev_queue_data *q_data,
 	for (i = 0; i < num; ++i) {
 		cbs_in_tb = get_num_cbs_in_tb_dec(&ops[i]->turbo_dec);
 		/* Check if there are available space for further processing */
-		if (unlikely(avail - cbs_in_tb < 0))
+		if (unlikely(avail - cbs_in_tb < 0)) {
+			acc100_enqueue_ring_full(q_data);
 			break;
+		}
 		avail -= cbs_in_tb;
 
 		ret = enqueue_dec_one_op_tb(q, ops[i], enqueued_cbs, cbs_in_tb);
-		if (ret < 0)
+		if (ret < 0) {
+			acc100_enqueue_invalid(q_data);
 			break;
+		}
 		enqueued_cbs += ret;
 	}
 
-- 
2.37.1


