[dpdk-dev] [PATCH v6 1/7] bbdev: renaming non-generic LTE specific structure

Nicolas Chautru nicolas.chautru at intel.com
Wed Jul 3 17:24:04 CEST 2019


Rename the enums and structures that were LTE-specific to allow for
extension and support of 5GNR operations.

Signed-off-by: Nicolas Chautru <nicolas.chautru at intel.com>
Acked-by: Amr Mokhtar <amr.mokhtar at intel.com>
---
 app/test-bbdev/test_bbdev_vector.c               |   4 +-
 app/test-bbdev/test_bbdev_vector.h               |   2 +-
 drivers/baseband/fpga_lte_fec/fpga_lte_fec.c     | 103 ++++++++++++-----------
 drivers/baseband/turbo_sw/bbdev_turbo_software.c |  39 +++++----
 lib/librte_bbdev/rte_bbdev_op.h                  |  26 +++---
 5 files changed, 89 insertions(+), 85 deletions(-)
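
Note for reviewers (not part of the patch): a minimal sketch of how application
code tracks the renamed identifiers. The numeric values are placeholders only;
the identifiers used are those renamed by this diff (RTE_BBDEV_TURBO_* macros
and struct rte_bbdev_op_enc_turbo_cb_params).

#include <rte_common.h>
#include <rte_bbdev_op.h>

/* Illustrative only: fill CB-mode turbo encode parameters with placeholder
 * values, using the post-rename names.
 */
static void
set_turbo_enc_cb_params(struct rte_bbdev_enc_op *op)
{
	/* Was struct rte_bbdev_op_enc_cb_params before this patch. */
	struct rte_bbdev_op_enc_turbo_cb_params *cb =
			&op->turbo_enc.cb_params;

	op->turbo_enc.code_block_mode = 1; /* 1 = CB mode */
	cb->k = RTE_BBDEV_TURBO_MAX_CB_SIZE;  /* was RTE_BBDEV_MAX_CB_SIZE */
	cb->e = 120;  /* placeholder: even and >= RTE_BBDEV_TURBO_MIN_CB_SIZE */
	/* Circular buffer length, was computed with RTE_BBDEV_C_SUBBLOCK. */
	cb->ncb = 3 * RTE_ALIGN_CEIL(cb->k + 4, RTE_BBDEV_TURBO_C_SUBBLOCK);
}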

diff --git a/app/test-bbdev/test_bbdev_vector.c b/app/test-bbdev/test_bbdev_vector.c
index e4f68e2..e149ced 100644
--- a/app/test-bbdev/test_bbdev_vector.c
+++ b/app/test-bbdev/test_bbdev_vector.c
@@ -298,9 +298,9 @@
 	op_data = vector->entries[type].segments;
 	nb_ops = &vector->entries[type].nb_segments;
 
-	if (*nb_ops >= RTE_BBDEV_MAX_CODE_BLOCKS) {
+	if (*nb_ops >= RTE_BBDEV_TURBO_MAX_CODE_BLOCKS) {
 		printf("Too many segments (code blocks defined): %u, max %d!\n",
-				*nb_ops, RTE_BBDEV_MAX_CODE_BLOCKS);
+				*nb_ops, RTE_BBDEV_TURBO_MAX_CODE_BLOCKS);
 		return -1;
 	}
 
diff --git a/app/test-bbdev/test_bbdev_vector.h b/app/test-bbdev/test_bbdev_vector.h
index 476aae1..c85e94d 100644
--- a/app/test-bbdev/test_bbdev_vector.h
+++ b/app/test-bbdev/test_bbdev_vector.h
@@ -46,7 +46,7 @@ struct op_data_buf {
 };
 
 struct op_data_entries {
-	struct op_data_buf segments[RTE_BBDEV_MAX_CODE_BLOCKS];
+	struct op_data_buf segments[RTE_BBDEV_TURBO_MAX_CODE_BLOCKS];
 	unsigned int nb_segments;
 };
 
diff --git a/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c b/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c
index 19e7689..7e05b94 100644
--- a/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c
+++ b/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c
@@ -607,9 +607,9 @@ struct __rte_cache_aligned fpga_queue {
 					RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP,
 				.max_llr_modulus = INT8_MAX,
 				.num_buffers_src =
-						RTE_BBDEV_MAX_CODE_BLOCKS,
+						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
 				.num_buffers_hard_out =
-					RTE_BBDEV_MAX_CODE_BLOCKS,
+					RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
 				.num_buffers_soft_out = 0
 			}
 		},
@@ -621,9 +621,9 @@ struct __rte_cache_aligned fpga_queue {
 					RTE_BBDEV_TURBO_RATE_MATCH |
 					RTE_BBDEV_TURBO_ENC_INTERRUPTS,
 				.num_buffers_src =
-						RTE_BBDEV_MAX_CODE_BLOCKS,
+						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
 				.num_buffers_dst =
-						RTE_BBDEV_MAX_CODE_BLOCKS
+						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS
 			}
 		},
 		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
@@ -1332,14 +1332,15 @@ struct __rte_cache_aligned fpga_queue {
 validate_enc_op(struct rte_bbdev_enc_op *op)
 {
 	struct rte_bbdev_op_turbo_enc *turbo_enc = &op->turbo_enc;
-	struct rte_bbdev_op_enc_cb_params *cb = NULL;
-	struct rte_bbdev_op_enc_tb_params *tb = NULL;
+	struct rte_bbdev_op_enc_turbo_cb_params *cb = NULL;
+	struct rte_bbdev_op_enc_turbo_tb_params *tb = NULL;
 	uint16_t kw, kw_neg, kw_pos;
 
 	if (turbo_enc->input.length >
-			RTE_BBDEV_MAX_TB_SIZE >> 3) {
+			RTE_BBDEV_TURBO_MAX_TB_SIZE >> 3) {
 		rte_bbdev_log(ERR, "TB size (%u) is too big, max: %d",
-				turbo_enc->input.length, RTE_BBDEV_MAX_TB_SIZE);
+				turbo_enc->input.length,
+				RTE_BBDEV_TURBO_MAX_TB_SIZE);
 		op->status = 1 << RTE_BBDEV_DATA_ERROR;
 		return -1;
 	}
@@ -1372,32 +1373,32 @@ struct __rte_cache_aligned fpga_queue {
 
 	if (turbo_enc->code_block_mode == 0) {
 		tb = &turbo_enc->tb_params;
-		if ((tb->k_neg < RTE_BBDEV_MIN_CB_SIZE
-				|| tb->k_neg > RTE_BBDEV_MAX_CB_SIZE)
+		if ((tb->k_neg < RTE_BBDEV_TURBO_MIN_CB_SIZE
+				|| tb->k_neg > RTE_BBDEV_TURBO_MAX_CB_SIZE)
 				&& tb->c_neg > 0) {
 			rte_bbdev_log(ERR,
 					"k_neg (%u) is out of range %u <= value <= %u",
-					tb->k_neg, RTE_BBDEV_MIN_CB_SIZE,
-					RTE_BBDEV_MAX_CB_SIZE);
+					tb->k_neg, RTE_BBDEV_TURBO_MIN_CB_SIZE,
+					RTE_BBDEV_TURBO_MAX_CB_SIZE);
 			return -1;
 		}
-		if (tb->k_pos < RTE_BBDEV_MIN_CB_SIZE
-				|| tb->k_pos > RTE_BBDEV_MAX_CB_SIZE) {
+		if (tb->k_pos < RTE_BBDEV_TURBO_MIN_CB_SIZE
+				|| tb->k_pos > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
 			rte_bbdev_log(ERR,
 					"k_pos (%u) is out of range %u <= value <= %u",
-					tb->k_pos, RTE_BBDEV_MIN_CB_SIZE,
-					RTE_BBDEV_MAX_CB_SIZE);
+					tb->k_pos, RTE_BBDEV_TURBO_MIN_CB_SIZE,
+					RTE_BBDEV_TURBO_MAX_CB_SIZE);
 			return -1;
 		}
-		if (tb->c_neg > (RTE_BBDEV_MAX_CODE_BLOCKS - 1))
+		if (tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1))
 			rte_bbdev_log(ERR,
 					"c_neg (%u) is out of range 0 <= value <= %u",
 					tb->c_neg,
-					RTE_BBDEV_MAX_CODE_BLOCKS - 1);
-		if (tb->c < 1 || tb->c > RTE_BBDEV_MAX_CODE_BLOCKS) {
+					RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1);
+		if (tb->c < 1 || tb->c > RTE_BBDEV_TURBO_MAX_CODE_BLOCKS) {
 			rte_bbdev_log(ERR,
 					"c (%u) is out of range 1 <= value <= %u",
-					tb->c, RTE_BBDEV_MAX_CODE_BLOCKS);
+					tb->c, RTE_BBDEV_TURBO_MAX_CODE_BLOCKS);
 			return -1;
 		}
 		if (tb->cab > tb->c) {
@@ -1406,23 +1407,23 @@ struct __rte_cache_aligned fpga_queue {
 					tb->cab, tb->c);
 			return -1;
 		}
-		if ((tb->ea < RTE_BBDEV_MIN_CB_SIZE || (tb->ea % 2))
+		if ((tb->ea < RTE_BBDEV_TURBO_MIN_CB_SIZE || (tb->ea % 2))
 				&& tb->r < tb->cab) {
 			rte_bbdev_log(ERR,
 					"ea (%u) is less than %u or it is not even",
-					tb->ea, RTE_BBDEV_MIN_CB_SIZE);
+					tb->ea, RTE_BBDEV_TURBO_MIN_CB_SIZE);
 			return -1;
 		}
-		if ((tb->eb < RTE_BBDEV_MIN_CB_SIZE || (tb->eb % 2))
+		if ((tb->eb < RTE_BBDEV_TURBO_MIN_CB_SIZE || (tb->eb % 2))
 				&& tb->c > tb->cab) {
 			rte_bbdev_log(ERR,
 					"eb (%u) is less than %u or it is not even",
-					tb->eb, RTE_BBDEV_MIN_CB_SIZE);
+					tb->eb, RTE_BBDEV_TURBO_MIN_CB_SIZE);
 			return -1;
 		}
 
 		kw_neg = 3 * RTE_ALIGN_CEIL(tb->k_neg + 4,
-					RTE_BBDEV_C_SUBBLOCK);
+					RTE_BBDEV_TURBO_C_SUBBLOCK);
 		if (tb->ncb_neg < tb->k_neg || tb->ncb_neg > kw_neg) {
 			rte_bbdev_log(ERR,
 					"ncb_neg (%u) is out of range (%u) k_neg <= value <= (%u) kw_neg",
@@ -1431,7 +1432,7 @@ struct __rte_cache_aligned fpga_queue {
 		}
 
 		kw_pos = 3 * RTE_ALIGN_CEIL(tb->k_pos + 4,
-					RTE_BBDEV_C_SUBBLOCK);
+					RTE_BBDEV_TURBO_C_SUBBLOCK);
 		if (tb->ncb_pos < tb->k_pos || tb->ncb_pos > kw_pos) {
 			rte_bbdev_log(ERR,
 					"ncb_pos (%u) is out of range (%u) k_pos <= value <= (%u) kw_pos",
@@ -1446,23 +1447,23 @@ struct __rte_cache_aligned fpga_queue {
 		}
 	} else {
 		cb = &turbo_enc->cb_params;
-		if (cb->k < RTE_BBDEV_MIN_CB_SIZE
-				|| cb->k > RTE_BBDEV_MAX_CB_SIZE) {
+		if (cb->k < RTE_BBDEV_TURBO_MIN_CB_SIZE
+				|| cb->k > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
 			rte_bbdev_log(ERR,
 					"k (%u) is out of range %u <= value <= %u",
-					cb->k, RTE_BBDEV_MIN_CB_SIZE,
-					RTE_BBDEV_MAX_CB_SIZE);
+					cb->k, RTE_BBDEV_TURBO_MIN_CB_SIZE,
+					RTE_BBDEV_TURBO_MAX_CB_SIZE);
 			return -1;
 		}
 
-		if (cb->e < RTE_BBDEV_MIN_CB_SIZE || (cb->e % 2)) {
+		if (cb->e < RTE_BBDEV_TURBO_MIN_CB_SIZE || (cb->e % 2)) {
 			rte_bbdev_log(ERR,
 					"e (%u) is less than %u or it is not even",
-					cb->e, RTE_BBDEV_MIN_CB_SIZE);
+					cb->e, RTE_BBDEV_TURBO_MIN_CB_SIZE);
 			return -1;
 		}
 
-		kw = RTE_ALIGN_CEIL(cb->k + 4, RTE_BBDEV_C_SUBBLOCK) * 3;
+		kw = RTE_ALIGN_CEIL(cb->k + 4, RTE_BBDEV_TURBO_C_SUBBLOCK) * 3;
 		if (cb->ncb < cb->k || cb->ncb > kw) {
 			rte_bbdev_log(ERR,
 					"ncb (%u) is out of range (%u) k <= value <= (%u) kw",
@@ -1655,8 +1656,8 @@ struct __rte_cache_aligned fpga_queue {
 validate_dec_op(struct rte_bbdev_dec_op *op)
 {
 	struct rte_bbdev_op_turbo_dec *turbo_dec = &op->turbo_dec;
-	struct rte_bbdev_op_dec_cb_params *cb = NULL;
-	struct rte_bbdev_op_dec_tb_params *tb = NULL;
+	struct rte_bbdev_op_dec_turbo_cb_params *cb = NULL;
+	struct rte_bbdev_op_dec_turbo_tb_params *tb = NULL;
 
 	if (op->mempool == NULL) {
 		rte_bbdev_log(ERR, "Invalid mempool pointer");
@@ -1713,33 +1714,33 @@ struct __rte_cache_aligned fpga_queue {
 		}
 
 		tb = &turbo_dec->tb_params;
-		if ((tb->k_neg < RTE_BBDEV_MIN_CB_SIZE
-				|| tb->k_neg > RTE_BBDEV_MAX_CB_SIZE)
+		if ((tb->k_neg < RTE_BBDEV_TURBO_MIN_CB_SIZE
+				|| tb->k_neg > RTE_BBDEV_TURBO_MAX_CB_SIZE)
 				&& tb->c_neg > 0) {
 			rte_bbdev_log(ERR,
 					"k_neg (%u) is out of range %u <= value <= %u",
-					tb->k_neg, RTE_BBDEV_MIN_CB_SIZE,
-					RTE_BBDEV_MAX_CB_SIZE);
+					tb->k_neg, RTE_BBDEV_TURBO_MIN_CB_SIZE,
+					RTE_BBDEV_TURBO_MAX_CB_SIZE);
 			return -1;
 		}
-		if ((tb->k_pos < RTE_BBDEV_MIN_CB_SIZE
-				|| tb->k_pos > RTE_BBDEV_MAX_CB_SIZE)
+		if ((tb->k_pos < RTE_BBDEV_TURBO_MIN_CB_SIZE
+				|| tb->k_pos > RTE_BBDEV_TURBO_MAX_CB_SIZE)
 				&& tb->c > tb->c_neg) {
 			rte_bbdev_log(ERR,
 					"k_pos (%u) is out of range %u <= value <= %u",
-					tb->k_pos, RTE_BBDEV_MIN_CB_SIZE,
-					RTE_BBDEV_MAX_CB_SIZE);
+					tb->k_pos, RTE_BBDEV_TURBO_MIN_CB_SIZE,
+					RTE_BBDEV_TURBO_MAX_CB_SIZE);
 			return -1;
 		}
-		if (tb->c_neg > (RTE_BBDEV_MAX_CODE_BLOCKS - 1))
+		if (tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1))
 			rte_bbdev_log(ERR,
 					"c_neg (%u) is out of range 0 <= value <= %u",
 					tb->c_neg,
-					RTE_BBDEV_MAX_CODE_BLOCKS - 1);
-		if (tb->c < 1 || tb->c > RTE_BBDEV_MAX_CODE_BLOCKS) {
+					RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1);
+		if (tb->c < 1 || tb->c > RTE_BBDEV_TURBO_MAX_CODE_BLOCKS) {
 			rte_bbdev_log(ERR,
 					"c (%u) is out of range 1 <= value <= %u",
-					tb->c, RTE_BBDEV_MAX_CODE_BLOCKS);
+					tb->c, RTE_BBDEV_TURBO_MAX_CODE_BLOCKS);
 			return -1;
 		}
 		if (tb->cab > tb->c) {
@@ -1757,12 +1758,12 @@ struct __rte_cache_aligned fpga_queue {
 		}
 
 		cb = &turbo_dec->cb_params;
-		if (cb->k < RTE_BBDEV_MIN_CB_SIZE
-				|| cb->k > RTE_BBDEV_MAX_CB_SIZE) {
+		if (cb->k < RTE_BBDEV_TURBO_MIN_CB_SIZE
+				|| cb->k > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
 			rte_bbdev_log(ERR,
 					"k (%u) is out of range %u <= value <= %u",
-					cb->k, RTE_BBDEV_MIN_CB_SIZE,
-					RTE_BBDEV_MAX_CB_SIZE);
+					cb->k, RTE_BBDEV_TURBO_MIN_CB_SIZE,
+					RTE_BBDEV_TURBO_MAX_CB_SIZE);
 			return -1;
 		}
 	}
diff --git a/drivers/baseband/turbo_sw/bbdev_turbo_software.c b/drivers/baseband/turbo_sw/bbdev_turbo_software.c
index 94aa536..5551f84 100644
--- a/drivers/baseband/turbo_sw/bbdev_turbo_software.c
+++ b/drivers/baseband/turbo_sw/bbdev_turbo_software.c
@@ -34,9 +34,9 @@
 	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
 		##__VA_ARGS__)
 
-#define DEINT_INPUT_BUF_SIZE (((RTE_BBDEV_MAX_CB_SIZE >> 3) + 1) * 48)
+#define DEINT_INPUT_BUF_SIZE (((RTE_BBDEV_TURBO_MAX_CB_SIZE >> 3) + 1) * 48)
 #define DEINT_OUTPUT_BUF_SIZE (DEINT_INPUT_BUF_SIZE * 6)
-#define ADAPTER_OUTPUT_BUF_SIZE ((RTE_BBDEV_MAX_CB_SIZE + 4) * 48)
+#define ADAPTER_OUTPUT_BUF_SIZE ((RTE_BBDEV_TURBO_MAX_CB_SIZE + 4) * 48)
 
 /* private data structure */
 struct bbdev_private {
@@ -103,7 +103,7 @@ struct turbo_sw_queue {
 {
 	int32_t result = 0;
 
-	if (k < RTE_BBDEV_MIN_CB_SIZE || k > RTE_BBDEV_MAX_CB_SIZE)
+	if (k < RTE_BBDEV_TURBO_MIN_CB_SIZE || k > RTE_BBDEV_TURBO_MAX_CB_SIZE)
 		return -1;
 
 	if (k > 2048) {
@@ -158,9 +158,10 @@ struct turbo_sw_queue {
 					RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP |
 					RTE_BBDEV_TURBO_EARLY_TERMINATION,
 				.max_llr_modulus = 16,
-				.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
+				.num_buffers_src =
+						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
 				.num_buffers_hard_out =
-						RTE_BBDEV_MAX_CODE_BLOCKS,
+						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
 				.num_buffers_soft_out = 0,
 			}
 		},
@@ -172,8 +173,10 @@ struct turbo_sw_queue {
 						RTE_BBDEV_TURBO_CRC_24A_ATTACH |
 						RTE_BBDEV_TURBO_RATE_MATCH |
 						RTE_BBDEV_TURBO_RV_INDEX_BYPASS,
-				.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
-				.num_buffers_dst = RTE_BBDEV_MAX_CODE_BLOCKS,
+				.num_buffers_src =
+						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
+				.num_buffers_dst =
+						RTE_BBDEV_TURBO_MAX_CODE_BLOCKS,
 			}
 		},
 #endif
@@ -257,7 +260,7 @@ struct turbo_sw_queue {
 		return -ENAMETOOLONG;
 	}
 	q->enc_out = rte_zmalloc_socket(name,
-			((RTE_BBDEV_MAX_TB_SIZE >> 3) + 3) *
+			((RTE_BBDEV_TURBO_MAX_TB_SIZE >> 3) + 3) *
 			sizeof(*q->enc_out) * 3,
 			RTE_CACHE_LINE_SIZE, queue_conf->socket);
 	if (q->enc_out == NULL) {
@@ -277,7 +280,7 @@ struct turbo_sw_queue {
 		return -ENAMETOOLONG;
 	}
 	q->enc_in = rte_zmalloc_socket(name,
-			(RTE_BBDEV_MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
+			(RTE_BBDEV_TURBO_MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
 			RTE_CACHE_LINE_SIZE, queue_conf->socket);
 	if (q->enc_in == NULL) {
 		rte_bbdev_log(ERR,
@@ -295,7 +298,7 @@ struct turbo_sw_queue {
 		return -ENAMETOOLONG;
 	}
 	q->ag = rte_zmalloc_socket(name,
-			RTE_BBDEV_MAX_CB_SIZE * 10 * sizeof(*q->ag),
+			RTE_BBDEV_TURBO_MAX_CB_SIZE * 10 * sizeof(*q->ag),
 			RTE_CACHE_LINE_SIZE, queue_conf->socket);
 	if (q->ag == NULL) {
 		rte_bbdev_log(ERR,
@@ -313,7 +316,7 @@ struct turbo_sw_queue {
 		return -ENAMETOOLONG;
 	}
 	q->code_block = rte_zmalloc_socket(name,
-			RTE_BBDEV_MAX_CB_SIZE * sizeof(*q->code_block),
+			RTE_BBDEV_TURBO_MAX_CB_SIZE * sizeof(*q->code_block),
 			RTE_CACHE_LINE_SIZE, queue_conf->socket);
 	if (q->code_block == NULL) {
 		rte_bbdev_log(ERR,
@@ -439,9 +442,9 @@ struct turbo_sw_queue {
 		return -1;
 	}
 
-	if (k > RTE_BBDEV_MAX_CB_SIZE) {
+	if (k > RTE_BBDEV_TURBO_MAX_CB_SIZE) {
 		rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
-				k, RTE_BBDEV_MAX_CB_SIZE);
+				k, RTE_BBDEV_TURBO_MAX_CB_SIZE);
 		return -1;
 	}
 
@@ -466,9 +469,9 @@ struct turbo_sw_queue {
 		return -1;
 	}
 
-	if (kw > RTE_BBDEV_MAX_KW) {
+	if (kw > RTE_BBDEV_TURBO_MAX_KW) {
 		rte_bbdev_log(ERR, "Input length (%u) is too big, max: %d",
-				kw, RTE_BBDEV_MAX_KW);
+				kw, RTE_BBDEV_TURBO_MAX_KW);
 		return -1;
 	}
 
@@ -773,9 +776,9 @@ struct turbo_sw_queue {
 	/* Clear op status */
 	op->status = 0;
 
-	if (mbuf_total_left > RTE_BBDEV_MAX_TB_SIZE >> 3) {
+	if (mbuf_total_left > RTE_BBDEV_TURBO_MAX_TB_SIZE >> 3) {
 		rte_bbdev_log(ERR, "TB size (%u) is too big, max: %d",
-				mbuf_total_left, RTE_BBDEV_MAX_TB_SIZE);
+				mbuf_total_left, RTE_BBDEV_TURBO_MAX_TB_SIZE);
 		op->status = 1 << RTE_BBDEV_DATA_ERROR;
 		return;
 	}
@@ -1074,7 +1077,7 @@ struct turbo_sw_queue {
 		 * where D is the size of each output from turbo encoder block
 		 * (k + 4).
 		 */
-		kw = RTE_ALIGN_CEIL(k + 4, RTE_BBDEV_C_SUBBLOCK) * 3;
+		kw = RTE_ALIGN_CEIL(k + 4, RTE_BBDEV_TURBO_C_SUBBLOCK) * 3;
 
 		process_dec_cb(q, op, c, k, kw, m_in, m_out_head, m_out,
 				in_offset, out_offset, check_bit(dec->op_flags,
diff --git a/lib/librte_bbdev/rte_bbdev_op.h b/lib/librte_bbdev/rte_bbdev_op.h
index b6ca3b0..d860989 100644
--- a/lib/librte_bbdev/rte_bbdev_op.h
+++ b/lib/librte_bbdev/rte_bbdev_op.h
@@ -26,22 +26,22 @@
 #include <rte_mempool.h>
 
 /* Number of columns in sub-block interleaver (36.212, section 5.1.4.1.1) */
-#define RTE_BBDEV_C_SUBBLOCK (32)
+#define RTE_BBDEV_TURBO_C_SUBBLOCK (32)
 /* Maximum size of Transport Block (36.213, Table, Table 7.1.7.2.5-1) */
-#define RTE_BBDEV_MAX_TB_SIZE (391656)
+#define RTE_BBDEV_TURBO_MAX_TB_SIZE (391656)
 /* Maximum size of Code Block (36.212, Table 5.1.3-3) */
-#define RTE_BBDEV_MAX_CB_SIZE (6144)
+#define RTE_BBDEV_TURBO_MAX_CB_SIZE (6144)
 /* Minimum size of Code Block (36.212, Table 5.1.3-3) */
-#define RTE_BBDEV_MIN_CB_SIZE (40)
+#define RTE_BBDEV_TURBO_MIN_CB_SIZE (40)
 /* Maximum size of circular buffer */
-#define RTE_BBDEV_MAX_KW (18528)
+#define RTE_BBDEV_TURBO_MAX_KW (18528)
 /*
  * Maximum number of Code Blocks in Transport Block. It is calculated based on
  * maximum size of one Code Block and one Transport Block (considering CRC24A
  * and CRC24B):
  * (391656 + 24) / (6144 - 24) = 64
  */
-#define RTE_BBDEV_MAX_CODE_BLOCKS (64)
+#define RTE_BBDEV_TURBO_MAX_CODE_BLOCKS (64)
 
 /** Flags for turbo decoder operation and capability structure */
 enum rte_bbdev_op_td_flag_bitmasks {
@@ -133,7 +133,7 @@ struct rte_bbdev_op_data {
 	 * multiple CBs contiguously located next to each other.
 	 * A Transport Block (TB) represents a whole piece of data that is
 	 * divided into one or more CBs. Maximum number of CBs can be contained
-	 * in one TB is defined by RTE_BBDEV_MAX_CODE_BLOCKS.
+	 * in one TB is defined by RTE_BBDEV_TURBO_MAX_CODE_BLOCKS.
 	 *
 	 * An mbuf data structure cannot represent more than one TB. The
 	 * smallest piece of data that can be contained in one mbuf is one CB.
@@ -204,7 +204,7 @@ struct rte_bbdev_op_dec_tb_params {
 	uint16_t k_pos;
 	/**< The number of CBs that have K- size, [0:63] */
 	uint8_t c_neg;
-	/**< The total number of CBs in the TB, [1:RTE_BBDEV_MAX_CODE_BLOCKS] */
+	/**< The total number of CBs in the TB, [1:RTE_BBDEV_TURBO_MAX_CODE_BLOCKS] */
 	uint8_t c;
 	/**< The number of CBs that uses Ea before switching to Eb, [0:63] */
 	uint8_t cab;
@@ -288,7 +288,7 @@ struct rte_bbdev_op_turbo_dec {
 	};
 };
 
-struct rte_bbdev_op_enc_cb_params {
+struct rte_bbdev_op_enc_turbo_cb_params {
 	/**< The K size of the input CB, in bits [40:6144], as specified in
 	 * 3GPP TS 36.212.
 	 * This size is inclusive of CRC24A, regardless whether it was
@@ -305,7 +305,7 @@ struct rte_bbdev_op_enc_cb_params {
 	uint16_t ncb;
 };
 
-struct rte_bbdev_op_enc_tb_params {
+struct rte_bbdev_op_enc_turbo_tb_params {
 	/**< The K- size of the input CB, in bits [40:6144], that is in the
 	 * Turbo operation when r < C-, as in 3GPP TS 36.212.
 	 * This size is inclusive of CRC24B, regardless whether it was
@@ -320,7 +320,7 @@ struct rte_bbdev_op_enc_tb_params {
 	uint16_t k_pos;
 	/**< The number of CBs that have K- size, [0:63] */
 	uint8_t c_neg;
-	/**< The total number of CBs in the TB, [1:RTE_BBDEV_MAX_CODE_BLOCKS] */
+	/**< The total number of CBs in the TB, [1:RTE_BBDEV_TURBO_MAX_CODE_BLOCKS] */
 	uint8_t c;
 	/**< The number of CBs that uses Ea before switching to Eb, [0:63] */
 	uint8_t cab;
@@ -375,9 +375,9 @@ struct rte_bbdev_op_turbo_enc {
 	uint8_t code_block_mode; /**< [0 - TB : 1 - CB] */
 	union {
 		/**< Struct which stores Code Block specific parameters */
-		struct rte_bbdev_op_enc_cb_params cb_params;
+		struct rte_bbdev_op_enc_turbo_cb_params cb_params;
 		/**< Struct which stores Transport Block specific parameters */
-		struct rte_bbdev_op_enc_tb_params tb_params;
+		struct rte_bbdev_op_enc_turbo_tb_params tb_params;
 	};
 };
 
-- 
1.8.3.1


