[PATCH v2 03/37] baseband/acc100: add function to check AQ availability
Maxime Coquelin
maxime.coquelin at redhat.com
Wed Sep 14 19:00:42 CEST 2022
On 8/20/22 04:31, Hernan Vargas wrote:
> It is possible for some corner case to run more batch enqueue than
> supported. A protection is required to avoid that corner case.
> Enhance all ACC100 enqueue operations with check to see if there is room
> in the atomic queue for enqueueing batches into the queue manager
> Check room in AQ for the enqueues batches into Qmgr
Same remark as on patch 2: reading the commit message, this sounds like a fix, so it should be marked as such (with a Fixes: tag).
> Signed-off-by: Hernan Vargas <hernan.vargas at intel.com>
> ---
> drivers/baseband/acc100/rte_acc100_pmd.c | 30 +++++++++++++++++-------
> 1 file changed, 22 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/baseband/acc100/rte_acc100_pmd.c b/drivers/baseband/acc100/rte_acc100_pmd.c
> index 0598d33582..7349bb5bad 100644
> --- a/drivers/baseband/acc100/rte_acc100_pmd.c
> +++ b/drivers/baseband/acc100/rte_acc100_pmd.c
> @@ -3635,12 +3635,27 @@ acc100_enqueue_enc_tb(struct rte_bbdev_queue_data *q_data,
> return i;
> }
>
> +/* Check room in AQ for the enqueues batches into Qmgr */
> +static int32_t
> +acc100_aq_avail(struct rte_bbdev_queue_data *q_data, uint16_t num_ops)
> +{
> + struct acc100_queue *q = q_data->queue_private;
> + int32_t aq_avail = q->aq_depth -
> + ((q->aq_enqueued - q->aq_dequeued +
> + ACC100_MAX_QUEUE_DEPTH) % ACC100_MAX_QUEUE_DEPTH)
> + - (num_ops >> 7);
> + if (aq_avail <= 0)
> + acc100_enqueue_queue_full(q_data);
OK, please disregard my comment on patch 1 — acc100_enqueue_queue_full() is indeed used here.
> + return aq_avail;
> +}
> +
> /* Enqueue encode operations for ACC100 device. */
> static uint16_t
> acc100_enqueue_enc(struct rte_bbdev_queue_data *q_data,
> struct rte_bbdev_enc_op **ops, uint16_t num)
> {
> - if (unlikely(num == 0))
> + int32_t aq_avail = acc100_aq_avail(q_data, num);
> + if (unlikely((aq_avail <= 0) || (num == 0)))
> return 0;
> if (ops[0]->turbo_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
> return acc100_enqueue_enc_tb(q_data, ops, num);
> @@ -3653,7 +3668,8 @@ static uint16_t
> acc100_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
> struct rte_bbdev_enc_op **ops, uint16_t num)
> {
> - if (unlikely(num == 0))
> + int32_t aq_avail = acc100_aq_avail(q_data, num);
> + if (unlikely((aq_avail <= 0) || (num == 0)))
> return 0;
> if (ops[0]->ldpc_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
> return acc100_enqueue_enc_tb(q_data, ops, num);
> @@ -3850,7 +3866,8 @@ static uint16_t
> acc100_enqueue_dec(struct rte_bbdev_queue_data *q_data,
> struct rte_bbdev_dec_op **ops, uint16_t num)
> {
> - if (unlikely(num == 0))
> + int32_t aq_avail = acc100_aq_avail(q_data, num);
> + if (unlikely((aq_avail <= 0) || (num == 0)))
> return 0;
> if (ops[0]->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
> return acc100_enqueue_dec_tb(q_data, ops, num);
> @@ -3863,11 +3880,8 @@ static uint16_t
> acc100_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
> struct rte_bbdev_dec_op **ops, uint16_t num)
> {
> - struct acc100_queue *q = q_data->queue_private;
> - int32_t aq_avail = q->aq_depth +
> - (q->aq_dequeued - q->aq_enqueued) / 128;
> -
> - if (unlikely((aq_avail == 0) || (num == 0)))
> + int32_t aq_avail = acc100_aq_avail(q_data, num);
> + if (unlikely((aq_avail <= 0) || (num == 0)))
> return 0;
>
> if (ops[0]->ldpc_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
More information about the dev
mailing list