[dpdk-dev] [PATCH v2] lpm: fix unchecked return value

Medvedkin, Vladimir vladimir.medvedkin at intel.com
Fri Jul 17 19:12:00 CEST 2020


Hi Ruifeng,

On 16/07/2020 16:49, Ruifeng Wang wrote:
> Coverity complains about the unchecked return value of
> rte_rcu_qsbr_dq_enqueue. By default, the defer queue size is big enough
> to hold all tbl8 groups. When the enqueue fails, return an error to the
> user to indicate a system issue.
> 
> Coverity issue: 360832
> Fixes: 8a9f8564e9f9 ("lpm: implement RCU rule reclamation")
> 
> Signed-off-by: Ruifeng Wang <ruifeng.wang at arm.com>
> ---
> v2:
> Converted return value to conform to LPM API convention. (Vladimir)
> 
>   lib/librte_lpm/rte_lpm.c | 19 +++++++++++++------
>   1 file changed, 13 insertions(+), 6 deletions(-)
> 
> diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
> index 2db9e16a2..757436f49 100644
> --- a/lib/librte_lpm/rte_lpm.c
> +++ b/lib/librte_lpm/rte_lpm.c
> @@ -532,11 +532,12 @@ tbl8_alloc(struct rte_lpm *lpm)
>   	return group_idx;
>   }
>   
> -static void
> +static int32_t
>   tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
>   {
>   	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
>   	struct __rte_lpm *internal_lpm;
> +	int status;
>   
>   	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
>   	if (internal_lpm->v == NULL) {
> @@ -552,9 +553,15 @@ tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
>   				__ATOMIC_RELAXED);
>   	} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
>   		/* Push into QSBR defer queue. */
> -		rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
> +		status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
>   				(void *)&tbl8_group_start);
> +		if (status == 1) {
> +			RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
> +			return -rte_errno;
> +		}
>   	}
> +
> +	return 0;
>   }
>   
>   static __rte_noinline int32_t
> @@ -1040,7 +1047,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
>   #define group_idx next_hop
>   	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
>   			tbl8_range, i;
> -	int32_t tbl8_recycle_index;
> +	int32_t tbl8_recycle_index, status = 0;
>   
>   	/*
>   	 * Calculate the index into tbl24 and range. Note: All depths larger
> @@ -1097,7 +1104,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
>   		 */
>   		lpm->tbl24[tbl24_index].valid = 0;
>   		__atomic_thread_fence(__ATOMIC_RELEASE);
> -		tbl8_free(lpm, tbl8_group_start);
> +		status = tbl8_free(lpm, tbl8_group_start);
>   	} else if (tbl8_recycle_index > -1) {
>   		/* Update tbl24 entry. */
>   		struct rte_lpm_tbl_entry new_tbl24_entry = {
> @@ -1113,10 +1120,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
>   		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
>   				__ATOMIC_RELAXED);
>   		__atomic_thread_fence(__ATOMIC_RELEASE);
> -		tbl8_free(lpm, tbl8_group_start);
> +		status = tbl8_free(lpm, tbl8_group_start);
>   	}
>   #undef group_idx
> -	return 0;
> +	return status;

This will change the rte_lpm_delete API. As a suggestion, you can leave 
it as it was before ("return 0") and send a separate patch (with 
"return status") targeted at 20.11.

>   }
>   
>   /*
> 
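
As background for the "defer queue size is big enough to hold all tbl8 
groups" remark in the commit message above, here is a usage sketch of 
enabling defer-queue reclamation. The config field names are assumed 
from the 20.08 RCU series and the helper name is mine; treat it as an 
illustration, not a reference:

#include <rte_lpm.h>
#include <rte_rcu_qsbr.h>

static int
lpm_attach_rcu(struct rte_lpm *lpm, struct rte_rcu_qsbr *qsv)
{
	struct rte_lpm_rcu_config cfg = {
		.v = qsv,                     /* QSBR variable */
		.mode = RTE_LPM_QSBR_MODE_DQ, /* reclaim via defer queue */
		.dq_size = 0,                 /* 0: default, sized for all tbl8 groups */
	};

	return rte_lpm_rcu_qsbr_add(lpm, &cfg);
}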

-- 
Regards,
Vladimir

