[dpdk-dev] [PATCH v4 3/3] lib/lpm: use atomic store to avoid partial update

Honnappa Nagarahalli Honnappa.Nagarahalli at arm.com
Mon Jul 8 06:56:58 CEST 2019


> 
> Compiler could generate non-atomic stores for whole table entry updating.
> This may cause incorrect nexthop to be returned, if the byte with valid flag is
> updated prior to the byte with next hot is updated.
                                                           ^^^^^^^
Should be nexthop
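
To make the failure mode concrete, below is a minimal sketch of the
tearing problem (a simplified 32-bit entry layout for illustration
only, not the exact rte_lpm_tbl_entry bit-fields):

#include <stdint.h>

/* Simplified stand-in for a 32-bit LPM table entry (illustrative). */
struct tbl_entry {
	uint32_t next_hop : 24;
	uint32_t valid    : 1;
	uint32_t depth    : 7;
};

static void
update_entry(struct tbl_entry *slot, struct tbl_entry new_entry)
{
	/* A plain "*slot = new_entry;" may be compiled into several
	 * narrower stores, so a lock-free reader could observe
	 * valid == 1 paired with a stale or half-written next_hop.
	 * __atomic_store emits one full-width store; RELAXED is enough
	 * here because only the indivisibility of this store matters,
	 * not its ordering against other memory accesses.
	 */
	__atomic_store(slot, &new_entry, __ATOMIC_RELAXED);
}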

> 
> Changed to use atomic store to update whole table entry.
> 
> Suggested-by: Medvedkin Vladimir <vladimir.medvedkin at intel.com>
> Signed-off-by: Ruifeng Wang <ruifeng.wang at arm.com>
> Reviewed-by: Gavin Hu <gavin.hu at arm.com>
> ---
> v4: initial version
> 
>  lib/librte_lpm/rte_lpm.c | 34 ++++++++++++++++++++++++----------
>  1 file changed, 24 insertions(+), 10 deletions(-)
> 
> diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c index
> baa6e7460..5d1dbd7e6 100644
> --- a/lib/librte_lpm/rte_lpm.c
> +++ b/lib/librte_lpm/rte_lpm.c
> @@ -767,7 +767,9 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
>  					 * Setting tbl8 entry in one go to avoid
>  					 * race conditions
>  					 */
> -					lpm->tbl8[j] = new_tbl8_entry;
> +					__atomic_store(&lpm->tbl8[j],
> +						&new_tbl8_entry,
> +						__ATOMIC_RELAXED);
> 
>  					continue;
>  				}
> @@ -837,7 +839,9 @@ add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
>  					 * Setting tbl8 entry in one go to avoid
>  					 * race conditions
>  					 */
> -					lpm->tbl8[j] = new_tbl8_entry;
> +					__atomic_store(&lpm->tbl8[j],
> +						&new_tbl8_entry,
> +						__ATOMIC_RELAXED);
> 
>  					continue;
>  				}
> @@ -965,7 +969,8 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
>  				 * Setting tbl8 entry in one go to avoid race
>  				 * condition
>  				 */
> -				lpm->tbl8[i] = new_tbl8_entry;
> +				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
> +						__ATOMIC_RELAXED);
> 
>  				continue;
>  			}
> @@ -1100,7 +1105,8 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
>  				 * Setting tbl8 entry in one go to avoid race
>  				 * condition
>  				 */
> -				lpm->tbl8[i] = new_tbl8_entry;
> +				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
> +						__ATOMIC_RELAXED);
> 
>  				continue;
>  			}
> @@ -1393,7 +1399,9 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
>  					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
> 
>  					if (lpm->tbl8[j].depth <= depth)
> -						lpm->tbl8[j] = new_tbl8_entry;
> +						__atomic_store(&lpm->tbl8[j],
> +							&new_tbl8_entry,
> +							__ATOMIC_RELAXED);
>  				}
>  			}
>  		}
> @@ -1490,7 +1498,9 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
>  					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
> 
>  					if (lpm->tbl8[j].depth <= depth)
> -						lpm->tbl8[j] = new_tbl8_entry;
> +						__atomic_store(&lpm->tbl8[j],
> +							&new_tbl8_entry,
> +							__ATOMIC_RELAXED);
>  				}
>  			}
>  		}
> @@ -1646,7 +1656,8 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
>  		 */
>  		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
>  			if (lpm->tbl8[i].depth <= depth)
> -				lpm->tbl8[i] = new_tbl8_entry;
> +				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
> +						__ATOMIC_RELAXED);
>  		}
>  	}
> 
> @@ -1677,7 +1688,8 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
>  		/* Set tbl24 before freeing tbl8 to avoid race condition.
>  		 * Prevent the free of the tbl8 group from hoisting.
>  		 */
> -		lpm->tbl24[tbl24_index] = new_tbl24_entry;
> +		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
> +				__ATOMIC_RELAXED);
>  		__atomic_thread_fence(__ATOMIC_RELEASE);
>  		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
tbl8_alloc_v20/tbl8_free_v20 need to be updated to use __atomic_store
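
For reference, a sketch of the kind of change being asked for here
(illustrative only, modeled on the existing code in
lib/librte_lpm/rte_lpm.c, not a committed fix): tbl8_free_v20()
currently invalidates the group with a plain bit-field write, which
the compiler is free to split or turn into a read-modify-write;
zeroing the whole head entry with one atomic store avoids that.

static void
tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
{
	/* Zero-initialised entry: valid and valid_group both cleared. */
	struct rte_lpm_tbl_entry_v20 zero_tbl8_entry = {0};

	/* Replace "tbl8[tbl8_group_start].valid_group = INVALID;" with a
	 * single whole-entry store that cannot be torn by the compiler.
	 */
	__atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
			__ATOMIC_RELAXED);
}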

>  	}
> @@ -1730,7 +1742,8 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
>  		 */
>  		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
>  			if (lpm->tbl8[i].depth <= depth)
> -				lpm->tbl8[i] = new_tbl8_entry;
> +				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
> +						__ATOMIC_RELAXED);
>  		}
>  	}
> 
> @@ -1761,7 +1774,8 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
>  		/* Set tbl24 before freeing tbl8 to avoid race condition.
>  		 * Prevent the free of the tbl8 group from hoisting.
>  		 */
> -		lpm->tbl24[tbl24_index] = new_tbl24_entry;
> +		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
> +				__ATOMIC_RELAXED);
>  		__atomic_thread_fence(__ATOMIC_RELEASE);
>  		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
tbl8_alloc_v1604/tbl8_free_v1604 need to be updated to use __atomic_store
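Same point as above for the v1604 variants: the tbl8_free_v20() sketch
applies here unchanged apart from using struct rte_lpm_tbl_entry.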

>  	}
> --
> 2.17.1


