[dpdk-dev] [PATCH v4 3/3] lib/lpm: use atomic store to avoid partial update

Honnappa Nagarahalli Honnappa.Nagarahalli at arm.com
Tue Jul 9 06:43:19 CEST 2019


> >
> > >
> > > Compiler could generate non-atomic stores for whole table entry updating.
> > > This may cause incorrect nexthop to be returned, if the byte with
> > > valid flag is updated prior to the byte with next hot is updated.
> >                                                            ^^^^^^^
> > Should be nexthop
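To spell out the failure mode for the archive, here is a minimal sketch,
assuming a hypothetical two-byte entry rather than the real rte_lpm
bitfield layout (the two stores contrast the racy and the safe variant):

	#include <stdint.h>

	/* Hypothetical entry; the real rte_lpm entries are packed
	 * bitfield unions, but the hazard is the same. */
	struct entry {
		uint8_t next_hop;
		uint8_t valid;
	};

	static struct entry tbl[256];

	void update(uint32_t i, uint8_t nh)
	{
		struct entry e = {.next_hop = nh, .valid = 1};

		/* Plain assignment: the compiler may emit two 1-byte
		 * stores, so a concurrent reader can observe valid == 1
		 * while next_hop still holds the old value. */
		tbl[i] = e;

		/* Atomic store of the whole entry: a single store on
		 * targets where the type is lock-free, so a reader sees
		 * either the old entry or the new one, never a mix. */
		__atomic_store(&tbl[i], &e, __ATOMIC_RELAXED);
	}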
> >
> > >
> > > Changed to use atomic store to update whole table entry.
> > >
> > > Suggested-by: Medvedkin Vladimir <vladimir.medvedkin at intel.com>
> > > Signed-off-by: Ruifeng Wang <ruifeng.wang at arm.com>
> > > Reviewed-by: Gavin Hu <gavin.hu at arm.com>
> > > ---
> > > v4: initial version
> > >
> > >  lib/librte_lpm/rte_lpm.c | 34 ++++++++++++++++++++++++----------
> > >  1 file changed, 24 insertions(+), 10 deletions(-)
> > >
> > > diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
> > > index baa6e7460..5d1dbd7e6 100644
> > > --- a/lib/librte_lpm/rte_lpm.c
> > > +++ b/lib/librte_lpm/rte_lpm.c
> > > @@ -767,7 +767,9 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
> > >  					 * Setting tbl8 entry in one go to avoid
> > >  					 * race conditions
> > >  					 */
> > > -					lpm->tbl8[j] = new_tbl8_entry;
> > > +					__atomic_store(&lpm->tbl8[j],
> > > +						&new_tbl8_entry,
> > > +						__ATOMIC_RELAXED);
> > >
> > >  					continue;
> > >  				}
> > > @@ -837,7 +839,9 @@ add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
> > >  					 * Setting tbl8 entry in one go to avoid
> > >  					 * race conditions
> > >  					 */
> > > -					lpm->tbl8[j] = new_tbl8_entry;
> > > +					__atomic_store(&lpm->tbl8[j],
> > > +						&new_tbl8_entry,
> > > +						__ATOMIC_RELAXED);
> > >
> > >  					continue;
> > >  				}
> > > @@ -965,7 +969,8 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
> > >  				 * Setting tbl8 entry in one go to avoid race
> > >  				 * condition
> > >  				 */
> > > -				lpm->tbl8[i] = new_tbl8_entry;
> > > +				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
> > > +						__ATOMIC_RELAXED);
> > >
> > >  				continue;
> > >  			}
> > > @@ -1100,7 +1105,8 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
> > >  				 * Setting tbl8 entry in one go to avoid race
> > >  				 * condition
> > >  				 */
> > > -				lpm->tbl8[i] = new_tbl8_entry;
> > > +				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
> > > +						__ATOMIC_RELAXED);
> > >
> > >  				continue;
> > >  			}
> > > @@ -1393,7 +1399,9 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
> > >  					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
> > >
> > >  					if (lpm->tbl8[j].depth <= depth)
> > > -						lpm->tbl8[j] = new_tbl8_entry;
> > > +						__atomic_store(&lpm->tbl8[j],
> > > +							&new_tbl8_entry,
> > > +							__ATOMIC_RELAXED);
> > >  				}
> > >  			}
> > >  		}
> > > @@ -1490,7 +1498,9 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
> > >  					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
> > >
> > >  					if (lpm->tbl8[j].depth <= depth)
> > > -						lpm->tbl8[j] = new_tbl8_entry;
> > > +						__atomic_store(&lpm->tbl8[j],
> > > +							&new_tbl8_entry,
> > > +							__ATOMIC_RELAXED);
> > >  				}
> > >  			}
> > >  		}
> > > @@ -1646,7 +1656,8 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
> > >  		 */
> > >  		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
> > >  			if (lpm->tbl8[i].depth <= depth)
> > > -				lpm->tbl8[i] = new_tbl8_entry;
> > > +				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
> > > +						__ATOMIC_RELAXED);
> > >  		}
> > >  	}
> > >
> > > @@ -1677,7 +1688,8 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
> > >  		/* Set tbl24 before freeing tbl8 to avoid race condition.
> > >  		 * Prevent the free of the tbl8 group from hoisting.
> > >  		 */
> > > -		lpm->tbl24[tbl24_index] = new_tbl24_entry;
> > > +		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
> > > +				__ATOMIC_RELAXED);
> > >  		__atomic_thread_fence(__ATOMIC_RELEASE);
> > >  		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
> > tbl8_alloc_v20/tbl8_free_v20 need to be updated to use __atomic_store
> >
> tbl8_alloc_v20/tbl8_free_v20 update a single field of the table entry. That
> store is already atomic. Do we really need to use __atomic_store?
I thought we agreed that all the tbl8 stores would use __atomic_store.
IMO, it is better to use the C11 atomic built-ins throughout, at least for the data structures shared in reader-writer scenarios. Otherwise, the code does not follow the C11 memory model completely. (I do not know what to call such a mixed model.)
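Something along these lines, for example (a sketch of the suggestion only,
reusing the field names, the VALID/INVALID flags, and the tbl8_entry pointer
from rte_lpm.c; not a tested patch):

	/* In tbl8_alloc_v1604(), publish a freshly cleaned group with an
	 * explicit C11 store instead of a plain assignment. */
	struct rte_lpm_tbl_entry new_tbl8_entry = {
		.next_hop = 0,
		.valid = INVALID,
		.depth = 0,
		.valid_group = VALID,
	};

	memset(&tbl8_entry[0], 0,
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
			sizeof(tbl8_entry[0]));

	/* The single-field store was already atomic in practice; the
	 * point is that a plain store to data shared with lock-free
	 * readers is a data race under the C11 model, while the
	 * built-in is not. */
	__atomic_store(tbl8_entry, &new_tbl8_entry, __ATOMIC_RELAXED);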

> 
> > >  	}
> > > @@ -1730,7 +1742,8 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
> > >  		 */
> > >  		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
> > >  			if (lpm->tbl8[i].depth <= depth)
> > > -				lpm->tbl8[i] = new_tbl8_entry;
> > > +				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
> > > +						__ATOMIC_RELAXED);
> > >  		}
> > >  	}
> > >
> > > @@ -1761,7 +1774,8 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
> > >  		/* Set tbl24 before freeing tbl8 to avoid race condition.
> > >  		 * Prevent the free of the tbl8 group from hoisting.
> > >  		 */
> > > -		lpm->tbl24[tbl24_index] = new_tbl24_entry;
> > > +		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
> > > +				__ATOMIC_RELAXED);
> > >  		__atomic_thread_fence(__ATOMIC_RELEASE);
> > >  		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
> > tbl8_alloc_v1604 /tbl8_free_v1604 need to be updated to use
> > __atomic_store
> Ditto.
> 
> >
> > >  	}
> > > --
> > > 2.17.1
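For readers of the archive, the ordering that both delete_depth_big_* paths
above rely on, in sketch form with the names taken from the diff:

	/* 1. Repoint tbl24 away from the tbl8 group. Relaxed suffices
	 *    here; the entry must only not be stored in pieces. */
	__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
			__ATOMIC_RELAXED);

	/* 2. Release fence: keeps the free of the tbl8 group below from
	 *    being hoisted above the tbl24 update, so the group is not
	 *    marked reusable while tbl24 still points into it. */
	__atomic_thread_fence(__ATOMIC_RELEASE);

	/* 3. Only now return the group to the allocator. */
	tbl8_free_v1604(lpm->tbl8, tbl8_group_start);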


