[dpdk-dev] [PATCH v3 2/3] mempool: use bit flags instead of is_mp and is_mc

Lazaros Koromilas l at nofutznetworks.com
Thu Jun 16 13:02:18 CEST 2016


Pass the same flags as in rte_mempool_create().  This changes the following API calls:

    rte_mempool_generic_put(mp, obj_table, n, flags)
    rte_mempool_generic_get(mp, obj_table, n, flags)

Signed-off-by: Lazaros Koromilas <l at nofutznetworks.com>
---
 lib/librte_mempool/rte_mempool.h | 58 +++++++++++++++++++++-------------------
 1 file changed, 30 insertions(+), 28 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 7446843..191edba 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -949,12 +949,13 @@ void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
  * @param n
  *   The number of objects to store back in the mempool, must be strictly
  *   positive.
- * @param is_mp
- *   Mono-producer (0) or multi-producers (1).
+ * @param flags
+ *   The flags used for the mempool creation.
+ *   Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
  */
 static inline void __attribute__((always_inline))
 __mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
-		      unsigned n, int is_mp)
+		      unsigned n, int flags)
 {
 	struct rte_mempool_cache *cache;
 	uint32_t index;
@@ -967,7 +968,7 @@ __mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
 	__MEMPOOL_STAT_ADD(mp, put, n);
 
 	/* cache is not enabled or single producer or non-EAL thread */
-	if (unlikely(cache_size == 0 || is_mp == 0 ||
+	if (unlikely(cache_size == 0 || flags & MEMPOOL_F_SP_PUT ||
 		     lcore_id >= RTE_MAX_LCORE))
 		goto ring_enqueue;
 
@@ -1020,15 +1021,16 @@ ring_enqueue:
  *   A pointer to a table of void * pointers (objects).
  * @param n
  *   The number of objects to add in the mempool from the obj_table.
- * @param is_mp
- *   Mono-producer (0) or multi-producers (1).
+ * @param flags
+ *   The flags used for the mempool creation.
+ *   Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
  */
 static inline void __attribute__((always_inline))
 rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
-			unsigned n, int is_mp)
+			unsigned n, int flags)
 {
 	__mempool_check_cookies(mp, obj_table, n, 0);
-	__mempool_generic_put(mp, obj_table, n, is_mp);
+	__mempool_generic_put(mp, obj_table, n, flags);
 }
 
 /**
@@ -1046,7 +1048,7 @@ __rte_deprecated static inline void __attribute__((always_inline))
 rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 			unsigned n)
 {
-	rte_mempool_generic_put(mp, obj_table, n, 1);
+	rte_mempool_generic_put(mp, obj_table, n, 0);
 }
 
 /**
@@ -1064,7 +1066,7 @@ __rte_deprecated static inline void __attribute__((always_inline))
 rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 			unsigned n)
 {
-	rte_mempool_generic_put(mp, obj_table, n, 0);
+	rte_mempool_generic_put(mp, obj_table, n, MEMPOOL_F_SP_PUT);
 }
 
 /**
@@ -1085,8 +1087,7 @@ static inline void __attribute__((always_inline))
 rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 		     unsigned n)
 {
-	rte_mempool_generic_put(mp, obj_table, n,
-				!(mp->flags & MEMPOOL_F_SP_PUT));
+	rte_mempool_generic_put(mp, obj_table, n, mp->flags);
 }
 
 /**
@@ -1101,7 +1102,7 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 __rte_deprecated static inline void __attribute__((always_inline))
 rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
 {
-	rte_mempool_generic_put(mp, &obj, 1, 1);
+	rte_mempool_generic_put(mp, &obj, 1, 0);
 }
 
 /**
@@ -1116,7 +1117,7 @@ rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
 __rte_deprecated static inline void __attribute__((always_inline))
 rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
 {
-	rte_mempool_generic_put(mp, &obj, 1, 0);
+	rte_mempool_generic_put(mp, &obj, 1, MEMPOOL_F_SP_PUT);
 }
 
 /**
@@ -1145,15 +1146,16 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
  *   A pointer to a table of void * pointers (objects).
  * @param n
  *   The number of objects to get, must be strictly positive.
- * @param is_mc
- *   Mono-consumer (0) or multi-consumers (1).
+ * @param flags
+ *   The flags used for the mempool creation.
+ *   Single-consumer (MEMPOOL_F_SC_GET flag) or multi-consumers.
  * @return
  *   - >=0: Success; number of objects supplied.
  *   - <0: Error; code of ring dequeue function.
  */
 static inline int __attribute__((always_inline))
 __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
-		      unsigned n, int is_mc)
+		      unsigned n, int flags)
 {
 	int ret;
 	struct rte_mempool_cache *cache;
@@ -1163,7 +1165,7 @@ __mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 	uint32_t cache_size = mp->cache_size;
 
 	/* cache is not enabled or single consumer */
-	if (unlikely(cache_size == 0 || is_mc == 0 ||
+	if (unlikely(cache_size == 0 || flags & MEMPOOL_F_SC_GET ||
 		     n >= cache_size || lcore_id >= RTE_MAX_LCORE))
 		goto ring_dequeue;
 
@@ -1228,18 +1230,19 @@ ring_dequeue:
  *   A pointer to a table of void * pointers (objects) that will be filled.
  * @param n
  *   The number of objects to get from mempool to obj_table.
- * @param is_mc
- *   Mono-consumer (0) or multi-consumers (1).
+ * @param flags
+ *   The flags used for the mempool creation.
+ *   Single-consumer (MEMPOOL_F_SC_GET flag) or multi-consumers.
  * @return
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
 static inline int __attribute__((always_inline))
 rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
-			int is_mc)
+			int flags)
 {
 	int ret;
-	ret = __mempool_generic_get(mp, obj_table, n, is_mc);
+	ret = __mempool_generic_get(mp, obj_table, n, flags);
 	if (ret == 0)
 		__mempool_check_cookies(mp, obj_table, n, 1);
 	return ret;
@@ -1267,7 +1270,7 @@ rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
 __rte_deprecated static inline int __attribute__((always_inline))
 rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
-	return rte_mempool_generic_get(mp, obj_table, n, 1);
+	return rte_mempool_generic_get(mp, obj_table, n, 0);
 }
 
 /**
@@ -1293,7 +1296,7 @@ rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 __rte_deprecated static inline int __attribute__((always_inline))
 rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
-	return rte_mempool_generic_get(mp, obj_table, n, 0);
+	return rte_mempool_generic_get(mp, obj_table, n, MEMPOOL_F_SC_GET);
 }
 
 /**
@@ -1321,8 +1324,7 @@ rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 static inline int __attribute__((always_inline))
 rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
-	return rte_mempool_generic_get(mp, obj_table, n,
-				       !(mp->flags & MEMPOOL_F_SC_GET));
+	return rte_mempool_generic_get(mp, obj_table, n, mp->flags);
 }
 
 /**
@@ -1345,7 +1347,7 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 __rte_deprecated static inline int __attribute__((always_inline))
 rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
 {
-	return rte_mempool_generic_get(mp, obj_p, 1, 1);
+	return rte_mempool_generic_get(mp, obj_p, 1, 0);
 }
 
 /**
@@ -1368,7 +1370,7 @@ rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
 __rte_deprecated static inline int __attribute__((always_inline))
 rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
 {
-	return rte_mempool_generic_get(mp, obj_p, 1, 0);
+	return rte_mempool_generic_get(mp, obj_p, 1, MEMPOOL_F_SC_GET);
 }
 
 /**
-- 
1.9.1



More information about the dev mailing list