[dpdk-dev] [RFC v2 04/17] mempool: add op to populate objects using provided memory
Andrew Rybchenko
arybchenko at solarflare.com
Tue Jan 23 14:15:59 CET 2018
The callback allows customizing how objects are stored in the
memory chunk. A default implementation of the callback, which simply
places objects one by one, is provided.
Suggested-by: Olivier Matz <olivier.matz at 6wind.com>
Signed-off-by: Andrew Rybchenko <arybchenko at solarflare.com>
---
lib/librte_mempool/rte_mempool.c | 44 +++++++++++-----
lib/librte_mempool/rte_mempool.h | 83 ++++++++++++++++++++++++++++++
lib/librte_mempool/rte_mempool_ops.c | 18 +++++++
lib/librte_mempool/rte_mempool_version.map | 1 +
4 files changed, 133 insertions(+), 13 deletions(-)
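Note (not part of the patch, just to illustrate the intent of the new op):
a driver that needs its own object layout could supply a populate callback
along the following lines. The my_driver_* names and the 128-byte alignment
are invented for the example; everything else follows the callback signature
and the default implementation added below.

#include <rte_common.h>
#include <rte_mempool.h>

#define MY_DRIVER_OBJ_ALIGN 128 /* hypothetical per-object alignment */

static int
my_driver_populate(struct rte_mempool *mp, unsigned int max_objs,
		   void *vaddr, rte_iova_t iova, size_t len,
		   rte_mempool_populate_obj_cb_t *obj_cb)
{
	size_t total_elt_sz = mp->header_size + mp->elt_size +
			      mp->trailer_size;
	/* Round each slot up so objects start on the driver's boundary. */
	size_t slot_sz = RTE_ALIGN_CEIL(total_elt_sz, MY_DRIVER_OBJ_ALIGN);
	unsigned int i;
	size_t off;
	void *obj;

	for (off = 0, i = 0; off + slot_sz <= len && i < max_objs;
	     off += slot_sz, i++) {
		/* Object payload starts after the mempool header. */
		obj = (char *)vaddr + off + mp->header_size;
		/* The callback (mempool_add_elem) links the object into
		 * the pool and initializes its header/trailer.
		 */
		obj_cb(mp, obj, (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA :
		       iova + off + mp->header_size);
		/* As in the default implementation, enqueue explicitly. */
		rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
	}

	return i; /* number of objects actually placed */
}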
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 1f54f95..c5003a9 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -145,9 +145,6 @@ mempool_add_elem(struct rte_mempool *mp, void *obj, rte_iova_t iova)
tlr = __mempool_get_trailer(obj);
tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE;
#endif
-
- /* enqueue in ring */
- rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
}
/* call obj_cb() for each mempool element */
@@ -396,6 +393,30 @@ rte_mempool_free_memchunks(struct rte_mempool *mp)
}
}
+int
+rte_mempool_populate_one_by_one(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb)
+{
+ size_t total_elt_sz;
+ size_t off;
+ unsigned int i;
+ void *obj;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+ for (off = 0, i = 0; off + total_elt_sz <= len && i < max_objs; i++) {
+ off += mp->header_size;
+ obj = (char *)vaddr + off;
+ obj_cb(mp, obj,
+ (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off));
+ rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
+ off += mp->elt_size + mp->trailer_size;
+ }
+
+ return i;
+}
+
/* Add objects in the pool, using a physically contiguous memory
* zone. Return the number of objects added, or a negative value
* on error.
@@ -466,16 +487,13 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
else
off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
- while (off + total_elt_sz <= len && mp->populated_size < mp->size) {
- off += mp->header_size;
- if (iova == RTE_BAD_IOVA)
- mempool_add_elem(mp, (char *)vaddr + off,
- RTE_BAD_IOVA);
- else
- mempool_add_elem(mp, (char *)vaddr + off, iova + off);
- off += mp->elt_size + mp->trailer_size;
- i++;
- }
+ if (off > len)
+ return -EINVAL;
+
+ i = rte_mempool_ops_populate(mp, mp->size - mp->populated_size,
+ (char *)vaddr + off,
+ (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off),
+ len - off, mempool_add_elem);
/* not enough room to store one object */
if (i == 0)
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index be8a371..f6ffab9 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -461,6 +461,59 @@ ssize_t rte_mempool_calc_mem_size_def(const struct rte_mempool *mp,
uint32_t obj_num, uint32_t pg_shift,
size_t *min_chunk_size, size_t *align);
+/**
+ * Function to be called for each populated object.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param vaddr
+ * Object virtual address.
+ * @param iova
+ * IO address of the object, or #RTE_BAD_IOVA.
+ */
+typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
+ void *vaddr, rte_iova_t iova);
+
+/**
+ * Populate memory pool objects using provided memory chunk.
+ *
+ * Populated objects should be enqueued to the pool, e.g. using
+ * rte_mempool_ops_enqueue_bulk().
+ *
+ * If the given IO address is unknown (iova = RTE_BAD_IOVA),
+ * the chunk doesn't need to be physically contiguous (only virtually),
+ * and allocated objects may span two pages.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param max_objs
+ * Maximum number of objects to be populated.
+ * @param vaddr
+ * The virtual address of memory that should be used to store objects.
+ * @param iova
+ * The IO address of the memory chunk, or RTE_BAD_IOVA if unknown.
+ * @param len
+ * The length of memory in bytes.
+ * @param obj_cb
+ * Callback function to be executed for each populated object.
+ * @return
+ * The number of objects added on success.
+ * On error, no objects are populated and a negative errno is returned.
+ */
+typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
+ unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb);
+
+/**
+ * Default way to populate memory pool objects using provided memory
+ * chunk: just slice objects one by one.
+ */
+int rte_mempool_populate_one_by_one(struct rte_mempool *mp,
+ unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb);
+
/** Structure defining mempool operations structure */
struct rte_mempool_ops {
char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
@@ -482,6 +535,11 @@ struct rte_mempool_ops {
* store specified number of objects.
*/
rte_mempool_calc_mem_size_t calc_mem_size;
+ /**
+ * Optional callback to populate mempool objects using
+ * provided memory chunk.
+ */
+ rte_mempool_populate_t populate;
} __rte_cache_aligned;
#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */
@@ -654,6 +712,31 @@ ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
size_t *min_chunk_size, size_t *align);
/**
+ * @internal wrapper for mempool_ops populate callback.
+ *
+ * Populate memory pool objects using provided memory chunk.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param max_objs
+ * Maximum number of objects to be populated.
+ * @param vaddr
+ * The virtual address of memory that should be used to store objects.
+ * @param iova
+ * The IO address of the memory chunk, or RTE_BAD_IOVA if unknown.
+ * @param len
+ * The length of memory in bytes.
+ * @param obj_cb
+ * Callback function to be executed for each populated object.
+ * @return
+ * The number of objects added on success.
+ * On error, no objects are populated and a negative errno is returned.
+ */
+int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb);
+
+/**
* @internal wrapper for mempool_ops free callback.
*
* @param mp
diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c
index d048b37..7c4a22b 100644
--- a/lib/librte_mempool/rte_mempool_ops.c
+++ b/lib/librte_mempool/rte_mempool_ops.c
@@ -89,6 +89,7 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h)
ops->get_capabilities = h->get_capabilities;
ops->register_memory_area = h->register_memory_area;
ops->calc_mem_size = h->calc_mem_size;
+ ops->populate = h->populate;
rte_spinlock_unlock(&rte_mempool_ops_table.sl);
@@ -170,6 +171,23 @@ rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
}
+/* wrapper to populate memory pool objects using provided memory chunk */
+int
+rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+
+ if (ops->populate == NULL)
+ return rte_mempool_populate_one_by_one(mp, max_objs, vaddr,
+ iova, len, obj_cb);
+
+ return ops->populate(mp, max_objs, vaddr, iova, len, obj_cb);
+}
+
/* sets mempool ops previously registered by rte_mempool_register_ops. */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index 9fa7270..00288de 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -56,6 +56,7 @@ DPDK_18.05 {
global:
rte_mempool_calc_mem_size_def;
+ rte_mempool_populate_one_by_one;
} DPDK_17.11;
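Also not part of the patch, but to show how a driver would opt in: filling
the new .populate member of its ops structure is all that is required;
drivers that leave it NULL keep the rte_mempool_populate_one_by_one()
behaviour via rte_mempool_ops_populate(). The my_driver_* callbacks below
are placeholders for the driver's existing handlers.

static const struct rte_mempool_ops my_driver_ops = {
	.name = "my_driver",
	.alloc = my_driver_alloc,
	.free = my_driver_free,
	.enqueue = my_driver_enqueue,
	.dequeue = my_driver_dequeue,
	.get_count = my_driver_get_count,
	.populate = my_driver_populate,	/* optional op added by this patch */
};

MEMPOOL_REGISTER_OPS(my_driver_ops);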
--
2.7.4