|WARNING| pw117324 [PATCH] [v2] mempool: fix get objects from mempool with cache
dpdklab at iol.unh.edu
dpdklab at iol.unh.edu
Tue Oct 4 18:20:38 CEST 2022
Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/117324
_apply patch failure_
Submitter: Morten Brørup <mb at smartsharesystems.com>
Date: Tuesday, October 04 2022 16:03:49
Applied on: CommitID:fe1a5a9b42f33a4cbfea741d03179557c03acd18
Apply patch set 117324 failed:
Checking patch lib/mempool/rte_mempool.h...
error: while searching for:
uint32_t index, len;
void **cache_objs;
/* No cache provided or cannot be satisfied from cache */
if (unlikely(cache == NULL || n >= cache->size))
goto ring_dequeue;
cache_objs = cache->objs;
/* Can this be satisfied from the cache? */
if (cache->len < n) {
/* No. Backfill the cache first, and then fill from it */
uint32_t req = n + (cache->size - cache->len);
/* How many do we require i.e. number to fill the cache + the request */
ret = rte_mempool_ops_dequeue_bulk(mp,
&cache->objs[cache->len], req);
if (unlikely(ret < 0)) {
/*
* In the off chance that we are buffer constrained,
* where we are not able to allocate cache + n, go to
* the ring directly. If that fails, we are truly out of
* buffers.
*/
goto ring_dequeue;
}
cache->len += req;
}
/* Now fill in the response ... */
for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
*obj_table = cache_objs[len];
cache->len -= n;
RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
@@ -1503,7 +1536,7 @@ rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
error: patch failed: lib/mempool/rte_mempool.h:1463
Applying patch lib/mempool/rte_mempool.h with 1 reject...
Rejected hunk #1.
diff a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h (rejected hunks)
@@ -1463,38 +1463,71 @@ rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
uint32_t index, len;
void **cache_objs;
- /* No cache provided or cannot be satisfied from cache */
- if (unlikely(cache == NULL || n >= cache->size))
+ /* No cache provided or if get would overflow mem allocated for cache */
+ if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
goto ring_dequeue;
- cache_objs = cache->objs;
+ cache_objs = &cache->objs[cache->len];
+
+ if (n <= cache->len) {
+ /* The entire request can be satisfied from the cache. */
+ cache->len -= n;
+ for (index = 0; index < n; index++)
+ *obj_table++ = *--cache_objs;
+
+ RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
- /* Can this be satisfied from the cache? */
- if (cache->len < n) {
- /* No. Backfill the cache first, and then fill from it */
- uint32_t req = n + (cache->size - cache->len);
+ return 0;
+ }
- /* How many do we require i.e. number to fill the cache + the request */
- ret = rte_mempool_ops_dequeue_bulk(mp,
- &cache->objs[cache->len], req);
+ /* Satisfy the first part of the request by depleting the cache. */
+ len = cache->len;
+ for (index = 0; index < len; index++)
+ *obj_table++ = *--cache_objs;
+
+ /* Number of objects remaining to satisfy the request. */
+ len = n - len;
+
+ /* Fill the cache from the ring; fetch size + remaining objects. */
+ ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs,
+ cache->size + len);
+ if (unlikely(ret < 0)) {
+ /*
+ * We are buffer constrained, and not able to allocate
+ * cache + remaining.
+ * Do not fill the cache, just satisfy the remaining part of
+ * the request directly from the ring.
+ */
+ ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, len);
if (unlikely(ret < 0)) {
/*
- * In the off chance that we are buffer constrained,
- * where we are not able to allocate cache + n, go to
- * the ring directly. If that fails, we are truly out of
- * buffers.
+ * That also failed.
+ * No further action is required to roll the first
+ * part of the request back into the cache, as both
+ * cache->len and the objects in the cache are intact.
*/
- goto ring_dequeue;
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
+
+ return ret;
}
- cache->len += req;
+ /* Commit that the cache was emptied. */
+ cache->len = 0;
+
+ RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
+ RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
+
+ return 0;
}
- /* Now fill in the response ... */
- for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++)
- *obj_table = cache_objs[len];
+ cache_objs = &cache->objs[cache->size + len];
- cache->len -= n;
+ /* Satisfy the remaining part of the request from the filled cache. */
+ cache->len = cache->size;
+ for (index = 0; index < len; index++)
+ *obj_table++ = *--cache_objs;
RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
@@ -1503,7 +1536,7 @@ rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
https://lab.dpdk.org/results/dashboard/patchsets/23766/
UNH-IOL DPDK Community Lab
More information about the test-report
mailing list