Bruce,
I haven't looked at the Next-net-intel tree, so it might already have been
fixed...
ci_tx_free_bufs_vec() in drivers/net/intel/common/tx.h has:
/* is fast-free enabled? */
struct rte_mempool *mp =
likely(txq->fast_free_mp != (void *)UINTPTR_MAX) ?
txq->fast_free_mp :
(txq->fast_free_mp = txep[0].mbuf->pool);
if (mp != NULL && (n & 31) == 0) {
void **cache_objs;
struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
rte_lcore_id());
if (cache == NULL)
goto normal;
cache_objs = &cache->objs[cache->len];
if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
goto done;
}
/* The cache follows the following algorithm
* 1. Add the objects to the cache
* 2. Anything greater than the cache min value (if it
* crosses the cache flush threshold) is flushed to the ring.
*/
/* Add elements back into the cache */
uint32_t copied = 0;
/* n is multiple of 32 */
while (copied < n) {
memcpy(&cache_objs[copied], &txep[copied], 32 *
sizeof(void *));
copied += 32;
}
cache->len += n;
if (cache->len >= cache->flushthresh) {
rte_mempool_ops_enqueue_bulk(mp,
&cache->objs[cache->size],
cache->len - cache->size);
cache->len = cache->size;
}
goto done;
}
It should be replaced by:
/* is fast-free enabled? */
struct rte_mempool *mp =
likely(txq->fast_free_mp != (void *)UINTPTR_MAX) ?
txq->fast_free_mp :
(txq->fast_free_mp = txep[0].mbuf->pool);
if (mp != NULL) {
rte_mbuf_raw_free_bulk(mp, txep, n);
goto done;
}
This removes a layer violation, and restores the mbuf sanity checks and mbuf
history marks that were missing as a consequence of that layer violation.
It also implements fast-free for bulk sizes that are not a multiple of 32.
Venlig hilsen / Kind regards,
-Morten Brørup