Enhances test_ring_perf to optionally select two cores on the same socket but on different L3 caches using hwloc.
This allows performance characterization of ring library on processors
with split L3 cache architectures, providing more realistic measurements
of inter-core communication and cache effects.

The feature is conditional on hwloc being present, ensuring builds
succeed on systems without hwloc.

Signed-off-by: Sivaprasad Tummala <[email protected]>

v3:
 * Localized hwloc dependency only to app/test for targeted linking.
v2:
 * Localized hwloc dependency to app only.
 * Optimized get_two_l3caches() by moving socket and L3 cache ID
   retrieval to the outer loop.
---
 app/test/meson.build      | 10 ++++++
 app/test/test_ring_perf.c | 76 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 86 insertions(+)

diff --git a/app/test/meson.build b/app/test/meson.build
index 8df8d3edd1..667b191a5f 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -257,6 +257,16 @@ endforeach
 
 cflags += no_wvla_cflag
 
+# Optional hwloc support for L3 cache–aware tests
+hwloc_dep = dependency('hwloc', required: false)
+if hwloc_dep.found()
+    message('hwloc found — enabling L3 cache–aware topology support')
+    cflags += ['-DHAVE_HWLOC=1']
+    ext_deps += hwloc_dep
+else
+    message('hwloc not found — L3 cache–aware tests will be disabled')
+endif
+
 extra_flags = [
     # Strict-aliasing rules are violated by uint8_t[] to context size casts.
     '-fno-strict-aliasing',
diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c
index 9a2a481458..51585d647b 100644
--- a/app/test/test_ring_perf.c
+++ b/app/test/test_ring_perf.c
@@ -15,6 +15,10 @@
 #include "test.h"
 #include "test_ring.h"
 
+#ifdef HAVE_HWLOC
+#include <hwloc.h>
+#endif /* HAVE_HWLOC */
+
 /*
  * Ring performance test cases, measures performance of various operations
  * using rdtsc for legacy and 16B size ring elements.
@@ -122,6 +126,85 @@ get_two_cores(struct lcore_pair *lcp)
 	return 1;
 }
 
+#ifdef HAVE_HWLOC
+
+/* NOTE(review): HWLOC_OBJ_L3CACHE only exists in hwloc >= 2.0 — verify the 1.x branch builds. */
+#if HWLOC_API_VERSION < 0x20000
+#define hwloc_get_next_obj_cpuset_by_type_compat(t, s, ty, p) \
+	hwloc_get_next_obj_covering_cpuset_by_type(t, ty, p, s)
+#else
+#define hwloc_get_next_obj_cpuset_by_type_compat(t, s, ty, p) \
+	hwloc_get_next_obj_covering_cpuset_by_type(t, s, ty, p)
+#endif
+
+/* Return the logical index of the L3 cache covering cpu_id, or -1 on failure. */
+static int
+get_l3_cache_id(unsigned int cpu_id)
+{
+	hwloc_bitmap_t cpuset = hwloc_bitmap_alloc();
+	hwloc_topology_t topo;
+	hwloc_obj_t obj;
+	int l3_id = -1;
+
+	if (cpuset == NULL)
+		return -1;
+
+	if (hwloc_topology_init(&topo) < 0) {
+		hwloc_bitmap_free(cpuset);
+		return -1;
+	}
+	if (hwloc_topology_load(topo) < 0) {
+		/* topology was initialized above; destroy it to avoid a leak */
+		hwloc_topology_destroy(topo);
+		hwloc_bitmap_free(cpuset);
+		return -1;
+	}
+
+	hwloc_bitmap_only(cpuset, cpu_id);
+
+	obj = hwloc_get_next_obj_cpuset_by_type_compat(
+			topo, cpuset, HWLOC_OBJ_L3CACHE, NULL);
+
+	if (obj)
+		l3_id = (int)obj->logical_index;
+
+	hwloc_bitmap_free(cpuset);
+	hwloc_topology_destroy(topo);
+
+	return l3_id;
+}
+
+/* Find two lcores on the same socket but under different L3 caches. */
+static int
+get_two_l3caches(struct lcore_pair *lcp)
+{
+	unsigned int id1, id2;
+	unsigned int s1, s2;
+	int c1, c2;
+
+	RTE_LCORE_FOREACH(id1) {
+		c1 = get_l3_cache_id(id1);
+		if (c1 < 0)
+			continue;
+		s1 = rte_lcore_to_socket_id(id1);
+
+		RTE_LCORE_FOREACH(id2) {
+			if (id1 == id2)
+				continue;
+
+			c2 = get_l3_cache_id(id2);
+			s2 = rte_lcore_to_socket_id(id2);
+			if (c2 >= 0 && c1 != c2 && s1 == s2) {
+				lcp->c1 = id1;
+				lcp->c2 = id2;
+				return 0;
+			}
+		}
+	}
+	return 1;
+}
+#endif /* HAVE_HWLOC */
+
 static int
 get_two_sockets(struct lcore_pair *lcp)
 {
@@ -483,6 +566,14 @@ test_ring_perf_esize_run_on_two_cores(
 		if (run_on_core_pair(&cores, param1, param2) < 0)
 			return -1;
 	}
+#ifdef HAVE_HWLOC
+	if (get_two_l3caches(&cores) == 0) {
+		printf("\n### Testing using two cores on same socket"
+			" with different L3 caches ###\n");
+		if (run_on_core_pair(&cores, param1, param2) < 0)
+			return -1;
+	}
+#endif /* HAVE_HWLOC */
 	if (get_two_sockets(&cores) == 0) {
 		printf("\n### Testing using two NUMA nodes ###\n");
 		if (run_on_core_pair(&cores, param1, param2) < 0)
-- 
2.43.0

