Pierrick Bouvier <pierrick.bouv...@linaro.org> writes:

> Add an explicit test to check that the expected memory values are
> read/written.
> 8, 16 and 32-bit load/store are tested for all architectures.
> 64 and 128-bit load/store are tested for aarch64/x64.
> Atomic operations (8, 16, 32 and 64-bit) are tested for x64 only.
>
> By default, atomic accesses are non-atomic if a single CPU is running,
> so we force the creation of a second one by creating a new thread first.
>
> The load/store helper code path can't be triggered easily in user mode
> (no softmmu), so we can't test it here.
>
> The output of test-plugin-mem-access.c is the list of patterns expected
> in the plugin output. By reading its stdout, we can compare against the
> plugin's output and have a multiarch test.
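
For reference, the matching step described above boils down to something
like this sketch (illustration only, not part of the patch; the pattern
shown is one of the lines the test prints, and $plugin_out stands for
whatever plugin output file the harness hands to the check script):

    # each line of the test's stdout is a grep pattern, e.g.
    pattern=',store_u8,.*,8,store,0xf1'
    # the check script then looks for it in the plugin output
    grep "$pattern" "$plugin_out" > /dev/null || echo "\"$pattern\" missing"
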
>
> Can be run with:
> make -C build/tests/tcg/$ARCH-linux-user run-plugin-test-plugin-mem-access-with-libmem.so
>
> Tested-by: Xingtao Yao <yaoxt.f...@fujitsu.com>
> Signed-off-by: Pierrick Bouvier <pierrick.bouv...@linaro.org>
> ---
>  tests/tcg/multiarch/test-plugin-mem-access.c  | 175 ++++++++++++++++++
>  tests/tcg/multiarch/Makefile.target           |   7 +
>  .../tcg/multiarch/check-plugin-mem-access.sh  |  30 +++
>  3 files changed, 212 insertions(+)
>  create mode 100644 tests/tcg/multiarch/test-plugin-mem-access.c
>  create mode 100755 tests/tcg/multiarch/check-plugin-mem-access.sh
>
> diff --git a/tests/tcg/multiarch/test-plugin-mem-access.c b/tests/tcg/multiarch/test-plugin-mem-access.c
> new file mode 100644
> index 00000000000..09d1fa22e35
> --- /dev/null
> +++ b/tests/tcg/multiarch/test-plugin-mem-access.c
> @@ -0,0 +1,175 @@
> +/*
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + *
> + * Check that we detect all expected memory accesses using the plugin API.
> + * Used in conjunction with ./check-plugin-mem-access.sh check script.
> + * Output of this program is the list of patterns expected in plugin output.
> + *
> + * 8, 16 and 32-bit load/store are tested for all architectures.
> + * 64 and 128-bit load/store are tested for aarch64/x64.
> + * Atomic operations (8, 16, 32 and 64-bit) are tested for x64 only.
> + */

It would be nice to build this for the softmmu path as well. I'm not
sure if this can be done with a single source or whether we need a
second test. I shall have a play.

> +
> +#include <pthread.h>
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +
> +#if defined(__x86_64__)
> +#include <emmintrin.h>
> +#elif defined(__aarch64__)
> +#include <arm_neon.h>
> +#endif /* __x86_64__ */
> +
> +static void *data;
> +
> +/* ,store_u8,.*,8,store,0xf1 */
> +#define PRINT_EXPECTED(function, type, value, action)                 \
> +do {                                                                  \
> +    printf(",%s,.*,%d,%s,%s\n",                                       \
> +           #function, (int) sizeof(type) * 8, action, value);         \
> +}                                                                     \
> +while (0)
> +
> +#define DEFINE_STORE(name, type, value)                  \
> +                                                         \
> +static void print_expected_store_##name(void)            \
> +{                                                        \
> +    PRINT_EXPECTED(store_##name, type, #value, "store"); \
> +}                                                        \
> +                                                         \
> +static void store_##name(void)                           \
> +{                                                        \
> +    *((type *)data) = value;                             \
> +    print_expected_store_##name();                       \
> +}
> +
> +#define DEFINE_ATOMIC_OP(name, type, value)                    \
> +                                                               \
> +static void print_expected_atomic_op_##name(void)              \
> +{                                                              \
> +    PRINT_EXPECTED(atomic_op_##name, type, "0x0*42", "load");  \
> +    PRINT_EXPECTED(atomic_op_##name, type, #value, "store");   \
> +}                                                              \
> +                                                               \
> +static void atomic_op_##name(void)                             \
> +{                                                              \
> +    *((type *)data) = 0x42;                                    \
> +    __sync_val_compare_and_swap((type *)data, 0x42, value);    \
> +    print_expected_atomic_op_##name();                         \
> +}
> +
> +#define DEFINE_LOAD(name, type, value)                  \
> +                                                        \
> +static void print_expected_load_##name(void)            \
> +{                                                       \
> +    PRINT_EXPECTED(load_##name, type, #value, "load");  \
> +}                                                       \
> +                                                        \
> +static void load_##name(void)                           \
> +{                                                       \
> +    type src = *((type *) data);                        \
> +    type dest = src;                                    \
> +    (void)src, (void)dest;                              \
> +    print_expected_load_##name();                       \
> +}
> +
> +DEFINE_STORE(u8, uint8_t, 0xf1)
> +DEFINE_LOAD(u8, uint8_t, 0xf1)
> +DEFINE_STORE(u16, uint16_t, 0xf123)
> +DEFINE_LOAD(u16, uint16_t, 0xf123)
> +DEFINE_STORE(u32, uint32_t, 0xff112233)
> +DEFINE_LOAD(u32, uint32_t, 0xff112233)
> +
> +#if defined(__x86_64__) || defined(__aarch64__)
> +DEFINE_STORE(u64, uint64_t, 0xf123456789abcdef)
> +DEFINE_LOAD(u64, uint64_t, 0xf123456789abcdef)
> +
> +static void print_expected_store_u128(void)
> +{
> +    PRINT_EXPECTED(store_u128, __int128,
> +                   "0xf122334455667788f123456789abcdef", "store");
> +}
> +
> +static void store_u128(void)
> +{
> +#ifdef __x86_64__
> +    _mm_store_si128(data, _mm_set_epi32(0xf1223344, 0x55667788,
> +                                        0xf1234567, 0x89abcdef));
> +#else
> +    const uint32_t init[4] = {0x89abcdef, 0xf1234567, 0x55667788, 0xf1223344};
> +    uint32x4_t vec = vld1q_u32(init);
> +    vst1q_u32(data, vec);
> +#endif /* __x86_64__ */
> +    print_expected_store_u128();
> +}
> +
> +static void print_expected_load_u128(void)
> +{
> +    PRINT_EXPECTED(load_u128, __int128,
> +                   "0xf122334455667788f123456789abcdef", "load");
> +}
> +
> +static void load_u128(void)
> +{
> +#ifdef __x86_64__
> +    __m128i var = _mm_load_si128(data);
> +#else
> +    uint32x4_t var = vld1q_u32(data);
> +#endif
> +    (void) var;
> +    print_expected_load_u128();
> +}
> +#endif /* __x86_64__ || __aarch64__ */
> +
> +#if defined(__x86_64__)
> +DEFINE_ATOMIC_OP(u8, uint8_t, 0xf1)
> +DEFINE_ATOMIC_OP(u16, uint16_t, 0xf123)
> +DEFINE_ATOMIC_OP(u32, uint32_t, 0xff112233)
> +DEFINE_ATOMIC_OP(u64, uint64_t, 0xf123456789abcdef)
> +#endif /* __x86_64__ */
> +
> +static void *f(void *p)
> +{
> +    return NULL;
> +}
> +
> +int main(void)
> +{
> +    /*
> +     * We force creation of a second thread to enable cpu flag CF_PARALLEL.
> +     * This will generate atomic operations when needed.
> +     */
> +    pthread_t thread;
> +    pthread_create(&thread, NULL, &f, NULL);
> +    pthread_join(thread, NULL);
> +
> +    /* allocate storage up to 128 bits */
> +    data = malloc(16);
> +
> +    store_u8();
> +    load_u8();
> +
> +    store_u16();
> +    load_u16();
> +
> +    store_u32();
> +    load_u32();
> +
> +#if defined(__x86_64__) || defined(__aarch64__)
> +    store_u64();
> +    load_u64();
> +
> +    store_u128();
> +    load_u128();
> +#endif /* __x86_64__ || __aarch64__ */
> +
> +#if defined(__x86_64__)
> +    atomic_op_u8();
> +    atomic_op_u16();
> +    atomic_op_u32();
> +    atomic_op_u64();
> +#endif /* __x86_64__ */
> +
> +    free(data);
> +}
> diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target
> index 5e3391ec9d2..d90cbd3e521 100644
> --- a/tests/tcg/multiarch/Makefile.target
> +++ b/tests/tcg/multiarch/Makefile.target
> @@ -170,5 +170,12 @@ run-plugin-semiconsole-with-%:
>  TESTS += semihosting semiconsole
>  endif
>  
> +# Test plugin memory access instrumentation
> +run-plugin-test-plugin-mem-access-with-libmem.so: \
> +     PLUGIN_ARGS=$(COMMA)print-accesses=true
> +run-plugin-test-plugin-mem-access-with-libmem.so: \
> +     CHECK_PLUGIN_OUTPUT_COMMAND= \
> +     $(SRC_PATH)/tests/tcg/multiarch/check-plugin-mem-access.sh
> +
>  # Update TESTS
>  TESTS += $(MULTIARCH_TESTS)
> diff --git a/tests/tcg/multiarch/check-plugin-mem-access.sh b/tests/tcg/multiarch/check-plugin-mem-access.sh
> new file mode 100755
> index 00000000000..909606943bb
> --- /dev/null
> +++ b/tests/tcg/multiarch/check-plugin-mem-access.sh
> @@ -0,0 +1,30 @@
> +#!/usr/bin/env bash
> +
> +set -euo pipefail
> +
> +die()
> +{
> +    echo "$@" 1>&2
> +    exit 1
> +}
> +
> +check()
> +{
> +    file=$1
> +    pattern=$2
> +    grep "$pattern" "$file" > /dev/null || die "\"$pattern\" not found in $file"
> +}
> +
> +[ $# -eq 1 ] || die "usage: plugin_out_file"
> +
> +plugin_out=$1
> +
> +expected()
> +{
> +    ./test-plugin-mem-access ||
> +        die "running test-plugin-mem-access executable failed"

I'm confused by this. We seem to be running the test binary again, and
this is going to fail if binfmt_misc isn't set up (which we don't
assume for running the TCG tests).
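
One way around that (just a sketch, assuming the harness could capture
the guest's stdout during the QEMU run and pass it to this script as a
second argument; that plumbing isn't part of this patch):

    # hypothetical: use patterns captured from the guest run instead of
    # re-executing the test binary on the host
    plugin_out=$1
    expected_out=$2        # stdout saved from the qemu-$TARGET run
    while read -r pattern; do
        grep "$pattern" "$plugin_out" > /dev/null ||
            die "\"$pattern\" not found in $plugin_out"
    done < "$expected_out"
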

> +}
> +
> +expected | while read line; do
> +    check "$plugin_out" "$line"
> +done

-- 
Alex Bennée
Virtualisation Tech Lead @ Linaro
