The autotests take too long to be run often.
This patch reduces the run time of some tests in fast_test.
The others will be analyzed once they can run in a
VM with a reasonable amount of memory.

The current status in a small VM is below:

> make fast_test
/root/dpdk/build/app/test -c f -n 4

Test name                      Test result                      Test    Total
================================================================================
Start group_1:                 Success                       [00m 00s]
Timer autotest:                Success                       [00m 02s]
Debug autotest:                Success                       [00m 00s]
Errno autotest:                Success                       [00m 00s]
Meter autotest:                Success                       [00m 00s]
Common autotest:               Success                       [00m 01s]
Dump log history:              Success                       [00m 00s]
Dump rings:                    Success                       [00m 00s]
Dump mempools:                 Success                       [00m 00s] [00m 05s]
Start group_2:                 Success                       [00m 00s]
Memory autotest:               Success                       [00m 00s]
Read/write lock autotest:      Success                       [00m 00s]
Logs autotest:                 Success                       [00m 00s]
CPU flags autotest:            Success                       [00m 00s]
Version autotest:              Success                       [00m 00s]
EAL filesystem autotest:       Success                       [00m 00s]
EAL flags autotest:            Success                       [00m 05s]
Hash autotest:                 Success                       [00m 00s] [00m 11s]
Start group_3:                 Fail [No prompt]              [00m 00s]
LPM autotest:                  Fail [No prompt]              [00m 00s]
IVSHMEM autotest:              Fail [No prompt]              [00m 00s]
Memcpy autotest:               Fail [No prompt]              [00m 00s]
Memzone autotest:              Fail [No prompt]              [00m 00s]
String autotest:               Fail [No prompt]              [00m 00s]
Alarm autotest:                Fail [No prompt]              [00m 00s] [00m 11s]
Start group_4:                 Success                       [00m 00s]
PCI autotest:                  Success                       [00m 00s]
Malloc autotest:               Success                       [00m 00s]
Multi-process autotest:        Success                       [00m 00s]
Mbuf autotest:                 Success                       [00m 02s]
Per-lcore autotest:            Success                       [00m 00s]
Ring autotest:                 Success                       [00m 00s] [00m 16s]
Start group_5:                 Success                       [00m 00s]
Spinlock autotest:             Success                       [00m 00s]
Byte order autotest:           Success                       [00m 00s]
TAILQ autotest:                Success                       [00m 00s]
Command-line autotest:         Success                       [00m 00s]
Interrupts autotest:           Success                       [00m 00s] [00m 18s]
Start group_6:                 Fail [No prompt]              [00m 00s]
Function reentrancy autotest:  Fail [No prompt]              [00m 00s]
Mempool autotest:              Fail [No prompt]              [00m 00s]
Atomics autotest:              Fail [No prompt]              [00m 00s]
Prefetch autotest:             Fail [No prompt]              [00m 00s]
Red autotest:                  Fail [No prompt]              [00m 00s] [00m 18s]
Start group_7:                 Success                       [00m 00s]
PMD ring autotest:             Success                       [00m 00s]
Access list control autotest:  Success                       [00m 01s]
Sched autotest:                Success                       [00m 00s] [00m 20s]
Start kni:                     Fail [No prompt]              [00m 00s]
KNI autotest:                  Fail [No prompt]              [00m 00s] [00m 20s]
Start mempool_perf:            Success                       [00m 00s]
Cycles autotest:               Success                       [00m 01s] [00m 22s]
Start power:                   Fail [No prompt]              [00m 00s]
Power autotest:                Fail [No prompt]              [00m 00s] [00m 22s]
Start power_acpi_cpufreq:      Fail [No prompt]              [00m 00s]
Power ACPI cpufreq autotest:   Fail [No prompt]              [00m 00s] [00m 22s]
Start power_kvm_vm:            Fail [No prompt]              [00m 00s]
Power KVM VM  autotest:        Fail [No prompt]              [00m 00s] [00m 23s]
Start timer_perf:              Fail [No prompt]              [00m 00s]
Timer performance autotest:    Fail [No prompt]              [00m 00s] [00m 23s]
================================================================================
Total run time: 00m 23s
Number of failed tests: 16

Signed-off-by: Thomas Monjalon <thomas.monjalon at 6wind.com>
---
 app/test/autotest_test_funcs.py | 14 +++++++-------
 app/test/test_hash.c            |  8 ++++----
 app/test/test_interrupts.c      |  4 ++--
 app/test/test_mbuf.c            |  2 +-
 app/test/test_per_lcore.c       |  4 ++--
 app/test/test_ring.c            |  7 +++----
 app/test/test_spinlock.c        |  6 +++---
 app/test/test_timer.c           | 20 ++++++++++----------
 8 files changed, 32 insertions(+), 33 deletions(-)
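
For reference, the harness timeouts changed below bound pexpect's expect()
calls: each autotest function sends the test name to the test app and waits
for a result pattern, and the timeout is the worst-case wait before the test
is reported as hung, so lowering it caps how long a stuck test can hold up the
suite. A minimal sketch of that pattern, modeled on ring_autotest in the diff
below (the success return value and function name here are assumed for
illustration, not copied from the tree):

    import pexpect

    def example_autotest(child, test_name):
        child.sendline(test_name)
        # expect() returns the index of the first pattern that matches;
        # the timeout argument caps how long we wait for any of them.
        index = child.expect(["Test OK", "Test Failed", pexpect.TIMEOUT],
                             timeout=5)
        if index == 0:
            return 0, "Success"
        elif index == 1:
            return -1, "Fail"
        return -1, "Fail [Timeout]"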

diff --git a/app/test/autotest_test_funcs.py b/app/test/autotest_test_funcs.py
index f04909d..5222f6e 100644
--- a/app/test/autotest_test_funcs.py
+++ b/app/test/autotest_test_funcs.py
@@ -83,7 +83,7 @@ def spinlock_autotest(child, test_name):
                        "Test Failed",
                        "Hello from core ([0-9]*) !",
                        "Hello from within recursive locks from ([0-9]*) !",
-               pexpect.TIMEOUT], timeout = 20)
+               pexpect.TIMEOUT], timeout = 5)
                # ok
                if index == 0:
                        break
@@ -177,9 +177,9 @@ def timer_autotest(child, test_name):
        i = 0
        child.sendline(test_name)

-       index = child.expect(["Start timer stress tests \(20 seconds\)",
+       index = child.expect(["Start timer stress tests",
                "Test Failed",
-               pexpect.TIMEOUT], timeout = 10)
+               pexpect.TIMEOUT], timeout = 5)

        if index == 1:
                return -1, "Fail"
@@ -188,16 +188,16 @@ def timer_autotest(child, test_name):

        index = child.expect(["Start timer stress tests 2",
                "Test Failed",
-               pexpect.TIMEOUT], timeout = 40)
+               pexpect.TIMEOUT], timeout = 5)

        if index == 1:
                return -1, "Fail"
        elif index == 2:
                return -1, "Fail [Timeout]"

-       index = child.expect(["Start timer basic tests \(20 seconds\)",
+       index = child.expect(["Start timer basic tests",
                "Test Failed",
-               pexpect.TIMEOUT], timeout = 20)
+               pexpect.TIMEOUT], timeout = 5)

        if index == 1:
                return -1, "Fail"
@@ -277,7 +277,7 @@ def timer_autotest(child, test_name):
 def ring_autotest(child, test_name):
        child.sendline(test_name)
        index = child.expect(["Test OK", "Test Failed",
-               pexpect.TIMEOUT], timeout = 15)
+               pexpect.TIMEOUT], timeout = 2)
        if index == 1:
                return -1, "Fail"
        elif index == 2:
diff --git a/app/test/test_hash.c b/app/test/test_hash.c
index 61fc0a0..7e41725 100644
--- a/app/test/test_hash.c
+++ b/app/test/test_hash.c
@@ -176,7 +176,7 @@ static struct rte_hash_parameters ut_params = {
        .socket_id = 0,
 };

-#define CRC32_ITERATIONS (1U << 20)
+#define CRC32_ITERATIONS (1U << 10)
 #define CRC32_DWORDS (1U << 6)
 /*
  * Test if all CRC32 implementations yield the same hash value
@@ -1081,7 +1081,7 @@ test_hash_creation_with_good_parameters(void)
        return 0;
 }

-#define ITERATIONS 50
+#define ITERATIONS 3
 /*
  * Test to see the average table utilization (entries added/max entries)
  * before hitting a random entry that cannot be added
@@ -1098,7 +1098,7 @@ static int test_average_table_utilization(void)
               "\n  before adding elements begins to fail\n");
        printf("Measuring performance, please wait");
        fflush(stdout);
-       ut_params.entries = 1 << 20;
+       ut_params.entries = 1 << 16;
        ut_params.name = "test_average_utilization";
        ut_params.hash_func = rte_jhash;
        handle = rte_hash_create(&ut_params);
@@ -1138,7 +1138,7 @@ static int test_average_table_utilization(void)
        return 0;
 }

-#define NUM_ENTRIES 1024
+#define NUM_ENTRIES 256
 static int test_hash_iteration(void)
 {
        struct rte_hash *handle;
diff --git a/app/test/test_interrupts.c b/app/test/test_interrupts.c
index 6e3dec3..df6d261 100644
--- a/app/test/test_interrupts.c
+++ b/app/test/test_interrupts.c
@@ -41,7 +41,7 @@

 #include "test.h"

-#define TEST_INTERRUPT_CHECK_INTERVAL 1000 /* ms */
+#define TEST_INTERRUPT_CHECK_INTERVAL 100 /* ms */

 /* predefined interrupt handle types */
 enum test_interrupt_handle_type {
@@ -372,7 +372,7 @@ test_interrupt_full_path_check(enum test_interrupt_handle_type intr_type)
        if (test_interrupt_trigger_interrupt() < 0)
                return -1;

-       /* check flag in 3 seconds */
+       /* check flag */
        for (count = 0; flag == 0 && count < 3; count++)
                rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);

diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
index 98ff93a..59f9979 100644
--- a/app/test/test_mbuf.c
+++ b/app/test/test_mbuf.c
@@ -748,7 +748,7 @@ test_refcnt_iter(unsigned lcore, unsigned iter)
                            __func__, lcore, iter, tref);
                        return;
                }
-               rte_delay_ms(1000);
+               rte_delay_ms(100);
        }

        rte_panic("(lcore=%u, iter=%u): after %us only "
diff --git a/app/test/test_per_lcore.c b/app/test/test_per_lcore.c
index b16449a..f452cdb 100644
--- a/app/test/test_per_lcore.c
+++ b/app/test/test_per_lcore.c
@@ -92,8 +92,8 @@ display_vars(__attribute__((unused)) void *arg)
 static int
 test_per_lcore_delay(__attribute__((unused)) void *arg)
 {
-       rte_delay_ms(5000);
-       printf("wait 5000ms on lcore %u\n", rte_lcore_id());
+       rte_delay_ms(100);
+       printf("wait 100ms on lcore %u\n", rte_lcore_id());

        return 0;
 }
diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index 0d7523e..d18812e 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -101,7 +101,6 @@
 #define RING_SIZE 4096
 #define MAX_BULK 32
 #define N 65536
-#define TIME_S 5

 static rte_atomic32_t synchro;

@@ -130,7 +129,7 @@ check_live_watermark_change(__attribute__((unused)) void *dummy)

        /* init the object table */
        memset(obj_table, 0, sizeof(obj_table));
-       end_time = rte_get_timer_cycles() + (hz * 2);
+       end_time = rte_get_timer_cycles() + (hz / 4);

        /* check that bulk and watermark are 4 and 32 (respectively) */
        while (diff >= 0) {
@@ -194,9 +193,9 @@ test_live_watermark_change(void)
         * watermark and quota */
        rte_eal_remote_launch(check_live_watermark_change, NULL, lcore_id2);

-       rte_delay_ms(1000);
+       rte_delay_ms(100);
        rte_ring_set_water_mark(r, 32);
-       rte_delay_ms(1000);
+       rte_delay_ms(100);

        if (rte_eal_wait_lcore(lcore_id2) < 0)
                return -1;
diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
index 16ced7f..180d6de 100644
--- a/app/test/test_spinlock.c
+++ b/app/test/test_spinlock.c
@@ -129,7 +129,7 @@ test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
 static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
 static uint64_t lock_count[RTE_MAX_LCORE] = {0};

-#define TIME_S 5
+#define TIME_MS 100

 static int
 load_loop_fn(void *func_param)
@@ -145,7 +145,7 @@ load_loop_fn(void *func_param)
                while (rte_atomic32_read(&synchro) == 0);

        begin = rte_get_timer_cycles();
-       while (time_diff / hz < TIME_S) {
+       while (time_diff < hz * TIME_MS / 1000) {
                if (use_lock)
                        rte_spinlock_lock(&lk);
                lcount++;
@@ -258,7 +258,7 @@ test_spinlock(void)

        RTE_LCORE_FOREACH_SLAVE(i) {
                rte_spinlock_unlock(&sl_tab[i]);
-               rte_delay_ms(100);
+               rte_delay_ms(10);
        }

        rte_eal_mp_wait_lcore();
diff --git a/app/test/test_timer.c b/app/test/test_timer.c
index 944e2ad..bc07925 100644
--- a/app/test/test_timer.c
+++ b/app/test/test_timer.c
@@ -137,7 +137,7 @@
 #include <rte_random.h>
 #include <rte_malloc.h>

-#define TEST_DURATION_S 20 /* in seconds */
+#define TEST_DURATION_S 1 /* in seconds */
 #define NB_TIMER 4

 #define RTE_LOGTYPE_TESTTIMER RTE_LOGTYPE_USER3
@@ -305,7 +305,7 @@ timer_stress2_main_loop(__attribute__((unused)) void *arg)
 {
        static struct rte_timer *timers;
        int i, ret;
-       uint64_t delay = rte_get_timer_hz() / 4;
+       uint64_t delay = rte_get_timer_hz() / 20;
        unsigned lcore_id = rte_lcore_id();
        unsigned master = rte_get_master_lcore();
        int32_t my_collisions = 0;
@@ -346,7 +346,7 @@ timer_stress2_main_loop(__attribute__((unused)) void *arg)
                rte_atomic32_add(&collisions, my_collisions);

        /* wait long enough for timers to expire */
-       rte_delay_ms(500);
+       rte_delay_ms(100);

        /* all cores rendezvous */
        if (lcore_id == master) {
@@ -396,7 +396,7 @@ timer_stress2_main_loop(__attribute__((unused)) void *arg)
        }

        /* wait long enough for timers to expire */
-       rte_delay_ms(500);
+       rte_delay_ms(100);

        /* now check that we get the right number of callbacks */
        if (lcore_id == master) {
@@ -495,13 +495,13 @@ timer_basic_main_loop(__attribute__((unused)) void *arg)

        /* launch all timers on core 0 */
        if (lcore_id == rte_get_master_lcore()) {
-               mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id,
+               mytimer_reset(&mytiminfo[0], hz/4, SINGLE, lcore_id,
                              timer_basic_cb);
-               mytimer_reset(&mytiminfo[1], hz*2, SINGLE, lcore_id,
+               mytimer_reset(&mytiminfo[1], hz/2, SINGLE, lcore_id,
                              timer_basic_cb);
-               mytimer_reset(&mytiminfo[2], hz, PERIODICAL, lcore_id,
+               mytimer_reset(&mytiminfo[2], hz/4, PERIODICAL, lcore_id,
                              timer_basic_cb);
-               mytimer_reset(&mytiminfo[3], hz, PERIODICAL,
+               mytimer_reset(&mytiminfo[3], hz/4, PERIODICAL,
                              rte_get_next_lcore(lcore_id, 0, 1),
                              timer_basic_cb);
        }
@@ -591,7 +591,7 @@ test_timer(void)
        end_time = cur_time + (hz * TEST_DURATION_S);

        /* start other cores */
-       printf("Start timer stress tests (%d seconds)\n", TEST_DURATION_S);
+       printf("Start timer stress tests\n");
        rte_eal_mp_remote_launch(timer_stress_main_loop, NULL, CALL_MASTER);
        rte_eal_mp_wait_lcore();

@@ -612,7 +612,7 @@ test_timer(void)
        end_time = cur_time + (hz * TEST_DURATION_S);

        /* start other cores */
-       printf("\nStart timer basic tests (%d seconds)\n", TEST_DURATION_S);
+       printf("\nStart timer basic tests\n");
        rte_eal_mp_remote_launch(timer_basic_main_loop, NULL, CALL_MASTER);
        rte_eal_mp_wait_lcore();

-- 
2.7.0