From: Raja Zidane <rzid...@nvidia.com>

TIMER_MILLISECOND is defined as the number of CPU cycles per
millisecond. The current definition is only correct for cores running
at 2 GHz; on cores with a different frequency it results in a different
period between refreshes (e.g. the defined value corresponds to about
14 ms on ARM cores).
Use the DPDK API to get the CPU frequency and use it to define
TIMER_MILLISECOND.

Fixes: af75078fece3 ("first public release")
Cc: sta...@dpdk.org

Signed-off-by: Omar Awaysa <om...@nvidia.com>
---
v2:
- use rte_get_timer_hz instead of rte_get_tsc_hz
- update initial timer value to be in seconds
---
 examples/link_status_interrupt/main.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index 551f0524da..79dd7461fd 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -101,9 +101,10 @@ struct lsi_port_statistics {
 struct lsi_port_statistics port_statistics[RTE_MAX_ETHPORTS];
 
 /* A tsc-based timer responsible for triggering statistics printout */
-#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
+#define TIMER_MILLISECOND (rte_get_timer_hz() / 1000)
 #define MAX_TIMER_PERIOD 86400 /* 1 day max */
-static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
+#define DEFAULT_TIMER_PERIOD 10UL /* default period is 10 seconds */
+static int64_t timer_period;
 
 /* Print out statistics on packets dropped */
 static void
@@ -370,6 +371,8 @@ lsi_parse_args(int argc, char **argv)
 		{NULL, 0, 0, 0}
 	};
 
+	timer_period = DEFAULT_TIMER_PERIOD * TIMER_MILLISECOND * 1000;
+
 	argvopt = argv;
 
 	while ((opt = getopt_long(argc, argvopt, "p:q:T:",
-- 
2.17.1
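
For reference, the pattern the patch switches to, as a minimal
standalone sketch: TIMER_MILLISECOND is derived from rte_get_timer_hz()
and timer_period is set after rte_eal_init(), since the timer frequency
is only probed during EAL initialization (and a static initializer
cannot call a function in any case, which is why the assignment moves
into lsi_parse_args()). print_stats() below is a hypothetical
placeholder for the example's real statistics printout, not code from
the patch.

#include <stdint.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_cycles.h>

/* Cycles per millisecond, taken from the actual timer frequency
 * instead of a hard-coded 2 GHz assumption. */
#define TIMER_MILLISECOND (rte_get_timer_hz() / 1000)
#define DEFAULT_TIMER_PERIOD 10UL /* default period is 10 seconds */

static uint64_t timer_period; /* statistics period, in timer cycles */

/* Hypothetical stand-in for the example's statistics printout. */
static void
print_stats(void)
{
	printf("-- statistics --\n");
}

int
main(int argc, char **argv)
{
	uint64_t prev_tsc, cur_tsc, timer_tsc = 0;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Must run after rte_eal_init(): only then does
	 * rte_get_timer_hz() return the real timer frequency. */
	timer_period = DEFAULT_TIMER_PERIOD * TIMER_MILLISECOND * 1000;

	prev_tsc = rte_get_timer_cycles();
	for (;;) {
		cur_tsc = rte_get_timer_cycles();
		timer_tsc += cur_tsc - prev_tsc;
		prev_tsc = cur_tsc;
		if (timer_tsc >= timer_period) {
			print_stats();
			timer_tsc = 0;
		}
	}

	return 0;
}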