Hi!
> +#if __i386__ || __x86_64__
> +#define HUGE_SIZE (2 * 1024 * 1024)
> +
> +#elif __powerpc__ || __powerpc64__
> +#define HUGE_SIZE (16 * 1024 * 1024)
> +
> +#elif __s390__ || __s390x__
> +#define HUGE_SIZE (1 * 1024 * 1024)
> +
> +#else
> +#define HUGE_SIZE (2 * 1024 * 1024)
> +#endif
Can we use Hugepagesize from /proc/meminfo instead?
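Rough, untested sketch (assuming the SAFE_FILE_LINES_SCANF() helper from
include/safe_file_ops.h is available; the names are only illustrative):

#define PATH_MEMINFO "/proc/meminfo"

static long hugepage_size;

static void detect_hugepage_size(void)
{
	/* "Hugepagesize:" in /proc/meminfo is reported in kB */
	SAFE_FILE_LINES_SCANF(cleanup, PATH_MEMINFO, "Hugepagesize: %ld",
			      &hugepage_size);
	hugepage_size *= 1024;
}

Then the mmap() length can be hugepage_size instead of the hardcoded
per-arch table above.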
> +#define PATH_NR_HUGEPAGES "/proc/sys/vm/nr_hugepages"
> +
> +const char *TCID = "futex_wake04";
> +const int TST_TOTAL = 1;
> +
> +static futex_t *futex1, *futex2;
> +
> +static long th2_wait_time;
> +static int th2_wait_done;
> +
> +static long orig_hugepages;
> +
> +static void setup(void)
> +{
> + tst_require_root(NULL);
> + tst_tmpdir();
> +
> + SAFE_FILE_SCANF(NULL, PATH_NR_HUGEPAGES, "%ld", &orig_hugepages);
> + SAFE_FILE_PRINTF(NULL, PATH_NR_HUGEPAGES, "%d", 1);
> +
> + TEST_PAUSE;
> +}
> +
> +static void cleanup(void)
> +{
> + SAFE_FILE_PRINTF(NULL, PATH_NR_HUGEPAGES, "%ld", orig_hugepages);
> +
> + tst_rmdir();
> +}
> +
> +static void *wait_thread1(void *arg LTP_ATTRIBUTE_UNUSED)
> +{
> + futex_wait(futex1, *futex1, NULL, 0);
> +
> + return NULL;
> +}
> +
> +static void *wait_thread2(void *arg LTP_ATTRIBUTE_UNUSED)
> +{
> + struct timeval tv;
> +
> + gettimeofday(&tv, NULL);
> + th2_wait_time = tv.tv_sec;
> + futex_wait(futex2, *futex2, NULL, 0);
Eh, futex_wait() can take a timeout parameter. Why don't you set the
timeout here and fail the test if the futex wait times out? That would
be the simplest solution.
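I.e. something along these lines (rough sketch; the 30s timeout is just
an example, and it assumes the futextest.h wrapper returns -1 and sets
errno on failure):

static void *wait_thread2(void *arg LTP_ATTRIBUTE_UNUSED)
{
	/* example timeout, pick whatever is reasonable */
	static const struct timespec timeout = {.tv_sec = 30, .tv_nsec = 0};

	/* the futextest.h wrapper is assumed to return -1 and set errno */
	if (futex_wait(futex2, *futex2, &timeout, 0) == -1 && errno == ETIMEDOUT)
		tst_resm(TFAIL, "thread2 was not woken up within %ld secs",
			 (long)timeout.tv_sec);
	else
		tst_resm(TPASS, "thread2 woken up");

	return NULL;
}

That gets rid of the gettimeofday() bookkeeping and the busy loop in
wakeup_thread2() as well.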
> + th2_wait_done = 1;
> + tst_resm(TPASS, "Hi hydra, thread2 awake!");
> +
> + return NULL;
> +}
> +
> +static void wakeup_thread2(void)
> +{
> + void *addr;
> + int pgsz, wait_max_time = 30;
> + pthread_t th1, th2;
> + struct timeval tv;
> +
> + /*allocate some shared memory*/
> + addr = mmap(NULL,
> + HUGE_SIZE,
> + PROT_READ | PROT_WRITE,
> + MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB,
> + -1,
> + 0);
> + if (addr == MAP_FAILED) {
> + printf("errno=%d\n", errno);
> + perror("mmap");
> + if (errno == ENOMEM) {
> + tst_brkm(TBROK | TERRNO, NULL,
> + "Probably system didn't actually create any
> huge pages.");
> + }
> + }
> +
> + pgsz = getpagesize();
> +
> + /*apply the first subpage to futex1*/
> + futex1 = addr;
> + *futex1 = 0;
> + /*apply the second subpage to futex2*/
> + futex2 = (futex_t *)((char *)addr + pgsz);
> + *futex2 = 0;
> +
> + /*thread1 block on futex1 first,then thread2 block on futex2*/
> + pthread_create(&th1, NULL, wait_thread1, NULL);
> + sleep(2);
> + pthread_create(&th2, NULL, wait_thread2, NULL);
> + sleep(2);
No sleeps in testcases, please; you have to use proper synchronization
primitives.

If you need to run the test in several threads, have a look at
futex_wake03.c to see how to assert that the threads are sleeping on a
futex.

If the bug can be reproduced with child processes as well, you can use
TST_PROCESS_STATE_WAIT() to ensure the children are sleeping in the
kernel and tst_record_childstatus() to propagate the test result from
the child process.
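Rough, untested sketch of that process-based variant (do_child() and
wakeup_child() are only illustrative names, the timeout is an example):

static void do_child(void)
{
	/* example timeout so a lost wakeup fails instead of hanging */
	static const struct timespec timeout = {.tv_sec = 30, .tv_nsec = 0};

	if (futex_wait(futex2, *futex2, &timeout, 0) == -1)
		tst_resm(TFAIL | TERRNO, "child was not woken up");
	else
		tst_resm(TPASS, "child woken up");

	tst_exit();
}

static void wakeup_child(void)
{
	pid_t pid;

	pid = fork();
	switch (pid) {
	case -1:
		tst_brkm(TBROK | TERRNO, NULL, "fork() failed");
	case 0:
		do_child();
	default:
		break;
	}

	/* make sure the child really sleeps in the kernel before the wake */
	TST_PROCESS_STATE_WAIT(cleanup, pid, 'S');

	futex_wake(futex2, 1, 0);

	/* propagate the child's TPASS/TFAIL to the main test process */
	tst_record_childstatus(cleanup, pid);
}

This works because the futexes live in MAP_SHARED memory, so they are
visible to the child after fork().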
> + /*try to wake up thread2*/
> + futex_wake(futex2, 1, 0);
> +
> + /*see if thread2 can be woke up*/
> + while (!th2_wait_done) {
> + gettimeofday(&tv, NULL);
> + /*thread2 block over 30 secs, test fail*/
> + if (tv.tv_sec > (th2_wait_time + wait_max_time)) {
> + tst_resm(TFAIL,
> + "Bug: wait_thread2 did not wake after %ld
> secs.",
> + tv.tv_sec - th2_wait_time);
> + break;
> + }
> + }
> +
> + munmap(addr, HUGE_SIZE);
SAFE_MUNMAP()
You should also wake the still-sleeping thread here and join both of
them, otherwise, with the test looping, the program will accumulate more
and more threads until it fails.
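Something along these lines at the end of wakeup_thread2() (sketch):

	/* wake the remaining waiter and reap both threads */
	futex_wake(futex1, 1, 0);

	pthread_join(th1, NULL);
	pthread_join(th2, NULL);

	SAFE_MUNMAP(NULL, addr, HUGE_SIZE);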
> +}
> +
> +int main(int argc, char *argv[])
> +{
> + int lc;
> +
> + tst_parse_opts(argc, argv, NULL, NULL);
> +
> + setup();
> +
> + for (lc = 0; TEST_LOOPING(lc); lc++)
> + wakeup_thread2();
> +
> + cleanup();
> + tst_exit();
> +}
> --
> 1.8.3.1
--
Cyril Hrubis
[email protected]