prio-wake: enable using more than 95 threads

prio-wake will currently spew pthread errors on systems with more
than 95 CPUs, or if an -n value greater than 95 is specified on the
command line.  To avoid this, spread the threads out equally over the
available priorities by calculating the number of threads per priority.
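
For illustration only (not part of the patch), the distribution amounts
to a ceiling division over the SCHED_FIFO range; a minimal standalone
sketch, with the 200-thread count picked arbitrarily:

/* Illustrative sketch only -- not part of the patch.  The 200-thread
 * count is an arbitrary example. */
#include <stdio.h>
#include <sched.h>

int main(void)
{
        int rt_threads = 200;
        int numprios, threads_per_prio, prio, i;

        /* reserve the top priority for the master thread */
        numprios = sched_get_priority_max(SCHED_FIFO) -
                   sched_get_priority_min(SCHED_FIFO);

        /* ceiling division: at most this many workers share a priority */
        threads_per_prio = rt_threads / numprios;
        if (rt_threads % numprios)
                threads_per_prio++;

        prio = sched_get_priority_min(SCHED_FIFO);
        for (i = rt_threads; i > 0; i--) {
                if (i != rt_threads && (i % threads_per_prio) == 0)
                        prio++;
                printf("worker %d -> prio %d\n", i, prio);
        }
        printf("master -> prio %d\n", prio + 1);
        return 0;
}

With the usual 1..99 SCHED_FIFO range this puts at most three of the
200 workers on each priority, well inside the range the old
pri_boost++ scheme overflowed.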

Tested with all the pathological thread counts (i.e. mod=0, mod=1, etc.).
Ran 10000 times in parallel.  Confirmed the failure continues to exist
without requeue PI (as it should) and success is seen with requeue PI
(as it should be).

Signed-off-by: Darren Hart <[email protected]>
Acked-by: Dinakar Guniguntala <[email protected]>
Acked-by: Vernon Mauery <[email protected]>
Acked-by: Gowrishankar <[email protected]>

---
 func/prio-wake/prio-wake.c |   24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

Index: realtime/func/prio-wake/prio-wake.c
===================================================================
--- realtime.orig/func/prio-wake/prio-wake.c
+++ realtime/func/prio-wake/prio-wake.c
@@ -54,6 +54,8 @@
 #include <errno.h>
 #include <sys/syscall.h>
 #include <librttest.h>
+#include <libstats.h>
+
 volatile int running_threads = 0;
 static int rt_threads = 0;
 static int locked_broadcast = 1;
@@ -164,8 +166,10 @@ void *worker_thread(void* arg)
 
 int main(int argc, char* argv[])
 {
-       int pri_boost;
+       int threads_per_prio;
         int numcpus;
+       int numprios;
+       int prio;
        int i;
        setup();
 
@@ -184,19 +188,29 @@ int main(int argc, char* argv[])
        printf("Calling pthread_cond_broadcast() with mutex: %s\n\n",
               locked_broadcast ? "LOCKED" : "UNLOCKED");
 
-       pri_boost = 3;
-
        beginrun = rt_gettime();
 
        init_pi_mutex(&mutex);
 
+       /* calculate the number of threads per priority */
+       /* workers use numprios levels, leaving the top priority for the master */
+       numprios = sched_get_priority_max(SCHED_FIFO) -
+                  sched_get_priority_min(SCHED_FIFO);
+
+       threads_per_prio = rt_threads / numprios;
+       if (rt_threads % numprios)
+               threads_per_prio++;
+
        /* start the worker threads */
+       prio = sched_get_priority_min(SCHED_FIFO);
        for (i = rt_threads; i > 0; i--) {
-       create_fifo_thread(worker_thread, (void*)(intptr_t)i, sched_get_priority_min(SCHED_FIFO) + pri_boost++);
+               if ((i != rt_threads && (i % threads_per_prio) == 0))
+                       prio++;
+               create_fifo_thread(worker_thread, (void*)(intptr_t)i, prio);
        }
 
        /* start the master thread */
-       create_fifo_thread(master_thread, (void*)(intptr_t)i, sched_get_priority_min(SCHED_FIFO) + pri_boost);
+       create_fifo_thread(master_thread, (void*)(intptr_t)i, ++prio);
 
        /* wait for threads to complete */
        join_threads();
-- 
Darren Hart
IBM Linux Technology Center
Real-Time Linux Team
