It seems to me that, outside of the special high-priority, low-latency
producers and consumers, most services should be using one global shared
pool, to prevent too many pools and threads from being created. Here is a
code change I am playing with that adds a function
mlt_slices_init_global_pool(int threads), which runs at normal priority.
What do you think?
--- src/framework/mlt.vers
+++ src/framework/mlt.vers
@@ -493,4 +493,5 @@ MLT_6.6.0 {
global:
mlt_slices_count;
mlt_slices_init_pool;
+ mlt_slices_init_global_pool;
} MLT_6.4.0;
--- src/framework/mlt_slices.c
+++ src/framework/mlt_slices.c
@@ -27,6 +27,7 @@
#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>
+#include <sched.h>
#ifdef _WIN32
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
@@ -129,9 +130,9 @@ static void* mlt_slices_worker( void* p )
/** Initialize a sliced threading context
*
* \public \memberof mlt_slices_s
- * \param threads number of threads to use for job list
- * \param policy scheduling policy of processing threads
- * \param priority priority value that can be used with the scheduling algorithm
+ * \param threads number of threads to use for job list, 0 for #cpus
+ * \param policy scheduling policy of processing threads, -1 for normal
+ * \param priority priority value that can be used with the scheduling algorithm, -1 for maximum
* \return the context pointer
*/
@@ -184,6 +185,10 @@ mlt_slices mlt_slices_init( int threads, int policy, int priority )
pthread_cond_init ( &ctx->cond_var_job, NULL );
pthread_cond_init ( &ctx->cond_var_ready, NULL );
pthread_attr_init( &tattr );
+ if ( policy < 0 )
+ policy = SCHED_OTHER;
+ if ( priority < 0 )
+ priority = sched_get_priority_max( policy );
pthread_attr_setschedpolicy( &tattr, policy );
param.sched_priority = priority;
pthread_attr_setschedparam( &tattr, &param );
@@ -309,9 +314,9 @@ void mlt_slices_run( mlt_slices ctx, int jobs, mlt_slices_proc proc, void* cooki
/** Initialize a sliced threading context pool
*
* \public \memberof mlt_slices_s
- * \param threads number of threads to use for job list
- * \param policy scheduling policy of processing threads
- * \param priority priority value that can be used with the scheduling algorithm
+ * \param threads number of threads to use for job list, 0 for #cpus
+ * \param policy scheduling policy of processing threads, -1 for normal
+ * \param priority priority value that can be used with the scheduling algorithm, -1 for maximum
* \param name name of pool of threads
* \return the context pointer
*/
@@ -355,6 +360,18 @@ mlt_slices mlt_slices_init_pool( int threads, int policy, int priority, const ch
return ctx;
}
+/** Initialize the global sliced thread pool.
+ *
+ * Runs at normal policy with priority 0; passing -1 here would request
+ * sched_get_priority_max() and the shared pool could starve other work.
+ * \public \memberof mlt_slices_s
+ * \param threads number of threads to use for job list, 0 for #cpus
+ * \return the context pointer
+ */
+
+mlt_slices mlt_slices_init_global_pool( int threads )
+{
+	return mlt_slices_init_pool( threads, MLT_SLICES_SCHED_NORMAL, 0, "_mlt_slices_global" );
+}
+
/** Get the number of slices.
*
* \public \memberof mlt_slices_s
--- src/framework/mlt_slices.h
+++ src/framework/mlt_slices.h
@@ -23,6 +23,8 @@
#ifndef MLT_SLICES_H
#define MLT_SLICES_H
+#define MLT_SLICES_SCHED_NORMAL (-1)
+
struct mlt_slices_s;
typedef struct mlt_slices_s *mlt_slices; /**< pointer to Sliced processing context object */
@@ -38,4 +40,6 @@ extern int mlt_slices_count( mlt_slices ctx );
extern mlt_slices mlt_slices_init_pool( int threads, int policy, int priority, const char* name );
+extern mlt_slices mlt_slices_init_global_pool( int threads );
+
#endif
------------------------------------------------------------------------------
Check out the vibrant tech community on one of the world's most
engaging tech sites, SlashDot.org! http://sdm.link/slashdot
_______________________________________________
Mlt-devel mailing list
Mlt-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/mlt-devel