> -----Original Message-----
> From: lng-odp [mailto:[email protected]] On Behalf Of
> ext Bill Fischofer
> Sent: Thursday, August 27, 2015 2:41 AM
> To: [email protected]
> Subject: [lng-odp] [API-NEXT PATCHv3 06/10] linux-generic: schedule:
> implement scheduler groups
>
> Signed-off-by: Bill Fischofer <[email protected]>
> ---
> .../include/odp/plat/schedule_types.h | 4 +
> platform/linux-generic/odp_schedule.c | 160
> +++++++++++++++++++++
> platform/linux-generic/odp_thread.c | 25 +++-
> 3 files changed, 183 insertions(+), 6 deletions(-)
>
> diff --git a/platform/linux-generic/include/odp/plat/schedule_types.h
> b/platform/linux-generic/include/odp/plat/schedule_types.h
> index 87f9c11..9ab90f4 100644
> --- a/platform/linux-generic/include/odp/plat/schedule_types.h
> +++ b/platform/linux-generic/include/odp/plat/schedule_types.h
> @@ -43,8 +43,12 @@ typedef int odp_schedule_sync_t;
>
> typedef int odp_schedule_group_t;
>
> +/* These must be kept in sync with thread_globals_t in odp_thread.c */
> +#define ODP_SCHED_GROUP_INVALID -1
> #define ODP_SCHED_GROUP_ALL 0
> #define ODP_SCHED_GROUP_WORKER 1
> +#define ODP_SCHED_GROUP_CONTROL 2
> +#define ODP_SCHED_GROUP_NAMED 3
This is actually an API change (plat/schedule_types.h implements the API).
Additional constants should be documented in odp/api/schedule_types.h. _INVALID
and _CONTROL can be added to the API. _NAMED seems to be an implementation
detail and should be moved out of this header (so it is not visible through odp.h).
>
> #define ODP_SCHED_GROUP_NAME_LEN 32
>
> diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-
> generic/odp_schedule.c
> index 5d32c81..c195ba5 100644
> --- a/platform/linux-generic/odp_schedule.c
> +++ b/platform/linux-generic/odp_schedule.c
> @@ -23,6 +23,8 @@
> #include <odp_queue_internal.h>
> #include <odp_packet_io_internal.h>
>
> +odp_thrmask_t sched_mask_all;
> +
> /* Number of schedule commands.
> * One per scheduled queue and packet interface */
> #define NUM_SCHED_CMD (ODP_CONFIG_QUEUES + ODP_CONFIG_PKTIO_ENTRIES)
> @@ -48,6 +50,11 @@ typedef struct {
> odp_pool_t pool;
> odp_shm_t shm;
> uint32_t
> pri_count[ODP_CONFIG_SCHED_PRIOS][QUEUES_PER_PRIO];
> + odp_spinlock_t grp_lock;
> + struct {
> + char name[ODP_SCHED_GROUP_NAME_LEN];
> + odp_thrmask_t *mask;
> + } sched_grp[ODP_CONFIG_SCHED_GRPS];
> } sched_t;
>
> /* Schedule command */
> @@ -87,6 +94,9 @@ static sched_t *sched;
> /* Thread local scheduler context */
> static __thread sched_local_t sched_local;
>
> +/* Internal routine to get scheduler thread mask addrs */
> +odp_thrmask_t *thread_sched_grp_mask(int index);
> +
> static void sched_local_init(void)
> {
> int i;
> @@ -123,6 +133,7 @@ int odp_schedule_init_global(void)
>
> memset(sched, 0, sizeof(sched_t));
>
> + odp_pool_param_init(¶ms);
> params.buf.size = sizeof(sched_cmd_t);
> params.buf.align = 0;
> params.buf.num = NUM_SCHED_CMD;
> @@ -163,6 +174,15 @@ int odp_schedule_init_global(void)
> }
> }
>
> + odp_spinlock_init(&sched->grp_lock);
> +
> + for (i = 0; i < ODP_CONFIG_SCHED_GRPS; i++) {
> + memset(&sched->sched_grp[i].name, 0,
> ODP_SCHED_GROUP_NAME_LEN);
> + sched->sched_grp[i].mask = thread_sched_grp_mask(i);
> + }
> +
> + odp_thrmask_setall(&sched_mask_all);
> +
> ODP_DBG("done\n");
>
> return 0;
> @@ -466,6 +486,18 @@ static int schedule(odp_queue_t *out_queue,
> odp_event_t out_ev[],
> }
>
> qe = sched_cmd->qe;
> +
> + if (qe->s.param.sched.group > ODP_SCHED_GROUP_ALL &&
> + !odp_thrmask_isset(sched->sched_grp
> + [qe->s.param.sched.group].mask,
Is this style OK? It's hard to read when the line breaks inside array brackets.
Use a temp variable instead so the expression fits within 80 chars.
> + thr)) {
> + /* This thread is not eligible for work from
> + * this queue, so continue scheduling it.
> + */
> + if (odp_queue_enq(pri_q, ev))
> + ODP_ABORT("schedule failed\n");
> + continue;
This logic is not optimal. It would be better to arrange the scheduler queues
into groups and poll only those groups to which this thread belongs. The current
approach causes unnecessary ping-pong of queues into and out of the scheduler
queues. However, for the sake of time I'm OK to put this in and optimize
afterwards.
-Petri
> + }
> num = queue_deq_multi(qe, sched_local.buf_hdr,
> max_deq);
>
> if (num < 0) {
> @@ -587,3 +619,131 @@ int odp_schedule_num_prio(void)
> {
> return ODP_CONFIG_SCHED_PRIOS;
> }
_______________________________________________
lng-odp mailing list
[email protected]
https://lists.linaro.org/mailman/listinfo/lng-odp