Kthreads are currently implemented as an infinite loop. Each one
has its own variant of checks for terminating, freezing, and
waking up. In many cases it is hard to tell what state the kthread
is in, and sometimes it is handled the wrong way.

The plan is to convert kthreads to the kthread worker or workqueue
API. It allows the functionality to be split into separate
operations, which gives the code a better structure. It also defines
a clean state where no locks are held, no IRQs are disabled, and the
kthread might sleep or even be safely migrated.
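
A minimal sketch of the kthread worker pattern (the names here are
illustrative only, not from this patch):

	static void my_func(struct kthread_work *work)
	{
		/* Clean state: no locks held, IRQs enabled, may sleep. */
	}
	static DEFINE_KTHREAD_WORK(my_work, my_func);
	static struct kthread_worker *my_worker;

	static int __init my_init(void)
	{
		my_worker = kthread_create_worker(0, "my_worker");
		if (IS_ERR(my_worker))
			return PTR_ERR(my_worker);
		kthread_queue_work(my_worker, &my_work);
		return 0;
	}

	static void __exit my_exit(void)
	{
		/* Waits until all queued works are done, then stops
		 * and frees the kthread. */
		kthread_destroy_worker(my_worker);
	}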

The kthread worker API is useful when we want a dedicated single
kthread for the work. It helps to make sure that the kthread is
available when needed. It also allows better control, e.g. defining
a scheduling priority.
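
For example, the worker's task can be tuned directly (a sketch,
assuming a worker created as in the snippet above):

	struct sched_param param = { .sched_priority = 10 };

	/* The dedicated kthread is reachable via worker->task. */
	sched_setscheduler(my_worker->task, SCHED_FIFO, &param);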

This patch converts both the producer and the consumer into kthread
workers because the benchmark modifies their scheduling priority and
policy.

Each kthread is replaced with the respective kthread work. The
producer work queues the consumer work when needed. The producer
also re-queues itself with a 10-second delay.
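
The delayed self-requeuing boils down to this (again with
illustrative names):

	static void my_func(struct kthread_work *work);
	static DEFINE_KTHREAD_DELAYED_WORK(my_delayed_work, my_func);

	static void my_func(struct kthread_work *work)
	{
		do_one_round();		/* hypothetical payload */
		/* Run again in 10 seconds. */
		kthread_queue_delayed_work(my_worker, &my_delayed_work,
					   10 * HZ);
	}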

kthread_should_stop() can no longer be used inside the works
because it defines the lifetime of the worker, which needs to stay
usable until all works are done. Instead, a new @test_end global
variable is added.

IMHO, the implementation is easier to follow now. There is less of
the tricky scheduler code, and the wait_to_die() function is gone.

Signed-off-by: Petr Mladek <pmla...@suse.com>
---

The kthread worker API improvements are in 4.9-rc1, so we can
start converting the appropriate kthreads.

 kernel/trace/ring_buffer_benchmark.c | 133 ++++++++++++++++-------------------
 1 file changed, 59 insertions(+), 74 deletions(-)

diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 6df9a83e20d7..52f0990378d1 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -26,10 +26,17 @@ struct rb_page {
 static int reader_finish;
 static DECLARE_COMPLETION(read_start);
 static DECLARE_COMPLETION(read_done);
-
 static struct ring_buffer *buffer;
-static struct task_struct *producer;
-static struct task_struct *consumer;
+
+static void rb_producer_hammer_func(struct kthread_work *dummy);
+static struct kthread_worker *rb_producer_worker;
+static DEFINE_KTHREAD_DELAYED_WORK(rb_producer_hammer_work,
+                                   rb_producer_hammer_func);
+
+static void rb_consumer_func(struct kthread_work *dummy);
+static struct kthread_worker *rb_consumer_worker;
+static DEFINE_KTHREAD_WORK(rb_consumer_work, rb_consumer_func);
+
 static unsigned long read;
 
 static unsigned int disable_reader;
@@ -61,6 +68,7 @@ struct rb_page {
 static int read_events;
 
 static int test_error;
+static int test_end;
 
 #define TEST_ERROR()                           \
        do {                                    \
@@ -77,7 +85,7 @@ enum event_status {
 
 static bool break_test(void)
 {
-       return test_error || kthread_should_stop();
+       return test_error || test_end;
 }
 
 static enum event_status read_event(int cpu)
@@ -262,8 +270,8 @@ static void ring_buffer_producer(void)
                end_time = ktime_get();
 
                cnt++;
-               if (consumer && !(cnt % wakeup_interval))
-                       wake_up_process(consumer);
+               if (rb_consumer_worker && !(cnt % wakeup_interval))
+                       wake_up_process(rb_consumer_worker->task);
 
 #ifndef CONFIG_PREEMPT
                /*
@@ -281,14 +289,14 @@ static void ring_buffer_producer(void)
        } while (ktime_before(end_time, timeout) && !break_test());
        trace_printk("End ring buffer hammer\n");
 
-       if (consumer) {
+       if (rb_consumer_worker) {
                /* Init both completions here to avoid races */
                init_completion(&read_start);
                init_completion(&read_done);
                /* the completions must be visible before the finish var */
                smp_wmb();
                reader_finish = 1;
-               wake_up_process(consumer);
+               wake_up_process(rb_consumer_worker->task);
                wait_for_completion(&read_done);
        }
 
@@ -366,68 +374,39 @@ static void ring_buffer_producer(void)
        }
 }
 
-static void wait_to_die(void)
-{
-       set_current_state(TASK_INTERRUPTIBLE);
-       while (!kthread_should_stop()) {
-               schedule();
-               set_current_state(TASK_INTERRUPTIBLE);
-       }
-       __set_current_state(TASK_RUNNING);
-}
-
-static int ring_buffer_consumer_thread(void *arg)
+static void rb_consumer_func(struct kthread_work *dummy)
 {
-       while (!break_test()) {
-               complete(&read_start);
-
-               ring_buffer_consumer();
+       complete(&read_start);
 
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (break_test())
-                       break;
-               schedule();
-       }
-       __set_current_state(TASK_RUNNING);
-
-       if (!kthread_should_stop())
-               wait_to_die();
-
-       return 0;
+       ring_buffer_consumer();
 }
 
-static int ring_buffer_producer_thread(void *arg)
+static void rb_producer_hammer_func(struct kthread_work *dummy)
 {
-       while (!break_test()) {
-               ring_buffer_reset(buffer);
+       if (break_test())
+               return;
 
-               if (consumer) {
-                       wake_up_process(consumer);
-                       wait_for_completion(&read_start);
-               }
-
-               ring_buffer_producer();
-               if (break_test())
-                       goto out_kill;
+       ring_buffer_reset(buffer);
 
-               trace_printk("Sleeping for 10 secs\n");
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (break_test())
-                       goto out_kill;
-               schedule_timeout(HZ * SLEEP_TIME);
+       if (rb_consumer_worker) {
+               kthread_queue_work(rb_consumer_worker, &rb_consumer_work);
+               wait_for_completion(&read_start);
        }
 
-out_kill:
-       __set_current_state(TASK_RUNNING);
-       if (!kthread_should_stop())
-               wait_to_die();
+       ring_buffer_producer();
 
-       return 0;
+       if (break_test())
+               return;
+
+       trace_printk("Sleeping for 10 secs\n");
+       kthread_queue_delayed_work(rb_producer_worker,
+                                  &rb_producer_hammer_work,
+                                  HZ * SLEEP_TIME);
 }
 
 static int __init ring_buffer_benchmark_init(void)
 {
-       int ret;
+       int ret = 0;
 
        /* make a one meg buffer in overwite mode */
        buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
@@ -435,19 +414,21 @@ static int __init ring_buffer_benchmark_init(void)
                return -ENOMEM;
 
        if (!disable_reader) {
-               consumer = kthread_create(ring_buffer_consumer_thread,
-                                         NULL, "rb_consumer");
-               ret = PTR_ERR(consumer);
-               if (IS_ERR(consumer))
+               rb_consumer_worker = kthread_create_worker(0, "rb_consumer");
+               if (IS_ERR(rb_consumer_worker)) {
+                       ret = PTR_ERR(rb_consumer_worker);
                        goto out_fail;
+               }
        }
 
-       producer = kthread_run(ring_buffer_producer_thread,
-                              NULL, "rb_producer");
-       ret = PTR_ERR(producer);
-
-       if (IS_ERR(producer))
+       rb_producer_worker = kthread_create_worker(0, "rb_producer");
+       if (IS_ERR(rb_producer_worker)) {
+               ret = PTR_ERR(rb_producer_worker);
                goto out_kill;
+       }
+
+       kthread_queue_delayed_work(rb_producer_worker,
+                                  &rb_producer_hammer_work, 0);
 
        /*
         * Run them as low-prio background tasks by default:
@@ -457,24 +438,26 @@ static int __init ring_buffer_benchmark_init(void)
                        struct sched_param param = {
                                .sched_priority = consumer_fifo
                        };
-                       sched_setscheduler(consumer, SCHED_FIFO, &param);
+                       sched_setscheduler(rb_consumer_worker->task,
+                                          SCHED_FIFO, &param);
                } else
-                       set_user_nice(consumer, consumer_nice);
+                       set_user_nice(rb_consumer_worker->task, consumer_nice);
        }
 
        if (producer_fifo >= 0) {
                struct sched_param param = {
                        .sched_priority = producer_fifo
                };
-               sched_setscheduler(producer, SCHED_FIFO, &param);
+               sched_setscheduler(rb_producer_worker->task,
+                                  SCHED_FIFO, &param);
        } else
-               set_user_nice(producer, producer_nice);
+               set_user_nice(rb_producer_worker->task, producer_nice);
 
        return 0;
 
  out_kill:
-       if (consumer)
-               kthread_stop(consumer);
+       if (rb_consumer_worker)
+               kthread_destroy_worker(rb_consumer_worker);
 
  out_fail:
        ring_buffer_free(buffer);
@@ -483,9 +466,11 @@ static int __init ring_buffer_benchmark_init(void)
 
 static void __exit ring_buffer_benchmark_exit(void)
 {
-       kthread_stop(producer);
-       if (consumer)
-               kthread_stop(consumer);
+       test_end = 1;
+       kthread_cancel_delayed_work_sync(&rb_producer_hammer_work);
+       kthread_destroy_worker(rb_producer_worker);
+       if (rb_consumer_worker)
+               kthread_destroy_worker(rb_consumer_worker);
        ring_buffer_free(buffer);
 }
 
-- 
1.8.5.6
