A frequent mode of operation is fanning out N tasks to execute in
parallel, collating their results, fanning out M more tasks, and so on.
This is also common to the notion of the async/sync kernel domain split.
A barrier provides a mechanism by which all work queued after the
barrier must wait (i.e. not be scheduled) until all work queued before the
barrier is completed.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Sumit Semwal <sumit.semwal at linaro.org>
Cc: Shuah Khan <shuahkh at osg.samsung.com>
Cc: Tejun Heo <tj at kernel.org>
Cc: Daniel Vetter <daniel.vetter at ffwll.ch>
Cc: Andrew Morton <akpm at linux-foundation.org>
Cc: Ingo Molnar <mingo at kernel.org>
Cc: Kees Cook <keescook at chromium.org>
Cc: Thomas Gleixner <tglx at linutronix.de>
Cc: "Paul E. McKenney" <paulmck at linux.vnet.ibm.com>
Cc: Dan Williams <dan.j.williams at intel.com>
Cc: Andrey Ryabinin <aryabinin at virtuozzo.com>
Cc: Davidlohr Bueso <dave at stgolabs.net>
Cc: Nikolay Aleksandrov <nikolay at cumulusnetworks.com>
Cc: "David S. Miller" <davem at davemloft.net>
Cc: "Peter Zijlstra (Intel)" <peterz at infradead.org>
Cc: Rasmus Villemoes <linux at rasmusvillemoes.dk>
Cc: Andy Shevchenko <andriy.shevchenko at linux.intel.com>
Cc: Dmitry Vyukov <dvyukov at google.com>
Cc: Alexander Potapenko <glider at google.com>
Cc: linux-kernel at vger.kernel.org
Cc: linux-media at vger.kernel.org
Cc: dri-devel at lists.freedesktop.org
Cc: linaro-mm-sig at lists.linaro.org
---
 include/linux/async.h |  4 +++
 kernel/async.c        | 72 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 76 insertions(+)

diff --git a/include/linux/async.h b/include/linux/async.h
index e7d7289a9889..de44306f8cb7 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -26,6 +26,7 @@ struct async_work {

 struct async_domain {
        struct list_head pending;
+       struct kfence *barrier;
        unsigned registered:1;
 };

@@ -59,6 +60,9 @@ extern void async_synchronize_cookie(async_cookie_t cookie);
 extern void async_synchronize_cookie_domain(async_cookie_t cookie,
                                            struct async_domain *domain);

+extern void async_barrier(void);
+extern void async_barrier_domain(struct async_domain *domain);
+
 extern bool current_is_async(void);

 extern struct async_work *
diff --git a/kernel/async.c b/kernel/async.c
index 0d695919a60d..5cfa398a19b2 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -154,6 +154,15 @@ struct async_work *async_work_create(async_func_t func, void *data, gfp_t gfp)
 }
 EXPORT_SYMBOL_GPL(async_work_create);

+static void async_barrier_delete(struct async_domain *domain)
+{
+       if (!domain->barrier)
+               return;
+
+       kfence_put(domain->barrier);
+       domain->barrier = NULL;
+}
+
 async_cookie_t queue_async_work(struct async_domain *domain,
                                struct async_work *work,
                                gfp_t gfp)
@@ -174,6 +183,10 @@ async_cookie_t queue_async_work(struct async_domain *domain,
        async_pending_count++;
        spin_unlock_irqrestore(&async_lock, flags);

+       if (domain->barrier &&
+           !kfence_await_kfence(&entry->base.fence, domain->barrier, gfp))
+               async_barrier_delete(domain);
+
        /* mark that this task has queued an async job, used by module init */
        current->flags |= PF_USED_ASYNC;

@@ -241,6 +254,63 @@ async_cookie_t async_schedule_domain(async_func_t func, void *data,
 }
 EXPORT_SYMBOL_GPL(async_schedule_domain);

+static struct kfence *__async_barrier_create(struct async_domain *domain)
+{
+       struct kfence *fence;
+       struct async_entry *entry;
+       unsigned long flags;
+       int ret;
+
+       fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+       if (!fence)
+               goto out_sync;
+
+       kfence_init(fence, NULL);
+
+       ret = 0;
+       spin_lock_irqsave(&async_lock, flags);
+       list_for_each_entry(entry, &domain->pending, pending_link[0]) {
+               ret |= kfence_await_kfence(fence,
+                                          &entry->base.fence,
+                                          GFP_ATOMIC);
+               if (ret < 0)
+                       break;
+       }
+       spin_unlock_irqrestore(&async_lock, flags);
+       if (ret <= 0)
+               goto out_put;
+
+       if (domain->barrier)
+               kfence_await_kfence(fence, domain->barrier, GFP_KERNEL);
+
+       kfence_complete(fence);
+       return fence;
+
+out_put:
+       kfence_complete(fence);
+       kfence_put(fence);
+out_sync:
+       async_synchronize_full_domain(domain);
+       return NULL;
+}
+
+void async_barrier(void)
+{
+       async_barrier_domain(&async_dfl_domain);
+}
+EXPORT_SYMBOL_GPL(async_barrier);
+
+void async_barrier_domain(struct async_domain *domain)
+{
+       struct kfence *barrier = __async_barrier_create(domain);
+
+       if (domain->barrier)
+               kfence_put(domain->barrier);
+
+       domain->barrier = barrier;
+}
+EXPORT_SYMBOL_GPL(async_barrier_domain);
+
 /**
  * async_synchronize_full - synchronize all asynchronous function calls
  *
@@ -264,6 +334,8 @@ EXPORT_SYMBOL_GPL(async_synchronize_full);
 void async_unregister_domain(struct async_domain *domain)
 {
        WARN_ON(!list_empty(&domain->pending));
+
+       async_barrier_delete(domain);
        domain->registered = 0;
 }
 EXPORT_SYMBOL_GPL(async_unregister_domain);
-- 
2.8.1

Reply via email to