lib/kunit/user_alloc.c currently uses kthread_use_mm() without a
corresponding kthread_unuse_mm(). This is a bug, but fixing it in KUnit
makes writing tests that use mms more difficult, because of KUnit's
resource/try-catch model.

Therefore, introduce a new operation that does what kunit_attach_mm()
wants, namely an unbalanced call with cleanup deferred to
kthread_exit().

This is actually just the same as kthread_use_mm() but without taking a
reference on the mm_struct.

While adding this, clarify the reference returned by mm_alloc(), since
that is what kthread_take_mm() will be paired with in practice.

Signed-off-by: Brendan Jackman <[email protected]>
---
 include/linux/kthread.h |  1 +
 kernel/fork.c           |  3 ++-
 kernel/kthread.c        | 36 +++++++++++++++++++++++++++---------
 3 files changed, 30 insertions(+), 10 deletions(-)

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 8d27403888ce9..2e6244d8ff1a3 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -259,6 +259,7 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
 
 void kthread_destroy_worker(struct kthread_worker *worker);
 
+void kthread_take_mm(struct mm_struct *mm);
 void kthread_use_mm(struct mm_struct *mm);
 void kthread_unuse_mm(struct mm_struct *mm);
 
diff --git a/kernel/fork.c b/kernel/fork.c
index b1f3915d5f8ec..761e6232ea75a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1147,7 +1147,8 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 }
 
 /*
- * Allocate and initialize an mm_struct.
+ * Allocate and initialize an mm_struct. The caller gets a single reference to
+ * the mm's address space, which should be released with a call to mmput().
  */
 struct mm_struct *mm_alloc(void)
 {
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 99a3808d086f0..c660c04a1b627 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1589,10 +1589,16 @@ void kthread_destroy_worker(struct kthread_worker *worker)
 EXPORT_SYMBOL(kthread_destroy_worker);
 
 /**
- * kthread_use_mm - make the calling kthread operate on an address space
+ * kthread_take_mm - make the calling kthread own an address space.
+ *
+ * Unlike kthread_use_mm(), this doesn't have a cleanup; instead, cleanup
+ * happens automatically on kthread exit. Correspondingly, it does not take
+ * any references: by calling this function you donate your reference to the
+ * address space (from mmget()/mm_users).
+ *
  * @mm: address space to operate on
  */
-void kthread_use_mm(struct mm_struct *mm)
+void kthread_take_mm(struct mm_struct *mm)
 {
        struct mm_struct *active_mm;
        struct task_struct *tsk = current;
@@ -1600,13 +1606,6 @@ void kthread_use_mm(struct mm_struct *mm)
        WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
        WARN_ON_ONCE(tsk->mm);
 
-       /*
-        * It is possible for mm to be the same as tsk->active_mm, but
-        * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
-        * because these references are not equivalent.
-        */
-       mmgrab(mm);
-
        task_lock(tsk);
        /* Hold off tlb flush IPIs while switching mm's */
        local_irq_disable();
@@ -1632,6 +1631,25 @@ void kthread_use_mm(struct mm_struct *mm)
         */
        mmdrop_lazy_tlb(active_mm);
 }
+EXPORT_SYMBOL_GPL(kthread_take_mm);
+
+/**
+ * kthread_use_mm - make the calling kthread operate on an address space.
+ *
+ * This must be paired with a call to kthread_unuse_mm().
+ *
+ * @mm: address space to operate on
+ */
+void kthread_use_mm(struct mm_struct *mm)
+{
+       /*
+        * It is possible for mm to be the same as tsk->active_mm, but we must
+        * still mmgrab(mm) and mmdrop_lazy_tlb(active_mm) (in
+        * kthread_take_mm()), because these references are not equivalent.
+        */
+       mmgrab(mm);
+       kthread_take_mm(mm);
+}
 EXPORT_SYMBOL_GPL(kthread_use_mm);
 
 /**

-- 
2.51.2


Reply via email to