Author: mav
Date: Fri Feb  2 18:02:06 2018
New Revision: 328798
URL: https://svnweb.freebsd.org/changeset/base/328798

Log:
  MFC r307566 (by sbruno):
  Ensure that tasks with a specific cpu set prior to smp starting get
  re-attached to a thread running on that cpu.
  
  ref: https://github.com/NextBSD/NextBSD/commit/fcc20e306bc93ebbbe51f3775d1afb527970a2e9

Modified:
  stable/11/sys/kern/subr_gtaskqueue.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/kern/subr_gtaskqueue.c
==============================================================================
--- stable/11/sys/kern/subr_gtaskqueue.c        Fri Feb  2 17:59:44 2018        (r328797)
+++ stable/11/sys/kern/subr_gtaskqueue.c        Fri Feb  2 18:02:06 2018        (r328798)
@@ -555,7 +555,7 @@ struct taskq_bind_task {
 };
 
 static void
-taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx)
+taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu)
 {
        struct taskqgroup_cpu *qcpu;
 
@@ -565,7 +565,7 @@ taskqgroup_cpu_create(struct taskqgroup *qgroup, int i
            taskqueue_thread_enqueue, &qcpu->tgc_taskq);
        gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
            "%s_%d", qgroup->tqg_name, idx);
-       qcpu->tgc_cpu = idx * qgroup->tqg_stride;
+       qcpu->tgc_cpu = cpu;
 }
 
 static void
@@ -634,8 +634,8 @@ taskqgroup_attach(struct taskqgroup *qgroup, struct gr
        qgroup->tqg_queue[qid].tgc_cnt++;
        LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
        gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
-       gtask->gt_cpu = qgroup->tqg_queue[qid].tgc_cpu;
        if (irq != -1 && smp_started) {
+               gtask->gt_cpu = qgroup->tqg_queue[qid].tgc_cpu;
                CPU_ZERO(&mask);
                CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
                mtx_unlock(&qgroup->tqg_lock);
@@ -644,6 +644,32 @@ taskqgroup_attach(struct taskqgroup *qgroup, struct gr
                mtx_unlock(&qgroup->tqg_lock);
 }
 
+static void
+taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
+{
+       cpuset_t mask;
+       int qid, cpu;
+
+       mtx_lock(&qgroup->tqg_lock);
+       qid = taskqgroup_find(qgroup, gtask->gt_uniq);
+       cpu = qgroup->tqg_queue[qid].tgc_cpu;
+       if (gtask->gt_irq != -1) {
+               mtx_unlock(&qgroup->tqg_lock);
+
+                       CPU_ZERO(&mask);
+                       CPU_SET(cpu, &mask);
+                       intr_setaffinity(gtask->gt_irq, &mask);
+
+                       mtx_lock(&qgroup->tqg_lock);
+       }
+       qgroup->tqg_queue[qid].tgc_cnt++;
+
+       LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
+                        gt_list);
+       gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+       mtx_unlock(&qgroup->tqg_lock);
+}
+
 int
 taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
        void *uniq, int cpu, int irq, char *name)
@@ -672,13 +698,46 @@ taskqgroup_attach_cpu(struct taskqgroup *qgroup, struc
        qgroup->tqg_queue[qid].tgc_cnt++;
        LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
        gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
-       if (irq != -1 && smp_started) {
-               CPU_ZERO(&mask);
-               CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
-               mtx_unlock(&qgroup->tqg_lock);
+       cpu = qgroup->tqg_queue[qid].tgc_cpu;
+       mtx_unlock(&qgroup->tqg_lock);
+
+       CPU_ZERO(&mask);
+       CPU_SET(cpu, &mask);
+       if (irq != -1 && smp_started)
                intr_setaffinity(irq, &mask);
-       } else
+       return (0);
+}
+
+static int
+taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
+{
+       cpuset_t mask;
+       int i, qid, irq, cpu;
+
+       qid = -1;
+       irq = gtask->gt_irq;
+       cpu = gtask->gt_cpu;
+       MPASS(smp_started);
+       mtx_lock(&qgroup->tqg_lock);
+       for (i = 0; i < qgroup->tqg_cnt; i++)
+               if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
+                       qid = i;
+                       break;
+               }
+       if (qid == -1) {
                mtx_unlock(&qgroup->tqg_lock);
+               return (EINVAL);
+       }
+       qgroup->tqg_queue[qid].tgc_cnt++;
+       LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
+       gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+       mtx_unlock(&qgroup->tqg_lock);
+
+       CPU_ZERO(&mask);
+       CPU_SET(cpu, &mask);
+
+       if (irq != -1)
+               intr_setaffinity(irq, &mask);
        return (0);
 }
 
@@ -742,9 +801,8 @@ static int
 _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
 {
        LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
-       cpuset_t mask;
        struct grouptask *gtask;
-       int i, k, old_cnt, qid, cpu;
+       int i, k, old_cnt, old_cpu, cpu;
 
        mtx_assert(&qgroup->tqg_lock, MA_OWNED);
 
@@ -759,6 +817,9 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt,
        }
        qgroup->tqg_adjusting = 1;
        old_cnt = qgroup->tqg_cnt;
+       old_cpu = 0;
+       if (old_cnt < cnt)
+               old_cpu = qgroup->tqg_queue[old_cnt].tgc_cpu;
        mtx_unlock(&qgroup->tqg_lock);
        /*
         * Set up queue for tasks added before boot.
@@ -772,8 +833,13 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt,
        /*
         * If new taskq threads have been added.
         */
-       for (i = old_cnt; i < cnt; i++)
-               taskqgroup_cpu_create(qgroup, i);
+       cpu = old_cpu;
+       for (i = old_cnt; i < cnt; i++) {
+               for (k = 0; k < qgroup->tqg_stride; k++)
+                       cpu = CPU_NEXT(cpu);
+
+               taskqgroup_cpu_create(qgroup, i, cpu);
+       }
        mtx_lock(&qgroup->tqg_lock);
        qgroup->tqg_cnt = cnt;
        qgroup->tqg_stride = stride;
@@ -789,39 +855,15 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt,
                }
        }
 
+       mtx_unlock(&qgroup->tqg_lock);
+
        while ((gtask = LIST_FIRST(&gtask_head))) {
                LIST_REMOVE(gtask, gt_list);
                if (gtask->gt_cpu == -1)
-                       qid = taskqgroup_find(qgroup, gtask->gt_uniq);
-               else {
-                       for (i = 0; i < qgroup->tqg_cnt; i++)
-                               if (qgroup->tqg_queue[i].tgc_cpu == gtask->gt_cpu) {
-                                       qid = i;
-                                       break;
-                               }
-               }
-               qgroup->tqg_queue[qid].tgc_cnt++;
-               LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
-                   gt_list);
-               gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
+                       taskqgroup_attach_deferred(qgroup, gtask);
+               else if (taskqgroup_attach_cpu_deferred(qgroup, gtask))
+                       taskqgroup_attach_deferred(qgroup, gtask);
        }
-       /*
-        * Set new CPU and IRQ affinity
-        */
-       cpu = CPU_FIRST();
-       for (i = 0; i < cnt; i++) {
-               qgroup->tqg_queue[i].tgc_cpu = cpu;
-               for (k = 0; k < qgroup->tqg_stride; k++)
-                       cpu = CPU_NEXT(cpu);
-               CPU_ZERO(&mask);
-               CPU_SET(qgroup->tqg_queue[i].tgc_cpu, &mask);
-               LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) {
-                       if (gtask->gt_irq == -1)
-                               continue;
-                       intr_setaffinity(gtask->gt_irq, &mask);
-               }
-       }
-       mtx_unlock(&qgroup->tqg_lock);
 
        /*
         * If taskq thread count has been reduced.
@@ -838,12 +880,12 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt,
 }
 
 int
-taskqgroup_adjust(struct taskqgroup *qgroup, int cpu, int stride)
+taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
 {
        int error;
 
        mtx_lock(&qgroup->tqg_lock);
-       error = _taskqgroup_adjust(qgroup, cpu, stride);
+       error = _taskqgroup_adjust(qgroup, cnt, stride);
        mtx_unlock(&qgroup->tqg_lock);
 
        return (error);
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to