Author: mjg
Date: Mon May  7 23:36:16 2018
New Revision: 333344
URL: https://svnweb.freebsd.org/changeset/base/333344

Log:
  Inline the fast path of sched_userret().
  
  The tested condition is rarely true, yet checking it used to cost a
  function call on every return to userspace.
  
  Bumps getuid rate by about 1% on Broadwell.
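
For context, a minimal, self-contained sketch of the pattern being applied:
the usually-false test moves into a header inline so the common case pays
only a compare, and the out-of-line function is called only when there is
real work to do.  All names below (demo_userret, struct ctx) are hypothetical
stand-ins rather than the kernel code, and __predict_false is a local
stand-in for the kernel macro of the same name.

/*
 * Sketch: inline fast path in the "header", separately compiled slow path.
 */
#include <stdio.h>

#define	__predict_false(exp)	__builtin_expect((exp), 0)

struct ctx {
	int	cur_pri;	/* stand-in for td_priority */
	int	user_pri;	/* stand-in for td_user_pri */
};

/* Out-of-line slow path; only reached when the priorities diverge. */
static void
demo_userret_slowpath(struct ctx *c)
{

	/* In the kernel this is where thread_lock()/thread_unlock() sit. */
	c->cur_pri = c->user_pri;
}

/* Inline fast path: one predicted-not-taken compare per return. */
static inline void
demo_userret(struct ctx *c)
{

	if (__predict_false(c->cur_pri != c->user_pri))
		demo_userret_slowpath(c);
}

int
main(void)
{
	struct ctx c = { .cur_pri = 120, .user_pri = 120 };

	demo_userret(&c);	/* common case: no call, no store */
	c.cur_pri = 80;		/* simulate a temporarily boosted priority */
	demo_userret(&c);	/* rare case: slow path restores user_pri */
	printf("priority after return: %d\n", c.cur_pri);	/* 120 */
	return (0);
}

With the same shape in sys/sched.h, the common case compiles down to an
inline compare on the return-to-userspace path, and sched_userret_slowpath()
is entered only when td_priority and td_user_pri actually differ.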

Modified:
  head/sys/kern/sched_4bsd.c
  head/sys/kern/sched_ule.c
  head/sys/sys/sched.h

Modified: head/sys/kern/sched_4bsd.c
==============================================================================
--- head/sys/kern/sched_4bsd.c  Mon May  7 23:23:11 2018        (r333343)
+++ head/sys/kern/sched_4bsd.c  Mon May  7 23:36:16 2018        (r333344)
@@ -1481,25 +1481,13 @@ sched_preempt(struct thread *td)
 }
 
 void
-sched_userret(struct thread *td)
+sched_userret_slowpath(struct thread *td)
 {
-       /*
-        * XXX we cheat slightly on the locking here to avoid locking in
-        * the usual case.  Setting td_priority here is essentially an
-        * incomplete workaround for not setting it properly elsewhere.
-        * Now that some interrupt handlers are threads, not setting it
-        * properly elsewhere can clobber it in the window between setting
-        * it here and returning to user mode, so don't waste time setting
-        * it perfectly here.
-        */
-       KASSERT((td->td_flags & TDF_BORROWING) == 0,
-           ("thread with borrowed priority returning to userland"));
-       if (td->td_priority != td->td_user_pri) {
-               thread_lock(td);
-               td->td_priority = td->td_user_pri;
-               td->td_base_pri = td->td_user_pri;
-               thread_unlock(td);
-       }
+
+       thread_lock(td);
+       td->td_priority = td->td_user_pri;
+       td->td_base_pri = td->td_user_pri;
+       thread_unlock(td);
 }
 
 void

Modified: head/sys/kern/sched_ule.c
==============================================================================
--- head/sys/kern/sched_ule.c   Mon May  7 23:23:11 2018        (r333343)
+++ head/sys/kern/sched_ule.c   Mon May  7 23:36:16 2018        (r333344)
@@ -2356,26 +2356,14 @@ sched_preempt(struct thread *td)
  * to static priorities in msleep() or similar.
  */
 void
-sched_userret(struct thread *td)
+sched_userret_slowpath(struct thread *td)
 {
-       /*
-        * XXX we cheat slightly on the locking here to avoid locking in  
-        * the usual case.  Setting td_priority here is essentially an
-        * incomplete workaround for not setting it properly elsewhere.
-        * Now that some interrupt handlers are threads, not setting it
-        * properly elsewhere can clobber it in the window between setting
-        * it here and returning to user mode, so don't waste time setting
-        * it perfectly here.
-        */
-       KASSERT((td->td_flags & TDF_BORROWING) == 0,
-           ("thread with borrowed priority returning to userland"));
-       if (td->td_priority != td->td_user_pri) {
-               thread_lock(td);
-               td->td_priority = td->td_user_pri;
-               td->td_base_pri = td->td_user_pri;
-               tdq_setlowpri(TDQ_SELF(), td);
-               thread_unlock(td);
-        }
+
+       thread_lock(td);
+       td->td_priority = td->td_user_pri;
+       td->td_base_pri = td->td_user_pri;
+       tdq_setlowpri(TDQ_SELF(), td);
+       thread_unlock(td);
 }
 
 /*

Modified: head/sys/sys/sched.h
==============================================================================
--- head/sys/sys/sched.h        Mon May  7 23:23:11 2018        (r333343)
+++ head/sys/sys/sched.h        Mon May  7 23:36:16 2018        (r333344)
@@ -103,13 +103,32 @@ void      sched_switch(struct thread *td, struct thread *ne
 void   sched_throw(struct thread *td);
 void   sched_unlend_prio(struct thread *td, u_char prio);
 void   sched_user_prio(struct thread *td, u_char prio);
-void   sched_userret(struct thread *td);
+void   sched_userret_slowpath(struct thread *td);
 void   sched_wakeup(struct thread *td);
 #ifdef RACCT
 #ifdef SCHED_4BSD
 fixpt_t        sched_pctcpu_delta(struct thread *td);
 #endif
 #endif
+
+static inline void
+sched_userret(struct thread *td)
+{
+
+       /*
+        * XXX we cheat slightly on the locking here to avoid locking in
+        * the usual case.  Setting td_priority here is essentially an
+        * incomplete workaround for not setting it properly elsewhere.
+        * Now that some interrupt handlers are threads, not setting it
+        * properly elsewhere can clobber it in the window between setting
+        * it here and returning to user mode, so don't waste time setting
+        * it perfectly here.
+        */
+       KASSERT((td->td_flags & TDF_BORROWING) == 0,
+           ("thread with borrowed priority returning to userland"));
+       if (__predict_false(td->td_priority != td->td_user_pri))
+               sched_userret_slowpath(td);
+}
 
 /*
  * Threads are moved on and off of run queues