Re: hardclock: don't call statclock(), stathz is always non-zero

2023-01-31 Thread Mark Kettenis
> Date: Tue, 31 Jan 2023 04:50:59 -0600
> From: Scott Cheloha 
> 
> On Mon, Jan 30, 2023 at 05:08:38PM +0100, Mark Kettenis wrote:
> > > Date: Sat, 21 Jan 2023 17:02:48 -0600
> > > From: Scott Cheloha 
> > > 
> > > All the platforms have switched to clockintr.
> > > 
> > > Let's start by isolating statclock() from hardclock().  stathz is now
> > > always non-zero: statclock() must be called separately.  Update
> > > several of the stathz users to reflect that the value is always
> > > non-zero.
> > > 
> > > This is a first step toward making hardclock and statclock into
> > > schedulable entities.
> > > 
> > > ok?
> > 
> > If you are confident enough to start burning bridges, yes ok kettenis@
> > 
> > Maybe you want to add
> > 
> > KASSERT(stathz != 0);
> > KASSERT(profhz != 0);
> > 
> > at the start of initclocks() just to be sure?
> > 
> > Either way is fine with me.
> 
> I thought about doing that, but those checks are done during
> cpu_initclocks(), in clockintr_init():
> 
> 60  void
> 61  clockintr_init(u_int flags)
> 62  {
> 63  KASSERT(CPU_IS_PRIMARY(curcpu()));
> 64  KASSERT(clockintr_flags == 0);
> 65  KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
> 66  
> 67  KASSERT(hz > 0 && hz <= 1000000000);
> 68  hardclock_period = 1000000000 / hz;
> 69  
> 70  KASSERT(stathz >= 1 && stathz <= 1000000000);
> 
> Checking them again might make intent more explicit...  still, I'm
> leaning toward leaving them out.

Ah, sure, yes, that's fine.



Re: hardclock: don't call statclock(), stathz is always non-zero

2023-01-31 Thread Scott Cheloha
On Mon, Jan 30, 2023 at 05:08:38PM +0100, Mark Kettenis wrote:
> > Date: Sat, 21 Jan 2023 17:02:48 -0600
> > From: Scott Cheloha 
> > 
> > All the platforms have switched to clockintr.
> > 
> > Let's start by isolating statclock() from hardclock().  stathz is now
> > always non-zero: statclock() must be called separately.  Update
> > several of the stathz users to reflect that the value is always
> > non-zero.
> > 
> > This is a first step toward making hardclock and statclock into
> > schedulable entities.
> > 
> > ok?
> 
> If you are confident enough to start burning bridges, yes ok kettenis@
> 
> Maybe you want to add
> 
> KASSERT(stathz != 0);
> KASSERT(profhz != 0);
> 
> at the start of initclocks() just to be sure?
> 
> Either way is fine with me.

I thought about doing that, but those checks are done during
cpu_initclocks(), in clockintr_init():

60  void
61  clockintr_init(u_int flags)
62  {
63  KASSERT(CPU_IS_PRIMARY(curcpu()));
64  KASSERT(clockintr_flags == 0);
65  KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
66  
67  KASSERT(hz > 0 && hz <= 1000000000);
68  hardclock_period = 1000000000 / hz;
69  
70  KASSERT(stathz >= 1 && stathz <= 1000000000);

Checking them again might make intent more explicit...  still, I'm
leaning toward leaving them out.
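
(If we did add them, it would just be the same two assertions repeated
after cpu_initclocks() returns, e.g.:

	cpu_initclocks();

	/* Hypothetical re-check; clockintr_init() already asserts both. */
	KASSERT(stathz != 0);
	KASSERT(profhz != 0);

so it wouldn't catch anything new.)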



Re: hardclock: don't call statclock(), stathz is always non-zero

2023-01-30 Thread Mark Kettenis
> Date: Sat, 21 Jan 2023 17:02:48 -0600
> From: Scott Cheloha 
> 
> All the platforms have switched to clockintr.
> 
> Let's start by isolating statclock() from hardclock().  stathz is now
> always non-zero: statclock() must be called separately.  Update
> > several of the stathz users to reflect that the value is always
> non-zero.
> 
> This is a first step toward making hardclock and statclock into
> schedulable entities.
> 
> ok?

If you are confident enough to start burning bridges, yes ok kettenis@

Maybe you want to add

KASSERT(stathz != 0);
KASSERT(profhz != 0);

at the start of initclocks() just to be sure?

Either way is fine with me.
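
Concretely, something like this at the top of the function -- a sketch,
not a diff:

	void
	initclocks(void)
	{
		/* Suggested extra sanity checks. */
		KASSERT(stathz != 0);
		KASSERT(profhz != 0);
		...
	}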


> Index: kern_clock.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/kern_clock.c,v
> retrieving revision 1.105
> diff -u -p -r1.105 kern_clock.c
> --- kern_clock.c  14 Aug 2022 01:58:27 -0000  1.105
> +++ kern_clock.c  21 Jan 2023 22:59:34 -0000
> @@ -98,8 +98,6 @@ volatile unsigned long jiffies; /* XXX 
>  void
>  initclocks(void)
>  {
> - int i;
> -
>   ticks = INT_MAX - (15 * 60 * hz);
>   jiffies = ULONG_MAX - (10 * 60 * hz);
>  
> @@ -111,12 +109,9 @@ initclocks(void)
>   cpu_initclocks();
>  
>   /*
> -  * Compute profhz/stathz, and fix profhz if needed.
> +  * Compute profhz/stathz.
>*/
> - i = stathz ? stathz : hz;
> - if (profhz == 0)
> - profhz = i;
> - psratio = profhz / i;
> + psratio = profhz / stathz;
>  
>   inittimecounter();
>  }
> @@ -158,12 +153,6 @@ hardclock(struct clockframe *frame)
>   }
>   }
>  
> - /*
> -  * If no separate statistics clock is available, run it from here.
> -  */
> - if (stathz == 0)
> - statclock(frame);
> -
>   if (--ci->ci_schedstate.spc_rrticks <= 0)
>   roundrobin(ci);
>  
> @@ -268,7 +257,7 @@ startprofclock(struct process *pr)
>  
>   if ((pr->ps_flags & PS_PROFIL) == 0) {
>   atomic_setbits_int(&pr->ps_flags, PS_PROFIL);
> - if (++profprocs == 1 && stathz != 0) {
> + if (++profprocs == 1) {
>   s = splstatclock();
>   psdiv = pscnt = psratio;
>   setstatclockrate(profhz);
> @@ -287,7 +276,7 @@ stopprofclock(struct process *pr)
>  
>   if (pr->ps_flags & PS_PROFIL) {
>   atomic_clearbits_int(&pr->ps_flags, PS_PROFIL);
> - if (--profprocs == 0 && stathz != 0) {
> + if (--profprocs == 0) {
>   s = splstatclock();
>   psdiv = pscnt = 1;
>   setstatclockrate(stathz);
> @@ -415,6 +404,6 @@ sysctl_clockrate(char *where, size_t *si
>   clkinfo.tick = tick;
>   clkinfo.hz = hz;
>   clkinfo.profhz = profhz;
> - clkinfo.stathz = stathz ? stathz : hz;
> + clkinfo.stathz = stathz;
>   return (sysctl_rdstruct(where, sizep, newp, &clkinfo, sizeof(clkinfo)));
>  }
> Index: kern_time.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/kern_time.c,v
> retrieving revision 1.161
> diff -u -p -r1.161 kern_time.c
> --- kern_time.c   2 Jan 2023 23:09:48 -0000   1.161
> +++ kern_time.c   21 Jan 2023 22:59:34 -0000
> @@ -218,10 +218,9 @@ sys_clock_getres(struct proc *p, void *v
>   struct timespec ts;
>   struct proc *q;
>   u_int64_t scale;
> - int error = 0, realstathz;
> + int error = 0;
>  
>   memset(&ts, 0, sizeof(ts));
> - realstathz = (stathz == 0) ? hz : stathz;
>   clock_id = SCARG(uap, clock_id);
>  
>   switch (clock_id) {
> @@ -238,7 +237,7 @@ sys_clock_getres(struct proc *p, void *v
>   break;
>   case CLOCK_PROCESS_CPUTIME_ID:
>   case CLOCK_THREAD_CPUTIME_ID:
> - ts.tv_nsec = 1000000000 / realstathz;
> + ts.tv_nsec = 1000000000 / stathz;
>   break;
>   default:
>   /* check for clock from pthread_getcpuclockid() */
> @@ -248,7 +247,7 @@ sys_clock_getres(struct proc *p, void *v
>   if (q == NULL)
>   error = ESRCH;
>   else
> - ts.tv_nsec = 1000000000 / realstathz;
> + ts.tv_nsec = 1000000000 / stathz;
>   KERNEL_UNLOCK();
>   } else
>   error = EINVAL;
> 
> 



Re: hardclock: don't call statclock(), stathz is always non-zero

2023-01-23 Thread Scott Cheloha
On Sat, Jan 21, 2023 at 05:02:49PM -0600, Scott Cheloha wrote:
> All the platforms have switched to clockintr.
> 
> Let's start by isolating statclock() from hardclock().  stathz is now
> always non-zero: statclock() must be called separately.  Update
> several of the stathz users to reflect that the value is always
> non-zero.
> 
> This is a first step toward making hardclock and statclock into
> schedulable entities.

Here are a few more.  The big comment in kern_clock.c needs to be
updated, as does the hardclock.9 manpage, but I think that those can
wait until the dust has settled.  More stuff is going to move around
in this area.
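
(For reference, the conversion calctsru() performs amounts to the
following standalone sketch; the stathz and tick values here are made
up for illustration:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned long long st = 250;		/* hypothetical tu_sticks */
		unsigned long long stathz = 100;	/* hypothetical stat clock rate */

		/* Scale stat-clock ticks to nanoseconds, then split sec/nsec. */
		st = st * 1000000000ULL / stathz;
		printf("%llu.%09llu seconds\n", st / 1000000000, st % 1000000000);
		return 0;
	}

which prints "2.500000000 seconds".)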

ok?

Index: kern_clock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_clock.c,v
retrieving revision 1.105
diff -u -p -r1.105 kern_clock.c
--- kern_clock.c	14 Aug 2022 01:58:27 -0000	1.105
+++ kern_clock.c	24 Jan 2023 00:03:06 -0000
@@ -98,8 +98,6 @@ volatile unsigned long jiffies;   /* XXX 
 void
 initclocks(void)
 {
-   int i;
-
ticks = INT_MAX - (15 * 60 * hz);
jiffies = ULONG_MAX - (10 * 60 * hz);
 
@@ -111,12 +109,9 @@ initclocks(void)
cpu_initclocks();
 
/*
-* Compute profhz/stathz, and fix profhz if needed.
+* Compute profhz/stathz.
 */
-   i = stathz ? stathz : hz;
-   if (profhz == 0)
-   profhz = i;
-   psratio = profhz / i;
+   psratio = profhz / stathz;
 
inittimecounter();
 }
@@ -158,12 +153,6 @@ hardclock(struct clockframe *frame)
}
}
 
-   /*
-* If no separate statistics clock is available, run it from here.
-*/
-   if (stathz == 0)
-   statclock(frame);
-
if (--ci->ci_schedstate.spc_rrticks <= 0)
roundrobin(ci);
 
@@ -268,7 +257,7 @@ startprofclock(struct process *pr)
 
if ((pr->ps_flags & PS_PROFIL) == 0) {
atomic_setbits_int(&pr->ps_flags, PS_PROFIL);
-   if (++profprocs == 1 && stathz != 0) {
+   if (++profprocs == 1) {
s = splstatclock();
psdiv = pscnt = psratio;
setstatclockrate(profhz);
@@ -287,7 +276,7 @@ stopprofclock(struct process *pr)
 
if (pr->ps_flags & PS_PROFIL) {
atomic_clearbits_int(&pr->ps_flags, PS_PROFIL);
-   if (--profprocs == 0 && stathz != 0) {
+   if (--profprocs == 0) {
s = splstatclock();
psdiv = pscnt = 1;
setstatclockrate(stathz);
@@ -415,6 +404,6 @@ sysctl_clockrate(char *where, size_t *si
clkinfo.tick = tick;
clkinfo.hz = hz;
clkinfo.profhz = profhz;
-   clkinfo.stathz = stathz ? stathz : hz;
+   clkinfo.stathz = stathz;
return (sysctl_rdstruct(where, sizep, newp, &clkinfo, sizeof(clkinfo)));
 }
Index: kern_resource.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_resource.c,v
retrieving revision 1.76
diff -u -p -r1.76 kern_resource.c
--- kern_resource.c	17 Nov 2022 18:53:13 -0000	1.76
+++ kern_resource.c	24 Jan 2023 00:03:06 -0000
@@ -410,7 +410,6 @@ calctsru(struct tusage *tup, struct time
 struct timespec *ip)
 {
u_quad_t st, ut, it;
-   int freq;
 
st = tup->tu_sticks;
ut = tup->tu_uticks;
@@ -424,16 +423,14 @@ calctsru(struct tusage *tup, struct time
return;
}
 
-   freq = stathz ? stathz : hz;
-
-   st = st * 1000000000 / freq;
+   st = st * 1000000000 / stathz;
sp->tv_sec = st / 1000000000;
sp->tv_nsec = st % 1000000000;
-   ut = ut * 1000000000 / freq;
+   ut = ut * 1000000000 / stathz;
up->tv_sec = ut / 1000000000;
up->tv_nsec = ut % 1000000000;
if (ip != NULL) {
-   it = it * 1000000000 / freq;
+   it = it * 1000000000 / stathz;
ip->tv_sec = it / 1000000000;
ip->tv_nsec = it % 1000000000;
}
Index: sched_bsd.c
===================================================================
RCS file: /cvs/src/sys/kern/sched_bsd.c,v
retrieving revision 1.73
diff -u -p -r1.73 sched_bsd.c
--- sched_bsd.c	5 Dec 2022 23:18:37 -0000	1.73
+++ sched_bsd.c	24 Jan 2023 00:03:06 -0000
@@ -189,16 +189,6 @@ schedcpu(void *arg)
struct proc *p;
int s;
unsigned int newcpu;
-   int phz;
-
-   /*
-* If we have a statistics clock, use that to calculate CPU
-* time, otherwise revert to using the profiling clock (which,
-* in turn, defaults to hz if there is no separate profiling
-* clock available)
-*/
-   phz = stathz ? stathz : profhz;
-   KASSERT(phz);
 
LIST_FOREACH(p, &allproc, p_list) {
/*
@@ -225,13 +215,13 @@ schedcpu(void *arg)
 * p_pctcpu is only for diagnostic 

hardclock: don't call statclock(), stathz is always non-zero

2023-01-21 Thread Scott Cheloha
All the platforms have switched to clockintr.

Let's start by isolating statclock() from hardclock().  stathz is now
always non-zero: statclock() must be called separately.  Update
several of the stathz users to reflect that the value is always
non-zero.

This is a first step toward making hardclock and statclock into
schedulable entities.

ok?

Index: kern_clock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_clock.c,v
retrieving revision 1.105
diff -u -p -r1.105 kern_clock.c
--- kern_clock.c	14 Aug 2022 01:58:27 -0000	1.105
+++ kern_clock.c	21 Jan 2023 22:59:34 -0000
@@ -98,8 +98,6 @@ volatile unsigned long jiffies;   /* XXX 
 void
 initclocks(void)
 {
-   int i;
-
ticks = INT_MAX - (15 * 60 * hz);
jiffies = ULONG_MAX - (10 * 60 * hz);
 
@@ -111,12 +109,9 @@ initclocks(void)
cpu_initclocks();
 
/*
-* Compute profhz/stathz, and fix profhz if needed.
+* Compute profhz/stathz.
 */
-   i = stathz ? stathz : hz;
-   if (profhz == 0)
-   profhz = i;
-   psratio = profhz / i;
+   psratio = profhz / stathz;
 
inittimecounter();
 }
@@ -158,12 +153,6 @@ hardclock(struct clockframe *frame)
}
}
 
-   /*
-* If no separate statistics clock is available, run it from here.
-*/
-   if (stathz == 0)
-   statclock(frame);
-
if (--ci->ci_schedstate.spc_rrticks <= 0)
roundrobin(ci);
 
@@ -268,7 +257,7 @@ startprofclock(struct process *pr)
 
if ((pr->ps_flags & PS_PROFIL) == 0) {
atomic_setbits_int(&pr->ps_flags, PS_PROFIL);
-   if (++profprocs == 1 && stathz != 0) {
+   if (++profprocs == 1) {
s = splstatclock();
psdiv = pscnt = psratio;
setstatclockrate(profhz);
@@ -287,7 +276,7 @@ stopprofclock(struct process *pr)
 
if (pr->ps_flags & PS_PROFIL) {
atomic_clearbits_int(&pr->ps_flags, PS_PROFIL);
-   if (--profprocs == 0 && stathz != 0) {
+   if (--profprocs == 0) {
s = splstatclock();
psdiv = pscnt = 1;
setstatclockrate(stathz);
@@ -415,6 +404,6 @@ sysctl_clockrate(char *where, size_t *si
clkinfo.tick = tick;
clkinfo.hz = hz;
clkinfo.profhz = profhz;
-   clkinfo.stathz = stathz ? stathz : hz;
+   clkinfo.stathz = stathz;
return (sysctl_rdstruct(where, sizep, newp, &clkinfo, sizeof(clkinfo)));
 }
Index: kern_time.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_time.c,v
retrieving revision 1.161
diff -u -p -r1.161 kern_time.c
--- kern_time.c	2 Jan 2023 23:09:48 -0000	1.161
+++ kern_time.c	21 Jan 2023 22:59:34 -0000
@@ -218,10 +218,9 @@ sys_clock_getres(struct proc *p, void *v
struct timespec ts;
struct proc *q;
u_int64_t scale;
-   int error = 0, realstathz;
+   int error = 0;
 
memset(&ts, 0, sizeof(ts));
-   realstathz = (stathz == 0) ? hz : stathz;
clock_id = SCARG(uap, clock_id);
 
switch (clock_id) {
@@ -238,7 +237,7 @@ sys_clock_getres(struct proc *p, void *v
break;
case CLOCK_PROCESS_CPUTIME_ID:
case CLOCK_THREAD_CPUTIME_ID:
-   ts.tv_nsec = 1000000000 / realstathz;
+   ts.tv_nsec = 1000000000 / stathz;
break;
default:
/* check for clock from pthread_getcpuclockid() */
@@ -248,7 +247,7 @@ sys_clock_getres(struct proc *p, void *v
if (q == NULL)
error = ESRCH;
else
-   ts.tv_nsec = 1000000000 / realstathz;
+   ts.tv_nsec = 1000000000 / stathz;
KERNEL_UNLOCK();
} else
error = EINVAL;