Re: [v2]: uvm_meter, schedcpu: make uvm_meter() an independent timeout

2023-08-03 Thread Martin Pieuchot
On 02/08/23(Wed) 18:27, Claudio Jeker wrote:
> On Wed, Aug 02, 2023 at 10:15:20AM -0500, Scott Cheloha wrote:
> > Now that the proc0 wakeup(9) is gone we can retry the other part of
> > the uvm_meter() patch.
> > 
> > uvm_meter() is meant to run every 5 seconds, but for historical
> > reasons it is called from schedcpu() and it is scheduled against the
> > UTC clock.  schedcpu() and uvm_meter() have different periods, so
> > uvm_meter() ought to be a separate timeout.  uvm_meter() is started
> > alongside schedcpu() so the two will still run in sync.
> > 
> > v1: https://marc.info/?l=openbsd-tech&m=168710929409153&w=2
> > 
> > ok?
> 
> I would prefer if uvm_meter is killed and the load calculation moved to the
> scheduler.

Me too.

> > Index: sys/uvm/uvm_meter.c
> > ===
> > RCS file: /cvs/src/sys/uvm/uvm_meter.c,v
> > retrieving revision 1.46
> > diff -u -p -r1.46 uvm_meter.c
> > --- sys/uvm/uvm_meter.c 2 Aug 2023 13:54:45 -   1.46
> > +++ sys/uvm/uvm_meter.c 2 Aug 2023 15:13:49 -
> > @@ -85,10 +85,12 @@ void uvmexp_read(struct uvmexp *);
> >   * uvm_meter: calculate load average
> >   */
> >  void
> > -uvm_meter(void)
> > +uvm_meter(void *unused)
> >  {
> > -   if ((gettime() % 5) == 0)
> > -   uvm_loadav(&averunnable);
> > +   static struct timeout to = TIMEOUT_INITIALIZER(uvm_meter, NULL);
> > +
> > +   timeout_add_sec(&to, 5);
> > +   uvm_loadav(&averunnable);
> >  }
> >  
> >  /*
> > Index: sys/uvm/uvm_extern.h
> > ===
> > RCS file: /cvs/src/sys/uvm/uvm_extern.h,v
> > retrieving revision 1.170
> > diff -u -p -r1.170 uvm_extern.h
> > --- sys/uvm/uvm_extern.h21 Jun 2023 21:16:21 -  1.170
> > +++ sys/uvm/uvm_extern.h2 Aug 2023 15:13:49 -
> > @@ -414,7 +414,7 @@ voiduvmspace_free(struct vmspace *);
> >  struct vmspace *uvmspace_share(struct process *);
> >  intuvm_share(vm_map_t, vaddr_t, vm_prot_t,
> > vm_map_t, vaddr_t, vsize_t);
> > -void   uvm_meter(void);
> > +void   uvm_meter(void *);
> >  intuvm_sysctl(int *, u_int, void *, size_t *, 
> > void *, size_t, struct proc *);
> >  struct vm_page *uvm_pagealloc(struct uvm_object *,
> > Index: sys/kern/sched_bsd.c
> > ===
> > RCS file: /cvs/src/sys/kern/sched_bsd.c,v
> > retrieving revision 1.78
> > diff -u -p -r1.78 sched_bsd.c
> > --- sys/kern/sched_bsd.c25 Jul 2023 18:16:19 -  1.78
> > +++ sys/kern/sched_bsd.c2 Aug 2023 15:13:50 -
> > @@ -235,7 +235,6 @@ schedcpu(void *arg)
> > }
> > SCHED_UNLOCK(s);
> > }
> > -   uvm_meter();
> > wakeup(&lbolt);
> > timeout_add_sec(to, 1);
> >  }
> > @@ -688,6 +687,7 @@ scheduler_start(void)
> >  
> > rrticks_init = hz / 10;
> > schedcpu(&schedcpu_to);
> > +   uvm_meter(NULL);
> >  
> >  #ifndef SMALL_KERNEL
> > if (perfpolicy == PERFPOL_AUTO)
> > Index: share/man/man9/uvm_init.9
> > ===
> > RCS file: /cvs/src/share/man/man9/uvm_init.9,v
> > retrieving revision 1.7
> > diff -u -p -r1.7 uvm_init.9
> > --- share/man/man9/uvm_init.9   21 Jun 2023 21:16:21 -  1.7
> > +++ share/man/man9/uvm_init.9   2 Aug 2023 15:13:50 -
> > @@ -168,7 +168,7 @@ argument is ignored.
> >  .Ft void
> >  .Fn uvm_kernacc "caddr_t addr" "size_t len" "int rw"
> >  .Ft void
> > -.Fn uvm_meter
> > +.Fn uvm_meter "void *arg"
> >  .Ft int
> >  .Fn uvm_sysctl "int *name" "u_int namelen" "void *oldp" "size_t *oldlenp" 
> > "void *newp " "size_t newlen" "struct proc *p"
> >  .Ft int
> > @@ -212,7 +212,7 @@ access, in the kernel address space.
> >  .Pp
> >  The
> >  .Fn uvm_meter
> > -function calculates the load average and wakes up the swapper if necessary.
> > +timeout updates system load averages every five seconds.
> >  .Pp
> >  The
> >  .Fn uvm_sysctl
> 
> -- 
> :wq Claudio
> 



Re: [v2]: uvm_meter, schedcpu: make uvm_meter() an independent timeout

2023-08-02 Thread Claudio Jeker
On Wed, Aug 02, 2023 at 10:15:20AM -0500, Scott Cheloha wrote:
> Now that the proc0 wakeup(9) is gone we can retry the other part of
> the uvm_meter() patch.
> 
> uvm_meter() is meant to run every 5 seconds, but for historical
> reasons it is called from schedcpu() and it is scheduled against the
> UTC clock.  schedcpu() and uvm_meter() have different periods, so
> uvm_meter() ought to be a separate timeout.  uvm_meter() is started
> alongside schedcpu() so the two will still run in sync.
> 
> v1: https://marc.info/?l=openbsd-tech&m=168710929409153&w=2
> 
> ok?

I would prefer if uvm_meter is killed and the load calculation moved to the
scheduler.
 
> Index: sys/uvm/uvm_meter.c
> ===
> RCS file: /cvs/src/sys/uvm/uvm_meter.c,v
> retrieving revision 1.46
> diff -u -p -r1.46 uvm_meter.c
> --- sys/uvm/uvm_meter.c   2 Aug 2023 13:54:45 -   1.46
> +++ sys/uvm/uvm_meter.c   2 Aug 2023 15:13:49 -
> @@ -85,10 +85,12 @@ void uvmexp_read(struct uvmexp *);
>   * uvm_meter: calculate load average
>   */
>  void
> -uvm_meter(void)
> +uvm_meter(void *unused)
>  {
> - if ((gettime() % 5) == 0)
> - uvm_loadav(&averunnable);
> + static struct timeout to = TIMEOUT_INITIALIZER(uvm_meter, NULL);
> +
> + timeout_add_sec(&to, 5);
> + uvm_loadav(&averunnable);
>  }
>  
>  /*
> Index: sys/uvm/uvm_extern.h
> ===
> RCS file: /cvs/src/sys/uvm/uvm_extern.h,v
> retrieving revision 1.170
> diff -u -p -r1.170 uvm_extern.h
> --- sys/uvm/uvm_extern.h  21 Jun 2023 21:16:21 -  1.170
> +++ sys/uvm/uvm_extern.h  2 Aug 2023 15:13:49 -
> @@ -414,7 +414,7 @@ void  uvmspace_free(struct vmspace *);
>  struct vmspace   *uvmspace_share(struct process *);
>  int  uvm_share(vm_map_t, vaddr_t, vm_prot_t,
>   vm_map_t, vaddr_t, vsize_t);
> -void uvm_meter(void);
> +void uvm_meter(void *);
>  int  uvm_sysctl(int *, u_int, void *, size_t *, 
>   void *, size_t, struct proc *);
>  struct vm_page   *uvm_pagealloc(struct uvm_object *,
> Index: sys/kern/sched_bsd.c
> ===
> RCS file: /cvs/src/sys/kern/sched_bsd.c,v
> retrieving revision 1.78
> diff -u -p -r1.78 sched_bsd.c
> --- sys/kern/sched_bsd.c  25 Jul 2023 18:16:19 -  1.78
> +++ sys/kern/sched_bsd.c  2 Aug 2023 15:13:50 -
> @@ -235,7 +235,6 @@ schedcpu(void *arg)
>   }
>   SCHED_UNLOCK(s);
>   }
> - uvm_meter();
>   wakeup(&lbolt);
>   timeout_add_sec(to, 1);
>  }
> @@ -688,6 +687,7 @@ scheduler_start(void)
>  
>   rrticks_init = hz / 10;
>   schedcpu(&schedcpu_to);
> + uvm_meter(NULL);
>  
>  #ifndef SMALL_KERNEL
>   if (perfpolicy == PERFPOL_AUTO)
> Index: share/man/man9/uvm_init.9
> ===
> RCS file: /cvs/src/share/man/man9/uvm_init.9,v
> retrieving revision 1.7
> diff -u -p -r1.7 uvm_init.9
> --- share/man/man9/uvm_init.9 21 Jun 2023 21:16:21 -  1.7
> +++ share/man/man9/uvm_init.9 2 Aug 2023 15:13:50 -
> @@ -168,7 +168,7 @@ argument is ignored.
>  .Ft void
>  .Fn uvm_kernacc "caddr_t addr" "size_t len" "int rw"
>  .Ft void
> -.Fn uvm_meter
> +.Fn uvm_meter "void *arg"
>  .Ft int
>  .Fn uvm_sysctl "int *name" "u_int namelen" "void *oldp" "size_t *oldlenp" 
> "void *newp " "size_t newlen" "struct proc *p"
>  .Ft int
> @@ -212,7 +212,7 @@ access, in the kernel address space.
>  .Pp
>  The
>  .Fn uvm_meter
> -function calculates the load average and wakes up the swapper if necessary.
> +timeout updates system load averages every five seconds.
>  .Pp
>  The
>  .Fn uvm_sysctl

-- 
:wq Claudio



[v2]: uvm_meter, schedcpu: make uvm_meter() an independent timeout

2023-08-02 Thread Scott Cheloha
Now that the proc0 wakeup(9) is gone we can retry the other part of
the uvm_meter() patch.

uvm_meter() is meant to run every 5 seconds, but for historical
reasons it is called from schedcpu() and it is scheduled against the
UTC clock.  schedcpu() and uvm_meter() have different periods, so
uvm_meter() ought to be a separate timeout.  uvm_meter() is started
alongside schedcpu() so the two will still run in sync.

v1: https://marc.info/?l=openbsd-tech&m=168710929409153&w=2

ok?

Index: sys/uvm/uvm_meter.c
===
RCS file: /cvs/src/sys/uvm/uvm_meter.c,v
retrieving revision 1.46
diff -u -p -r1.46 uvm_meter.c
--- sys/uvm/uvm_meter.c 2 Aug 2023 13:54:45 -   1.46
+++ sys/uvm/uvm_meter.c 2 Aug 2023 15:13:49 -
@@ -85,10 +85,12 @@ void uvmexp_read(struct uvmexp *);
  * uvm_meter: calculate load average
  */
 void
-uvm_meter(void)
+uvm_meter(void *unused)
 {
-   if ((gettime() % 5) == 0)
-   uvm_loadav(&averunnable);
+   static struct timeout to = TIMEOUT_INITIALIZER(uvm_meter, NULL);
+
+   timeout_add_sec(&to, 5);
+   uvm_loadav(&averunnable);
 }
 
 /*
Index: sys/uvm/uvm_extern.h
===
RCS file: /cvs/src/sys/uvm/uvm_extern.h,v
retrieving revision 1.170
diff -u -p -r1.170 uvm_extern.h
--- sys/uvm/uvm_extern.h21 Jun 2023 21:16:21 -  1.170
+++ sys/uvm/uvm_extern.h2 Aug 2023 15:13:49 -
@@ -414,7 +414,7 @@ voiduvmspace_free(struct vmspace *);
 struct vmspace *uvmspace_share(struct process *);
 intuvm_share(vm_map_t, vaddr_t, vm_prot_t,
vm_map_t, vaddr_t, vsize_t);
-void   uvm_meter(void);
+void   uvm_meter(void *);
 intuvm_sysctl(int *, u_int, void *, size_t *, 
void *, size_t, struct proc *);
 struct vm_page *uvm_pagealloc(struct uvm_object *,
Index: sys/kern/sched_bsd.c
===
RCS file: /cvs/src/sys/kern/sched_bsd.c,v
retrieving revision 1.78
diff -u -p -r1.78 sched_bsd.c
--- sys/kern/sched_bsd.c25 Jul 2023 18:16:19 -  1.78
+++ sys/kern/sched_bsd.c2 Aug 2023 15:13:50 -
@@ -235,7 +235,6 @@ schedcpu(void *arg)
}
SCHED_UNLOCK(s);
}
-   uvm_meter();
wakeup(&lbolt);
timeout_add_sec(to, 1);
 }
@@ -688,6 +687,7 @@ scheduler_start(void)
 
rrticks_init = hz / 10;
schedcpu(&schedcpu_to);
+   uvm_meter(NULL);
 
 #ifndef SMALL_KERNEL
if (perfpolicy == PERFPOL_AUTO)
Index: share/man/man9/uvm_init.9
===
RCS file: /cvs/src/share/man/man9/uvm_init.9,v
retrieving revision 1.7
diff -u -p -r1.7 uvm_init.9
--- share/man/man9/uvm_init.9   21 Jun 2023 21:16:21 -  1.7
+++ share/man/man9/uvm_init.9   2 Aug 2023 15:13:50 -
@@ -168,7 +168,7 @@ argument is ignored.
 .Ft void
 .Fn uvm_kernacc "caddr_t addr" "size_t len" "int rw"
 .Ft void
-.Fn uvm_meter
+.Fn uvm_meter "void *arg"
 .Ft int
 .Fn uvm_sysctl "int *name" "u_int namelen" "void *oldp" "size_t *oldlenp" 
"void *newp " "size_t newlen" "struct proc *p"
 .Ft int
@@ -212,7 +212,7 @@ access, in the kernel address space.
 .Pp
 The
 .Fn uvm_meter
-function calculates the load average and wakes up the swapper if necessary.
+timeout updates system load averages every five seconds.
 .Pp
 The
 .Fn uvm_sysctl