small typo in imsg_init.3

2020-12-12 Thread Aisha Tammy
Hi,
  While creating a portable version of imsg, I noticed
a small typo in the imsg_init.3 man page which says
the returned value is 'len' instead of 'datalen'.
Attached the patch to fix it.

OK?

Cheers,
Aisha

diff --git a/lib/libutil/imsg_init.3 b/lib/libutil/imsg_init.3
index 18720d1d59b..e7dc6ab928d 100644
--- a/lib/libutil/imsg_init.3
+++ b/lib/libutil/imsg_init.3
@@ -186,7 +186,7 @@ appends to
 bytes of ancillary data pointed to by
 .Fa data .
 It returns
-.Fa len
+.Fa datalen
 if it succeeds, \-1 otherwise.
 .Pp
 .Fn imsg_close



Re: diff: cleanup type handling

2020-12-12 Thread Alexander Bluhm
On Sat, Dec 12, 2020 at 02:25:03PM +0100, Jan Klemkow wrote:
> The type of the local variable hash in pf_map_addr() has the right
> length but the wrong type.  This diff uses the correct type and removes
> the useless casts.  Both functions use hash as a pf_addr, so no cast is
> needed.
> 
> OK?

OK bluhm@

> Index: net/pf_lb.c
> ===
> RCS file: /cvs/src/sys/net/pf_lb.c,v
> retrieving revision 1.67
> diff -u -p -r1.67 pf_lb.c
> --- net/pf_lb.c   29 Jul 2020 02:32:13 -  1.67
> +++ net/pf_lb.c   12 Dec 2020 13:06:49 -
> @@ -349,7 +349,7 @@ pf_map_addr(sa_family_t af, struct pf_ru
>  struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node 
> **sns,
>  struct pf_pool *rpool, enum pf_sn_types type)
>  {
> - unsigned charhash[16];
> + struct pf_addr   hash;
>   struct pf_addr   faddr;
>   struct pf_addr  *raddr = &rpool->addr.v.a.addr;
>   struct pf_addr  *rmask = &rpool->addr.v.a.mask;
> @@ -460,8 +460,7 @@ pf_map_addr(sa_family_t af, struct pf_ru
>   }
>   break;
>   case PF_POOL_SRCHASH:
> - hashidx =
> - pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
> + hashidx = pf_hash(saddr, &hash, &rpool->key, af);
>  
>   if (rpool->addr.type == PF_ADDR_TABLE ||
>   rpool->addr.type == PF_ADDR_DYNIFTL) {
> @@ -483,8 +482,7 @@ pf_map_addr(sa_family_t af, struct pf_ru
>   return (1);
>   pf_addrcpy(naddr, &rpool->counter, af);
>   } else {
> - pf_poolmask(naddr, raddr, rmask,
> - (struct pf_addr *)&hash, af);
> + pf_poolmask(naddr, raddr, rmask, &hash, af);
>   }
>   break;
>   case PF_POOL_ROUNDROBIN:



syncer_thread: sleep without lbolt

2020-12-12 Thread Scott Cheloha
Hi,

The syncer thread is one of the last users of the lbolt (lightning
bolt!) sleep channel.

If we add a syncer-specific sleep channel (syncer_chan) and do a bit
of time math we can replicate the current behavior and remove another
lbolt user.

This isn't a perfect recreation of the current behavior.  In this
version the sleep period will drift if processing takes longer than 1
second.  I think it's good enough.  If people are concerned about a
perfect recreation of the current behavior we *can* do it, but it will
require more code.  I don't think it's worth it.

This also fixes two problems in the current code.  They aren't huge
bugs, but they did jump out as potential problems because they make
the syncer's behavior less deterministic:

- The current code uses gettime(9), which will jump and screw up your
  measurement if someone calls settimeofday(2).  The new code uses the
  uptime clock, which is monotonic and stable.

- The current code uses gettime(9), which has a resolution of 1
  second.  Measuring a 1 second timeout with an interface with
  a resolution of 1 second is crude and error-prone.  The new code
  uses getnsecuptime(), which has a resolution of roughly 1/hz.
  Much better.

I vaguely recall beck@ trying to do something with this in the recent
past, so CC beck@.

Thoughts?  ok?

Index: vfs_sync.c
===
RCS file: /cvs/src/sys/kern/vfs_sync.c,v
retrieving revision 1.64
diff -u -p -r1.64 vfs_sync.c
--- vfs_sync.c  24 Jun 2020 22:03:41 -  1.64
+++ vfs_sync.c  12 Dec 2020 19:29:11 -
@@ -48,6 +48,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -73,6 +74,7 @@ LIST_HEAD(synclist, vnode);
 static struct synclist *syncer_workitem_pending;
 
 struct proc *syncerproc;
+int syncer_chan;
 
 /*
  * The workitem queue.
@@ -129,6 +131,15 @@ vn_syncer_add_to_worklist(struct vnode *
splx(s);
 }
 
+uint64_t
+getnsecuptime(void)
+{
+   struct timespec now;
+
+   getnanouptime(&now);
+   return TIMESPEC_TO_NSEC(&now);
+}
+
 /*
  * System filesystem synchronizer daemon.
  */
@@ -138,11 +149,11 @@ syncer_thread(void *arg)
struct proc *p = curproc;
struct synclist *slp;
struct vnode *vp;
-   time_t starttime;
+   uint64_t elapsed, start;
int s;
 
for (;;) {
-   starttime = gettime();
+   start = getnsecuptime();
 
/*
 * Push files whose dirty time has expired.
@@ -220,6 +231,7 @@ syncer_thread(void *arg)
rushjob -= 1;
continue;
}
+
/*
 * If it has taken us less than a second to process the
 * current work, then wait. Otherwise start right over
@@ -228,8 +240,11 @@ syncer_thread(void *arg)
 * matter as we are just trying to generally pace the
 * filesystem activity.
 */
-   if (gettime() == starttime)
-   tsleep_nsec(&lbolt, PPAUSE, "syncer", INFSLP);
+   elapsed = getnsecuptime() - start;
+   if (elapsed < SEC_TO_NSEC(1)) {
+   tsleep_nsec(&syncer_chan, PPAUSE, "syncer",
+   SEC_TO_NSEC(1) - elapsed);
+   }
}
 }
 
@@ -242,7 +257,7 @@ int
 speedup_syncer(void)
 {
if (syncerproc)
-   wakeup_proc(syncerproc, &lbolt);
+   wakeup_proc(syncerproc, &syncer_chan);
if (rushjob < syncdelay / 2) {
rushjob += 1;
stat_rush_requests += 1;



diff: cleanup type handling

2020-12-12 Thread Jan Klemkow
Hi,

The type of the local variable hash in pf_map_addr() has the right length
but the wrong type.  This diff uses the correct type and removes the
useless casts.  Both functions use hash as a pf_addr, so no cast is
needed.

OK?

bye,
Jan

Index: net/pf_lb.c
===
RCS file: /cvs/src/sys/net/pf_lb.c,v
retrieving revision 1.67
diff -u -p -r1.67 pf_lb.c
--- net/pf_lb.c 29 Jul 2020 02:32:13 -  1.67
+++ net/pf_lb.c 12 Dec 2020 13:06:49 -
@@ -349,7 +349,7 @@ pf_map_addr(sa_family_t af, struct pf_ru
 struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sns,
 struct pf_pool *rpool, enum pf_sn_types type)
 {
-   unsigned charhash[16];
+   struct pf_addr   hash;
struct pf_addr   faddr;
struct pf_addr  *raddr = &rpool->addr.v.a.addr;
struct pf_addr  *rmask = &rpool->addr.v.a.mask;
@@ -460,8 +460,7 @@ pf_map_addr(sa_family_t af, struct pf_ru
}
break;
case PF_POOL_SRCHASH:
-   hashidx =
-   pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
+   hashidx = pf_hash(saddr, &hash, &rpool->key, af);
 
if (rpool->addr.type == PF_ADDR_TABLE ||
rpool->addr.type == PF_ADDR_DYNIFTL) {
@@ -483,8 +482,7 @@ pf_map_addr(sa_family_t af, struct pf_ru
return (1);
pf_addrcpy(naddr, &rpool->counter, af);
} else {
-   pf_poolmask(naddr, raddr, rmask,
-   (struct pf_addr *)&hash, af);
+   pf_poolmask(naddr, raddr, rmask, &hash, af);
}
break;
case PF_POOL_ROUNDROBIN:



diff: replace useless use of MCLGETL with MCLGET

2020-12-12 Thread Jan Klemkow
Hi,

The use of MCLGETL with the default length MCLBYTES is useless.  Thus,
this diff removes the '(void)' from the MCLGET macro, as is already done
in MCLGETL, and replaces all uses of MCLGETL with MCLBYTES by MCLGET.

OK?

bye,
Jan

Index: arch/octeon/dev/if_ogx.c
===
RCS file: /cvs/src/sys/arch/octeon/dev/if_ogx.c,v
retrieving revision 1.3
diff -u -p -r1.3 if_ogx.c
--- arch/octeon/dev/if_ogx.c12 Dec 2020 11:48:52 -  1.3
+++ arch/octeon/dev/if_ogx.c12 Dec 2020 12:58:31 -
@@ -1147,7 +1147,7 @@ ogx_load_mbufs(struct ogx_softc *sc, uns
paddr_t pktbuf;
 
for ( ; n > 0; n--) {
-   m = MCLGETL(NULL, M_NOWAIT, MCLBYTES);
+   m = MCLGET(NULL, M_NOWAIT);
if (m == NULL)
break;
 
Index: dev/fdt/if_dwge.c
===
RCS file: /cvs/src/sys/dev/fdt/if_dwge.c,v
retrieving revision 1.7
diff -u -p -r1.7 if_dwge.c
--- dev/fdt/if_dwge.c   12 Dec 2020 11:48:52 -  1.7
+++ dev/fdt/if_dwge.c   12 Dec 2020 12:58:31 -
@@ -1283,7 +1283,7 @@ dwge_alloc_mbuf(struct dwge_softc *sc, b
 {
struct mbuf *m = NULL;
 
-   m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
+   m = MCLGET(NULL, M_DONTWAIT);
if (!m)
return (NULL);
m->m_len = m->m_pkthdr.len = MCLBYTES;
Index: dev/fdt/if_dwxe.c
===
RCS file: /cvs/src/sys/dev/fdt/if_dwxe.c,v
retrieving revision 1.18
diff -u -p -r1.18 if_dwxe.c
--- dev/fdt/if_dwxe.c   12 Dec 2020 11:48:52 -  1.18
+++ dev/fdt/if_dwxe.c   12 Dec 2020 12:58:31 -
@@ -1342,7 +1342,7 @@ dwxe_alloc_mbuf(struct dwxe_softc *sc, b
 {
struct mbuf *m = NULL;
 
-   m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
+   m = MCLGET(NULL, M_DONTWAIT);
if (!m)
return (NULL);
m->m_len = m->m_pkthdr.len = MCLBYTES;
Index: dev/fdt/if_fec.c
===
RCS file: /cvs/src/sys/dev/fdt/if_fec.c,v
retrieving revision 1.11
diff -u -p -r1.11 if_fec.c
--- dev/fdt/if_fec.c12 Dec 2020 11:48:52 -  1.11
+++ dev/fdt/if_fec.c12 Dec 2020 12:58:31 -
@@ -1298,7 +1298,7 @@ fec_alloc_mbuf(struct fec_softc *sc, bus
 {
struct mbuf *m = NULL;
 
-   m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
+   m = MCLGET(NULL, M_DONTWAIT);
if (!m)
return (NULL);
m->m_len = m->m_pkthdr.len = MCLBYTES;
Index: dev/fdt/if_mvneta.c
===
RCS file: /cvs/src/sys/dev/fdt/if_mvneta.c,v
retrieving revision 1.16
diff -u -p -r1.16 if_mvneta.c
--- dev/fdt/if_mvneta.c 12 Dec 2020 11:48:52 -  1.16
+++ dev/fdt/if_mvneta.c 12 Dec 2020 12:58:31 -
@@ -1654,7 +1654,7 @@ mvneta_alloc_mbuf(struct mvneta_softc *s
 {
struct mbuf *m = NULL;
 
-   m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
+   m = MCLGET(NULL, M_DONTWAIT);
if (!m)
return (NULL);
m->m_len = m->m_pkthdr.len = MCLBYTES;
Index: dev/fdt/if_mvpp.c
===
RCS file: /cvs/src/sys/dev/fdt/if_mvpp.c,v
retrieving revision 1.44
diff -u -p -r1.44 if_mvpp.c
--- dev/fdt/if_mvpp.c   12 Dec 2020 11:48:52 -  1.44
+++ dev/fdt/if_mvpp.c   12 Dec 2020 12:58:31 -
@@ -3124,7 +3124,7 @@ mvpp2_alloc_mbuf(struct mvpp2_softc *sc,
 {
struct mbuf *m = NULL;
 
-   m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
+   m = MCLGET(NULL, M_DONTWAIT);
if (!m)
return (NULL);
m->m_len = m->m_pkthdr.len = MCLBYTES;
Index: dev/ic/bcmgenet.c
===
RCS file: /cvs/src/sys/dev/ic/bcmgenet.c,v
retrieving revision 1.4
diff -u -p -r1.4 bcmgenet.c
--- dev/ic/bcmgenet.c   12 Dec 2020 11:48:52 -  1.4
+++ dev/ic/bcmgenet.c   12 Dec 2020 12:58:31 -
@@ -300,7 +300,7 @@ genet_alloc_mbufcl(struct genet_softc *s
 {
struct mbuf *m;
 
-   m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
+   m = MCLGET(NULL, M_DONTWAIT);
if (m != NULL)
m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
 
Index: dev/ic/elink3.c
===
RCS file: /cvs/src/sys/dev/ic/elink3.c,v
retrieving revision 1.98
diff -u -p -r1.98 elink3.c
--- dev/ic/elink3.c 12 Dec 2020 11:48:52 -  1.98
+++ dev/ic/elink3.c 12 Dec 2020 12:58:31 -
@@ -1343,7 +1343,7 @@ epget(struct ep_softc *sc, int totlen)
m = sc->mb[sc->next_mb];
sc->mb[sc->next_mb] = NULL;
if (m == NULL) {
-   m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
+   m = MCLGET(NULL, M_DONTWAIT);
/* If the queue is no longer full, refill. */
if (!timeout_pending(&sc->sc_epmbuffill_tmo))
   

Re: libcurses: --enable-const

2020-12-12 Thread Todd C . Miller
On Sat, 12 Dec 2020 16:28:18 +0100, Christian Weisgerber wrote:

> ncurses has a configure option that adds a few more consts to its
> headers by way of the NCURSES_CONST define.  Starting with version
> 6.0, this has become the default.  OpenBSD is still on ncurses 5.7,
> but FreeBSD and I guess most Linux distributions have moved on.
> I suggest we also enable the additional consts.  This eliminates
> compiler warnings when sharing code with other platforms.
>
> The diff below has successfully gone through a release build on
> amd64, as well as a full amd64 package build.

OK millert@

 - todd



libcurses: --enable-const

2020-12-12 Thread Christian Weisgerber
ncurses has a configure option that adds a few more consts to its
headers by way of the NCURSES_CONST define.  Starting with version
6.0, this has become the default.  OpenBSD is still on ncurses 5.7,
but FreeBSD and I guess most Linux distributions have moved on.
I suggest we also enable the additional consts.  This eliminates
compiler warnings when sharing code with other platforms.

The diff below has successfully gone through a release build on
amd64, as well as a full amd64 package build.

OK?

Index: lib/libcurses/curses.h
===
RCS file: /cvs/src/lib/libcurses/curses.h,v
retrieving revision 1.61
diff -u -p -r1.61 curses.h
--- lib/libcurses/curses.h  6 Sep 2010 17:26:17 -   1.61
+++ lib/libcurses/curses.h  9 Dec 2020 22:56:31 -
@@ -101,7 +101,7 @@
  * doing so makes it incompatible with other implementations of X/Open Curses.
  */
 #undef  NCURSES_CONST
-#define NCURSES_CONST /*nothing*/
+#define NCURSES_CONST const
 
 #undef NCURSES_INLINE
 #define NCURSES_INLINE inline
Index: lib/libcurses/term.h
===
RCS file: /cvs/src/lib/libcurses/term.h,v
retrieving revision 1.15
diff -u -p -r1.15 term.h
--- lib/libcurses/term.h14 Nov 2015 23:56:49 -  1.15
+++ lib/libcurses/term.h9 Dec 2020 23:03:46 -
@@ -68,7 +68,7 @@ extern "C" {
  */
 
 #undef  NCURSES_CONST
-#define NCURSES_CONST /*nothing*/
+#define NCURSES_CONST const
 
 #undef  NCURSES_SBOOL
 #define NCURSES_SBOOL signed char
Index: lib/libcurses/termcap.h
===
RCS file: /cvs/src/lib/libcurses/termcap.h,v
retrieving revision 1.10
diff -u -p -r1.10 termcap.h
--- lib/libcurses/termcap.h 10 Dec 2013 20:33:51 -  1.10
+++ lib/libcurses/termcap.h 9 Dec 2020 23:03:54 -
@@ -62,7 +62,7 @@ extern "C"
 #include 
 
 #undef  NCURSES_CONST 
-#define NCURSES_CONST /*nothing*/ 
+#define NCURSES_CONST const
 
 #undef  NCURSES_OSPEED 
 #define NCURSES_OSPEED int 
-- 
Christian "naddy" Weisgerber  na...@mips.inka.de



Re: uvm_fault: entering swap code

2020-12-12 Thread Jonathan Matthew
On Thu, Dec 10, 2020 at 10:46:58AM -0300, Martin Pieuchot wrote:
> On 08/12/20(Tue) 22:55, Jonathan Matthew wrote:
> > On Mon, Dec 07, 2020 at 03:15:50PM -0300, Martin Pieuchot wrote:
> > > Getting a page from the fault handler might require poking at some
> > > swap-related states.
> > > 
> > > These are not in the hot-path of the fault handler so for the moment
> > > just assert that the KERNEL_LOCK() is held or grab it if the function
> > > might be called from an future unlocked path.
> > > 
> > > ok?
> > 
> > Could you add 'K' to the list of locks in the comment above struct uvmexp 
> > too?
> 
> Updated diff below.
> 
> > I went looking for other uses of swpgonly and saw that it's used under
> > uvm_map_teardown -> uvm_unmap_kill_entry -> uvm_km_pgremove,
> > and uvm_map_teardown ensures that the kernel lock is not held.
> > Not related to this diff exactly, but is this something we need to fix?
> 
> I suppose that the problem can only occur if a kernel thread is exiting
> since this code is only executed for the kernel pmap.  Anyway I added an
> assertion.

Right, and as I understand it, kernel threads all share the proc0 vm space,
so its reference count won't ever reach 0, so the kernel map portions of
uvm_unmap_kill_entry() can't be reached from the reaper.  Looks like this is
all safe, it just requires a bit more reading than I did the first time.
I'll see if I can find a way to make it more clear.

> 
> Index: uvm/uvm_km.c
> ===
> RCS file: /cvs/src/sys/uvm/uvm_km.c,v
> retrieving revision 1.137
> diff -u -p -r1.137 uvm_km.c
> --- uvm/uvm_km.c  23 May 2020 06:15:09 -  1.137
> +++ uvm/uvm_km.c  10 Dec 2020 13:33:49 -
> @@ -243,6 +243,7 @@ uvm_km_pgremove(struct uvm_object *uobj,
>   voff_t curoff;
>   int slot;
>  
> + KERNEL_ASSERT_LOCKED();
>   KASSERT(uobj->pgops == &aobj_pager);
>  
>   for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
> Index: uvm/uvm_swap.c
> ===
> RCS file: /cvs/src/sys/uvm/uvm_swap.c,v
> retrieving revision 1.147
> diff -u -p -r1.147 uvm_swap.c
> --- uvm/uvm_swap.c29 Sep 2020 11:47:41 -  1.147
> +++ uvm/uvm_swap.c10 Dec 2020 13:30:30 -
> @@ -1403,7 +1403,7 @@ uvm_swap_alloc(int *nslots, boolean_t le
>   /*
>* lock data lock, convert slots into blocks, and enter loop
>*/
> -
> + KERNEL_ASSERT_LOCKED();
>  ReTry:   /* XXXMRG */
>   LIST_FOREACH(spp, &swap_priority, spi_swappri) {
>   TAILQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
> @@ -1449,8 +1449,10 @@ uvm_swapisfull(void)
>  {
>   int result;
>  
> + KERNEL_LOCK();
>   KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
>   result = (uvmexp.swpgonly == uvmexp.swpages);
> + KERNEL_UNLOCK();
>  
>   return result;
>  }
> @@ -1465,6 +1467,7 @@ uvm_swap_markbad(int startslot, int nslo
>  {
>   struct swapdev *sdp;
>  
> + KERNEL_LOCK();
>   sdp = swapdrum_getsdp(startslot);
>   if (sdp != NULL) {
>   /*
> @@ -1475,6 +1478,7 @@ uvm_swap_markbad(int startslot, int nslo
>*/
>   sdp->swd_npgbad += nslots;
>   }
> + KERNEL_UNLOCK();
>  }
>  
>  /*
> @@ -1501,7 +1505,7 @@ uvm_swap_free(int startslot, int nslots)
>* in the extent, and return.   must hold pri lock to do
>* lookup and access the extent.
>*/
> -
> + KERNEL_LOCK();
>   sdp = swapdrum_getsdp(startslot);
>   KASSERT(uvmexp.nswapdev >= 1);
>   KASSERT(sdp != NULL);
> @@ -1533,6 +1537,7 @@ uvm_swap_free(int startslot, int nslots)
>   }
>   }
>  #endif /* UVM_SWAP_ENCRYPT */
> + KERNEL_UNLOCK();
>  }
>  
>  /*
> @@ -1567,6 +1572,7 @@ uvm_swap_get(struct vm_page *page, int s
>   return VM_PAGER_ERROR;
>   }
>  
> + KERNEL_LOCK();
>   /* this page is (about to be) no longer only in swap. */
>   uvmexp.swpgonly--;
>  
> @@ -1577,7 +1583,7 @@ uvm_swap_get(struct vm_page *page, int s
>   /* oops, the read failed so it really is still only in swap. */
>   uvmexp.swpgonly++;
>   }
> -
> + KERNEL_UNLOCK();
>   return (result);
>  }
>  
> @@ -1599,6 +1605,8 @@ uvm_swap_io(struct vm_page **pps, int st
>   struct swapdev *sdp;
>   int encrypt = 0;
>  #endif
> +
> + KERNEL_ASSERT_LOCKED();
>  
>   write = (flags & B_READ) == 0;
>   async = (flags & B_ASYNC) != 0;
> Index: uvm/uvmexp.h
> ===
> RCS file: /cvs/src/sys/uvm/uvmexp.h,v
> retrieving revision 1.6
> diff -u -p -r1.6 uvmexp.h
> --- uvm/uvmexp.h  1 Dec 2020 13:56:22 -   1.6
> +++ uvm/uvmexp.h  10 Dec 2020 13:31:01 -
> @@ -42,6 +42,7 @@
>   *
>   *  Locks used to protect struct members in this file:
>   *   I   immutable after creation
> + *   K   kernel lock
>   *   F   u

Re: openrsync: fix poll_timeout in server mode

2020-12-12 Thread Claudio Jeker
On Sat, Dec 12, 2020 at 07:07:20AM -0500, Daniel Moch wrote:
> A recent change to openrsync added the --timeout opt.  There's code to
> handle the (default) case of --timeout=0, which sets the poll_timeout
> to -1 (INFTIM).  Unfortunately that code doesn't run in the server
> process, meaning all of the relevant calls to poll(2) return
> immediately and the process fails.
> 
> The following patch addresses the issue by moving the code that
> handles --timeout=0 up to run before the rsync_server call.
> 
> Index: main.c
> ===
> RCS file: /cvs/src/usr.bin/rsync/main.c,v
> retrieving revision 1.50
> diff -r1.50 main.c
> 411a412,417
> > /* by default and for --timeout=0 disable poll_timeout */
> > if (poll_timeout == 0)
> > poll_timeout = -1;
> > else
> > poll_timeout *= 1000;
> > 
> 420,425d425
> < 
> < /* by default and for --timeout=0 disable poll_timeout */
> < if (poll_timeout == 0)
> < poll_timeout = -1;
> < else
> < poll_timeout *= 1000;
> 

Here is the unified diff which moves the poll_timeout initialisation before
the rsync_server() call.

-- 
:wq Claudio

Index: main.c
===
RCS file: /cvs/src/usr.bin/rsync/main.c,v
retrieving revision 1.50
diff -u -p -r1.50 main.c
--- main.c  24 Nov 2020 16:54:44 -  1.50
+++ main.c  12 Dec 2020 12:33:07 -
@@ -409,6 +409,12 @@ main(int argc, char *argv[])
if (opts.port == NULL)
opts.port = "rsync";
 
+   /* by default and for --timeout=0 disable poll_timeout */
+   if (poll_timeout == 0)
+   poll_timeout = -1;
+   else
+   poll_timeout *= 1000;
+
/*
 * This is what happens when we're started with the "hidden"
 * --server option, which is invoked for the rsync on the remote
@@ -417,12 +423,6 @@ main(int argc, char *argv[])
 
if (opts.server)
exit(rsync_server(&opts, (size_t)argc, argv));
-
-   /* by default and for --timeout=0 disable poll_timeout */
-   if (poll_timeout == 0)
-   poll_timeout = -1;
-   else
-   poll_timeout *= 1000;
 
/*
 * Now we know that we're the client on the local machine



openrsync: fix poll_timeout in server mode

2020-12-12 Thread Daniel Moch
A recent change to openrsync added the --timeout opt.  There's code to
handle the (default) case of --timeout=0, which sets the poll_timeout
to -1 (INFTIM).  Unfortunately that code doesn't run in the server
process, meaning all of the relevant calls to poll(2) return
immediately and the process fails.

The following patch addresses the issue by moving the code that
handles --timeout=0 up to run before the rsync_server call.

Index: main.c
===
RCS file: /cvs/src/usr.bin/rsync/main.c,v
retrieving revision 1.50
diff -r1.50 main.c
411a412,417
>   /* by default and for --timeout=0 disable poll_timeout */
>   if (poll_timeout == 0)
>   poll_timeout = -1;
>   else
>   poll_timeout *= 1000;
> 
420,425d425
< 
<   /* by default and for --timeout=0 disable poll_timeout */
<   if (poll_timeout == 0)
<   poll_timeout = -1;
<   else
<   poll_timeout *= 1000;



Re: Switch select(2) to kqueue-based implementation

2020-12-12 Thread Visa Hankala
On Fri, Dec 11, 2020 at 09:35:59AM -0300, Martin Pieuchot wrote:
> On 10/12/20(Thu) 09:59, Martin Pieuchot wrote:
> > All previous kqueue refactoring have been committed, here's a final diff
> > to modify the internal implementation of {p,}select(2) to query kqfilter
> > handlers instead of poll ones.
> > 
> > {p,}poll(2) are left untouched to ease the transition.
> > 
> > Here's what I said in the original mail back in May [0]:
> > 
> > > The main argument for this proposal is to reduce the amount of code
> > > executed to notify userland when an event occur.  The outcome of this
> > > diff is that a single notification subsystem needs to be taken out of
> > > the KERNEL_LOCK().  This simplifies a lot existing locking tentacles.
> > > 
> > > Using kqueue internally means collision is avoided and there's no need
> > > to query handlers for fds that aren't ready.  This comes at the cost of
> > > allocating descriptors.  A space vs time trade-off.  Note that this cost
> > > can be diminished by doing lazy removal of event descriptors to be able
> > > to re-use them.
> > 
> > The logic is as follow:
> > 
> > - With this change every thread use a "private" kqueue, usable only by
> >   the kernel, to register events for select(2) and later poll(2).
> > 
> > - Events specified via FD_SET(2) are converted to their kqueue equivalent.
> >   ktrace(1) now also outputs converted events to ease debugging.
> > 
> > - kqueue_scan() might be called multiple times per syscall, just like with
> >   the last version of kevent(2). 
> > 
> > - At the end of every {p,}select(2) syscall the private kqueue is purged.
> > 
> > [0] https://marc.info/?l=openbsd-tech&m=158979921322191&w=2
> 
> Updated diff that adds a FALLTHOUGHT lost in refactoring and do not
> block in if the timeout is cleared and no events are requested, pointed 
> by cheloha@:
> 
> Index: kern/sys_generic.c
> ===
> RCS file: /cvs/src/sys/kern/sys_generic.c,v
> retrieving revision 1.132
> diff -u -p -r1.132 sys_generic.c
> --- kern/sys_generic.c2 Oct 2020 15:45:22 -   1.132
> +++ kern/sys_generic.c11 Dec 2020 12:28:10 -
> @@ -55,6 +55,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #ifdef KTRACE
>  #include 
>  #endif
> @@ -66,8 +67,21 @@
>  
>  #include 
>  
> -int selscan(struct proc *, fd_set *, fd_set *, int, int, register_t *);
> -void pollscan(struct proc *, struct pollfd *, u_int, register_t *);
> +/*
> + * Debug values:
> + *  1 - print implementation errors, things that should not happen.
> + *  2 - print ppoll(2) information, somewhat verbose
> + *  3 - print pselect(2) and ppoll(2) information, very verbose
> + */
> +int kqpoll_debug = 0;
> +#define DPRINTFN(v, x...) if (kqpoll_debug > v) do { \
> +printf("%s(%d): ", curproc->p_p->ps_comm, curproc->p_tid);   \
> +printf(x);   \
> +} while (0)

"do" and "while" are not needed with the "if", indentation with tabs.

> +
> +int pselregister(struct proc *, fd_set *, int, int, int *);
> +int pselcollect(struct proc *, struct kevent *, fd_set *[]);
> +
>  int pollout(struct pollfd *, struct pollfd *, u_int);
>  int dopselect(struct proc *, int, fd_set *, fd_set *, fd_set *,
>  struct timespec *, const sigset_t *, register_t *);
> @@ -584,11 +598,10 @@ int
>  dopselect(struct proc *p, int nd, fd_set *in, fd_set *ou, fd_set *ex,
>  struct timespec *timeout, const sigset_t *sigmask, register_t *retval)
>  {
> + struct kqueue_scan_state scan;
>   fd_mask bits[6];
>   fd_set *pibits[3], *pobits[3];
> - struct timespec elapsed, start, stop;
> - uint64_t nsecs;
> - int s, ncoll, error = 0;
> + int error, nevents = 0;
>   u_int ni;
>  
>   if (nd < 0)
> @@ -618,6 +631,8 @@ dopselect(struct proc *p, int nd, fd_set
>   pobits[2] = (fd_set *)&bits[5];
>   }
>  
> + kqpoll_init();
> +
>  #define  getbits(name, x) \
>   if (name && (error = copyin(name, pibits[x], ni))) \
>   goto done;
> @@ -636,43 +651,59 @@ dopselect(struct proc *p, int nd, fd_set
>   if (sigmask)
>   dosigsuspend(p, *sigmask &~ sigcantmask);
>  
> -retry:
> - ncoll = nselcoll;
> - atomic_setbits_int(&p->p_flag, P_SELECT);
> - error = selscan(p, pibits[0], pobits[0], nd, ni, retval);
> - if (error || *retval)
> + /* Register kqueue events */
> + if ((error = pselregister(p, pibits[0], nd, ni, &nevents) != 0))
>   goto done;
> - if (timeout == NULL || timespecisset(timeout)) {
> - if (timeout != NULL) {
> - getnanouptime(&start);
> - nsecs = MIN(TIMESPEC_TO_NSEC(timeout), MAXTSLP);
> - } else
> - nsecs = INFSLP;
> - s = splhigh();
> - if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) {
> - splx(s);
> -