powerpc64: do tc_init(9) before cpu_startclock()

2022-05-24 Thread Scott Cheloha
In the future, the clock interrupt will need a working timecounter to
accurately reschedule itself.

Move tc_init(9) up before cpu_startclock().

(I can't test this but it seems correct.)

ok?
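
For reference, with the change applied cpu_initclocks() would read roughly
as follows (reconstructed from the hunks below; lines outside the diff
context are elided):

void
cpu_initclocks(void)
{
	tb_timecounter.tc_frequency = tb_freq;
	tc_init(&tb_timecounter);

	tick_increment = tb_freq / hz;

	stathz = 100;
	/* ... */
	evcount_attach(&stat_count, "stat", NULL);

	cpu_startclock();
}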

Index: clock.c
===
RCS file: /cvs/src/sys/arch/powerpc64/powerpc64/clock.c,v
retrieving revision 1.3
diff -u -p -r1.3 clock.c
--- clock.c 23 Feb 2021 04:44:31 -  1.3
+++ clock.c 25 May 2022 00:05:59 -
@@ -57,6 +57,9 @@ tb_get_timecount(struct timecounter *tc)
 void
 cpu_initclocks(void)
 {
+   tb_timecounter.tc_frequency = tb_freq;
+   tc_init(&tb_timecounter);
+
tick_increment = tb_freq / hz;
 
stathz = 100;
@@ -68,9 +71,6 @@ cpu_initclocks(void)
evcount_attach(&stat_count, "stat", NULL);
 
cpu_startclock();
-
-   tb_timecounter.tc_frequency = tb_freq;
-   tc_init(&tb_timecounter);
 }
 
 void



tcpdump: add RFC9234 "BGP Role" support

2022-05-24 Thread Job Snijders
Hi all,

I based this off reading https://datatracker.ietf.org/doc/html/rfc9234

This code is untested! I haven't had a chance yet to tcpdump an RFC 9234
capable BGP speaker. There might be some out there, according to
https://trac.ietf.org/trac/idr/wiki/draft-ietf-idr-bgp-open-policy

Kind regards,

Job
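
For context: the ONLY_TO_CUSTOMER attribute added below carries nothing but
a 4-octet AS number.  A minimal standalone sketch of that decoding (my own
helper names; plain memcpy/ntohl instead of tcpdump's TCHECK2/EXTRACT_32BITS
macros) would be:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Decode the body of an OTC path attribute: exactly four octets holding
 * an AS number in network byte order (RFC 9234).
 */
static int
print_otc(const unsigned char *p, size_t len)
{
	uint32_t asn;

	if (len != 4)
		return -1;		/* invalid length */
	memcpy(&asn, p, sizeof(asn));
	printf(" AS%u", ntohl(asn));
	return 0;
}

int
main(void)
{
	/* arbitrary example: AS 64496, from the documentation ASN range */
	const unsigned char otc[4] = { 0x00, 0x00, 0xfb, 0xf0 };

	if (print_otc(otc, sizeof(otc)) == 0)
		putchar('\n');
	return 0;
}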

Index: print-bgp.c
===
RCS file: /cvs/src/usr.sbin/tcpdump/print-bgp.c,v
retrieving revision 1.30
diff -u -p -r1.30 print-bgp.c
--- print-bgp.c 17 Jun 2021 15:59:23 -  1.30
+++ print-bgp.c 24 May 2022 20:06:25 -
@@ -135,6 +135,7 @@ struct bgp_attr {
 #define BGPTYPE_AS4_PATH   17  /* RFC4893 */
#define BGPTYPE_AGGREGATOR4    18  /* RFC4893 */
#define BGPTYPE_LARGE_COMMUNITIES  32  /* draft-ietf-idr-large-community */
+#define BGPTYPE_ONLY_TO_CUSTOMER   35  /* RFC9234 */
 
 #define BGP_AS_SET 1
#define BGP_AS_SEQUENCE    2
@@ -172,6 +173,7 @@ static const char *bgpopt_type[] = {
 
 #define BGP_CAPCODE_MP 1
#define BGP_CAPCODE_REFRESH    2
+#define BGP_CAPCODE_BGPROLE    9 /* RFC9234 */
#define BGP_CAPCODE_RESTART    64 /* draft-ietf-idr-restart-05  */
#define BGP_CAPCODE_AS4    65 /* RFC4893 */
 
@@ -180,7 +182,9 @@ static const char *bgp_capcode[] = {
/* 3: RFC5291 */ "OUTBOUND_ROUTE_FILTERING",
/* 4: RFC3107 */ "MULTIPLE_ROUTES",
/* 5: RFC5549 */ "EXTENDED_NEXTHOP_ENCODING",
-   0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+   0, 0, 0,
+   /* 9: RFC9234 */ "BGP_ROLE",
+   0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -191,10 +195,17 @@ static const char *bgp_capcode[] = {
/* 69: [draft-ietf-idr-add-paths] */ "ADD-PATH",
/* 70: RFC7313 */ "ENHANCED_ROUTE_REFRESH"
 };
-
 #define bgp_capcode(x) \
num_or_str(bgp_capcode, sizeof(bgp_capcode)/sizeof(bgp_capcode[0]), (x))
 
+static const char *bgp_roletype[] = {
+   NULL, "Provider", "Route Server", "Route Server Client", "Customer",
+   "Lateral Peer"
+};
+#define bgp_roletype(x) \
+   num_or_str(bgp_roletype, \
+   sizeof(bgp_roletype)/sizeof(bgp_roletype[0]), (x))
+
 #define BGP_NOTIFY_MAJOR_CEASE 6
 static const char *bgpnotify_major[] = {
NULL, "Message Header Error",
@@ -215,7 +226,8 @@ static const char *bgpnotify_minor_open[
NULL, "Unsupported Version Number",
"Bad Peer AS", "Bad BGP Identifier",
"Unsupported Optional Parameter", "Authentication Failure",
-   "Unacceptable Hold Time", "Unsupported Capability",
+   "Unacceptable Hold Time", "Unsupported Capability", "Deprecated",
+   "Deprecated", "Deprecated", "Role Mismatch"
 };
 
 static const char *bgpnotify_minor_update[] = {
@@ -285,7 +297,7 @@ static const char *bgpattr_type[] = {
"ADVERTISERS", "RCID_PATH", "MP_REACH_NLRI", "MP_UNREACH_NLRI",
"EXTD_COMMUNITIES", "AS4_PATH", "AGGREGATOR4", NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-   "LARGE_COMMUNITIES",
+   "LARGE_COMMUNITIES", NULL, NULL, "ONLY_TO_CUSTOMER"
 };
 #define bgp_attr_type(x) \
num_or_str(bgpattr_type, \
@@ -590,6 +602,14 @@ bgp_attr_print(const struct bgp_attr *at
p += 12;
}
break;
+   case BGPTYPE_ONLY_TO_CUSTOMER:
+   if (len != 4) {
+   printf(" invalid len"); 
+   break;
+   }
+   TCHECK2(p[0], 4);
+   printf(" AS%u", EXTRACT_32BITS(p));
+   break;
case BGPTYPE_ORIGINATOR_ID:
if (len != 4) {
printf(" invalid len");
@@ -769,6 +789,13 @@ bgp_open_capa_print(const u_char *opt, i
printf(" BAD ENCODING");
break;
}
+   break;
+   case BGP_CAPCODE_BGPROLE:
+   if (cap_len != 1) {
+   printf(" BAD ENCODING");
+   break;
+   }
+   printf(" [%s]", bgp_roletype(opt[i]));
break;
case BGP_CAPCODE_RESTART:
if (cap_len < 2 || (cap_len - 2) % 4) {



Re: ffs_truncate: Missing uvm_vnp_uncache() w/ softdep

2022-05-24 Thread Martin Pieuchot
On 24/05/22(Tue) 15:24, Mark Kettenis wrote:
> > Date: Tue, 24 May 2022 14:28:39 +0200
> > From: Martin Pieuchot 
> > 
> > The softdep code path is missing a UVM cache invalidation compared to
> > the !softdep one.  This is necessary to flush pages of a persisting
> > vnode.
> > 
> > Since uvm_vnp_setsize() is also called later in this function for the
> > !softdep case, move it so it is not called twice.
> > 
> > ok?
> 
> I'm not sure this is correct.  I'm trying to understand why you're
> moving the uvm_vnp_setsize() call.  Are you just trying to avoid
> calling it twice?  Or are you trying to avoid calling it at all when we
> end up in an error path?
>
> The way you moved it means we'll still call it twice for "partially
> truncated" files with softdeps.  At least the way I understand the
> code, it will fsync the vnode and drop down into the "normal"
> non-softdep code, which will call uvm_vnp_setsize() (and
> uvm_vnp_uncache()) again.  So maybe you should move the
> uvm_vnp_setsize() call into the else case?

We might indeed want to do that.  I'm not sure what the implications are
of calling uvm_vnp_setsize/uncache() after VOP_FSYNC(), which might fail.
So I'd rather play it safe and go with this diff.

> > Index: ufs/ffs/ffs_inode.c
> > ===
> > RCS file: /cvs/src/sys/ufs/ffs/ffs_inode.c,v
> > retrieving revision 1.81
> > diff -u -p -r1.81 ffs_inode.c
> > --- ufs/ffs/ffs_inode.c 12 Dec 2021 09:14:59 -  1.81
> > +++ ufs/ffs/ffs_inode.c 4 May 2022 15:32:15 -
> > @@ -172,11 +172,12 @@ ffs_truncate(struct inode *oip, off_t le
> > if (length > fs->fs_maxfilesize)
> > return (EFBIG);
> >  
> > -   uvm_vnp_setsize(ovp, length);
> > oip->i_ci.ci_lasta = oip->i_ci.ci_clen 
> > = oip->i_ci.ci_cstart = oip->i_ci.ci_lastw = 0;
> >  
> > if (DOINGSOFTDEP(ovp)) {
> > +   uvm_vnp_setsize(ovp, length);
> > +   (void) uvm_vnp_uncache(ovp);
> > if (length > 0 || softdep_slowdown(ovp)) {
> > /*
> >  * If a file is only partially truncated, then
> > 
> > 



Re: Call uvm_vnp_uncache() before VOP_RENAME()

2022-05-24 Thread Mark Kettenis
> Date: Tue, 24 May 2022 14:23:46 +0200
> From: Martin Pieuchot 
> 
> On 17/05/22(Tue) 16:55, Martin Pieuchot wrote:
> > nfsrv_rename() should behave like dorenameat() and tell UVM to "flush" a
> > possibly mmap'ed file before calling VOP_RENAME().
> > 
> > ok?
> 
> Anyone?

Makes sense to me.

ok kettenis@

> > Index: nfs/nfs_serv.c
> > ===
> > RCS file: /cvs/src/sys/nfs/nfs_serv.c,v
> > retrieving revision 1.120
> > diff -u -p -r1.120 nfs_serv.c
> > --- nfs/nfs_serv.c  11 Mar 2021 13:31:35 -  1.120
> > +++ nfs/nfs_serv.c  4 May 2022 15:29:06 -
> > @@ -1488,6 +1488,9 @@ nfsrv_rename(struct nfsrv_descript *nfsd
> > error = -1;
> >  out:
> > if (!error) {
> > +   if (tvp) {
> > +   (void)uvm_vnp_uncache(tvp);
> > +   }
> > error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
> >tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
> > } else {
> > 
> 
> 



Re: ffs_truncate: Missing uvm_vnp_uncache() w/ softdep

2022-05-24 Thread Mark Kettenis
> Date: Tue, 24 May 2022 14:28:39 +0200
> From: Martin Pieuchot 
> 
> The softdep code path is missing a UVM cache invalidation compared to
> the !softdep one.  This is necessary to flush pages of a persisting
> vnode.
> 
> Since uvm_vnp_setsize() is also called later in this function for the
> !softdep case, move it so it is not called twice.
> 
> ok?

I'm not sure this is correct.  I'm trying to understand why you're
moving the uvm_vnp_setsize() call.  Are you just trying to avoid calling
it twice?  Or are you trying to avoid calling it at all when we end up
in an error path?

The way you moved it means we'll still call it twice for "partially
truncated" files with softdeps.  At least the way I understand the
code, it will fsync the vnode and drop down into the "normal"
non-softdep code, which will call uvm_vnp_setsize() (and
uvm_vnp_uncache()) again.  So maybe you should move the
uvm_vnp_setsize() call into the else case?
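
Something along these lines, I mean (just a sketch of the placement, not a
tested diff; the rest of ffs_truncate() is elided):

	if (DOINGSOFTDEP(ovp)) {
		(void) uvm_vnp_uncache(ovp);
		if (length > 0 || softdep_slowdown(ovp)) {
			/*
			 * Partial truncation: fsync and fall through to
			 * the normal path below, which already calls
			 * uvm_vnp_setsize().
			 */
		} else {
			uvm_vnp_setsize(ovp, length);
			/* softdep-only truncation; returns early */
		}
	}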


> Index: ufs/ffs/ffs_inode.c
> ===
> RCS file: /cvs/src/sys/ufs/ffs/ffs_inode.c,v
> retrieving revision 1.81
> diff -u -p -r1.81 ffs_inode.c
> --- ufs/ffs/ffs_inode.c   12 Dec 2021 09:14:59 -  1.81
> +++ ufs/ffs/ffs_inode.c   4 May 2022 15:32:15 -
> @@ -172,11 +172,12 @@ ffs_truncate(struct inode *oip, off_t le
>   if (length > fs->fs_maxfilesize)
>   return (EFBIG);
>  
> - uvm_vnp_setsize(ovp, length);
>   oip->i_ci.ci_lasta = oip->i_ci.ci_clen 
>   = oip->i_ci.ci_cstart = oip->i_ci.ci_lastw = 0;
>  
>   if (DOINGSOFTDEP(ovp)) {
> + uvm_vnp_setsize(ovp, length);
> + (void) uvm_vnp_uncache(ovp);
>   if (length > 0 || softdep_slowdown(ovp)) {
>   /*
>* If a file is only partially truncated, then
> 
> 



Please test: rewrite of pdaemon

2022-05-24 Thread Martin Pieuchot
The diff below brings in & adapts most of the changes from NetBSD's r1.37 of
uvm_pdaemon.c.  My motivation for doing this is to untangle the inner
loop of uvmpd_scan_inactive(), which will allow us to split the global
`pageqlock' mutex in a next step.

The idea behind this change is to get rid of the too-complex uvm_pager*
abstraction by checking early if a page is going to be flushed or
swapped to disk.  The loop is then clearly divided into two cases, which
makes it more readable.

This also opens the door to a better integration between UVM's vnode
layer and the buffer cache.

The main loop of uvmpd_scan_inactive() can be understood as below:

. If a page can be flushed, we can call "uvn_flush()" directly and pass the
  PGO_ALLPAGES flag instead of building a cluster beforehand.  Note that,
  in its current form, uvn_flush() is synchronous.

. If the page needs to be swapped, mark it as PG_PAGEOUT, build a cluster
  and, once it is full, call uvm_swap_put().
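
Schematically, the loop then looks like this (a sketch of the idea only;
names and details differ in the actual diff):

	for (each inactive page pg) {
		if (pg is clean)
			continue;

		if (pg is backed by a vnode) {
			/*
			 * Case 1: flush it through the vnode layer right
			 * away; uvn_flush() is synchronous for now.
			 */
			uvn_flush(pg->uobject, ..., PGO_ALLPAGES);
		} else {
			/*
			 * Case 2: anon/aobj page headed to swap: mark it
			 * PG_PAGEOUT, add it to the cluster and write the
			 * cluster out once it is full.
			 */
			atomic_setbits_int(&pg->pg_flags, PG_PAGEOUT);
			/* add pg to the swap cluster ... */
			if (the cluster is full)
				uvm_swap_put(swslot, cluster, npages, flags);
		}
	}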

Please test this diff; do not hesitate to play with the
`vm.swapencrypt.enable' sysctl(2).

Index: uvm/uvm_aobj.c
===
RCS file: /cvs/src/sys/uvm/uvm_aobj.c,v
retrieving revision 1.103
diff -u -p -r1.103 uvm_aobj.c
--- uvm/uvm_aobj.c  29 Dec 2021 20:22:06 -  1.103
+++ uvm/uvm_aobj.c  24 May 2022 12:31:34 -
@@ -143,7 +143,7 @@ struct pool uvm_aobj_pool;
 
 static struct uao_swhash_elt   *uao_find_swhash_elt(struct uvm_aobj *, int,
 boolean_t);
-static int  uao_find_swslot(struct uvm_object *, int);
+int uao_find_swslot(struct uvm_object *, int);
 static boolean_tuao_flush(struct uvm_object *, voff_t,
 voff_t, int);
 static void uao_free(struct uvm_aobj *);
@@ -241,7 +241,7 @@ uao_find_swhash_elt(struct uvm_aobj *aob
 /*
  * uao_find_swslot: find the swap slot number for an aobj/pageidx
  */
-inline static int
+int
 uao_find_swslot(struct uvm_object *uobj, int pageidx)
 {
struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
Index: uvm/uvm_aobj.h
===
RCS file: /cvs/src/sys/uvm/uvm_aobj.h,v
retrieving revision 1.17
diff -u -p -r1.17 uvm_aobj.h
--- uvm/uvm_aobj.h  21 Oct 2020 09:08:14 -  1.17
+++ uvm/uvm_aobj.h  24 May 2022 12:31:34 -
@@ -60,6 +60,7 @@
 
 void uao_init(void);
 int uao_set_swslot(struct uvm_object *, int, int);
+int uao_find_swslot (struct uvm_object *, int);
 int uao_dropswap(struct uvm_object *, int);
 int uao_swap_off(int, int);
 int uao_shrink(struct uvm_object *, int);
Index: uvm/uvm_map.c
===
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.291
diff -u -p -r1.291 uvm_map.c
--- uvm/uvm_map.c   4 May 2022 14:58:26 -   1.291
+++ uvm/uvm_map.c   24 May 2022 12:31:34 -
@@ -3215,8 +3215,9 @@ uvm_object_printit(struct uvm_object *uo
  * uvm_page_printit: actually print the page
  */
 static const char page_flagbits[] =
-   "\20\1BUSY\2WANTED\3TABLED\4CLEAN\5CLEANCHK\6RELEASED\7FAKE\10RDONLY"
-   "\11ZERO\12DEV\15PAGER1\21FREE\22INACTIVE\23ACTIVE\25ANON\26AOBJ"
+   "\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY"
+   "\11ZERO\12DEV\13CLEANCHK"
+   "\15PAGER1\21FREE\22INACTIVE\23ACTIVE\25ANON\26AOBJ"
"\27ENCRYPT\31PMAP0\32PMAP1\33PMAP2\34PMAP3\35PMAP4\36PMAP5";
 
 void
Index: uvm/uvm_page.c
===
RCS file: /cvs/src/sys/uvm/uvm_page.c,v
retrieving revision 1.166
diff -u -p -r1.166 uvm_page.c
--- uvm/uvm_page.c  12 May 2022 12:48:36 -  1.166
+++ uvm/uvm_page.c  24 May 2022 12:32:54 -
@@ -960,6 +960,7 @@ uvm_pageclean(struct vm_page *pg)
 {
u_int flags_to_clear = 0;
 
+   KASSERT((pg->pg_flags & PG_PAGEOUT) == 0);
if ((pg->pg_flags & (PG_TABLED|PQ_ACTIVE|PQ_INACTIVE)) &&
(pg->uobject == NULL || !UVM_OBJ_IS_PMAP(pg->uobject)))
MUTEX_ASSERT_LOCKED();
@@ -978,11 +979,14 @@ uvm_pageclean(struct vm_page *pg)
rw_write_held(pg->uanon->an_lock));
 
/*
-* if the page was an object page (and thus "TABLED"), remove it
-* from the object.
+* remove page from its object or anon.
 */
-   if (pg->pg_flags & PG_TABLED)
+   if (pg->pg_flags & PG_TABLED) {
uvm_pageremove(pg);
+   } else if (pg->uanon != NULL) {
+   pg->uanon->an_page = NULL;
+   pg->uanon = NULL;
+   }
 
/*
 * now remove the page from the queues
@@ -996,10 +1000,6 @@ uvm_pageclean(struct vm_page *pg)
pg->wire_count = 0;
uvmexp.wired--;
}
-   if (pg->uanon) {
-   pg->uanon->an_page = NULL;
-   pg->uanon = NULL;
-   }
 
   

ffs_truncate: Missing uvm_vnp_uncache() w/ softdep

2022-05-24 Thread Martin Pieuchot
The softdep code path is missing a UVM cache invalidation compared to
the !softdep one.  This is necessary to flush pages of a persisting
vnode.

Since uvm_vnp_setsize() is also called later in this function for the
!softdep case, move it so it is not called twice.

ok?

Index: ufs/ffs/ffs_inode.c
===
RCS file: /cvs/src/sys/ufs/ffs/ffs_inode.c,v
retrieving revision 1.81
diff -u -p -r1.81 ffs_inode.c
--- ufs/ffs/ffs_inode.c 12 Dec 2021 09:14:59 -  1.81
+++ ufs/ffs/ffs_inode.c 4 May 2022 15:32:15 -
@@ -172,11 +172,12 @@ ffs_truncate(struct inode *oip, off_t le
if (length > fs->fs_maxfilesize)
return (EFBIG);
 
-   uvm_vnp_setsize(ovp, length);
oip->i_ci.ci_lasta = oip->i_ci.ci_clen 
= oip->i_ci.ci_cstart = oip->i_ci.ci_lastw = 0;
 
if (DOINGSOFTDEP(ovp)) {
+   uvm_vnp_setsize(ovp, length);
+   (void) uvm_vnp_uncache(ovp);
if (length > 0 || softdep_slowdown(ovp)) {
/*
 * If a file is only partially truncated, then



Re: Call uvm_vnp_uncache() before VOP_RENAME()

2022-05-24 Thread Martin Pieuchot
On 17/05/22(Tue) 16:55, Martin Pieuchot wrote:
> nfsrv_rename() should behave like dorenameat() and tell UVM to "flush" a
> possibly mmap'ed file before calling VOP_RENAME().
> 
> ok?

Anyone?

> Index: nfs/nfs_serv.c
> ===
> RCS file: /cvs/src/sys/nfs/nfs_serv.c,v
> retrieving revision 1.120
> diff -u -p -r1.120 nfs_serv.c
> --- nfs/nfs_serv.c11 Mar 2021 13:31:35 -  1.120
> +++ nfs/nfs_serv.c4 May 2022 15:29:06 -
> @@ -1488,6 +1488,9 @@ nfsrv_rename(struct nfsrv_descript *nfsd
>   error = -1;
>  out:
>   if (!error) {
> + if (tvp) {
> + (void)uvm_vnp_uncache(tvp);
> + }
>   error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
>  tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
>   } else {
> 



Re: rpki-client unify max request defines

2022-05-24 Thread Theo Buehler
On Tue, May 24, 2022 at 10:03:31AM +0200, Claudio Jeker wrote:
> This diff moves and renames the defines that set the maximum number of
> parallel requests for http and rsync.
> The defines are now MAX_HTTP_REQUESTS and MAX_RSYNC_REQUESTS; the values
> remain the same.

ok

> Also move the memset of pollfd sets in http.c into the loop. It is not
> needed but I prefer it that way.

Also ok. I would commit this separately as it is completely unrelated.

> 
> -- 
> :wq Claudio
> 
> Index: extern.h
> ===
> RCS file: /cvs/src/usr.sbin/rpki-client/extern.h,v
> retrieving revision 1.137
> diff -u -p -r1.137 extern.h
> --- extern.h  11 May 2022 21:19:06 -  1.137
> +++ extern.h  24 May 2022 07:44:22 -
> @@ -702,8 +702,9 @@ int   mkpathat(int, const char *);
>  /* Maximum depth of the RPKI tree. */
>  #define MAX_CERT_DEPTH   12
>  
> -/* Maximum number of concurrent rsync processes. */
> -#define MAX_RSYNC_PROCESSES  16
> +/* Maximum number of concurrent http and rsync requests. */
> +#define MAX_HTTP_REQUESTS  64
> +#define MAX_RSYNC_REQUESTS   16
>  
>  /* Maximum allowd repositories per tal */
>  #define MAX_REPO_PER_TAL 1000
> Index: http.c
> ===
> RCS file: /cvs/src/usr.sbin/rpki-client/http.c,v
> retrieving revision 1.60
> diff -u -p -r1.60 http.c
> --- http.c15 May 2022 16:43:34 -  1.60
> +++ http.c24 May 2022 07:44:22 -
> @@ -71,9 +71,8 @@
>  #define HTTP_BUF_SIZE(32 * 1024)
>  #define HTTP_IDLE_TIMEOUT10
>  #define HTTP_IO_TIMEOUT  (3 * 60)
> -#define MAX_CONNECTIONS  64
>  #define MAX_CONTENTLEN   (2 * 1024 * 1024 * 1024LL)
> -#define NPFDS(MAX_CONNECTIONS + 1)
> +#define NPFDS(MAX_HTTP_REQUESTS + 1)
>  
>  enum res {
>   DONE,
> @@ -620,7 +619,7 @@ http_req_schedule(struct http_request *r
>   return 1;
>   }
>  
> - if (http_conn_count < MAX_CONNECTIONS) {
> + if (http_conn_count < MAX_HTTP_REQUESTS) {
>   http_new(req);
>   return 1;
>   }
> @@ -1793,8 +1792,6 @@ proc_http(char *bind_addr, int fd)
>   if (pledge("stdio inet dns recvfd", NULL) == -1)
>   err(1, "pledge");
>  
> - memset(&pfds, 0, sizeof(pfds));
> -
>   msgbuf_init(&msgq);
>   msgq.fd = fd;
>  
> @@ -1803,6 +1800,7 @@ proc_http(char *bind_addr, int fd)
>   int timeout;
>   size_t i;
>  
> + memset(&pfds, 0, sizeof(pfds));
>   pfds[0].fd = fd;
>   pfds[0].events = POLLIN;
>   if (msgq.queued)
> Index: rsync.c
> ===
> RCS file: /cvs/src/usr.sbin/rpki-client/rsync.c,v
> retrieving revision 1.37
> diff -u -p -r1.37 rsync.c
> --- rsync.c   20 Apr 2022 15:38:24 -  1.37
> +++ rsync.c   24 May 2022 07:44:22 -
> @@ -147,7 +147,7 @@ proc_rsync(char *prog, char *bind_addr, 
>   struct msgbuf msgq;
>   struct ibuf *b, *inbuf = NULL;
>   sigset_t mask, oldmask;
> - struct rsyncproc ids[MAX_RSYNC_PROCESSES] = { 0 };
> + struct rsyncproc ids[MAX_RSYNC_REQUESTS] = { 0 };
>  
>   if (pledge("stdio rpath proc exec unveil", NULL) == -1)
>   err(1, "pledge");
> @@ -211,7 +211,7 @@ proc_rsync(char *prog, char *bind_addr, 
>   int st;
>  
>   pfd.events = 0;
> - if (nprocs < MAX_RSYNC_PROCESSES)
> + if (nprocs < MAX_RSYNC_REQUESTS)
>   pfd.events |= POLLIN;
>   if (msgq.queued)
>   pfd.events |= POLLOUT;
> @@ -230,10 +230,10 @@ proc_rsync(char *prog, char *bind_addr, 
>   while ((pid = waitpid(WAIT_ANY, &st, WNOHANG)) > 0) {
>   int ok = 1;
>  
> - for (i = 0; i < MAX_RSYNC_PROCESSES; i++)
> + for (i = 0; i < MAX_RSYNC_REQUESTS; i++)
>   if (ids[i].pid == pid)
>   break;
> - if (i >= MAX_RSYNC_PROCESSES)
> + if (i >= MAX_RSYNC_REQUESTS)
>   errx(1, "waitpid: %d unexpected", pid);
>  
>   if (!WIFEXITED(st)) {
> @@ -278,7 +278,7 @@ proc_rsync(char *prog, char *bind_addr, 
>  
>   if (!(pfd.revents & POLLIN))
>   continue;
> - if (nprocs >= MAX_RSYNC_PROCESSES)
> + if (nprocs >= MAX_RSYNC_REQUESTS)
>   continue;
>  
>   b = io_buf_read(fd, &inbuf);
> @@ -340,10 +340,10 @@ proc_rsync(char *prog, char *bind_addr, 
>  
>   /* Augment the list of running processes. */
>  
> - for (i 

rpki-client unify max request defines

2022-05-24 Thread Claudio Jeker
This diff moves and renames the defines that set the maximum number of
parallel requests for http and rsync.
The defines are now MAX_HTTP_REQUESTS and MAX_RSYNC_REQUESTS; the values
remain the same.
Also move the memset of pollfd sets in http.c into the loop. It is not
needed but I prefer it that way.
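
i.e. each iteration now starts from a clean slate (a sketch of the pattern,
see the http.c hunks below):

	for (;;) {
		memset(&pfds, 0, sizeof(pfds));
		pfds[0].fd = fd;
		pfds[0].events = POLLIN;
		if (msgq.queued)
			pfds[0].events |= POLLOUT;
		/* ... fill in the remaining pollfds, then poll() ... */
	}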

-- 
:wq Claudio

Index: extern.h
===
RCS file: /cvs/src/usr.sbin/rpki-client/extern.h,v
retrieving revision 1.137
diff -u -p -r1.137 extern.h
--- extern.h11 May 2022 21:19:06 -  1.137
+++ extern.h24 May 2022 07:44:22 -
@@ -702,8 +702,9 @@ int mkpathat(int, const char *);
 /* Maximum depth of the RPKI tree. */
 #define MAX_CERT_DEPTH 12
 
-/* Maximum number of concurrent rsync processes. */
-#define MAX_RSYNC_PROCESSES16
+/* Maximum number of concurrent http and rsync requests. */
+#define MAX_HTTP_REQUESTS  64
+#define MAX_RSYNC_REQUESTS 16
 
 /* Maximum allowd repositories per tal */
 #define MAX_REPO_PER_TAL   1000
Index: http.c
===
RCS file: /cvs/src/usr.sbin/rpki-client/http.c,v
retrieving revision 1.60
diff -u -p -r1.60 http.c
--- http.c  15 May 2022 16:43:34 -  1.60
+++ http.c  24 May 2022 07:44:22 -
@@ -71,9 +71,8 @@
 #define HTTP_BUF_SIZE  (32 * 1024)
 #define HTTP_IDLE_TIMEOUT  10
 #define HTTP_IO_TIMEOUT(3 * 60)
-#define MAX_CONNECTIONS64
 #define MAX_CONTENTLEN (2 * 1024 * 1024 * 1024LL)
-#define NPFDS  (MAX_CONNECTIONS + 1)
+#define NPFDS  (MAX_HTTP_REQUESTS + 1)
 
 enum res {
DONE,
@@ -620,7 +619,7 @@ http_req_schedule(struct http_request *r
return 1;
}
 
-   if (http_conn_count < MAX_CONNECTIONS) {
+   if (http_conn_count < MAX_HTTP_REQUESTS) {
http_new(req);
return 1;
}
@@ -1793,8 +1792,6 @@ proc_http(char *bind_addr, int fd)
if (pledge("stdio inet dns recvfd", NULL) == -1)
err(1, "pledge");
 
-   memset(&pfds, 0, sizeof(pfds));
-
msgbuf_init(&msgq);
msgq.fd = fd;
 
@@ -1803,6 +1800,7 @@ proc_http(char *bind_addr, int fd)
int timeout;
size_t i;
 
+   memset(&pfds, 0, sizeof(pfds));
pfds[0].fd = fd;
pfds[0].events = POLLIN;
if (msgq.queued)
Index: rsync.c
===
RCS file: /cvs/src/usr.sbin/rpki-client/rsync.c,v
retrieving revision 1.37
diff -u -p -r1.37 rsync.c
--- rsync.c 20 Apr 2022 15:38:24 -  1.37
+++ rsync.c 24 May 2022 07:44:22 -
@@ -147,7 +147,7 @@ proc_rsync(char *prog, char *bind_addr, 
struct msgbuf msgq;
struct ibuf *b, *inbuf = NULL;
sigset_t mask, oldmask;
-   struct rsyncproc ids[MAX_RSYNC_PROCESSES] = { 0 };
+   struct rsyncproc ids[MAX_RSYNC_REQUESTS] = { 0 };
 
if (pledge("stdio rpath proc exec unveil", NULL) == -1)
err(1, "pledge");
@@ -211,7 +211,7 @@ proc_rsync(char *prog, char *bind_addr, 
int st;
 
pfd.events = 0;
-   if (nprocs < MAX_RSYNC_PROCESSES)
+   if (nprocs < MAX_RSYNC_REQUESTS)
pfd.events |= POLLIN;
if (msgq.queued)
pfd.events |= POLLOUT;
@@ -230,10 +230,10 @@ proc_rsync(char *prog, char *bind_addr, 
while ((pid = waitpid(WAIT_ANY, &st, WNOHANG)) > 0) {
int ok = 1;
 
-   for (i = 0; i < MAX_RSYNC_PROCESSES; i++)
+   for (i = 0; i < MAX_RSYNC_REQUESTS; i++)
if (ids[i].pid == pid)
break;
-   if (i >= MAX_RSYNC_PROCESSES)
+   if (i >= MAX_RSYNC_REQUESTS)
errx(1, "waitpid: %d unexpected", pid);
 
if (!WIFEXITED(st)) {
@@ -278,7 +278,7 @@ proc_rsync(char *prog, char *bind_addr, 
 
if (!(pfd.revents & POLLIN))
continue;
-   if (nprocs >= MAX_RSYNC_PROCESSES)
+   if (nprocs >= MAX_RSYNC_REQUESTS)
continue;
 
b = io_buf_read(fd, &inbuf);
@@ -340,10 +340,10 @@ proc_rsync(char *prog, char *bind_addr, 
 
/* Augment the list of running processes. */
 
-   for (i = 0; i < MAX_RSYNC_PROCESSES; i++)
+   for (i = 0; i < MAX_RSYNC_REQUESTS; i++)
if (ids[i].pid == 0)
break;
-   assert(i < MAX_RSYNC_PROCESSES);
+   assert(i < MAX_RSYNC_REQUESTS);
ids[i].id