Re: armv7 kyua runs via chroot on aarch64: zfs tests leave behind processes from timed out tests

2023-08-03 Thread Mark Millard
On Aug 3, 2023, at 07:18, Mark Millard  wrote:

> On Aug 3, 2023, at 00:19, Mark Millard  wrote:
> 
>> This is after the patch (leading whitespace might
>> not have been preserved in what you see):
>> 
>> # git -C /usr/main-src/ diff sys/dev/md/
>> diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
>> index a719dccb1955..365296ec4276 100644
>> --- a/sys/dev/md/md.c
>> +++ b/sys/dev/md/md.c
>> @@ -147,8 +147,15 @@ struct md_ioctl32 {
>>   int md_fwsectors;
>>   uint32_tmd_label;
>>   int md_pad[MDNPAD];
>> +#ifdef __aarch64__
>> +   uint32_tmd_pad0;
>> +#endif
>> } __attribute__((__packed__));
>> +#ifdef __aarch64__
>> +CTASSERT((sizeof(struct md_ioctl32)) == 440);
>> +#else
>> CTASSERT((sizeof(struct md_ioctl32)) == 436);
>> +#endif
>> 
>> #defineMDIOCATTACH_32  _IOC_NEWTYPE(MDIOCATTACH, struct md_ioctl32)
>> #defineMDIOCDETACH_32  _IOC_NEWTYPE(MDIOCDETACH, struct md_ioctl32)
>> 
>> 
>> The kyua run is still in progress, but at this point there is
>> the following accumulation from the zfs testing timeouts:
>> 
>> # ps -alxdww
>> UID   PID  PPID C PRI NI   VSZ   RSS MWCHAN   STAT TTTIME COMMAND
>> . . .
>> 0 17491 1 6  20  0 36460 12324 -T - 0:24.71 |-- 
>> fsync_integrity /testdir2316/testfile2316
>> 0 17551 1 5  20  0 10600  7512 tx->tx_s D - 0:00.00 |-- 
>> /sbin/zpool destroy -f testpool.2316
>> 0 17739 1 7  20  0 10600  7308 zfs tear D - 0:00.00 |-- 
>> /sbin/zpool destroy -f testpool.2316
>> 0 17841 1 3  20  0 10600  7316 tx->tx_s D - 0:00.00 |-- 
>> /sbin/zpool destroy -f testpool.2316
>> 0 17860 1 0  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 17888 1 3  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 17907 1 6  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 17928 1 7  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 17955 1 0  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 17976 1 4  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 17995 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18023 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18043 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18064 1 3  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18085 1 0  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18114 1 7  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18135 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18157 1 6  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18177 1 6  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18205 1 4  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18224 1 1  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18255 1 3  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18275 1 1  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18296 1 5  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18317 1 4  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18345 1 4  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18365 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18386 1 3  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18412 1 1  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18447 1 5  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18466 1 5  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18516 1 6  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18535 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
>> 0 18632 1 0  20  0 10080  6956 spa_name D - 0:00.00 |-- 
>> /sbin/zfs upgrade
> 
> It has added:
> 
>  0 18656 1 7  20  0  10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18748 1 0  20  0  10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18767 1 4  20  0  10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18858 1 5  20  0  10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18877 1 0  20  0  10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18907 1 7  20  0  10080  6956 spa_name D - 0:00.00 |-- 
>

Re: armv7 kyua runs via chroot on aarch64: zfs tests leave behind processes from timed out tests

2023-08-03 Thread Mark Millard
On Aug 3, 2023, at 00:19, Mark Millard  wrote:

> This is after the patch (leading whitespace might
> not have been preserved in what you see):
> 
> # git -C /usr/main-src/ diff sys/dev/md/
> diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
> index a719dccb1955..365296ec4276 100644
> --- a/sys/dev/md/md.c
> +++ b/sys/dev/md/md.c
> @@ -147,8 +147,15 @@ struct md_ioctl32 {
>int md_fwsectors;
>uint32_tmd_label;
>int md_pad[MDNPAD];
> +#ifdef __aarch64__
> +   uint32_tmd_pad0;
> +#endif
> } __attribute__((__packed__));
> +#ifdef __aarch64__
> +CTASSERT((sizeof(struct md_ioctl32)) == 440);
> +#else
> CTASSERT((sizeof(struct md_ioctl32)) == 436);
> +#endif
> 
> #defineMDIOCATTACH_32  _IOC_NEWTYPE(MDIOCATTACH, struct md_ioctl32)
> #defineMDIOCDETACH_32  _IOC_NEWTYPE(MDIOCDETACH, struct md_ioctl32)
> 
> 
> The kyua run is still in progress, but at this point there is
> the following accumulation from the zfs testing timeouts:
> 
> # ps -alxdww
> UID   PID  PPID C PRI NI   VSZ   RSS MWCHAN   STAT TTTIME COMMAND
> . . .
>  0 17491 1 6  20  0 36460 12324 -T - 0:24.71 |-- 
> fsync_integrity /testdir2316/testfile2316
>  0 17551 1 5  20  0 10600  7512 tx->tx_s D - 0:00.00 |-- 
> /sbin/zpool destroy -f testpool.2316
>  0 17739 1 7  20  0 10600  7308 zfs tear D - 0:00.00 |-- 
> /sbin/zpool destroy -f testpool.2316
>  0 17841 1 3  20  0 10600  7316 tx->tx_s D - 0:00.00 |-- 
> /sbin/zpool destroy -f testpool.2316
>  0 17860 1 0  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 17888 1 3  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 17907 1 6  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 17928 1 7  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 17955 1 0  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 17976 1 4  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 17995 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18023 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18043 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18064 1 3  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18085 1 0  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18114 1 7  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18135 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18157 1 6  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18177 1 6  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18205 1 4  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18224 1 1  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18255 1 3  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18275 1 1  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18296 1 5  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18317 1 4  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18345 1 4  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18365 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18386 1 3  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18412 1 1  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18447 1 5  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18466 1 5  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18516 1 6  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18535 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade
>  0 18632 1 0  20  0 10080  6956 spa_name D - 0:00.00 |-- 
> /sbin/zfs upgrade

It has added:

  0 18656 1 7  20  0  10080  6956 spa_name D - 0:00.00 |-- 
/sbin/zfs upgrade
  0 18748 1 0  20  0  10080  6956 spa_name D - 0:00.00 |-- 
/sbin/zfs upgrade
  0 18767 1 4  20  0  10080  6956 spa_name D - 0:00.00 |-- 
/sbin/zfs upgrade
  0 18858 1 5  20  0  10080  6956 spa_name D - 0:00.00 |-- 
/sbin/zfs upgrade
  0 18877 1 0  20  0  10080  6956 spa_name D - 0:00.00 |-- 
/sbin/zfs upgrade
  0 18907 1 7  20  0  10080  6956 spa_name D - 0:00.00 |-- 
/sbin/zfs upgrade
  0 18926 1 5  20  0  10080  6956 spa_name D - 0:00.00 |-- 
/sbin/zfs upgrade
  0 18956 1 7  20  0  1008

Heads-up: MAXCPU increasing shortly

2023-08-03 Thread Ed Maste
On Fri, 5 May 2023 at 09:38, Ed Maste  wrote:
>
> FreeBSD supports up to 256 CPU cores in the default kernel configuration
> (on Tier-1 architectures).  Systems with more than 256 cores are
> available now, and will become increasingly common over FreeBSD 14’s
> lifetime.  The FreeBSD Foundation is supporting the effort to increase
> MAXCPU, and PR269572[1] is open to track tasks and changes.

I intend to commit the change to increase amd64 MAXCPU to 1024 very
soon. After updating your src tree and building a new kernel you will
need to rebuild kernel modules that come from outside of the src tree
(such as drm-kmod or VirtualBox).

[1] https://bugs.freebsd.org/269572



armv7 kyua runs via chroot on aarch64: zfs tests leave behind processes from timed out tests

2023-08-03 Thread Mark Millard
This is after the patch (leading whitespace might
not have been preserved in what you see):

# git -C /usr/main-src/ diff sys/dev/md/
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index a719dccb1955..365296ec4276 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -147,8 +147,15 @@ struct md_ioctl32 {
int md_fwsectors;
uint32_tmd_label;
int md_pad[MDNPAD];
+#ifdef __aarch64__
+   uint32_tmd_pad0;
+#endif
 } __attribute__((__packed__));
+#ifdef __aarch64__
+CTASSERT((sizeof(struct md_ioctl32)) == 440);
+#else
 CTASSERT((sizeof(struct md_ioctl32)) == 436);
+#endif
 
 #defineMDIOCATTACH_32  _IOC_NEWTYPE(MDIOCATTACH, struct md_ioctl32)
 #defineMDIOCDETACH_32  _IOC_NEWTYPE(MDIOCDETACH, struct md_ioctl32)


The kyua run is still in progress, but at this point there is
the following accumulation from the zfs testing timeouts:

# ps -alxdww
UID   PID  PPID C PRI NI   VSZ   RSS MWCHAN   STAT TTTIME COMMAND
. . .
  0 17491 1 6  20  0 36460 12324 -T - 0:24.71 |-- 
fsync_integrity /testdir2316/testfile2316
  0 17551 1 5  20  0 10600  7512 tx->tx_s D - 0:00.00 |-- 
/sbin/zpool destroy -f testpool.2316
  0 17739 1 7  20  0 10600  7308 zfs tear D - 0:00.00 |-- 
/sbin/zpool destroy -f testpool.2316
  0 17841 1 3  20  0 10600  7316 tx->tx_s D - 0:00.00 |-- 
/sbin/zpool destroy -f testpool.2316
  0 17860 1 0  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 17888 1 3  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 17907 1 6  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 17928 1 7  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 17955 1 0  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 17976 1 4  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 17995 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18023 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18043 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18064 1 3  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18085 1 0  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18114 1 7  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18135 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18157 1 6  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18177 1 6  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18205 1 4  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18224 1 1  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18255 1 3  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18275 1 1  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18296 1 5  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18317 1 4  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18345 1 4  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18365 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18386 1 3  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18412 1 1  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18447 1 5  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18466 1 5  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18516 1 6  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18535 1 2  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade
  0 18632 1 0  20  0 10080  6956 spa_name D - 0:00.00 |-- /sbin/zfs 
upgrade

Lots of these are from 300s timeouts but some are from 1200s or
1800s or 3600s timeouts.

For reference:

sys/cddl/zfs/tests/txg_integrity/txg_integrity_test:fsync_integrity_001_pos  -> 
 broken: Test case body timed out  [1800.053s]
sys/cddl/zfs/tests/txg_integrity/txg_integrity_test:txg_integrity_001_pos  ->  
passed  [63.702s]
sys/cddl/zfs/tests/userquota/userquota_test:groupspace_001_pos  ->  skipped: 
Required program 'runwattr' not found in PATH  [0.003s]
sys/cddl/zfs/tests/userquota/userquota_test:groupspace_002_pos  ->  skipped: 
Required program 'runwattr' not found in PATH  [0.002s]
sys/cddl/zfs/tests/userquota/userquota_test:userquota_001_pos  ->  skipped: 
Required program 'runwattr' not found in PATH  [0.002s]
sys/cddl/zfs/tests/userquota/userquota_test:userquota_002_pos  ->  broken: Test 
case cleanup timed out  [0.148s]
sys/cddl/zfs/tests/userquota/userq