For the record, from Slack: I have reproduced the issue on real hardware, with local SAS drives attached to an LSI 3008.

```
igork@sshgate:~$ ssh [email protected]
DilOS (based on illumos) Version 1.3.7.202-20170911-3+462       2017-09-11
root@shelf1:~# diskinfo 
TYPE    DISK                    VID      PID              SIZE          RMV SSD
SCSI    c0t5000CCA05CDB8C6Cd0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDCE150d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDDE17Cd0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDE3DCCd0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDEE470d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDFCAD0d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDFFA34d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDE4710d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDF3E94d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDE2DD8d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDFBD90d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDD6994d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDBC198d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDEF18Cd0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CE01870d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDF3E90d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CE024B8d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDFA804d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CE01140d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDEDBF8d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDCDD04d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDEA3E4d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CE087E4d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDCEC70d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDE3400d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CCB0D1Cd0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDFD6ECd0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDF6CE0d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDCB2E0d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDD7744d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CD51290d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CBD5FD4d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDEA4B0d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CD9A354d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CD39D18d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
SCSI    c0t5000CCA05CDD9ED4d0   HGST     HUS724040ALS640  3726.02 GiB   no  no 
UNKNOWN c1t0d0                  INTEL    SSDSC2BB120G4     111.79 GiB   no  yes
UNKNOWN c1t1d0                  INTEL    SSDSC2BB120G4     111.79 GiB   no  yes
root@shelf1:~# zpool create tst3 c0t5000CCA05CD39D18d0 c0t5000CCA05CD9A354d0 c0t5000CCA05CDEA4B0d0 c0t5000CCA05CBD5FD4d0
root@shelf1:~# zpool list -v
NAME                      SIZE  ALLOC   FREE  EXPANDSZ   FRAG    CAP  DEDUP  HEALTH  ALTROOT
d36                      3.62T  2.61M  3.62T         -     0%     0%  1.00x  ONLINE  -
  c0t5000CCA05CDD9ED4d0  3.62T  2.61M  3.62T         -     0%     0%
rpool                     111G  20.8G  90.2G         -     1%    18%  1.00x  ONLINE  -
  c1t0d0                  111G  20.8G  90.2G         -     1%    18%
tst3                     14.5T    96K  14.5T         -     0%     0%  1.00x  ONLINE  -
  c0t5000CCA05CD39D18d0  3.62T    36K  3.62T         -     0%     0%
  c0t5000CCA05CD9A354d0  3.62T    24K  3.62T         -     0%     0%
  c0t5000CCA05CDEA4B0d0  3.62T      0  3.62T         -     0%     0%
  c0t5000CCA05CBD5FD4d0  3.62T    36K  3.62T         -     0%     0%

root@shelf1:~# sudo zfs create -o logbias=throughput -o sync=always -V 64M tst3/space4
root@shelf1:~# ls /dev/zvol/rdsk/tst3/space4
/dev/zvol/rdsk/tst3/space4
root@shelf1:~# sudo fio --filename=/dev/zvol/rdsk/tst3/space4 --sync=1 --rw=write --bs=1K --numjobs=8 --iodepth=1 --size=10MB --name=panic
panic: (g=0): rw=write, bs=1K-1K/1K-1K/1K-1K, ioengine=psync, iodepth=1
...
fio-2.10
clock setaffinity failed: No such process
clock setaffinity failed: No such process
clock setaffinity failed: Operation not supported
clock setaffinity failed: No such process
clock setaffinity failed: No such process
clock setaffinity failed: No such process
clock setaffinity failed: No such process
clock setaffinity failed: No such process
clock setaffinity failed: No such process
clock setaffinity failed: No such process
clock setaffinity failed: No such process
clock setaffinity failed: Operation not supported
clock setaffinity failed: Operation not supported
clock setaffinity failed: No such process
clock setaffinity failed: No such process
clock setaffinity failed: No such process
clock setaffinity failed: No such process
clock setaffinity failed: No such process
clock setaffinity failed: No such process
clock setaffinity failed: Operation not supported
clock setaffinity failed: No such process
clock setaffinity failed: No such process
Starting 8 processes
Jobs: 8 (f=8): [W(8)] [2.2% done] [0KB/368KB/0KB /s] [0/368/0 iops] [eta 03m:42s]
```

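For anyone who wants to retry this on other hardware, here is a minimal reproduction sketch distilled from the transcript above. The pool name (tst3), zvol name (space4), and disk IDs are simply the ones used in this run and are assumed to be replaced with local equivalents; it also assumes fio is installed.

```
#!/bin/sh
# Reproduction sketch, distilled from the transcript above (illumos/DilOS,
# whole SAS disks behind an LSI 3008). Disk IDs are from this particular
# box and must be replaced with local ones.

POOL=tst3
DISKS="c0t5000CCA05CD39D18d0 c0t5000CCA05CD9A354d0 \
       c0t5000CCA05CDEA4B0d0 c0t5000CCA05CBD5FD4d0"

# Striped pool across four whole disks.
zpool create "$POOL" $DISKS

# 64M zvol with forced synchronous writes, as in the original run.
zfs create -o logbias=throughput -o sync=always -V 64M "$POOL"/space4

# Synchronous 1K writes, 8 jobs, against the raw zvol device node.
fio --filename=/dev/zvol/rdsk/"$POOL"/space4 --sync=1 --rw=write \
    --bs=1K --numjobs=8 --iodepth=1 --size=10MB --name=panic
```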