Hi Kevin,

I'd be happy to help. I had to make a few assumptions, but here's an example. I 
assumed that you want to create and write files that are on average 10k and 
read files which are the same size. 

Here's a simple script which creates a set of files and then does 60% reads of 
those files, and 40% writes to another set of filenames:

<pre>
# Filebench workload: 60% reads / 40% writes of ~10k files.
# Tunable parameters — adjust to match the target environment.
set $dir=/tmp            # base directory for both filesets
set $nfiles=1000         # files per fileset
set $meandirwidth=10     # average directory fan-out
set $filesize=10k        # average file size
set $nthreads=2          # worker threads (scales parallelism, not the r/w ratio)
set $meaniosize=128k     # I/O size for whole-file writes

# Fileset of pre-created files to read from (prealloc populates it before the run),
# and a fileset of names to create/write into during the run.
# NOTE: each 'define' must be a single line — do not let mail clients wrap them.
define fileset name=readfileset,path=$dir,size=$filesize,entries=$nfiles,dirwidth=$meandirwidth,prealloc
define fileset name=writefileset,path=$dir,size=$filesize,entries=$nfiles,dirwidth=$meandirwidth

define process name=filereader,instances=1
{
  thread name=filereaderthread,memsize=10m,instances=$nthreads
  {
    # Each loop iteration: 2 create/write/close sequences (writes) ...
    flowop createfile name=createfile1,filesetname=writefileset,fd=1
    flowop writewholefile name=writewholefile1,iosize=$meaniosize,fd=1
    flowop closefile name=closefile1,fd=1
    flowop createfile name=createfile2,filesetname=writefileset,fd=1
    flowop writewholefile name=writewholefile2,iosize=$meaniosize,fd=1
    flowop closefile name=closefile2,fd=1
    # ... followed by 3 open/read/close sequences (reads) → 3:2 = 60r/40w mix.
    flowop openfile name=openfile3,filesetname=readfileset,fd=1
    flowop readwholefile name=readfile3,fd=1
    flowop closefile name=closefile3,fd=1
    flowop openfile name=openfile4,filesetname=readfileset,fd=1
    flowop readwholefile name=readfile4,fd=1
    flowop closefile name=closefile4,fd=1
    flowop openfile name=openfile5,filesetname=readfileset,fd=1
    flowop readwholefile name=readfile5,fd=1
    flowop closefile name=closefile5,fd=1
  }
}
</pre>

The script defaults to 2 threads, each performing 2 create/write/close 
sequences followed by 3 open/read/close sequences per loop iteration. Adding 
more threads increases parallelism without changing the 60/40 read/write ratio.

When I run this, I see the correct 60r/40w mix.

<pre>
/opt/filebench/bin/filebench
filebench> load kevin
 7353: 0.076: Kevin Version 1.0 personality successfully loaded
filebench> run 10
 7353: 0.138: Fileset writefileset: 1000 files, avg dir = 10.0, avg depth = 
3.0, mbytes=9
 7353: 5.664: Removed any existing fileset writefileset in 6 seconds
 7353: 5.665: Creating fileset writefileset...
 7353: 5.698: Preallocated 0 of 1000 of fileset writefileset in 1 seconds
 7353: 5.769: Fileset readfileset: 1000 files, avg dir = 10.0, avg depth = 3.0, 
mbytes=9
 7353: 8.000: Removed any existing fileset readfileset in 3 seconds
 7353: 8.001: Creating fileset readfileset...
 7353: 17.070: Preallocated 1000 of 1000 of fileset readfileset in 10 seconds
 7353: 17.072: Creating/pre-allocating files
 7353: 17.072: Starting 1 filereader instances
 7360: 18.081: Starting 2 filereaderthread threads
 7353: 21.091: Running...
 7353: 31.161: Run took 10 seconds...
 7353: 31.918: Per-Operation Breakdown
closefile5                 41ops/s   0.0mb/s      0.0ms/op       27us/op-cpu
readfile5                  41ops/s   0.4mb/s     12.5ms/op      346us/op-cpu
openfile5                  41ops/s   0.0mb/s      0.2ms/op       89us/op-cpu
closefile4                 41ops/s   0.0mb/s      0.0ms/op       27us/op-cpu
readfile4                  41ops/s   0.3mb/s     13.0ms/op      338us/op-cpu
openfile4                  41ops/s   0.0mb/s      0.1ms/op       90us/op-cpu
closefile3                 41ops/s   0.0mb/s      0.0ms/op       27us/op-cpu
readfile3                  41ops/s   0.4mb/s     17.8ms/op      357us/op-cpu
openfile3                  41ops/s   0.0mb/s      0.2ms/op       89us/op-cpu
closefile2                 41ops/s   0.0mb/s      0.0ms/op       52us/op-cpu
writewholefile2            41ops/s   0.4mb/s      0.4ms/op      380us/op-cpu
createfile2                41ops/s   0.0mb/s      0.9ms/op      495us/op-cpu
closefile1                 41ops/s   0.0mb/s      0.0ms/op       58us/op-cpu
writewholefile1            41ops/s   0.3mb/s      0.5ms/op      407us/op-cpu
createfile1                41ops/s   0.0mb/s      1.2ms/op      611us/op-cpu

 7353: 31.993: 
IO Summary:       6255 ops 612.8 ops/s, (123/82 r/w)   1.9mb/s,    838us 
cpu/op,   9.4ms latency
</pre>

Please let me know whether this is what you were looking for.

Regards,

Richard.
This message posted from opensolaris.org
_______________________________________________
perf-discuss mailing list
perf-discuss@opensolaris.org

Reply via email to