Okay, just for fun, here are some results with the new scheduler.
I injected periodic yields into the code to simulate the yielding
that would happen automatically if the code were using send and
receive (there's a sketch of that version at the end of this
post).  First the code:


import core.atomic;        // atomicOp
import std.concurrency;    // spawn, scheduler, FiberScheduler
import std.stdio;          // writefln, writeln

shared long count = 0;

// Module destructor: runs once at program shutdown and reports
// the final count.
shared static ~this() {
        writefln("count = %s", count);
}

void childThread() {
        foreach(i; 0 .. 1_000) {
                atomicOp!"+="(count, 1);
                // Manually yield every 100 iterations; with send and
                // receive this would happen implicitly on each receive.
                if (scheduler && 0 == i % 100)
                        scheduler.yield();
        }
}

void mainThread() {
        foreach(i; 0 .. 100_000) {
                // With no scheduler set, spawn creates a kernel
                // thread; with the FiberScheduler it creates a fiber.
                spawn(&childThread);
        }
}

void runFibers() {
        scheduler = new FiberScheduler;

        scheduler.start(() {
                mainThread();
        });
}

void main(string[] args) {
        if (args.length > 1 && args[1] == "threads")
                mainThread();
        else if (args.length > 1 && args[1] == "fibers")
                runFibers();
        else writeln("specify threads or fibers");
}


And the results:


$ time concurrency threads
count = 100000000

real    1m11.033s
user    1m23.944s
sys     0m29.272s

$ time concurrency fibers
count = 100000000

real    0m5.998s
user    0m3.536s
sys     0m2.455s


I've got to say that I was surprised by how fast 100,000 kernel
threads were for this task.  That's orders of magnitude beyond
what I'd consider a sane number of threads to create.
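
For comparison, here is a rough sketch of what I mean by the
yielding happening automatically: if each child blocked in
receive for its work instead of spinning, every receive that has
to wait would suspend that fiber and let another one run.  This
is just an illustration, not the benchmark above; the message
protocol (int ticks plus a bool stop signal) is made up.


import core.atomic : atomicOp;
import std.concurrency;
import std.stdio : writeln;

shared long count = 0;

void childThread() {
        bool running = true;
        while (running) {
                // Under a FiberScheduler, waiting here suspends this
                // fiber instead of blocking a kernel thread.
                receive(
                        (int n)     { atomicOp!"+="(count, n); },
                        (bool stop) { running = false; }
                );
        }
}

void mainThread() {
        auto tid = spawn(&childThread);
        foreach (i; 0 .. 1_000)
                tid.send(1);    // each message is one unit of work
        tid.send(true);         // tell the child to stop
}

void main() {
        scheduler = new FiberScheduler;
        scheduler.start(() {
                mainThread();
        });
        writeln("count = ", count);
}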
