On Monday, 28 October 2024 at 01:06:58 UTC, Kyle Ingraham wrote:
...
The second, though, only achieves ~20k requests per second. In
that demo I try to make vibe.d's concurrency system available
during request handling. NGINX Unit's event loop is run in its
own thread. When requests arrive, Unit sends them to the main
thread for handling on vibe.d's event loop. I've tried a few
methods to increase performance...
Apparently, vibe.d's event loop is not fully compatible with
NGINX Unit's loop, causing performance loss. I wonder if it would
be wise to use something like an IntrusiveQueue or task pool to
make it compatible? For example, something like this:
```d
alias IQ = IntrusiveQueue;

/// Fixed-capacity single-producer/single-consumer ring buffer.
///
/// One producer thread may call `enqueue`/`push` while one consumer thread
/// calls `dequeue`/`fetch`; acquire/release ordering on `head` and `tail`
/// makes that pairing safe. There is no compare-and-swap loop, so the queue
/// is NOT safe for multiple producers or multiple consumers.
///
/// One slot is always kept empty to distinguish "full" from "empty", so a
/// queue constructed with capacity N holds at most N - 1 items.
struct IntrusiveQueue(T)
{
    import core.atomic;
    private {
        T[] buffer;        // backing storage, length == capacity
        size_t head, tail; // consumer reads at head; producer writes at tail
        alias acq = MemoryOrder.acq;
        alias rel = MemoryOrder.rel;
    }
    size_t capacity;

    /// Allocates `capacity` slots (of which capacity - 1 are usable).
    this(size_t capacity) {
        assert(capacity > 0, "IntrusiveQueue capacity must be non-zero");
        this.capacity = capacity;
        buffer.length = capacity;
    }

    alias push = enqueue;

    /// Producer side: appends `item`.
    /// Returns: false when the queue is full or has no storage
    /// (default-constructed with capacity 0).
    bool enqueue(T item) {
        if (capacity == 0)
            return false; // guard: `% 0` below would crash
        auto currTail = tail.atomicLoad!acq;
        auto nextTail = (currTail + 1) % capacity;
        if (nextTail == head.atomicLoad!acq)
            return false; // full: advancing would overtake the consumer
        buffer[currTail] = item;
        atomicStore!rel(tail, nextTail); // publish only after the slot is written
        return true;
    }

    alias fetch = dequeue;

    /// Consumer side: removes the oldest item into `item`.
    /// Returns: false when the queue is empty or has no storage.
    bool dequeue(ref T item) {
        if (capacity == 0)
            return false; // guard: `% 0` below would crash
        auto currHead = head.atomicLoad!acq;
        if (currHead == tail.atomicLoad!acq)
            return false; // empty
        auto nextHead = (currHead + 1) % capacity; // was misnamed `nextTail`
        item = buffer[currHead];
        buffer[currHead] = T.init; // drop the slot's reference so the GC can reclaim it
        atomicStore!rel(head, nextHead);
        return true;
    }
}
/// Verifies FIFO order and that both push and fetch report success.
unittest
{
    enum start = 41;
    auto queue = IQ!int(10);

    // Both pushes must succeed: a capacity-10 queue holds up to 9 items.
    assert(queue.push(start));
    assert(queue.push(start + 1));

    int item;
    // The fetches themselves must succeed; the original wrapped these in
    // `if`, so a failed fetch silently skipped the assertion.
    assert(queue.fetch(item));
    assert(item == start);
    assert(queue.fetch(item));
    assert(item == start + 1);

    // Queue is now drained.
    assert(!queue.fetch(item));
}
```
SDB@79