Big refactoring step.
- Moves a lot of stuff from vibe.core.core to vibe.core.task
- Introduces TaskScheduler to unify the scheduling process
- Refines how tasks are scheduled and processed (can push to the front of the task queue and uses a marker task to keep track of the spot up to which to process)
- Starts to add proper support for task interrupts and timeouts by properly cancelling in-flight async operations
- Continues work on ManualEvent (still not functional for the shared case)
- Implements proper IP address parsing in NetworkAddress
This commit is contained in:
parent
c3857d1bc9
commit
3b0e4e0452
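The thread running through the whole diff below is a new calling convention for awaited operations: instead of naming a driver method as a string (`asyncAwait!"connectStream"(...)`), callers now pass the callback type plus two lambdas, one that starts the operation and one that cancels it, which is what lets interrupts and timeouts abort in-flight async work. A minimal standalone sketch of that convention; `startRead`, `cancelRead` and `await` are toy stand-ins, not the real vibe-core names:

---
import std.stdio;

alias IOCallback = void delegate(int result);

// Toy stand-ins for the event-driver entry points used in the diff.
void startRead(IOCallback cb) { cb(42); } // completes synchronously here
void cancelRead() { writeln("read cancelled"); }

// The new convention: one lambda starts the operation and receives the
// completion callback, a second one cancels the operation mid-flight.
int await(alias start, alias cancel)()
{
    int result;
    bool fired = false;
    start(delegate(int r) { result = r; fired = true; });
    if (!fired) cancel(); // the real code hibernates here and cancels on interrupt
    return result;
}

void main()
{
    writeln(await!(cb => startRead(cb), () => cancelRead())()); // prints 42
}
---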
@@ -859,6 +859,7 @@ private struct LogOutputRange {
 	void put(dchar ch)
 	{
+		static import std.utf;
 		if (ch < 128) put(cast(char)ch);
 		else {
 			char[4] buf;
@@ -14,6 +14,7 @@ import std.functional : toDelegate;
 import std.socket : AddressFamily, UnknownAddress;
 import vibe.core.log;
 import vibe.internal.async;
+import core.time : Duration;


 /**
@@ -29,13 +30,26 @@ NetworkAddress resolveHost(string host, AddressFamily address_family = AddressFa
 /// ditto
 NetworkAddress resolveHost(string host, ushort address_family, bool use_dns = true)
 {
+	import std.socket : parseAddress;
+	version (Windows) import std.c.windows.winsock : sockaddr_in, sockaddr_in6;
+	else import core.sys.posix.netinet.in_ : sockaddr_in, sockaddr_in6;
+
+	enforce(host.length > 0, "Host name must not be empty.");
+	if (host[0] == ':' || host[0] >= '0' && host[0] <= '9') {
+		auto addr = parseAddress(host);
+		enforce(address_family == AddressFamily.UNSPEC || addr.addressFamily == address_family);
 		NetworkAddress ret;
-		ret.family = address_family;
-		if (host == "127.0.0.1") {
-			ret.family = AddressFamily.INET;
-			ret.sockAddrInet4.sin_addr.s_addr = 0x0100007F;
-		} else assert(false);
+		ret.family = addr.addressFamily;
+		switch (addr.addressFamily) with(AddressFamily) {
+			default: throw new Exception("Unsupported address family");
+			case INET: *ret.sockAddrInet4 = *cast(sockaddr_in*)addr.name; break;
+			case INET6: *ret.sockAddrInet6 = *cast(sockaddr_in6*)addr.name; break;
+		}
 		return ret;
+	} else {
+		enforce(use_dns, "Malformed IP address string.");
+		assert(false, "DNS lookup not implemented."); // TODO
+	}
 }
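Since the new resolveHost treats any host beginning with ':' or a digit as an IP literal and hands it to std.socket.parseAddress, the detection rule can be exercised in isolation. A runnable sketch using only Phobos, independent of vibe.core.net:

---
import std.socket : parseAddress;
import std.stdio;

void main()
{
    foreach (host; ["127.0.0.1", "::1"]) {
        // same literal-detection rule as the new resolveHost
        if (host[0] == ':' || (host[0] >= '0' && host[0] <= '9')) {
            auto addr = parseAddress(host);
            writeln(host, " -> ", addr.addressFamily); // INET, then INET6
        }
    }
}
---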
@@ -103,7 +117,11 @@ TCPConnection connectTCP(NetworkAddress addr)

 	scope uaddr = new UnknownAddress;
 	addr.toUnknownAddress(uaddr);
-	auto result = eventDriver.asyncAwait!"connectStream"(uaddr);
+	// FIXME: make this interruptible
+	auto result = asyncAwaitUninterruptible!(ConnectCallback,
+		cb => eventDriver.connectStream(uaddr, cb)
+		//cb => eventDriver.cancelConnect(cb)
+	);
 	enforce(result[1] == ConnectStatus.connected, "Failed to connect to "~addr.toString()~": "~result[1].to!string);
 	return TCPConnection(result[0]);
 }
@@ -354,7 +372,10 @@ mixin(tracer);
 	// TODO: timeout!!
 	if (m_context.readBuffer.length > 0) return true;
 	auto mode = timeout <= 0.seconds ? IOMode.immediate : IOMode.once;
-	auto res = eventDriver.asyncAwait!"readSocket"(m_socket, m_context.readBuffer.peekDst(), mode);
+	auto res = asyncAwait!(IOCallback,
+		cb => eventDriver.readSocket(m_socket, m_context.readBuffer.peekDst(), mode, cb),
+		cb => eventDriver.cancelRead(m_socket)
+	);
 	logTrace("Socket %s, read %s bytes: %s", res[0], res[2], res[1]);

 	assert(m_context.readBuffer.length == 0);
@@ -403,7 +424,9 @@ mixin(tracer);
 	mixin(tracer);
 	if (bytes.length == 0) return;

-	auto res = eventDriver.asyncAwait!"writeSocket"(m_socket, bytes, IOMode.all);
+	auto res = asyncAwait!(IOCallback,
+		cb => eventDriver.writeSocket(m_socket, bytes, IOMode.all, cb),
+		cb => eventDriver.cancelWrite(m_socket));
+
 	switch (res[1]) {
 	default:
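The cancel lambdas added above exist because a task can be interrupted or time out while an operation is still pending; without cancellation, the driver's completion callback could fire after the waiter has already resumed. A standalone model of that hazard, with hypothetical names (`PendingRead` is not part of the commit):

---
import std.stdio;

struct PendingRead {
    void delegate(int) cb;
    bool active;
    void cancel() { active = false; writeln("in-flight read cancelled"); }
    void complete(int n) { if (active) cb(n); } // suppressed once cancelled
}

void main()
{
    PendingRead op;
    op.active = true;
    op.cb = delegate(int n) { writeln("read ", n, " bytes"); };

    op.cancel();     // task got interrupted while waiting
    op.complete(16); // late driver completion: callback does not fire
}
---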
@@ -126,7 +126,7 @@ class LocalTaskSemaphore
	//import vibe.utils.memory;

 	private {
-		struct Waiter {
+		static struct Waiter {
 			ManualEvent signal;
 			ubyte priority;
 			uint seq;
@@ -632,28 +632,126 @@ ManualEvent createManualEvent()
 {
 	return ManualEvent.init;
 }
+/// ditto
+shared(ManualEvent) createSharedManualEvent()
+{
+	return shared(ManualEvent).init;
+}

 /** A manually triggered cross-task event.

	Note: the ownership can be shared between multiple fibers and threads.
 */
 struct ManualEvent {
+	import core.thread : Thread;
+	import vibe.internal.async : asyncAwait, asyncAwaitUninterruptible;
+
+	private {
+		static struct Waiter {
+			Waiter* next;
+			immutable EventID event;
+			immutable EventDriver driver;
+			immutable Thread thread;
+			StackSList!ThreadWaiter tasks;
+		}
+		static struct ThreadWaiter {
+			ThreadWaiter* next;
+			Task task;
+			void delegate() @safe nothrow notifier;
+
+			void wait(void delegate() @safe nothrow del) @safe nothrow { assert(notifier is null); notifier = del; }
+			void cancel() @safe nothrow { notifier = null; }
+
+			void wait(void delegate() @safe nothrow del)
+			shared @safe nothrow {
+				notifier = del;
+				if (!next) eventDriver.waitForEvent(ms_threadEvent, &onEvent);
+			}
+
+			private void onEvent(EventID event)
+			@safe nothrow {
+				assert(event == ms_threadEvent);
+				notifier();
+			}
+		}
+		int m_emitCount;
+		Waiter* m_waiters;
+	}
+
+	// thread destructor in vibe.core.core will decrement the ref. count
+	package static EventID ms_threadEvent;
+
+	enum EmitMode {
+		single,
+		all
+	}
+
+	//@disable this(this);
+
+	deprecated("ManualEvent is always non-null!")
 	bool opCast() const nothrow { return true; }
-	int emitCount() const nothrow { return 0; }
-	int emit() nothrow { return 0; }
-	int wait() { assert(false); }
-	int wait(int) { import vibe.core.core : sleep; sleep(30.seconds); assert(false); }
-	int wait(Duration, int) { assert(false); }
-	int waitUninterruptible() nothrow { assert(false); }
-	int waitUninterruptible(int) nothrow { assert(false); }
-	int waitUninterruptible(Duration, int) nothrow { assert(false); }
-}
-/+interface ManualEvent {
+	deprecated("ManualEvent is always non-null!")
+	bool opCast() const shared nothrow { return true; }

 	/// A counter that is increased with every emit() call
-	@property int emitCount() const nothrow;
+	int emitCount() const nothrow { return m_emitCount; }
+	/// ditto
+	int emitCount() const shared nothrow { return atomicLoad(m_emitCount); }

 	/// Emits the signal, waking up all owners of the signal.
-	void emit() nothrow;
+	int emit(EmitMode mode = EmitMode.all)
+	shared nothrow {
+		import core.atomic : atomicOp, cas;
+
+		auto ec = atomicOp!"+="(m_emitCount, 1);
+		auto thisthr = Thread.getThis();
+
+		final switch (mode) {
+			case EmitMode.all:
+				// FIXME: would be nice to have atomicSwap instead
+				auto w = cast(Waiter*)atomicLoad(m_waiters);
+				if (w !is null && !cas(&m_waiters, cast(shared(Waiter)*)w, cast(shared(Waiter)*)null))
+					return ec;
+				while (w !is null) {
+					if (w.thread is thisthr) {
+						// Note: emitForThisThread can result in w getting deallocated at any
+						// time, so we need to copy any fields first
+						auto tasks = w.tasks;
+						w = w.next;
+						emitForThisThread(w.tasks.m_first, mode);
+					} else {
+						auto evt = w.event;
+						w = w.next;
+						eventDriver.triggerEvent(evt, true);
+					}
+				}
+				break;
+			case EmitMode.single:
+				assert(false);
+		}
+		return ec;
+	}
+	/// ditto
+	int emit(EmitMode mode = EmitMode.all)
+	nothrow {
+		auto ec = m_emitCount++;
+
+		final switch (mode) {
+			case EmitMode.all:
+				auto w = m_waiters;
+				m_waiters = null;
+				if (w !is null) {
+					assert(w.thread is Thread.getThis(), "Unshared ManualEvent has waiters in foreign thread!");
+					assert(w.next is null, "Unshared ManualEvent has waiters in multiple threads!");
+					emitForThisThread(w.tasks.m_first, EmitMode.all);
+				}
+				break;
+			case EmitMode.single:
+				assert(false);
+		}
+		return ec;
+	}

 	/** Acquires ownership and waits until the signal is emitted.
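The wait overloads that follow all use the same protocol: the caller snapshots emitCount, and wait(emit_count) only blocks while the counter has not moved past that snapshot, so an emit that happens between the snapshot and the wait is never lost. A tiny single-threaded model of that protocol (the hibernate step is elided; `Event` is a stand-in, not the library type):

---
import std.stdio;

struct Event {
    int emitCount; // monotonically increasing counter
    int emit() { return ++emitCount; }
    int wait(int reference) {
        // the real implementation hibernates until emit() moves the counter
        // past `reference`; in this model the emit has already happened
        assert(emitCount > reference);
        return emitCount;
    }
}

void main()
{
    Event ev;
    auto seen = ev.emitCount; // 0
    ev.emit();                // counter -> 1; a sleeping waiter would wake
    writeln(ev.wait(seen));   // 1: the wakeup is not lost despite waiting late
}
---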
@@ -661,7 +759,9 @@ struct ManualEvent {
	May throw an $(D InterruptException) if the task gets interrupted
	using $(D Task.interrupt()).
 	*/
-	void wait();
+	int wait() { return wait(this.emitCount); }
+	/// ditto
+	int wait() shared { return wait(this.emitCount); }

 	/** Acquires ownership and waits until the emit count differs from the given one.
@@ -669,7 +769,9 @@ struct ManualEvent {
	May throw an $(D InterruptException) if the task gets interrupted
	using $(D Task.interrupt()).
 	*/
-	int wait(int reference_emit_count);
+	int wait(int emit_count) { return wait(Duration.max, emit_count); }
+	/// ditto
+	int wait(int emit_count) shared { return wait(Duration.max, emit_count); }

 	/** Acquires ownership and waits until the emit count differs from the given one or until a timeout is reached.
@@ -677,32 +779,218 @@ struct ManualEvent {
	May throw an $(D InterruptException) if the task gets interrupted
	using $(D Task.interrupt()).
 	*/
-	int wait(Duration timeout, int reference_emit_count);
+	int wait(Duration timeout, int emit_count)
+	{
+		Waiter w;
+		ThreadWaiter tw;
+
+		int ec = this.emitCount;
+		while (ec <= emit_count) {
+			// wait for getting resumed directly by emit/emitForThisThread
+			acquireWaiter(w, tw);
+			asyncAwait!(void delegate() @safe nothrow,
+				cb => tw.wait(cb),
+				cb => tw.cancel()
+			)(timeout);
+			ec = this.emitCount;
+		}
+		return ec;
+	}
+	/// ditto
+	int wait(Duration timeout, int emit_count)
+	shared {
+		shared(Waiter) w;
+		ThreadWaiter tw;
+		acquireWaiter(w, tw);
+
+		int ec = this.emitCount;
+		while (ec <= emit_count) {
+			if (tw.next) {
+				// if we are not the first waiter for this thread,
+				// wait for getting resumed by emitForThisThread
+				asyncAwait!(void delegate() @safe nothrow,
+					cb => tw.wait(cb),
+					cb => tw.cancel()
+				)(timeout);
+				ec = this.emitCount;
+			} else {
+				// if we are the first waiter for this thread,
+				// wait for the thread event to get emitted
+				/*asyncAwait!(EventCallback, void delegate() @safe nothrow,
+					cb => eventDriver.waitForEvent(ms_threadEvent, cb),
+					cb => tw.wait(cb),
+					cb => eventDriver.cancelWaitForEvent(ms_threadEvent)
+				)(timeout);
+				emitForThisThread(w.waiters);
+				ec = this.emitCount;*/
+				assert(false);
+			}
+		}
+		return ec;
+	}

 	/** Same as $(D wait), but defers throwing any $(D InterruptException).

	This method is annotated $(D nothrow) at the expense that it cannot be
	interrupted.
 	*/
-	int waitUninterruptible(int reference_emit_count) nothrow;
+	int waitUninterruptible() nothrow { return waitUninterruptible(this.emitCount); }
+	///
+	int waitUninterruptible() shared nothrow { return waitUninterruptible(this.emitCount); }
 	/// ditto
-	int waitUninterruptible(Duration timeout, int reference_emit_count) nothrow;
-}+/
+	int waitUninterruptible(int emit_count) nothrow { return waitUninterruptible(Duration.max, emit_count); }
+	/// ditto
+	int waitUninterruptible(int emit_count) shared nothrow { return waitUninterruptible(Duration.max, emit_count); }
+	/// ditto
+	int waitUninterruptible(Duration timeout, int emit_count)
+	nothrow {
+		Waiter w;
+		ThreadWaiter tw;
+		acquireWaiter(w, tw);
+
+		int ec = this.emitCount;
+		while (ec <= emit_count) {
+			asyncAwaitUninterruptible!(void delegate(),
+				cb => tw.wait(cb),
+				cb => tw.cancel()
+			)(timeout);
+			ec = this.emitCount;
+		}
+		return ec;
+	}
+	/// ditto
+	int waitUninterruptible(Duration timeout, int emit_count)
+	shared nothrow {
+		/*Waiter w;
+		ThreadWaiter tw;
+		auto event = acquireWaiter(w, tw);
+
+		int ec = this.emitCount;
+		while (ec <= emit_count) {
+			asyncAwaitUninterruptible!(void delegate(),
+				cb => tw.wait(cb),
+				cb => tw.cancel()
+			)(timeout);
+			ec = this.emitCount;
+		}
+		return ec;*/
+		assert(false);
+	}
+
+	private static bool emitForThisThread(ThreadWaiter* waiters, EmitMode mode)
+	nothrow {
+		if (!waiters) return false;
+
+		final switch (mode) {
+			case EmitMode.all:
+				while (waiters) {
+					if (waiters.notifier !is null)
+						waiters.notifier();
+					waiters = waiters.next;
+				}
+				break;
+			case EmitMode.single:
+				assert(false, "TODO!");
+		}
+
+		return true;
+	}
+
+	private void acquireWaiter(ref Waiter w, ref ThreadWaiter tw)
+	nothrow {
+		// FIXME: this doesn't work! if task a starts to wait, task b afterwards, and then a finishes its wait before b, the Waiter will be dangling
+		tw.task = Task.getThis();
+
+		if (m_waiters) {
+			m_waiters.tasks.add(&tw);
+		} else {
+			m_waiters = &w;
+		}
+	}
+
+	private void acquireWaiter(ref shared(Waiter) w, ref ThreadWaiter tw)
+	nothrow shared {
+		tw.task = Task.getThis();
+
+		if (ms_threadEvent == EventID.init)
+			ms_threadEvent = eventDriver.createEvent();
+
+		if (m_waiters) {
+			//m_waiters.tasks.add(&tw);
+			assert(false);
+		} else {
+			m_waiters = &w;
+		}
+	}
+}
+
+
+private struct StackSList(T)
+{
+	import core.atomic : cas;
+
+	private T* m_first;
+
+	@property T* first() { return m_first; }
+	@property shared(T)* first() shared { return atomicLoad(m_first); }
+
+	void add(shared(T)* elem)
+	shared {
+		do elem.next = atomicLoad(m_first);
+		while (cas(&m_first, elem.next, elem));
+	}
+
+	void remove(shared(T)* elem)
+	shared {
+		while (true) {
+			shared(T)* w = atomicLoad(m_first), wp;
+			while (w !is elem) {
+				wp = w;
+				w = atomicLoad(w.next);
+			}
+			if (wp !is null) {
+				if (cas(&wp.next, w, w.next))
+					break;
+			} else {
+				if (cas(&m_first, w, w.next))
+					break;
+			}
+		}
+	}
+
+	bool empty() const { return m_first is null; }
+
+	void add(T* elem)
+	{
+		elem.next = m_first;
+		m_first = elem;
+	}
+
+	void remove(T* elem)
+	{
+		T* w = m_first, wp;
+		while (w !is elem) {
+			assert(w !is null);
+			wp = w;
+			w = w.next;
+		}
+		if (wp) wp.next = w.next;
+		else m_first = w.next;
+	}
+}

 private struct TaskMutexImpl(bool INTERRUPTIBLE) {
 	import std.stdio;
 	private {
 		shared(bool) m_locked = false;
 		shared(uint) m_waiters = 0;
-		ManualEvent m_signal;
+		shared(ManualEvent) m_signal;
 		debug Task m_owner;
 	}

 	void setup()
 	{
-		m_signal = createManualEvent();
+		m_signal = createSharedManualEvent();
 	}
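StackSList above has two flavors: CAS retry loops for the shared case and plain pointer swaps otherwise. The non-shared add/remove pair can be exercised on its own; a runnable sketch (the `Waiter`/`SList` names are illustrative, not the library's):

---
import std.stdio;

struct Waiter { Waiter* next; int id; } // node carries its own link: intrusive

struct SList {
    Waiter* first;

    void add(Waiter* w) { w.next = first; first = w; } // O(1) push, no allocation

    void remove(Waiter* w) {
        Waiter* cur = first, prev;
        while (cur !is w) { assert(cur !is null); prev = cur; cur = cur.next; }
        if (prev) prev.next = cur.next;
        else first = cur.next;
    }
}

void main()
{
    Waiter a = {id: 1};
    Waiter b = {id: 2};
    SList l;
    l.add(&a); l.add(&b);
    l.remove(&b);
    writeln(l.first.id); // 1
}
---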
@@ -751,13 +1039,13 @@ private struct RecursiveTaskMutexImpl(bool INTERRUPTIBLE) {
 		Task m_owner;
 		size_t m_recCount = 0;
 		shared(uint) m_waiters = 0;
-		ManualEvent m_signal;
+		shared(ManualEvent) m_signal;
 		@property bool m_locked() const { return m_recCount > 0; }
 	}

 	void setup()
 	{
-		m_signal = createManualEvent();
+		m_signal = createSharedManualEvent();
 		m_mutex = new core.sync.mutex.Mutex;
 	}
@@ -812,7 +1100,7 @@ private struct TaskConditionImpl(bool INTERRUPTIBLE, LOCKABLE) {
 	private {
 		LOCKABLE m_mutex;
-		ManualEvent m_signal;
+		shared(ManualEvent) m_signal;
 	}

 	static if (is(LOCKABLE == Lockable)) {
@@ -833,7 +1121,7 @@ private struct TaskConditionImpl(bool INTERRUPTIBLE, LOCKABLE) {
 	void setup(LOCKABLE mtx)
 	{
 		m_mutex = mtx;
-		m_signal = createManualEvent();
+		m_signal = createSharedManualEvent();
 	}

 	@property LOCKABLE mutex() { return m_mutex; }
@@ -955,9 +1243,9 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)

 	//Queue Events
 	/** The event used to wake reading tasks waiting for the lock while it is blocked. */
-	ManualEvent m_readyForReadLock;
+	shared(ManualEvent) m_readyForReadLock;
 	/** The event used to wake writing tasks waiting for the lock while it is blocked. */
-	ManualEvent m_readyForWriteLock;
+	shared(ManualEvent) m_readyForWriteLock;

 	/** The underlying mutex that gates the access to the shared state. */
 	Mutex m_counterMutex;
@@ -967,8 +1255,8 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
 	{
 		m_policy = policy;
 		m_counterMutex = new Mutex();
-		m_readyForReadLock = createManualEvent();
+		m_readyForReadLock = createSharedManualEvent();
-		m_readyForWriteLock = createManualEvent();
+		m_readyForWriteLock = createSharedManualEvent();
 	}

 	@disable this(this);
@@ -7,8 +7,8 @@
 */
 module vibe.core.task;

+import vibe.core.log;
 import vibe.core.sync;
-import vibe.internal.array : FixedRingBuffer;

 import core.thread;
 import std.exception;
@@ -58,29 +58,29 @@ struct Task {
 	}

 	nothrow {
-		@property inout(TaskFiber) fiber() inout @trusted { return cast(inout(TaskFiber))m_fiber; }
+		package @property inout(TaskFiber) taskFiber() inout @trusted { return cast(inout(TaskFiber))m_fiber; }
+		@property inout(Fiber) fiber() inout @trusted { return this.taskFiber; }
 		@property size_t taskCounter() const @safe { return m_taskCounter; }
-		@property inout(Thread) thread() inout @safe { if (m_fiber) return this.fiber.thread; return null; }
+		@property inout(Thread) thread() inout @safe { if (m_fiber) return this.taskFiber.thread; return null; }

 		/** Determines if the task is still running.
 		*/
 		@property bool running()
 		const @trusted {
 			assert(m_fiber !is null, "Invalid task handle");
-			try if (this.fiber.state == Fiber.State.TERM) return false; catch (Throwable) {}
+			try if (this.taskFiber.state == Fiber.State.TERM) return false; catch (Throwable) {}
-			return this.fiber.m_running && this.fiber.m_taskCounter == m_taskCounter;
+			return this.taskFiber.m_running && this.taskFiber.m_taskCounter == m_taskCounter;
 		}

 		// FIXME: this is not thread safe!
-		@property ref ThreadInfo tidInfo() { return m_fiber ? fiber.tidInfo : s_tidInfo; }
+		@property ref ThreadInfo tidInfo() { return m_fiber ? taskFiber.tidInfo : s_tidInfo; }
 		@property Tid tid() { return tidInfo.ident; }
 	}

 	T opCast(T)() const nothrow if (is(T == bool)) { return m_fiber !is null; }

-	void join() { if (running) fiber.join(); }
+	void join() { if (running) taskFiber.join(); }
-	void interrupt() { if (running) fiber.interrupt(); }
+	void interrupt() { if (running) taskFiber.interrupt(); }
-	void terminate() { if (running) fiber.terminate(); }

 	string toString() const { import std.string; return format("%s:%s", cast(void*)m_fiber, m_taskCounter); }
@@ -88,6 +88,155 @@ struct Task {
 	bool opEquals(in Task other) const nothrow @safe { return m_fiber is other.m_fiber && m_taskCounter == other.m_taskCounter; }
 }

+/**
+	Implements a task local storage variable.
+
+	Task local variables, similar to thread local variables, exist separately
+	in each task. Consequently, they do not need any form of synchronization
+	when accessing them.
+
+	Note, however, that each TaskLocal variable will increase the memory footprint
+	of any task that uses task local storage. There is also an overhead to access
+	TaskLocal variables, higher than for thread local variables, but generally
+	still O(1) (since actual storage acquisition is done lazily the first access
+	can require a memory allocation with unknown computational costs).
+
+	Notice:
+		FiberLocal instances MUST be declared as static/global thread-local
+		variables. Defining them as a temporary/stack variable will cause
+		crashes or data corruption!
+
+	Examples:
+		---
+		TaskLocal!string s_myString = "world";
+
+		void taskFunc()
+		{
+			assert(s_myString == "world");
+			s_myString = "hello";
+			assert(s_myString == "hello");
+		}
+
+		shared static this()
+		{
+			// both tasks will get independent storage for s_myString
+			runTask(&taskFunc);
+			runTask(&taskFunc);
+		}
+		---
+*/
+struct TaskLocal(T)
+{
+	private {
+		size_t m_offset = size_t.max;
+		size_t m_id;
+		T m_initValue;
+		bool m_hasInitValue = false;
+	}
+
+	this(T init_val) { m_initValue = init_val; m_hasInitValue = true; }
+
+	@disable this(this);
+
+	void opAssign(T value) { this.storage = value; }
+
+	@property ref T storage()
+	{
+		auto fiber = TaskFiber.getThis();
+
+		// lazily register in FLS storage
+		if (m_offset == size_t.max) {
+			static assert(T.alignof <= 8, "Unsupported alignment for type "~T.stringof);
+			assert(TaskFiber.ms_flsFill % 8 == 0, "Misaligned fiber local storage pool.");
+			m_offset = TaskFiber.ms_flsFill;
+			m_id = TaskFiber.ms_flsCounter++;
+
+			TaskFiber.ms_flsFill += T.sizeof;
+			while (TaskFiber.ms_flsFill % 8 != 0)
+				TaskFiber.ms_flsFill++;
+		}
+
+		// make sure the current fiber has enough FLS storage
+		if (fiber.m_fls.length < TaskFiber.ms_flsFill) {
+			fiber.m_fls.length = TaskFiber.ms_flsFill + 128;
+			fiber.m_flsInit.length = TaskFiber.ms_flsCounter + 64;
+		}
+
+		// return (possibly default initialized) value
+		auto data = fiber.m_fls.ptr[m_offset .. m_offset+T.sizeof];
+		if (!fiber.m_flsInit[m_id]) {
+			fiber.m_flsInit[m_id] = true;
+			import std.traits : hasElaborateDestructor, hasAliasing;
+			static if (hasElaborateDestructor!T || hasAliasing!T) {
+				void function(void[], size_t) destructor = (void[] fls, size_t offset){
+					static if (hasElaborateDestructor!T) {
+						auto obj = cast(T*)&fls[offset];
+						// call the destructor on the object if a custom one is declared
+						obj.destroy();
+					}
+					else static if (hasAliasing!T) {
+						// zero the memory to avoid false pointers
+						foreach (size_t i; offset .. offset + T.sizeof) {
+							ubyte* u = cast(ubyte*)&fls[i];
+							*u = 0;
+						}
+					}
+				};
+				FLSInfo fls_info;
+				fls_info.fct = destructor;
+				fls_info.offset = m_offset;
+
+				// make sure flsInfo has enough space
+				if (ms_flsInfo.length <= m_id)
+					ms_flsInfo.length = m_id + 64;
+
+				ms_flsInfo[m_id] = fls_info;
+			}
+
+			if (m_hasInitValue) {
+				static if (__traits(compiles, emplace!T(data, m_initValue)))
+					emplace!T(data, m_initValue);
+				else assert(false, "Cannot emplace initialization value for type "~T.stringof);
+			} else emplace!T(data);
+		}
+		return (cast(T[])data)[0];
+	}
+
+	alias storage this;
+}
+
+
+/** Exception that is thrown by Task.interrupt.
+*/
+class InterruptException : Exception {
+	this()
+	@safe nothrow {
+		super("Task interrupted.");
+	}
+}
+
+/**
+	High level state change events for a Task
+*/
+enum TaskEvent {
+	preStart, /// Just about to invoke the fiber which starts execution
+	postStart, /// After the fiber has returned for the first time (by yield or exit)
+	start, /// Just about to start execution
+	yield, /// Temporarily paused
+	resume, /// Resumed from a prior yield
+	end, /// Ended normally
+	fail /// Ended with an exception
+}
+
+alias TaskEventCallback = void function(TaskEvent, Task) nothrow;
+
+/**
+	The maximum combined size of all parameters passed to a task delegate
+
+	See_Also: runTask
+*/
+enum maxTaskParameterSize = 128;
+
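TaskLocal.storage above claims an 8-byte-aligned offset in a per-fiber byte pool the first time a variable is touched. A standalone model of just the slot-allocation arithmetic (`claimSlot` is a hypothetical helper, not part of the commit):

---
import std.stdio;

size_t flsFill; // next free offset in the pool (per thread in the real code)

size_t claimSlot(size_t size)
{
    assert(flsFill % 8 == 0, "Misaligned fiber local storage pool.");
    auto offset = flsFill;
    flsFill += size;
    while (flsFill % 8 != 0) flsFill++; // pad so the pool stays aligned
    return offset;
}

void main()
{
    writeln(claimSlot(4));  // 0
    writeln(claimSlot(16)); // 8: the 4-byte slot was padded up to 8
}
---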
 /** The base class for a task aka Fiber.
@@ -95,24 +244,135 @@ struct Task {
	This class represents a single task that is executed concurrently
	with other tasks. Each task is owned by a single thread.
 */
-class TaskFiber : Fiber {
+final package class TaskFiber : Fiber {
-	private {
+	static if ((void*).sizeof >= 8) enum defaultTaskStackSize = 16*1024*1024;
-		Thread m_thread;
+	else enum defaultTaskStackSize = 512*1024;
-		import std.concurrency : ThreadInfo;
-		ThreadInfo m_tidInfo;
-	}

-	protected {
+	private {
+		import std.concurrency : ThreadInfo;
+		import std.bitmanip : BitArray;
+
+		// task queue management (TaskScheduler.m_taskQueue)
+		TaskFiber m_prev, m_next;
+		TaskFiberQueue* m_queue;
+
+		Thread m_thread;
+		ThreadInfo m_tidInfo;
 		shared size_t m_taskCounter;
 		shared bool m_running;
+
+		Task[] m_joiners;
+
+		// task local storage
+		BitArray m_flsInit;
+		void[] m_fls;
+
+		bool m_interrupt; // Task.interrupt() is in progress
+
+		static TaskFiber ms_globalDummyFiber;
+		static FLSInfo[] ms_flsInfo;
+		static size_t ms_flsFill = 0; // thread-local
+		static size_t ms_flsCounter = 0;
 	}

-	protected this(void delegate() fun, size_t stack_size)
-	nothrow {
-		super(fun, stack_size);
+	package TaskFuncInfo m_taskFunc;
+	package __gshared size_t ms_taskStackSize = defaultTaskStackSize;
+	package __gshared debug TaskEventCallback ms_taskEventCallback;
+
+	this()
+	@trusted nothrow {
+		super(&run, ms_taskStackSize);
 		m_thread = Thread.getThis();
 	}
+
+	static TaskFiber getThis()
+	@safe nothrow {
+		auto f = () @trusted nothrow {
+			return Fiber.getThis();
+		} ();
+		if (f) return cast(TaskFiber)f;
+		if (!ms_globalDummyFiber) ms_globalDummyFiber = new TaskFiber;
+		return ms_globalDummyFiber;
+	}
+
+	@property State state()
+	@trusted const nothrow {
+		return super.state;
+	}
+
+	private void run()
+	{
+		import std.encoding : sanitize;
+		import std.concurrency : Tid;
+		import vibe.core.core : isEventLoopRunning, recycleFiber, taskScheduler, yield;
+
+		version (VibeDebugCatchAll) alias UncaughtException = Throwable;
+		else alias UncaughtException = Exception;
+		try {
+			while (true) {
+				while (!m_taskFunc.func) {
+					try {
+						Fiber.yield();
+					} catch (Exception e) {
+						logWarn("CoreTaskFiber was resumed with exception but without active task!");
+						logDiagnostic("Full error: %s", e.toString().sanitize());
+					}
+				}
+
+				auto task = m_taskFunc;
+				m_taskFunc = TaskFuncInfo.init;
+				Task handle = this.task;
+				try {
+					m_running = true;
+					scope(exit) m_running = false;
+
+					std.concurrency.thisTid; // force creation of a message box
+
+					debug if (ms_taskEventCallback) ms_taskEventCallback(TaskEvent.start, handle);
+					if (!isEventLoopRunning) {
+						logTrace("Event loop not running at task start - yielding.");
+						vibe.core.core.yield();
+						logTrace("Initial resume of task.");
+					}
+					task.func(&task);
+					debug if (ms_taskEventCallback) ms_taskEventCallback(TaskEvent.end, handle);
+				} catch (Exception e) {
+					debug if (ms_taskEventCallback) ms_taskEventCallback(TaskEvent.fail, handle);
+					import std.encoding;
+					logCritical("Task terminated with uncaught exception: %s", e.msg);
+					logDebug("Full error: %s", e.toString().sanitize());
+				}
+
+				this.tidInfo.ident = Tid.init; // clear message box
+
+				foreach (t; m_joiners) taskScheduler.switchTo(t);
+				m_joiners.length = 0;
+				m_joiners.assumeSafeAppend();
+
+				// make sure that the task does not get left behind in the yielder queue if terminated during yield()
+				if (m_queue) m_queue.remove(this);
+
+				// zero the fls initialization ByteArray for memory safety
+				foreach (size_t i, ref bool b; m_flsInit) {
+					if (b) {
+						if (ms_flsInfo !is null && ms_flsInfo.length >= i && ms_flsInfo[i] != FLSInfo.init)
+							ms_flsInfo[i].destroy(m_fls);
+						b = false;
+					}
+				}
+
+				// make the fiber available for the next task
+				recycleFiber(this);
+			}
+		} catch(UncaughtException th) {
+			logCritical("CoreTaskFiber was terminated unexpectedly: %s", th.msg);
+			logDiagnostic("Full error: %s", th.toString().sanitize());
+		}
+	}

 	/** Returns the thread that owns this task.
 	*/
 	@property inout(Thread) thread() inout @safe nothrow { return m_thread; }
@@ -123,31 +383,289 @@ class TaskFiber : Fiber {

 	@property ref inout(ThreadInfo) tidInfo() inout nothrow { return m_tidInfo; }

+	@property size_t taskCounter() const { return m_taskCounter; }
+
 	/** Blocks until the task has ended.
 	*/
-	abstract void join();
+	void join()
+	{
+		import vibe.core.core : hibernate, yield;

-	/** Throws an InterruptException within the task as soon as it calls a blocking function.
-	*/
-	abstract void interrupt();
+		auto caller = Task.getThis();
+		if (!m_running) return;
+		if (caller != Task.init) {
+			assert(caller.fiber !is this, "A task cannot join itself.");
+			assert(caller.thread is this.thread, "Joining tasks in foreign threads is currently not supported.");
+			m_joiners ~= caller;
+		} else assert(Thread.getThis() is this.thread, "Joining tasks in different threads is not yet supported.");
+		auto run_count = m_taskCounter;
+		if (caller == Task.init) vibe.core.core.yield(); // let the task continue (it must be yielded currently)
+		while (m_running && run_count == m_taskCounter) hibernate();
+	}

-	/** Terminates the task without notice as soon as it calls a blocking function.
+	/** Throws an InterruptException within the task as soon as it calls an interruptible function.
 	*/
-	abstract void terminate();
+	void interrupt()
+	{
+		import vibe.core.core : taskScheduler;
+
+		auto caller = Task.getThis();
+		if (caller != Task.init) {
+			assert(caller != this.task, "A task cannot interrupt itself.");
+			assert(caller.thread is this.thread, "Interrupting tasks in different threads is not yet supported.");
+		} else assert(Thread.getThis() is this.thread, "Interrupting tasks in different threads is not yet supported.");
+		m_interrupt = true;
+		taskScheduler.switchTo(this.task);
+	}

 	void bumpTaskCounter()
 	@safe nothrow {
 		import core.atomic : atomicOp;
 		() @trusted { atomicOp!"+="(this.m_taskCounter, 1); } ();
 	}
-}

-/** Exception that is thrown by Task.interrupt.
-*/
-class InterruptException : Exception {
-	this()
-	{
-		super("Task interrupted.");
-	}
-}
+
+	package void handleInterrupt(scope void delegate() @safe nothrow on_interrupt)
+	@safe nothrow {
+		assert(Task.getThis().fiber is this, "Handling interrupt outside of the corresponding fiber.");
+		if (m_interrupt && on_interrupt) {
+			m_interrupt = false;
+			on_interrupt();
+		}
+	}
+}
+
+package struct TaskFuncInfo {
+	void function(TaskFuncInfo*) func;
+	void[2*size_t.sizeof] callable = void;
+	void[maxTaskParameterSize] args = void;
+
+	@property ref C typedCallable(C)()
+	{
+		static assert(C.sizeof <= callable.sizeof);
+		return *cast(C*)callable.ptr;
+	}
+
+	@property ref A typedArgs(A)()
+	{
+		static assert(A.sizeof <= args.sizeof);
+		return *cast(A*)args.ptr;
+	}
+
+	void initCallable(C)()
+	{
+		C cinit;
+		this.callable[0 .. C.sizeof] = cast(void[])(&cinit)[0 .. 1];
+	}
+
+	void initArgs(A)()
+	{
+		A ainit;
+		this.args[0 .. A.sizeof] = cast(void[])(&ainit)[0 .. 1];
+	}
+}
+
+package struct TaskScheduler {
+	private {
+		TaskFiberQueue m_taskQueue;
+		TaskFiber m_markerTask;
+	}
+
+	@safe nothrow:
+
+	@disable this(this);
+
+	@property size_t scheduledTaskCount() const { return m_taskQueue.length; }
+
+	/** Lets other pending tasks execute before continuing execution.
+
+		This will give other tasks or events a chance to be processed. If
+		multiple tasks call this function, they will be processed in a
+		first-in-first-out manner.
+	*/
+	void yield()
+	{
+		auto t = Task.getThis();
+		if (t == Task.init) return; // not really a task -> no-op
+		if (t.taskFiber.m_queue !is null) return; // already scheduled to be resumed
+		m_taskQueue.insertBack(t.taskFiber);
+		doYield(t);
+	}
+
+	/** Holds execution until the task gets explicitly resumed.
+	*/
+	void hibernate()
+	{
+		import vibe.core.core : isEventLoopRunning;
+		auto thist = Task.getThis();
+		if (thist == Task.init) {
+			assert(!isEventLoopRunning, "Event processing outside of a fiber should only happen before the event loop is running!?");
+			static import vibe.core.core;
+			vibe.core.core.processEvents();
+		} else {
+			doYield(thist);
+		}
+	}
+
+	/** Immediately switches execution to the specified task without giving up execution privilege.
+
+		This forces immediate execution of the specified task. After the task finishes or yields,
+		the calling task will continue execution.
+	*/
+	void switchTo(Task t)
+	{
+		auto thist = Task.getThis();
+		auto thisthr = thist ? thist.taskFiber.thread : () @trusted { return Thread.getThis(); } ();
+		assert(t.thread is thisthr, "Cannot switch to a task that lives in a different thread.");
+		if (thist == Task.init) {
+			resumeTask(t);
+		} else {
+			assert(!thist.taskFiber.m_queue, "Task already scheduled to be resumed... FIXME: should this really be an error?");
+			m_taskQueue.insertFront(thist.taskFiber);
+			m_taskQueue.insertFront(t.taskFiber);
+			doYield(thist);
+		}
+	}
+
+	/** Runs any pending tasks.
+
+		A pending task is a task that is scheduled to be resumed by either `yield` or
+		`switchTo`.
+
+		Returns:
+			Returns `true` $(I iff) there are more tasks left to process.
+	*/
+	bool schedule()
+	{
+		if (!m_markerTask) m_markerTask = new TaskFiber; // TODO: avoid allocating an actual task here!
+
+		assert(Task.getThis() == Task.init, "TaskScheduler.schedule() may not be called from a task!");
+		assert(!m_markerTask.m_queue, "TaskScheduler.schedule() was called recursively!");
+
+		// keep track of the end of the queue, so that we don't process tasks
+		// infinitely
+		m_taskQueue.insertBack(m_markerTask);
+
+		while (m_taskQueue.front !is m_markerTask) {
+			auto t = m_taskQueue.front;
+			m_taskQueue.popFront();
+			resumeTask(t.task);
+
+			assert(!m_taskQueue.empty, "Marker task got removed from tasks queue!?");
+			if (m_taskQueue.empty) return false; // handle gracefully in release mode
+		}
+
+		// remove marker task
+		m_taskQueue.popFront();
+
+		return !m_taskQueue.empty;
+	}
+
+	/// Resumes execution of a yielded task.
+	private void resumeTask(Task t)
+	{
+		import std.encoding : sanitize;
+
+		auto uncaught_exception = () @trusted nothrow { return t.fiber.call!(Fiber.Rethrow.no)(); } ();
+
+		if (uncaught_exception) {
+			auto th = cast(Throwable)uncaught_exception;
+			assert(th, "Fiber returned exception object that is not a Throwable!?");
+
+			assert(() @trusted nothrow { return t.fiber.state; } () == Fiber.State.TERM);
+			logError("Task terminated with unhandled exception: %s", th.msg);
+			logDebug("Full error: %s", () @trusted { return th.toString().sanitize; } ());
+
+			// always pass Errors on
+			if (auto err = cast(Error)th) throw err;
+		}
+	}
+
+	private void doYield(Task task)
+	{
+		debug if (TaskFiber.ms_taskEventCallback) () @trusted { TaskFiber.ms_taskEventCallback(TaskEvent.yield, task); } ();
+		() @trusted { Fiber.yield(); } ();
+		debug if (TaskFiber.ms_taskEventCallback) () @trusted { TaskFiber.ms_taskEventCallback(TaskEvent.resume, task); } ();
+	}
+}
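schedule() above appends a marker before draining the queue so that tasks which re-queue themselves while being processed are deferred to the next schedule() call rather than being processed forever. The same trick with an ordinary list, as a runnable model:

---
import std.container.dlist : DList;
import std.stdio;

void main()
{
    auto queue = DList!string("a", "b");
    enum marker = "<marker>";
    queue.insertBack(marker); // remember where the queue ended at entry

    while (queue.front != marker) {
        auto t = queue.front;
        queue.removeFront();
        writeln("resuming ", t);
        if (t == "a") queue.insertBack("a'"); // task re-queues itself
    }
    queue.removeFront(); // drop the marker; "a'" waits for the next round
    writeln("left over: ", queue[]);
}
---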
+
+private struct TaskFiberQueue {
+	@safe nothrow:
+
+	TaskFiber first, last;
+	size_t length;
+
+	@disable this(this);
+
+	@property bool empty() const { return first is null; }
+
+	@property TaskFiber front() { return first; }
+
+	void insertFront(TaskFiber task)
+	{
+		assert(task.m_queue == null, "Task is already scheduled to be resumed!");
+		assert(task.m_prev is null, "Task has m_prev set without being in a queue!?");
+		assert(task.m_next is null, "Task has m_next set without being in a queue!?");
+		task.m_queue = &this;
+		if (empty) {
+			first = task;
+			last = task;
+		} else {
+			first.m_prev = task;
+			task.m_next = first;
+			first = task;
+		}
+		length++;
+	}
+
+	void insertBack(TaskFiber task)
+	{
+		assert(task.m_queue == null, "Task is already scheduled to be resumed!");
+		assert(task.m_prev is null, "Task has m_prev set without being in a queue!?");
+		assert(task.m_next is null, "Task has m_next set without being in a queue!?");
+		task.m_queue = &this;
+		if (empty) {
+			first = task;
+			last = task;
+		} else {
+			last.m_next = task;
+			task.m_prev = last;
+			last = task;
+		}
+		length++;
+	}
+
+	void popFront()
+	{
+		if (first is last) last = null;
+		assert(first && first.m_queue == &this, "Popping from empty or mismatching queue");
+		auto next = first.m_next;
+		if (next) next.m_prev = null;
+		first.m_next = null;
+		first.m_queue = null;
+		first = next;
+		length--;
+	}
+
+	void remove(TaskFiber task)
+	{
+		assert(task.m_queue is &this, "Task is not contained in task queue.");
+		if (task.m_prev) task.m_prev.m_next = task.m_next;
+		else first = task.m_next;
+		if (task.m_next) task.m_next.m_prev = task.m_prev;
+		else last = task.m_prev;
+		task.m_queue = null;
+		task.m_prev = null;
+		task.m_next = null;
+	}
+}
private struct FLSInfo {
|
||||||
|
void function(void[], size_t) fct;
|
||||||
|
size_t offset;
|
||||||
|
void destroy(void[] fls) {
|
||||||
|
fct(fls, offset);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
|
@@ -2,15 +2,39 @@ module vibe.internal.async;

 import std.traits : ParameterTypeTuple;
 import std.typecons : tuple;
-import vibe.core.core;
+import vibe.core.core : hibernate, switchToTask;
+import vibe.core.task : InterruptException, Task;
 import vibe.core.log;
 import core.time : Duration, seconds;


-auto asyncAwait(string method, Object, ARGS...)(Object object, ARGS args)
+auto asyncAwait(Callback, alias action, alias cancel, string func = __FUNCTION__)()
+if (!is(Object == Duration)) {
+	return asyncAwaitImpl!(true, Callback, action, cancel, func)(Duration.max);
+}
+
+auto asyncAwait(Callback, alias action, alias cancel, string func = __FUNCTION__)(Duration timeout)
 {
-	alias CB = ParameterTypeTuple!(__traits(getMember, Object, method))[$-1];
-	alias CBTypes = ParameterTypeTuple!CB;
+	return asyncAwaitImpl!(true, Callback, action, cancel, func)(timeout);
+}
+
+auto asyncAwaitUninterruptible(Callback, alias action, string func = __FUNCTION__)()
+nothrow {
+	return asyncAwaitImpl!(false, Callback, action, (cb) { assert(false); }, func)(Duration.max);
+}
+
+auto asyncAwaitUninterruptible(Callback, alias action, alias cancel, string func = __FUNCTION__)(Duration timeout)
+nothrow {
+	assert(timeout >= 0.seconds);
+	asyncAwaitImpl!(false, Callback, action, cancel, func)(timeout);
+}
+
+private auto asyncAwaitImpl(bool interruptible, Callback, alias action, alias cancel, string func)(Duration timeout)
+@safe if (!is(Object == Duration)) {
+	alias CBTypes = ParameterTypeTuple!Callback;
+
+	assert(timeout >= 0.seconds);
+	assert(timeout == Duration.max, "TODO!");

 	bool fired = false;
 	CBTypes ret;
@@ -21,25 +45,28 @@ auto asyncAwait(string method, Object, ARGS...)(Object object, ARGS args)
 		logTrace("Got result.");
 		fired = true;
 		ret = params;
-		if (t != Task.init)
-			resumeTask(t);
+		if (t != Task.init) switchToTask(t);
 	}

-	logTrace("Calling %s...", method);
-	__traits(getMember, object, method)(args, &callback);
+	scope cbdel = &callback;
+
+	logTrace("Calling async function in "~func);
+	action(cbdel);
 	if (!fired) {
 		logTrace("Need to wait...");
 		t = Task.getThis();
-		do yieldForEvent();
-		while (!fired);
+		do {
+			static if (interruptible) {
+				bool interrupted = false;
+				hibernate(() @safe nothrow {
+					cancel(cbdel);
+					interrupted = true;
+				});
+				if (interrupted)
+					throw new InterruptException; // FIXME: the original operation needs to be stopped! or the callback will still be called
+			} else hibernate();
+		} while (!fired);
 	}
 	logTrace("Return result.");
 	return tuple(ret);
 }

-auto asyncAwait(string method, Object, ARGS...)(Duration timeout, Object object, ARGS args)
-{
-	assert(timeout >= 0.seconds);
-	if (timeout == Duration.max) return asyncAwait(object, args);
-	else assert(false, "TODO!");
-}
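asyncAwaitImpl above has three outcomes: the action completes synchronously, the task hibernates until the callback fires, or an interrupt runs the cancel path and, in the interruptible variant, surfaces as an InterruptException. A compact standalone model of that control flow (`awaitModel` is hypothetical):

---
import std.stdio;

class InterruptException : Exception {
    this() @safe nothrow { super("Task interrupted."); }
}

int awaitModel(bool interruptible)(bool completesSynchronously, bool interrupted)
{
    int result; bool fired;
    void callback(int r) { result = r; fired = true; }

    if (completesSynchronously) callback(1); // action(cbdel) fired inline

    if (!fired) {
        static if (interruptible) {
            if (interrupted) throw new InterruptException; // cancel(cbdel) ran
        }
        callback(2); // stand-in for the event that ends hibernate()
    }
    return result;
}

void main()
{
    writeln(awaitModel!true(true, false));  // 1: no wait needed
    writeln(awaitModel!true(false, false)); // 2: waited, then resumed
    try awaitModel!true(false, true);
    catch (InterruptException) writeln("interrupted while waiting");
}
---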