From 7e2d1dd038035f01216a8fc82422723e6ca20588 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=B6nke=20Ludwig?= Date: Tue, 1 Mar 2016 20:30:42 +0100 Subject: [PATCH] Initial commit. The library is able to support simple TCP servers in the current state. The API is still mostly compatible with mainline vibe.d, but the driver system has been replaced by the eventcore library and sockets/files/timers/... are now structs with automatic reference counting instead of GC-collected classes. The stream interfaces have been removed for now. --- .gitignore | 1 + dub.sdl | 7 + dub.selections.json | 6 + examples/bench-dummy-http-server/dub.json | 6 + examples/bench-dummy-http-server/source/app.d | 300 +++ source/vibe/core/args.d | 225 ++ source/vibe/core/concurrency.d | 1190 +++++++++++ source/vibe/core/connectionpool.d | 149 ++ source/vibe/core/core.d | 1844 +++++++++++++++++ source/vibe/core/file.d | 638 ++++++ source/vibe/core/log.d | 879 ++++++++ source/vibe/core/net.d | 540 +++++ source/vibe/core/path.d | 25 + source/vibe/core/sync.d | 1346 ++++++++++++ source/vibe/core/task.d | 153 ++ source/vibe/internal/array.d | 634 ++++++ source/vibe/internal/async.d | 45 + source/vibe/internal/hashmap.d | 375 ++++ source/vibe/internal/memory.d | 872 ++++++++ source/vibe/internal/string.d | 235 +++ source/vibe/internal/traits.d | 384 ++++ source/vibe/internal/typetuple.d | 123 ++ 22 files changed, 9977 insertions(+) create mode 100644 .gitignore create mode 100644 dub.sdl create mode 100644 dub.selections.json create mode 100644 examples/bench-dummy-http-server/dub.json create mode 100644 examples/bench-dummy-http-server/source/app.d create mode 100644 source/vibe/core/args.d create mode 100644 source/vibe/core/concurrency.d create mode 100644 source/vibe/core/connectionpool.d create mode 100644 source/vibe/core/core.d create mode 100644 source/vibe/core/file.d create mode 100644 source/vibe/core/log.d create mode 100644 source/vibe/core/net.d create mode 100644 source/vibe/core/path.d create mode 100644 source/vibe/core/sync.d create mode 100644 source/vibe/core/task.d create mode 100644 source/vibe/internal/array.d create mode 100644 source/vibe/internal/async.d create mode 100644 source/vibe/internal/hashmap.d create mode 100644 source/vibe/internal/memory.d create mode 100644 source/vibe/internal/string.d create mode 100644 source/vibe/internal/traits.d create mode 100644 source/vibe/internal/typetuple.d diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..38bd39f --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +.dub diff --git a/dub.sdl b/dub.sdl new file mode 100644 index 0000000..3800af6 --- /dev/null +++ b/dub.sdl @@ -0,0 +1,7 @@ +name "vibe-core" +description "The I/O core library of vibe.d." +authors "Sönke Ludwig" +copyright "Copyright © 2016, rejectedsoftware e.K."
+license "MIT" + +dependency "eventcore" version="*" diff --git a/dub.selections.json b/dub.selections.json new file mode 100644 index 0000000..7144c73 --- /dev/null +++ b/dub.selections.json @@ -0,0 +1,6 @@ +{ + "fileVersion": 1, + "versions": { + "eventcore": "~master" + } +} diff --git a/examples/bench-dummy-http-server/dub.json b/examples/bench-dummy-http-server/dub.json new file mode 100644 index 0000000..348baaa --- /dev/null +++ b/examples/bench-dummy-http-server/dub.json @@ -0,0 +1,6 @@ +{ + "name": "bench-http-server", + "dependencies": { + "vibe-core": {"path": "../../"} + } +} diff --git a/examples/bench-dummy-http-server/source/app.d b/examples/bench-dummy-http-server/source/app.d new file mode 100644 index 0000000..b2b5a93 --- /dev/null +++ b/examples/bench-dummy-http-server/source/app.d @@ -0,0 +1,300 @@ +import vibe.core.core; +import vibe.core.log; +import vibe.core.net; +//import vibe.stream.operations; + +import std.functional : toDelegate; + +void main() +{ + void staticAnswer(TCPConnection conn) + nothrow @safe { + try { + while (!conn.empty) { + while (true) { + CountingRange r; + conn.readLine(r); + if (!r.count) break; + } + conn.write(cast(const(ubyte)[])"HTTP/1.1 200 OK\r\nContent-Length: 13\r\nContent-Type: text/plain\r\n\r\nHello, World!"); + conn.flush(); + } + } catch (Exception e) { + scope (failure) assert(false); + logError("Error processing request: %s", e.msg); + } + } + + auto listener = listenTCP(8080, &staticAnswer, "127.0.0.1"); + + runEventLoop(); +} + +struct CountingRange { + @safe nothrow @nogc: + ulong count = 0; + void put(ubyte) { count++; } + void put(in ubyte[] arr) { count += arr.length; } +} + + +import std.range.primitives : isOutputRange; + +/** + Reads and returns a single line from the stream. + + Throws: + An exception if either the stream end was hit without hitting a newline first, or + if more than max_bytes have been read from the stream. +*/ +ubyte[] readLine(InputStream)(InputStream stream, size_t max_bytes = size_t.max, string linesep = "\r\n", Allocator alloc = defaultAllocator()) /*@ufcs*/ +{ + auto output = AllocAppender!(ubyte[])(alloc); + output.reserve(max_bytes < 64 ? max_bytes : 64); + readLine(stream, output, max_bytes, linesep); + return output.data(); +} +/// ditto +void readLine(InputStream, OutputStream)(InputStream stream, OutputStream dst, size_t max_bytes = size_t.max, string linesep = "\r\n") +{ + import vibe.stream.wrapper; + auto dstrng = StreamOutputRange(dst); + readLine(stream, dstrng, max_bytes, linesep); +} +/// ditto +void readLine(R, InputStream)(InputStream stream, ref R dst, size_t max_bytes = size_t.max, string linesep = "\r\n") + if (isOutputRange!(R, ubyte)) +{ + readUntil(stream, dst, cast(const(ubyte)[])linesep, max_bytes); +} + + +/** + Reads all data of a stream until the specified end marker is detected. + + Params: + stream = The input stream which is searched for end_marker + end_marker = The byte sequence which is searched in the stream + max_bytes = An optional limit of how much data is to be read from the + input stream; if the limit is reached before hitting the end + marker, an exception is thrown. + alloc = An optional allocator that is used to build the result string + in the string variant of this function + dst = The output stream, to which the prefix to the end marker of the + input stream is written + + Returns: + The string variant of this function returns the complete prefix to the + end marker of the input stream, excluding the end marker itself.
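+
+	A minimal usage sketch (hedged; `conn` stands for any stream type accepted
+	by these templates, such as the `TCPConnection` used in `main` above):
+	---
+	// read one CRLF-terminated header line, excluding the marker itself
+	ubyte[] line = readUntil(conn, cast(const(ubyte)[])"\r\n", 4096);
+	---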
+ + Throws: + An exception if either the stream end was hit without hitting a marker + first, or if more than max_bytes have been read from the stream in + case of max_bytes != 0. + + Remarks: + This function uses an algorithm inspired by the + $(LINK2 http://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string_search_algorithm, + Boyer-Moore string search algorithm). However, contrary to the original + algorithm, it will scan the whole input string exactly once, without + jumping over portions of it. This allows the algorithm to work with + constant memory requirements and without the memory copies that would + be necessary for streams that do not hold their complete data in + memory. + + The current implementation has a run time complexity of O(n*m+m²) and + O(n+m) in typical cases, with n being the length of the scanned input + string and m the length of the marker. +*/ +ubyte[] readUntil(InputStream)(InputStream stream, in ubyte[] end_marker, size_t max_bytes = size_t.max, Allocator alloc = defaultAllocator()) /*@ufcs*/ +{ + auto output = AllocAppender!(ubyte[])(alloc); + output.reserve(max_bytes < 64 ? max_bytes : 64); + readUntil(stream, output, end_marker, max_bytes); + return output.data(); +} +/// ditto +void readUntil(InputStream, OutputStream)(InputStream stream, OutputStream dst, in ubyte[] end_marker, ulong max_bytes = ulong.max) /*@ufcs*/ +{ + import vibe.stream.wrapper; + auto dstrng = StreamOutputRange(dst); + readUntil(stream, dstrng, end_marker, max_bytes); +} +/// ditto +void readUntil(R, InputStream)(InputStream stream, ref R dst, in ubyte[] end_marker, ulong max_bytes = ulong.max) /*@ufcs*/ + if (isOutputRange!(R, ubyte)) +{ + assert(max_bytes > 0 && end_marker.length > 0); + + if (end_marker.length <= 2) + readUntilSmall(stream, dst, end_marker, max_bytes); + else + readUntilGeneric(stream, dst, end_marker, max_bytes); +} + +private void readUntilSmall(R, InputStream)(InputStream stream, ref R dst, in ubyte[] end_marker, ulong max_bytes = ulong.max) +@safe { + import std.algorithm.comparison : min, max; + import std.algorithm.searching : countUntil; + + assert(end_marker.length >= 1 && end_marker.length <= 2); + + size_t nmatched = 0; + size_t nmarker = end_marker.length; + + while (true) { + enforce(!stream.empty, "Reached EOF while searching for end marker."); + enforce(max_bytes > 0, "Reached maximum number of bytes while searching for end marker."); + auto max_peek = max(max_bytes, max_bytes+nmarker); // account for integer overflow + auto pm = stream.peek()[0 .. min($, max_bytes)]; + if (!pm.length) { // no peek support - inefficient route + ubyte[2] buf = void; + auto l = nmarker - nmatched; + stream.read(buf[0 .. l]); + foreach (i; 0 .. l) { + if (buf[i] == end_marker[nmatched]) { + nmatched++; + } else if (buf[i] == end_marker[0]) { + foreach (j; 0 .. nmatched) dst.put(end_marker[j]); + nmatched = 1; + } else { + foreach (j; 0 .. nmatched) dst.put(end_marker[j]); + nmatched = 0; + dst.put(buf[i]); + } + if (nmatched == nmarker) return; + } + } else { + auto idx = pm.countUntil(end_marker[0]); + if (idx < 0) { + dst.put(pm); + max_bytes -= pm.length; + stream.skip(pm.length); + } else { + dst.put(pm[0 .. 
idx]); + stream.skip(idx+1); + if (nmarker == 2) { + ubyte[1] next; + stream.read(next); + if (next[0] == end_marker[1]) + return; + dst.put(end_marker[0]); + dst.put(next[0]); + } else return; + } + } + } +} + +private final class Buffer { ubyte[64*1024-4*size_t.sizeof] bytes = void; } // 64k - some headroom for + +private void readUntilGeneric(R, InputStream)(InputStream stream, ref R dst, in ubyte[] end_marker, ulong max_bytes = ulong.max) /*@ufcs*/ + if (isOutputRange!(R, ubyte)) +{ + import std.algorithm.comparison : min; + // allocate internal jump table to optimize the number of comparisons + size_t[8] nmatchoffsetbuffer = void; + size_t[] nmatchoffset; + if (end_marker.length <= nmatchoffsetbuffer.length) nmatchoffset = nmatchoffsetbuffer[0 .. end_marker.length]; + else nmatchoffset = new size_t[end_marker.length]; + + // precompute the jump table + nmatchoffset[0] = 0; + foreach( i; 1 .. end_marker.length ){ + nmatchoffset[i] = i; + foreach_reverse( j; 1 .. i ) + if( end_marker[j .. i] == end_marker[0 .. i-j] ){ + nmatchoffset[i] = i-j; + break; + } + assert(nmatchoffset[i] > 0 && nmatchoffset[i] <= i); + } + + size_t nmatched = 0; + scope bufferobj = new Buffer; // FIXME: use heap allocation + auto buf = bufferobj.bytes[]; + + ulong bytes_read = 0; + + void skip2(size_t nbytes) + { + bytes_read += nbytes; + stream.skip(nbytes); + } + + while( !stream.empty ){ + enforce(bytes_read < max_bytes, "Reached byte limit before reaching end marker."); + + // try to get as much data as possible, either by peeking into the stream or + // by reading as much as is guaranteed to not exceed the end marker length; + // the block size is also always limited by the max_bytes parameter. + size_t nread = 0; + auto least_size = stream.leastSize(); // NOTE: blocks until data is available + auto max_read = max_bytes - bytes_read; + auto str = stream.peek(); // try to get some data for free + if( str.length == 0 ){ // if not, read as much as possible without reading past the end + nread = min(least_size, end_marker.length-nmatched, buf.length, max_read); + stream.read(buf[0 .. nread]); + str = buf[0 .. nread]; + bytes_read += nread; + } else if( str.length > max_read ){ + str.length = cast(size_t)max_read; + } + + // remember how much of the marker was already matched before processing the current block + size_t nmatched_start = nmatched; + + // go through the current block trying to match the marker + size_t i = 0; + for (i = 0; i < str.length; i++) { + auto ch = str[i]; + // if we have a mismatch, use the jump table to try other possible prefixes + // of the marker + while( nmatched > 0 && ch != end_marker[nmatched] ) + nmatched -= nmatchoffset[nmatched]; + + // if we then have a match, increase the match count and test for full match + if (ch == end_marker[nmatched]) + if (++nmatched == end_marker.length) { + i++; + break; + } + } + + + // write out any false match part of previous blocks + if( nmatched_start > 0 ){ + if( nmatched <= i ) dst.put(end_marker[0 .. nmatched_start]); + else dst.put(end_marker[0 .. nmatched_start-nmatched+i]); + } + + // write out any unmatched part of the current block + if( nmatched < i ) dst.put(str[0 ..
i-nmatched]); + + // got a full match => out + if (nmatched >= end_marker.length) { + // in case of a full match skip data in the stream until the end of + // the marker + skip2(i - nread); + return; + } + + // otherwise skip this block in the stream + skip2(str.length - nread); + } + + enforce(false, "Reached EOF before reaching end marker."); +} + +static if (!is(typeof(TCPConnection.init.skip(0)))) +{ + private void skip(ref TCPConnection str, ulong count) + { + import std.algorithm.comparison : min; + ubyte[156] buf = void; + while (count > 0) { + auto n = min(buf.length, count); + str.read(buf[0 .. n]); + count -= n; + } + } +} diff --git a/source/vibe/core/args.d b/source/vibe/core/args.d new file mode 100644 index 0000000..5882782 --- /dev/null +++ b/source/vibe/core/args.d @@ -0,0 +1,225 @@ +/** + Parses and allows querying the command line arguments and configuration + file. + + The optional configuration file (vibe.conf) is a JSON file, containing an + object with the keys corresponding to option names, and values corresponding + to their values. It is searched for in the local directory, user's home + directory, or /etc/vibe/ (POSIX only), whichever is found first. + + Copyright: © 2012-2016 RejectedSoftware e.K. + License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file. + Authors: Sönke Ludwig, Vladimir Panteleev +*/ +module vibe.core.args; + +import vibe.core.log; +//import vibe.data.json; + +import std.algorithm : any, map, sort; +import std.array : array, join, replicate, split; +import std.exception; +import std.file; +import std.getopt; +import std.path : buildPath; +import std.string : format, stripRight, wrap; + +import core.runtime; + + +/** + Finds and reads an option from the configuration file or command line. + + Command line options take precedence over configuration file entries. + + Params: + names = Option names. Separate multiple name variants with "|", + as for $(D std.getopt). + pvalue = Pointer to store the value. Unchanged if value was not found. + help_text = Text to be displayed when the application is run with + --help. + + Returns: + $(D true) if the value was found, $(D false) otherwise. + + See_Also: readRequiredOption +*/ +bool readOption(T)(string names, T* pvalue, string help_text) +{ + // May happen due to http://d.puremagic.com/issues/show_bug.cgi?id=9881 + if (g_args is null) init(); + + OptionInfo info; + info.names = names.split("|").sort!((a, b) => a.length < b.length)().array(); + info.hasValue = !is(T == bool); + info.helpText = help_text; + assert(!g_options.any!(o => o.names == info.names)(), "readOption() may only be called once per option name."); + g_options ~= info; + + immutable olen = g_args.length; + getopt(g_args, getoptConfig, names, pvalue); + if (g_args.length < olen) return true; + + /*if (g_haveConfig) { + foreach (name; info.names) + if (auto pv = name in g_config) { + *pvalue = pv.to!T; + return true; + } + }*/ + + return false; +} + + +/** + The same as readOption, but throws an exception if the given option is missing. + + See_Also: readOption +*/ +T readRequiredOption(T)(string names, string help_text) +{ + string formattedNames() { + return names.split("|").map!(s => s.length == 1 ? "-" ~ s : "--" ~ s).join("/"); + } + T ret; + enforce(readOption(names, &ret, help_text) || g_help, + format("Missing mandatory option %s.", formattedNames())); + return ret; +} + + +/** + Prints a help screen consisting of all options encountered in readOption calls.
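+
+	A hedged usage sketch (the option name is illustrative; the help screen is
+	normally triggered through `finalizeCommandLineOptions`):
+	---
+	string bind = "127.0.0.1";
+	readOption("bind|b", &bind, "The interface to bind to.");
+	if (!finalizeCommandLineOptions()) return; // prints this help screen on --help
+	---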
+*/ +void printCommandLineHelp() +{ + enum dcolumn = 20; + enum ncolumns = 80; + + logInfo("Usage: %s <options>\n", g_args[0]); + foreach (opt; g_options) { + string shortopt; + string[] longopts; + if (opt.names[0].length == 1 && !opt.hasValue) { + shortopt = "-"~opt.names[0]; + longopts = opt.names[1 .. $]; + } else { + shortopt = " "; + longopts = opt.names; + } + + string optionString(string name) + { + if (name.length == 1) return "-"~name~(opt.hasValue ? " <value>" : ""); + else return "--"~name~(opt.hasValue ? "=<value>" : ""); + } + + string[] lopts; foreach(lo; longopts) lopts ~= optionString(lo); + auto optstr = format(" %s %s", shortopt, lopts.join(", ")); + if (optstr.length < dcolumn) optstr ~= replicate(" ", dcolumn - optstr.length); + + auto indent = replicate(" ", dcolumn+1); + auto desc = wrap(opt.helpText, ncolumns - dcolumn - 2, optstr.length > dcolumn ? indent : "", indent).stripRight(); + + if (optstr.length > dcolumn) + logInfo("%s\n%s", optstr, desc); + else logInfo("%s %s", optstr, desc); + } +} + + +/** + Checks for unrecognized command line options and displays a help screen. + + This function is called automatically from vibe.appmain to check for + correct command line usage. It will print a help screen in case of + unrecognized options. + + Params: + args_out = Optional parameter for storing any arguments not handled + by any readOption call. If this is left as null, an error + will be triggered whenever unhandled arguments exist. + + Returns: + If "--help" was passed, the function returns false. In all other + cases either true is returned or an exception is thrown. +*/ +bool finalizeCommandLineOptions(string[]* args_out = null) +{ + scope(exit) g_args = null; + + if (args_out) { + *args_out = g_args; + } else if (g_args.length > 1) { + logError("Unrecognized command line option: %s\n", g_args[1]); + printCommandLineHelp(); + throw new Exception("Unrecognized command line option."); + } + + if (g_help) { + printCommandLineHelp(); + return false; + } + + return true; +} + + +private struct OptionInfo { + string[] names; + bool hasValue; + string helpText; +} + +private { + __gshared string[] g_args; + __gshared bool g_haveConfig; + //__gshared Json g_config; + __gshared OptionInfo[] g_options; + __gshared bool g_help; +} + +private string[] getConfigPaths() +{ + string[] result = [""]; + import std.process : environment; + version (Windows) + result ~= environment.get("USERPROFILE"); + else + result ~= [environment.get("HOME"), "/etc/vibe/"]; + return result; +} + +// this is invoked by the first readOption call (at least vibe.core will perform one) +private void init() +{ + version (VibeDisableCommandLineParsing) {} + else g_args = Runtime.args; + + if (!g_args.length) g_args = ["dummy"]; + + // TODO: let different config files override individual fields + auto searchpaths = getConfigPaths(); + foreach (spath; searchpaths) { + auto cpath = buildPath(spath, configName); + if (cpath.exists) { + scope(failure) logError("Failed to parse config file %s.", cpath); + auto text = cpath.readText(); + //g_config = text.parseJson(); + g_haveConfig = true; + break; + } + } + + if (!g_haveConfig) + logDiagnostic("No config file found in %s", searchpaths); + + readOption("h|help", &g_help, "Prints this help screen."); +} + +private enum configName = "vibe.conf"; + +private template ValueTuple(T...)
{ alias ValueTuple = T; } + +private alias getoptConfig = ValueTuple!(std.getopt.config.passThrough, std.getopt.config.bundling); diff --git a/source/vibe/core/concurrency.d b/source/vibe/core/concurrency.d new file mode 100644 index 0000000..09c153e --- /dev/null +++ b/source/vibe/core/concurrency.d @@ -0,0 +1,1190 @@ +/** + Functions and structures for dealing with threads and concurrent access. + + This module is modeled after std.concurrency, but provides a fiber-aware alternative + to it. All blocking operations will yield the calling fiber instead of blocking it. + + Copyright: © 2013-2014 RejectedSoftware e.K. + License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file. + Authors: Sönke Ludwig +*/ +module vibe.core.concurrency; + +import core.time; +import std.traits; +import std.typecons; +import std.typetuple; +import std.variant; +import std.string; +import vibe.core.task; +//import vibe.utils.memory; + +public import std.concurrency; + +private extern (C) pure nothrow void _d_monitorenter(Object h); +private extern (C) pure nothrow void _d_monitorexit(Object h); + +/** + Locks the given shared object and returns a ScopedLock for accessing any unshared members. + + Using this function will ensure that there are no data races. For this reason, the class + type T is required to contain no unshared or unisolated aliasing. + + See_Also: core.concurrency.isWeaklyIsolated +*/ +ScopedLock!T lock(T : const(Object))(shared(T) object) +pure nothrow @safe { + return ScopedLock!T(object); +} +/// ditto +void lock(T : const(Object))(shared(T) object, scope void delegate(scope T) accessor) +nothrow { + auto l = lock(object); + accessor(l.unsafeGet()); +} + +/// +unittest { + import vibe.core.concurrency; + + static class Item { + private double m_value; + + this(double value) pure { m_value = value; } + + @property double value() const pure { return m_value; } + } + + static class Manager { + private { + string m_name; + Isolated!(Item) m_ownedItem; + Isolated!(shared(Item)[]) m_items; + } + + pure this(string name) + { + m_name = name; + auto itm = makeIsolated!Item(3.5); + m_ownedItem = itm.move; + } + + void addItem(shared(Item) item) pure { m_items ~= item; } + + double getTotalValue() + const pure { + double sum = 0; + + // lock() is required to access shared objects + foreach (itm; m_items.unsafeGet) { + auto l = itm.lock(); + sum += l.value; + } + + // owned objects can be accessed without locking + sum += m_ownedItem.value; + + return sum; + } + } + + void test() + { + import std.stdio; + + auto man = cast(shared)new Manager("My manager"); + { + auto l = man.lock(); + l.addItem(new shared(Item)(1.5)); + l.addItem(new shared(Item)(0.5)); + } + + writefln("Total value: %s", man.lock().getTotalValue()); + } +} + + +/** + Proxy structure that keeps the monitor of the given object locked until it + goes out of scope. + + Any unshared members of the object are safely accessible during this time. The usual + way to use it is by calling lock. + + See_Also: lock +*/ +struct ScopedLock(T) +{ + static assert(is(T == class), "ScopedLock is only usable with classes."); +// static assert(isWeaklyIsolated!(FieldTypeTuple!T), T.stringof~" contains non-immutable, non-shared references. 
Accessing it in a multi-threaded environment is not safe."); + + private Rebindable!T m_ref; + + @disable this(this); + + this(shared(T) obj) + pure nothrow @trusted + { + assert(obj !is null, "Attempting to lock null object."); + m_ref = cast(T)obj; + _d_monitorenter(getObject()); + assert(getObject().__monitor !is null); + } + + ~this() + pure nothrow @trusted + { + assert(m_ref !is null); + assert(getObject().__monitor !is null); + _d_monitorexit(getObject()); + } + + /** + Returns an unshared reference to the locked object. + + Note that using this function breaks type safety. Be sure to not escape the reference beyond + the life time of the lock. + */ + @property inout(T) unsafeGet() inout nothrow { return m_ref; } + + inout(T) opDot() inout nothrow { return m_ref; } + //pragma(msg, "In ScopedLock!("~T.stringof~")"); + //pragma(msg, isolatedRefMethods!T()); +// mixin(isolatedAggregateMethodsString!T()); + + private Object getObject() + pure nothrow { + static if( is(Rebindable!T == struct) ) return cast(Unqual!T)m_ref.get(); + else return cast(Unqual!T)m_ref; + } +} + + +/** + Creates a new isolated object. + + Isolated objects contain no mutable aliasing outside of their own reference tree. They can thus + be safely converted to immutable and they can be safely passed between threads. + + The function returns an instance of Isolated that will allow proxied access to the members of + the object, as well as providing means to convert the object to immutable or to an ordinary + mutable object. +*/ +pure Isolated!T makeIsolated(T, ARGS...)(ARGS args) +{ + static if (is(T == class)) return Isolated!T(new T(args)); + else static if (is(T == struct)) return T(args); + else static if (isPointer!T && is(PointerTarget!T == struct)) { + alias TB = PointerTarget!T; + return Isolated!T(new TB(args)); + } else static assert(false, "makeIsolated works only for class and (pointer to) struct types."); +} + +/// +unittest { + import vibe.core.concurrency; + import vibe.core.core; + + static class Item { + double value; + string name; + } + + static void modifyItem(Isolated!Item itm) + { + itm.value = 1.3; + // TODO: send back to initiating thread + } + + void test() + { + immutable(Item)[] items; + + // create immutable item procedurally + auto itm = makeIsolated!Item(); + itm.value = 2.4; + itm.name = "Test"; + items ~= itm.freeze(); + + // send isolated item to other thread + auto itm2 = makeIsolated!Item(); + runWorkerTask(&modifyItem, itm2.move()); + // ... + } +} + +unittest { + static class C { this(int x) pure {} } + static struct S { this(int x) pure {} } + + alias CI = typeof(makeIsolated!C(0)); + alias SI = typeof(makeIsolated!S(0)); + alias SPI = typeof(makeIsolated!(S*)(0)); + static assert(isStronglyIsolated!CI); + static assert(is(CI == IsolatedRef!C)); + static assert(isStronglyIsolated!SI); + static assert(is(SI == S)); + static assert(isStronglyIsolated!SPI); + static assert(is(SPI == IsolatedRef!S)); +} + + +/** + Creates a new isolated array. +*/ +pure Isolated!(T[]) makeIsolatedArray(T)(size_t size) +{ + Isolated!(T[]) ret; + ret.length = size; + return ret.move(); +} + +/// +unittest { + import vibe.core.concurrency; + import vibe.core.core; + + static void compute(Tid tid, Isolated!(double[]) array, size_t start_index) + { + foreach( i; 0 .. 
array.length ) + array[i] = (start_index + i) * 0.5; + + send(tid, array.move()); + } + + void test() + { + import std.stdio; + + // compute contents of an array using multiple threads + auto arr = makeIsolatedArray!double(256); + + // partition the array (no copying takes place) + size_t[] indices = [64, 128, 192, 256]; + Isolated!(double[])[] subarrays = arr.splice(indices); + + // start processing in threads + Tid[] tids; + foreach (i, idx; indices) + tids ~= runWorkerTaskH(&compute, thisTid, subarrays[i].move(), idx).tid; + + // collect results + auto resultarrays = new Isolated!(double[])[tids.length]; + foreach( i, tid; tids ) + resultarrays[i] = receiveOnly!(Isolated!(double[])).move(); + + // BUG: the arrays must be sorted here, but since there is no way to tell + // from where something was received, this is difficult here. + + // merge results (no copying takes place again) + foreach( i; 1 .. resultarrays.length ) + resultarrays[0].merge(resultarrays[i]); + + // convert the final result to immutable + auto result = resultarrays[0].freeze(); + + writefln("Result: %s", result); + } +} + + +/** + Unsafe facility to assume that an existing reference is unique. +*/ +Isolated!T assumeIsolated(T)(T object) +{ + return Isolated!T(object); +} + +/** + Encapsulates the given type in a way that guarantees memory isolation. + + See_Also: makeIsolated, makeIsolatedArray +*/ +template Isolated(T) +{ + static if( isWeaklyIsolated!T ){ + alias Isolated = T; + } else static if( is(T == class) ){ + alias Isolated = IsolatedRef!T; + } else static if( isPointer!T ){ + alias Isolated = IsolatedRef!(PointerTarget!T); + } else static if( isDynamicArray!T ){ + alias Isolated = IsolatedArray!(typeof(T.init[0])); + } else static if( isAssociativeArray!T ){ + alias Isolated = IsolatedAssociativeArray!(KeyType!T, ValueType!T); + } else static assert(false, T.stringof~": Unsupported type for Isolated!T - must be class, pointer, array or associative array."); +} + + +// unit tests fail with DMD 2.064 due to some cyclic import regression +unittest +{ + static class CE {} + static struct SE {} + + static assert(is(Isolated!CE == IsolatedRef!CE)); + static assert(is(Isolated!(SE*) == IsolatedRef!SE)); + static assert(is(Isolated!(SE[]) == IsolatedArray!SE)); + version(EnablePhobosFails){ + // AAs don't work because they are impure + static assert(is(Isolated!(SE[string]) == IsolatedAssociativeArray!(string, SE))); + } +} + + +/// private +private struct IsolatedRef(T) +{ + pure: + static assert(isWeaklyIsolated!(FieldTypeTuple!T), T.stringof ~ " contains non-immutable/non-shared references. Isolation cannot be guaranteed."); + enum __isWeakIsolatedType = true; + static if( isStronglyIsolated!(FieldTypeTuple!T) ) + enum __isIsolatedType = true; + + alias BaseType = T; + + static if( is(T == class) ){ + alias Tref = T; + alias Tiref = immutable(T); + } else { + alias Tref = T*; + alias Tiref = immutable(T)*; + } + + private Tref m_ref; + + //mixin isolatedAggregateMethods!T; + //pragma(msg, isolatedAggregateMethodsString!T()); + mixin(isolatedAggregateMethodsString!T()); + + @disable this(this); + + private this(Tref obj) + { + m_ref = obj; + } + + this(ref IsolatedRef src) + { + m_ref = src.m_ref; + src.m_ref = null; + } + + void opAssign(ref IsolatedRef src) + { + m_ref = src.m_ref; + src.m_ref = null; + } + + /** + Returns the raw reference. + + Note that using this function breaks type safety. Be sure to not escape the reference.
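+
+	A short sketch of the hazard (hypothetical `itm` of type `Isolated!Item`):
+	---
+	auto raw = itm.unsafeGet();
+	// `raw` now aliases the isolated object; storing or sharing it would
+	// silently break the isolation guarantee
+	---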
+ */ + inout(Tref) unsafeGet() inout { return m_ref; } + + /** + Move the contained reference to a new IsolatedRef. + + Since IsolatedRef is not copyable, using this function may be necessary when + passing a reference to a function or when returning it. The reference in + this instance will be set to null after the call returns. + */ + IsolatedRef move() { auto r = m_ref; m_ref = null; return IsolatedRef(r); } + /// ditto + void move(ref IsolatedRef target) { target.m_ref = m_ref; m_ref = null; } + + /** + Convert the isolated reference to a normal mutable reference. + + The reference in this instance will be set to null after the call returns. + */ + Tref extract() + { + auto ret = m_ref; + m_ref = null; + return ret; + } + + /** + Converts the isolated reference to immutable. + + The reference in this instance will be set to null after the call has returned. + Note that this method is only available for strongly isolated references, + which means references that do not contain shared aliasing. + */ + Tiref freeze()() + { + static assert(isStronglyIsolated!(FieldTypeTuple!T), "freeze() can only be called on strongly isolated values, but "~T.stringof~" contains shared references."); + auto ret = m_ref; + m_ref = null; + return cast(immutable)ret; + } + + /** + Performs an up- or down-cast of the reference and moves it to a new IsolatedRef instance. + + The reference in this instance will be set to null after the call has returned. + */ + U opCast(U)() + if (isInstanceOf!(IsolatedRef, U) && (is(U.BaseType : BaseType) || is(BaseType : U.BaseType))) + { + auto r = U(cast(U.BaseType)m_ref); + m_ref = null; + return r; + } + + /** + Determines if the contained reference is non-null. + + This method allows Isolated references to be used in boolean expressions without having to + extract the reference. + */ + U opCast(U)() const if(is(U == bool)) { return m_ref !is null; } +} + + +/// private +private struct IsolatedArray(T) +{ + static assert(isWeaklyIsolated!T, T.stringof ~ " contains non-immutable references. Isolation cannot be guaranteed."); + enum __isWeakIsolatedType = true; + static if( isStronglyIsolated!T ) + enum __isIsolatedType = true; + + alias BaseType = T[]; + + private T[] m_array; + + mixin isolatedArrayMethods!T; + + @disable this(this); + + /** + Returns the raw reference. + + Note that using this function breaks type safety. Be sure to not escape the reference. + */ + inout(T[]) unsafeGet() inout { return m_array; } + + IsolatedArray!T move() pure { auto r = m_array; m_array = null; return IsolatedArray(r); } + void move(ref IsolatedArray target) pure { target.m_array = m_array; m_array = null; } + + T[] extract() + pure { + auto arr = m_array; + m_array = null; + return arr; + } + + immutable(T)[] freeze()() pure + { + static assert(isStronglyIsolated!T, "Freeze can only be called on strongly isolated values, but "~T.stringof~" contains shared references."); + auto arr = m_array; + m_array = null; + return cast(immutable)arr; + } + + + /** + Splits the array into individual slices at the given indices. + + The indices must be in ascending order. Any items beyond the last + given index will remain in this IsolatedArray. + */ + IsolatedArray!T[] splice(in size_t[] indices...)
pure + in { + //import std.algorithm : isSorted; + assert(indices.length > 0, "At least one splice index must be given."); + //assert(isSorted(indices), "Indices must be in ascending order."); + assert(indices[$-1] <= m_array.length, "Splice index out of bounds."); + } + body { + auto ret = new IsolatedArray!T[indices.length]; + size_t lidx = 0; + foreach( i, sidx; indices ){ + ret[i].m_array = m_array[lidx .. sidx]; + lidx = sidx; + } + m_array = m_array[lidx .. $]; + return ret; + } + + void merge(ref IsolatedArray!T array) pure + in { + assert(array.m_array.ptr == m_array.ptr+m_array.length || array.m_array.ptr+array.length == m_array.ptr, + "Argument to merge() must be a neighbouring array partition."); + } + body { + if( array.m_array.ptr == m_array.ptr + m_array.length ){ + m_array = m_array.ptr[0 .. m_array.length + array.length]; + } else { + m_array = array.m_array.ptr[0 .. m_array.length + array.length]; + } + array.m_array.length = 0; + } +} + + +/// private +private struct IsolatedAssociativeArray(K, V) +{ + pure: + static assert(isWeaklyIsolated!K, "Key type has aliasing. Memory isolation cannot be guaranteed."); + static assert(isWeaklyIsolated!V, "Value type has aliasing. Memory isolation cannot be guaranteed."); + + enum __isWeakIsolatedType = true; + static if( isStronglyIsolated!K && isStronglyIsolated!V ) + enum __isIsolatedType = true; + + alias BaseType = V[K]; + + private { + V[K] m_aa; + } + + mixin isolatedAssociativeArrayMethods!(K, V); + + /** + Returns the raw reference. + + Note that using this function breaks type safety. Be sure to not escape the reference. + */ + inout(V[K]) unsafeGet() inout { return m_aa; } + + IsolatedAssociativeArray move() { auto r = m_aa; m_aa = null; return IsolatedAssociativeArray(r); } + void move(ref IsolatedAssociativeArray target) { target.m_aa = m_aa; m_aa = null; } + + V[K] extract() + { + auto arr = m_aa; + m_aa = null; + return arr; + } + + static if( is(typeof(IsolatedAssociativeArray.__isIsolatedType)) ){ + immutable(V)[K] freeze() + { + auto arr = m_aa; + m_aa = null; + return cast(immutable(V)[K])(arr); + } + + immutable(V[K]) freeze2() + { + auto arr = m_aa; + m_aa = null; + return cast(immutable(V[K]))(arr); + } + } +} + + +/** Encapsulates a reference in a way that disallows escaping it or any contained references. 
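+
+	A minimal illustration (a sketch; `Item` is the hypothetical class from
+	the examples above):
+	---
+	auto itm = makeIsolated!Item();
+	itm.value = 1.5;    // setters for isolated fields accept plain values
+	auto v = itm.value; // getters return ScopedRef-wrapped views that cannot escape
+	---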
+*/ +template ScopedRef(T) +{ + static if( isAggregateType!T ) alias ScopedRef = ScopedRefAggregate!T; + else static if( isAssociativeArray!T ) alias ScopedRef = ScopedRefAssociativeArray!T; + else static if( isArray!T ) alias ScopedRef = ScopedRefArray!T; + else static if( isBasicType!T ) alias ScopedRef = ScopedRefBasic!T; + else static assert(false, "Unsupported type for ScopedRef: "~T.stringof); +} + +/// private +private struct ScopedRefBasic(T) +{ + private T* m_ref; + + @disable this(this); + + this(ref T tref) pure { m_ref = &tref; } + + //void opAssign(T value) { *m_ref = value; } + + ref T unsafeGet() pure { return *m_ref; } + + alias unsafeGet this; +} + +/// private +private struct ScopedRefAggregate(T) +{ + private T* m_ref; + + @disable this(this); + + this(ref T tref) pure { m_ref = &tref; } + + //void opAssign(T value) { *m_ref = value; } + + ref T unsafeGet() pure { return *m_ref; } + + static if( is(T == shared) ){ + auto lock() pure { return .lock(unsafeGet()); } + } else { + mixin(isolatedAggregateMethodsString!T()); + //mixin isolatedAggregateMethods!T; + } +} + +/// private +private struct ScopedRefArray(T) +{ + alias V = typeof(T.init[0]); + private T* m_ref; + + private @property ref T m_array() pure { return *m_ref; } + private @property ref const(T) m_array() const pure { return *m_ref; } + + mixin isolatedArrayMethods!(V, !is(T == const) && !is(T == immutable)); + + @disable this(this); + + this(ref T tref) pure { m_ref = &tref; } + + //void opAssign(T value) { *m_ref = value; } + + ref T unsafeGet() pure { return *m_ref; } +} + +/// private +private struct ScopedRefAssociativeArray(T) +{ + alias K = KeyType!T; + alias V = ValueType!T; + private T* m_ref; + + private @property ref T m_array() pure { return *m_ref; } + private @property ref const(T) m_array() const pure { return *m_ref; } + + mixin isolatedAssociativeArrayMethods!(K, V); + + @disable this(this); + + this(ref T tref) pure { m_ref = &tref; } + + //void opAssign(T value) { *m_ref = value; } + + ref T unsafeGet() pure { return *m_ref; } + +} + +/******************************************************************************/ +/* COMMON MIXINS FOR NON-REF-ESCAPING WRAPPER STRUCTS */ +/******************************************************************************/ + +/// private +/*private mixin template(T) isolatedAggregateMethods +{ + mixin(isolatedAggregateMethodsString!T()); +}*/ + +/// private +private string isolatedAggregateMethodsString(T)() +{ + import vibe.internal.traits; + + string ret = generateModuleImports!T(); + //pragma(msg, "Type '"~T.stringof~"'"); + foreach( mname; __traits(allMembers, T) ){ + static if (isPublicMember!(T, mname)) { + static if (isRWPlainField!(T, mname)) { + alias mtype = typeof(__traits(getMember, T, mname)); + auto mtypename = fullyQualifiedName!mtype; + //pragma(msg, " field " ~ mname ~ " : " ~ mtype.stringof); + ret ~= "@property ScopedRef!(const("~mtypename~")) "~mname~"() const pure { return ScopedRef!(const("~mtypename~"))(m_ref."~mname~"); }\n"; + ret ~= "@property ScopedRef!("~mtypename~") "~mname~"() pure { return ScopedRef!("~mtypename~")(m_ref."~mname~"); }\n"; + static if( !is(mtype == const) && !is(mtype == immutable) ){ + static if( isWeaklyIsolated!mtype ){ + ret ~= "@property void "~mname~"("~mtypename~" value) pure { m_ref."~mname~" = value; }\n"; + } else { + ret ~= "@property void "~mname~"(AT)(AT value) pure { static assert(isWeaklyIsolated!AT); m_ref."~mname~" = value.unsafeGet(); }\n"; + } + } + } else { + foreach( method;
__traits(getOverloads, T, mname) ){ + alias ftype = FunctionTypeOf!method; + + // only pure functions are allowed (or they could escape references to global variables) + // don't allow non-isolated references to be escaped + if( functionAttributes!ftype & FunctionAttribute.pure_ && + isWeaklyIsolated!(ReturnType!ftype) ) + { + static if( __traits(isStaticFunction, method) ){ + //pragma(msg, " static method " ~ mname ~ " : " ~ ftype.stringof); + ret ~= "static "~fullyQualifiedName!(ReturnType!ftype)~" "~mname~"("; + foreach( i, P; ParameterTypeTuple!ftype ){ + if( i > 0 ) ret ~= ", "; + ret ~= fullyQualifiedName!P ~ " p"~i.stringof; + } + ret ~= "){ return "~fullyQualifiedName!T~"."~mname~"("; + foreach( i, P; ParameterTypeTuple!ftype ){ + if( i > 0 ) ret ~= ", "; + ret ~= "p"~i.stringof; + } + ret ~= "); }\n"; + } else if (mname != "__ctor") { + //pragma(msg, " normal method " ~ mname ~ " : " ~ ftype.stringof); + if( is(ftype == const) ) ret ~= "const "; + if( is(ftype == shared) ) ret ~= "shared "; + if( is(ftype == immutable) ) ret ~= "immutable "; + if( functionAttributes!ftype & FunctionAttribute.pure_ ) ret ~= "pure "; + if( functionAttributes!ftype & FunctionAttribute.property ) ret ~= "@property "; + ret ~= fullyQualifiedName!(ReturnType!ftype)~" "~mname~"("; + foreach( i, P; ParameterTypeTuple!ftype ){ + if( i > 0 ) ret ~= ", "; + ret ~= fullyQualifiedName!P ~ " p"~i.stringof; + } + ret ~= "){ return m_ref."~mname~"("; + foreach( i, P; ParameterTypeTuple!ftype ){ + if( i > 0 ) ret ~= ", "; + ret ~= "p"~i.stringof; + } + ret ~= "); }\n"; + } + } + } + } + } //else pragma(msg, " non-public field " ~ mname); + } + return ret; +} + + +/// private +private mixin template isolatedArrayMethods(T, bool mutableRef = true) +{ + @property size_t length() const pure { return m_array.length; } + + @property bool empty() const pure { return m_array.length == 0; } + + static if( mutableRef ){ + @property void length(size_t value) pure { m_array.length = value; } + + + void opCatAssign(T item) pure + { + static if( isCopyable!T ) m_array ~= item; + else { + m_array.length++; + m_array[$-1] = item; + } + } + + void opCatAssign(IsolatedArray!T array) pure + { + static if( isCopyable!T ) m_array ~= array.m_array; + else { + size_t start = m_array.length; + m_array.length += array.length; + foreach( i, ref itm; array.m_array ) + m_array[start+i] = itm; + } + } + } + + ScopedRef!(const(T)) opIndex(size_t idx) const pure { return ScopedRef!(const(T))(m_array[idx]); } + ScopedRef!T opIndex(size_t idx) pure { return ScopedRef!T(m_array[idx]); } + + static if( !is(T == const) && !is(T == immutable) ) + void opIndexAssign(T value, size_t idx) pure { m_array[idx] = value; } + + int opApply(int delegate(ref size_t, ref ScopedRef!T) del) + pure { + foreach( idx, ref v; m_array ){ + auto noref = ScopedRef!T(v); + if( auto ret = (cast(int delegate(ref size_t, ref ScopedRef!T) pure)del)(idx, noref) ) + return ret; + } + return 0; + } + + int opApply(int delegate(ref size_t, ref ScopedRef!(const(T))) del) + const pure { + foreach( idx, ref v; m_array ){ + auto noref = ScopedRef!(const(T))(v); + if( auto ret = (cast(int delegate(ref size_t, ref ScopedRef!(const(T))) pure)del)(idx, noref) ) + return ret; + } + return 0; + } + + int opApply(int delegate(ref ScopedRef!T) del) + pure { + foreach( v; m_array ){ + auto noref = ScopedRef!T(v); + if( auto ret = (cast(int delegate(ref ScopedRef!T) pure)del)(noref) ) + return ret; + } + return 0; + } + + int opApply(int delegate(ref ScopedRef!(const(T))) del) + const pure { 
+ foreach( v; m_array ){ + auto noref = ScopedRef!(const(T))(v); + if( auto ret = (cast(int delegate(ref ScopedRef!(const(T))) pure)del)(noref) ) + return ret; + } + return 0; + } +} + + +/// private +private mixin template isolatedAssociativeArrayMethods(K, V, bool mutableRef = true) +{ + @property size_t length() const pure { return m_aa.length; } + @property bool empty() const pure { return m_aa.length == 0; } + + static if( !is(V == const) && !is(V == immutable) ) + void opIndexAssign(V value, K key) pure { m_aa[key] = value; } + + inout(V) opIndex(K key) inout pure { return m_aa[key]; } + + int opApply(int delegate(ref ScopedRef!K, ref ScopedRef!V) del) + pure { + foreach( ref k, ref v; m_aa ) + if( auto ret = (cast(int delegate(ref ScopedRef!K, ref ScopedRef!V) pure)del)(k, v) ) + return ret; + return 0; + } + + int opApply(int delegate(ref ScopedRef!V) del) + pure { + foreach( ref v; m_aa ) + if( auto ret = (cast(int delegate(ref ScopedRef!V) pure)del)(v) ) + return ret; + return 0; + } + + int opApply(int delegate(ref ScopedRef!(const(K)), ref ScopedRef!(const(V))) del) + const pure { + foreach( ref k, ref v; m_aa ) + if( auto ret = (cast(int delegate(ref ScopedRef!(const(K)), ref ScopedRef!(const(V))) pure)del)(k, v) ) + return ret; + return 0; + } + + int opApply(int delegate(ref ScopedRef!(const(V))) del) + const pure { + foreach( v; m_aa ) + if( auto ret = (cast(int delegate(ref ScopedRef!(const(V))) pure)del)(v) ) + return ret; + return 0; + } +} + + +/******************************************************************************/ +/* UTILITY FUNCTIONALITY */ +/******************************************************************************/ + +// private +private @property string generateModuleImports(T)() +{ + bool[string] visited; + //pragma(msg, "generateModuleImports "~T.stringof); + return generateModuleImportsImpl!T(visited); +} + +private @property string generateModuleImportsImpl(T, TYPES...)(ref bool[string] visited) +{ + string ret; + + //pragma(msg, T); + //pragma(msg, TYPES); + + static if( !haveTypeAlready!(T, TYPES) ){ + void addModule(string mod){ + if( mod !in visited ){ + ret ~= "static import "~mod~";\n"; + visited[mod] = true; + } + } + + static if( isAggregateType!T && !is(typeof(T.__isWeakIsolatedType)) ){ // hack to avoid a recursive template instantiation when Isolated!T is passed to moduleName + addModule(moduleName!T); + + foreach( member; __traits(allMembers, T) ){ + //static if( isPublicMember!(T, member) ){ + static if( !is(typeof(__traits(getMember, T, member))) ){ + // ignore sub types + } else static if( !is(FunctionTypeOf!(__traits(getMember, T, member)) == function) ){ + alias mtype = typeof(__traits(getMember, T, member)) ; + ret ~= generateModuleImportsImpl!(mtype, T, TYPES)(visited); + } else static if( is(T == class) || is(T == interface) ){ + foreach( overload; MemberFunctionsTuple!(T, member) ){ + ret ~= generateModuleImportsImpl!(ReturnType!overload, T, TYPES)(visited); + foreach( P; ParameterTypeTuple!overload ) + ret ~= generateModuleImportsImpl!(P, T, TYPES)(visited); + } + } // TODO: handle structs! 
+ //} + } + } + else static if( isPointer!T ) ret ~= generateModuleImportsImpl!(PointerTarget!T, T, TYPES)(visited); + else static if( isArray!T ) ret ~= generateModuleImportsImpl!(typeof(T.init[0]), T, TYPES)(visited); + else static if( isAssociativeArray!T ) ret ~= generateModuleImportsImpl!(KeyType!T, T, TYPES)(visited) ~ generateModuleImportsImpl!(ValueType!T, T, TYPES)(visited); + } + + return ret; +} + +template haveTypeAlready(T, TYPES...) +{ + static if( TYPES.length == 0 ) enum haveTypeAlready = false; + else static if( is(T == TYPES[0]) ) enum haveTypeAlready = true; + else alias haveTypeAlready = haveTypeAlready!(T, TYPES[1 ..$]); +} + + +/******************************************************************************/ +/* Additional traits useful for handling isolated data */ +/******************************************************************************/ + +/** + Determines if the given list of types has any non-immutable aliasing outside of their object tree. + + The types in particular may only contain plain data, pointers or arrays to immutable data, or references + encapsulated in stdx.typecons.Isolated. +*/ +template isStronglyIsolated(T...) +{ + static if (T.length == 0) enum bool isStronglyIsolated = true; + else static if (T.length > 1) enum bool isStronglyIsolated = isStronglyIsolated!(T[0 .. $/2]) && isStronglyIsolated!(T[$/2 .. $]); + else { + static if (is(T[0] == immutable)) enum bool isStronglyIsolated = true; + else static if(isInstanceOf!(Rebindable, T[0])) enum bool isStronglyIsolated = isStronglyIsolated!(typeof(T[0].get())); + else static if (is(typeof(T[0].__isIsolatedType))) enum bool isStronglyIsolated = true; + else static if (is(T[0] == class)) enum bool isStronglyIsolated = false; + else static if (is(T[0] == interface)) enum bool isStronglyIsolated = false; // can't know if the implementation is isolated + else static if (is(T[0] == delegate)) enum bool isStronglyIsolated = false; // can't know to what a delegate points + else static if (isDynamicArray!(T[0])) enum bool isStronglyIsolated = is(typeof(T[0].init[0]) == immutable); + else static if (isAssociativeArray!(T[0])) enum bool isStronglyIsolated = false; // TODO: be less strict here + else static if (isSomeFunction!(T[0])) enum bool isStronglyIsolated = true; // functions are immutable + else static if (isPointer!(T[0])) enum bool isStronglyIsolated = is(typeof(*T[0].init) == immutable); + else static if (isAggregateType!(T[0])) enum bool isStronglyIsolated = isStronglyIsolated!(FieldTypeTuple!(T[0])); + else enum bool isStronglyIsolated = true; + } +} + + +/** + Determines if the given list of types has any non-immutable and unshared aliasing outside of their object tree. + + The types in particular may only contain plain data, pointers or arrays to immutable or shared data, or references + encapsulated in stdx.typecons.Isolated. Values that do not have unshared and unisolated aliasing are safe to be passed + between threads. +*/ +template isWeaklyIsolated(T...) +{ + static if (T.length == 0) enum bool isWeaklyIsolated = true; + else static if (T.length > 1) enum bool isWeaklyIsolated = isWeaklyIsolated!(T[0 .. $/2]) && isWeaklyIsolated!(T[$/2 .. 
$]); + else { + static if(is(T[0] == immutable)) enum bool isWeaklyIsolated = true; + else static if (is(T[0] == shared)) enum bool isWeaklyIsolated = true; + else static if (is(T[0] == Tid)) enum bool isWeaklyIsolated = true; + else static if (isInstanceOf!(Rebindable, T[0])) enum bool isWeaklyIsolated = isWeaklyIsolated!(typeof(T[0].get())); + else static if (is(T[0] : Throwable)) enum bool isWeaklyIsolated = true; // WARNING: this is unsafe, but needed for send/receive! + else static if (is(typeof(T[0].__isIsolatedType))) enum bool isWeaklyIsolated = true; + else static if (is(typeof(T[0].__isWeakIsolatedType))) enum bool isWeaklyIsolated = true; + else static if (is(T[0] == class)) enum bool isWeaklyIsolated = false; + else static if (is(T[0] == interface)) enum bool isWeaklyIsolated = false; // can't know if the implementation is isolated + else static if (is(T[0] == delegate)) enum bool isWeaklyIsolated = T[0].stringof.endsWith(" shared"); // can't know to what a delegate points - FIXME: use something better than a string comparison + else static if (isDynamicArray!(T[0])) enum bool isWeaklyIsolated = is(typeof(T[0].init[0]) == immutable); + else static if (isAssociativeArray!(T[0])) enum bool isWeaklyIsolated = false; // TODO: be less strict here + else static if (isSomeFunction!(T[0])) enum bool isWeaklyIsolated = true; // functions are immutable + else static if (isPointer!(T[0])) enum bool isWeaklyIsolated = is(typeof(*T[0].init) == immutable) || is(typeof(*T[0].init) == shared); + else static if (isAggregateType!(T[0])) enum bool isWeaklyIsolated = isWeaklyIsolated!(FieldTypeTuple!(T[0])); + else enum bool isWeaklyIsolated = true; + } +} + +unittest { + static class A { int x; string y; } + + static struct B { + string a; // strongly isolated + Isolated!A b; // strongly isolated + version(EnablePhobosFails) + Isolated!(Isolated!A[]) c; // strongly isolated + version(EnablePhobosFails) + Isolated!(Isolated!A[string]) c; // AA implementation does not like this + version(EnablePhobosFails) + Isolated!(int[string]) d; // strongly isolated + } + + static struct C { + string a; // strongly isolated + shared(A) b; // weakly isolated + Isolated!A c; // strongly isolated + shared(A*) d; // weakly isolated + shared(A[]) e; // weakly isolated + shared(A[string]) f; // weakly isolated + } + + static struct D { A a; } // not isolated + static struct E { void delegate() a; } // not isolated + static struct F { void function() a; } // strongly isolated (functions are immutable) + static struct G { void test(); } // strongly isolated + static struct H { A[] a; } // not isolated + static interface I {} + + static assert(!isStronglyIsolated!A); + static assert(isStronglyIsolated!(FieldTypeTuple!A)); + static assert(isStronglyIsolated!B); + static assert(!isStronglyIsolated!C); + static assert(!isStronglyIsolated!D); + static assert(!isStronglyIsolated!E); + static assert(isStronglyIsolated!F); + static assert(isStronglyIsolated!G); + static assert(!isStronglyIsolated!H); + static assert(!isStronglyIsolated!I); + + static assert(!isWeaklyIsolated!A); + static assert(isWeaklyIsolated!(FieldTypeTuple!A)); + static assert(isWeaklyIsolated!B); + static assert(isWeaklyIsolated!C); + static assert(!isWeaklyIsolated!D); + static assert(!isWeaklyIsolated!E); + static assert(isWeaklyIsolated!F); + static assert(isWeaklyIsolated!G); + static assert(!isWeaklyIsolated!H); + static assert(!isWeaklyIsolated!I); +} + + +template isCopyable(T) +{ + static if( __traits(compiles, {foreach( t; [T.init]){}}) ) enum 
isCopyable = true; + else enum isCopyable = false; +} + + +/******************************************************************************/ +/* Future (promise) support */ +/******************************************************************************/ + +/** + Represents a value that will be computed asynchronously. + + This type uses $(D alias this) to enable transparent access to the result + value. +*/ +struct Future(T) { + private { + FreeListRef!(shared(T)) m_result; + Task m_task; + } + + /// Checks if the value was fully computed. + @property bool ready() const { return !m_task.running; } + + /** Returns the computed value. + + This function waits for the computation to finish, if necessary, and + then returns the final value. In case of an uncaught exception + happening during the computation, the exception will be thrown + instead. + */ + ref T getResult() + { + if (!ready) m_task.join(); + assert(ready, "Task still running after join()!?"); + return *cast(T*)m_result.get(); // casting away shared is safe, because this is a unique reference + } + + alias getResult this; + + private void init() + { + m_result = FreeListRef!(shared(T))(); + } +} + + +/** + Starts an asynchronous computation and returns a future for the result value. + + If the supplied callable and arguments are all weakly isolated, + $(D vibe.core.core.runWorkerTask) will be used to perform the computation. + Otherwise, $(D vibe.core.core.runTask) will be used. + + Params: + callable = A callable value, can be either a function, a delegate, or a + user defined type that defines an $(D opCall). + args = Arguments to pass to the callable. + + Returns: + Returns a $(D Future) object that can be used to access the result. + + See_also: $(D isWeaklyIsolated) +*/ +Future!(ReturnType!CALLABLE) async(CALLABLE, ARGS...)(CALLABLE callable, ARGS args) + if (is(typeof(callable(args)) == ReturnType!CALLABLE)) +{ + import vibe.core.core; + alias RET = ReturnType!CALLABLE; + Future!RET ret; + ret.init(); + static void compute(FreeListRef!(shared(RET)) dst, CALLABLE callable, ARGS args) { + *dst = cast(shared(RET))callable(args); + } + static if (isWeaklyIsolated!CALLABLE && isWeaklyIsolated!ARGS) { + ret.m_task = runWorkerTaskH(&compute, ret.m_result, callable, args); + } else { + ret.m_task = runTask(&compute, ret.m_result, callable, args); + } + return ret; +} + +/// +unittest { + import vibe.core.core; + import vibe.core.log; + + void test() + { + static if (__VERSION__ >= 2065) { + auto val = async({ + logInfo("Starting to compute value in worker task."); + sleep(500.msecs); // simulate some lengthy computation + logInfo("Finished computing value in worker task."); + return 32; + }); + + logInfo("Starting computation in main task"); + sleep(200.msecs); // simulate some lengthy computation + logInfo("Finished computation in main task.
Waiting for async value."); + logInfo("Result: %s", val.getResult()); + } + } +} + + +/******************************************************************************/ +/* std.concurrency compatible interface for message passing */ +/******************************************************************************/ + +void send(ARGS...)(Task task, ARGS args) { std.concurrency.send(task.tidInfo.ident, args); } +void prioritySend(ARGS...)(Task task, ARGS args) { std.concurrency.prioritySend(task.tidInfo.ident, args); } + +package class VibedScheduler : Scheduler { + import core.sync.mutex; + import vibe.core.core; + import vibe.core.sync; + + override void start(void delegate() op) { op(); } + override void spawn(void delegate() op) { runTask(op); } + override void yield() {} + override @property ref ThreadInfo thisInfo(){ return Task.getThis().tidInfo; } + override TaskCondition newCondition(Mutex m) + { + try { + return new TaskCondition(m); + } catch(Exception e) { assert(false, e.msg); } + } +} diff --git a/source/vibe/core/connectionpool.d b/source/vibe/core/connectionpool.d new file mode 100644 index 0000000..d0dec81 --- /dev/null +++ b/source/vibe/core/connectionpool.d @@ -0,0 +1,149 @@ +/** + Generic connection pool for reusing persistent connections across fibers. + + Copyright: © 2012-2016 RejectedSoftware e.K. + License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file. + Authors: Sönke Ludwig +*/ +module vibe.core.connectionpool; + +import vibe.core.log; + +import core.thread; +import vibe.core.sync; +//import vibe.utils.memory; + +/** + Generic connection pool class. + + The connection pool creates connections using the supplied factory + function as needed whenever `lockConnection` is called. Connections are + associated with the calling fiber, as long as any copy of the returned + `LockedConnection` object still exists. Connections that are not associated + with any fiber will be kept in a pool of open connections for later reuse. + + Note that, after retrieving a connection with `lockConnection`, the caller + has to make sure that the connection is actually physically open and to + reopen it if necessary. The `ConnectionPool` class has no knowledge of the + internals of the connection objects. +*/ +class ConnectionPool(Connection) +{ + private { + Connection delegate() m_connectionFactory; + Connection[] m_connections; + int[const(Connection)] m_lockCount; + FreeListRef!LocalTaskSemaphore m_sem; + debug Thread m_thread; + } + + this(Connection delegate() connection_factory, uint max_concurrent = uint.max) + { + m_connectionFactory = connection_factory; + m_sem = FreeListRef!LocalTaskSemaphore(max_concurrent); + debug m_thread = Thread.getThis(); + } + + /** Determines the maximum number of concurrently open connections. + + Attempting to lock more connections than this number will cause the + calling fiber to be blocked until one of the locked connections + becomes available for reuse. + */ + @property void maxConcurrency(uint max_concurrent) { + m_sem.maxLocks = max_concurrent; + } + /// ditto + @property uint maxConcurrency() { + return m_sem.maxLocks; + } + + /** Retrieves a connection to temporarily associate with the calling fiber. + + The returned `LockedConnection` object uses RAII and reference counting + to determine when to unlock the connection.
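+
+	A usage sketch (hedged; `DBConnection` is a hypothetical connection class
+	and `connectDB` a hypothetical factory function):
+	---
+	auto pool = new ConnectionPool!DBConnection({ return connectDB("127.0.0.1"); });
+	auto conn = pool.lockConnection();
+	// the connection is returned to the pool once all copies of `conn`
+	// have gone out of scope
+	---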
+ */ + LockedConnection!Connection lockConnection() + { + debug assert(m_thread is Thread.getThis(), "ConnectionPool was called from a foreign thread!"); + + m_sem.lock(); + size_t cidx = size_t.max; + foreach( i, c; m_connections ){ + auto plc = c in m_lockCount; + if( !plc || *plc == 0 ){ + cidx = i; + break; + } + } + + Connection conn; + if( cidx != size_t.max ){ + logTrace("returning %s connection %d of %d", Connection.stringof, cidx, m_connections.length); + conn = m_connections[cidx]; + } else { + logDebug("creating new %s connection, all %d are in use", Connection.stringof, m_connections.length); + conn = m_connectionFactory(); // NOTE: may block + logDebug(" ... %s", cast(void*)conn); + } + m_lockCount[conn] = 1; + if( cidx == size_t.max ){ + m_connections ~= conn; + logDebug("Now got %d connections", m_connections.length); + } + auto ret = LockedConnection!Connection(this, conn); + return ret; + } +} + +struct LockedConnection(Connection) { + private { + ConnectionPool!Connection m_pool; + Task m_task; + Connection m_conn; + debug uint m_magic = 0xB1345AC2; + } + + private this(ConnectionPool!Connection pool, Connection conn) + { + assert(conn !is null); + m_pool = pool; + m_conn = conn; + m_task = Task.getThis(); + } + + this(this) + { + debug assert(m_magic == 0xB1345AC2, "LockedConnection value corrupted."); + if( m_conn ){ + auto fthis = Task.getThis(); + assert(fthis is m_task); + m_pool.m_lockCount[m_conn]++; + logTrace("conn %s copy %d", cast(void*)m_conn, m_pool.m_lockCount[m_conn]); + } + } + + ~this() + { + debug assert(m_magic == 0xB1345AC2, "LockedConnection value corrupted."); + if( m_conn ){ + auto fthis = Task.getThis(); + assert(fthis is m_task, "Locked connection destroyed in foreign task."); + auto plc = m_conn in m_pool.m_lockCount; + assert(plc !is null); + assert(*plc >= 1); + //logTrace("conn %s destroy %d", cast(void*)m_conn, *plc-1); + if( --*plc == 0 ){ + m_pool.m_sem.unlock(); + //logTrace("conn %s release", cast(void*)m_conn); + } + m_conn = null; + } + } + + + @property int __refCount() const { return m_pool.m_lockCount.get(m_conn, 0); } + @property inout(Connection) __conn() inout { return m_conn; } + + alias __conn this; +} diff --git a/source/vibe/core/core.d b/source/vibe/core/core.d new file mode 100644 index 0000000..d904849 --- /dev/null +++ b/source/vibe/core/core.d @@ -0,0 +1,1844 @@ +/** + This module contains the core functionality of the vibe.d framework. + + Copyright: © 2012-2016 RejectedSoftware e.K. + License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file. 
+ Authors: Sönke Ludwig
+*/
+module vibe.core.core;
+
+public import vibe.core.task;
+
+import eventcore.core;
+import vibe.core.args;
+import vibe.core.concurrency;
+import vibe.core.log;
+import vibe.core.sync : ManualEvent, createManualEvent;
+import vibe.internal.async;
+//import vibe.utils.array;
+import std.algorithm;
+import std.conv;
+import std.encoding;
+import core.exception;
+import std.exception;
+import std.functional;
+import std.range : empty, front, popFront;
+import std.string;
+import std.variant;
+import std.typecons : Typedef, Tuple, tuple;
+import core.atomic;
+import core.sync.condition;
+import core.sync.mutex;
+import core.stdc.stdlib;
+import core.thread;
+
+alias TaskEventCb = void function(TaskEvent, Task) nothrow;
+
+version(Posix)
+{
+ import core.sys.posix.signal;
+ import core.sys.posix.unistd;
+ import core.sys.posix.pwd;
+
+ static if (__traits(compiles, {import core.sys.posix.grp; getgrgid(0);})) {
+ import core.sys.posix.grp;
+ } else {
+ extern (C) {
+ struct group {
+ char* gr_name;
+ char* gr_passwd;
+ gid_t gr_gid;
+ char** gr_mem;
+ }
+ group* getgrgid(gid_t);
+ group* getgrnam(in char*);
+ }
+ }
+}
+
+version (Windows)
+{
+ import core.stdc.signal;
+}
+
+
+/**************************************************************************************************/
+/* Public functions */
+/**************************************************************************************************/
+
+/**
+ Starts the vibe event loop.
+
+ Note that this function is usually called automatically by the vibe framework. However, if
+ you provide your own main() function, you need to call it manually.
+
+ The event loop will continue running during the whole lifetime of the application.
+ Tasks will be started and handled from within the event loop.
+*/
+int runEventLoop()
+{
+ logDebug("Starting event loop.");
+ s_eventLoopRunning = true;
+ scope (exit) {
+ s_eventLoopRunning = false;
+ s_exitEventLoop = false;
+ st_threadShutdownCondition.notifyAll();
+ }
+
+ // runs any yield()ed tasks first
+ assert(!s_exitEventLoop);
+ s_exitEventLoop = false;
+ notifyIdle();
+ if (getExitFlag()) return 0;
+
+ // handle exit flag in the main thread to exit when
+ // exitEventLoop(true) is called from another thread
+ if (Thread.getThis() is st_threads[0].thread)
+ runTask(toDelegate(&watchExitFlag));
+
+ while (s_yieldedTasks.length || eventDriver.waiterCount) {
+ if (eventDriver.processEvents() == ExitReason.exited)
+ break;
+ }
+
+ logDebug("Event loop done (%s).", eventDriver.waiterCount);
+ return 0;
+}
+
+/**
+ Stops the currently running event loop.
+
+ Calling this function will cause the event loop to stop event processing and
+ the corresponding call to runEventLoop() will return to its caller.
+
+ Params:
+ shutdown_all_threads = If true, exits event loops of all threads -
+ false by default. Note that the event loops of all threads are
+ automatically stopped when the main thread exits, so usually
+ there is no need to set shutdown_all_threads to true.
+*/
+void exitEventLoop(bool shutdown_all_threads = false)
+{
+ logDebug("exitEventLoop called (%s)", shutdown_all_threads);
+
+ assert(s_eventLoopRunning || shutdown_all_threads);
+ if (shutdown_all_threads) {
+ atomicStore(st_term, true);
+ st_threadsSignal.emit();
+ }
+
+ // shutdown the calling thread
+ s_exitEventLoop = true;
+ if (s_eventLoopRunning) eventDriver.exit();
+}
+
+/**
+ Process all pending events without blocking.
+
+ Checks if events are ready to trigger immediately, and runs their callbacks if so.
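+
+ A possible pumping pattern (illustrative sketch; assumes that no blocking
+ event loop is running concurrently):
+ ---
+ while (processEvents()) {
+ // perform per-iteration application work here
+ }
+ ---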
+
+ Returns: Returns false $(I iff) exitEventLoop was called in the process.
+*/
+bool processEvents()
+{
+ if (!eventDriver.processEvents(Duration.max)) return false;
+ notifyIdle();
+ return true;
+}
+
+/**
+ Sets a callback that is called whenever no events are left in the event queue.
+
+ The callback delegate is called whenever all events in the event queue have been
+ processed. Returning true from the callback will cause another idle event to
+ be triggered immediately after processing any events that have arrived in the
+ meantime. Returning false will instead wait until another event has arrived first.
+*/
+void setIdleHandler(void delegate() del)
+{
+ s_idleHandler = { del(); return false; };
+}
+/// ditto
+void setIdleHandler(bool delegate() del)
+{
+ s_idleHandler = del;
+}
+
+/**
+ Runs a new asynchronous task.
+
+ The task will be called synchronously from within the runTask call. It will
+ continue to run until yield() or any of the I/O or wait functions is
+ called.
+
+ Note that the maximum size of all args must not exceed `maxTaskParameterSize`.
+*/
+Task runTask(ARGS...)(void delegate(ARGS) task, ARGS args)
+{
+ auto tfi = makeTaskFuncInfo(task, args);
+ return runTask_internal(tfi);
+}
+
+private Task runTask_internal(ref TaskFuncInfo tfi)
+@safe nothrow {
+ import std.typecons : Tuple, tuple;
+
+ CoreTask f;
+ while (!f && !s_availableFibers.empty) {
+ f = s_availableFibers.back;
+ s_availableFibers.popBack();
+ if (() @trusted nothrow { return f.state; } () != Fiber.State.HOLD) f = null;
+ }
+
+ if (f is null) {
+ // if there is no fiber available, create one.
+ if (s_availableFibers.capacity == 0) s_availableFibers.capacity = 1024;
+ logDebugV("Creating new fiber...");
+ s_fiberCount++;
+ f = new CoreTask;
+ }
+
+ f.m_taskFunc = tfi;
+
+ f.bumpTaskCounter();
+ auto handle = f.task();
+
+ debug Task self = Task.getThis();
+ debug if (s_taskEventCallback) {
+ if (self != Task.init) () @trusted { s_taskEventCallback(TaskEvent.yield, self); } ();
+ () @trusted { s_taskEventCallback(TaskEvent.preStart, handle); } ();
+ }
+ resumeTask(handle, null, true);
+ debug if (s_taskEventCallback) {
+ () @trusted { s_taskEventCallback(TaskEvent.postStart, handle); } ();
+ if (self != Task.init) () @trusted { s_taskEventCallback(TaskEvent.resume, self); } ();
+ }
+
+ return handle;
+}
+
+/**
+ Runs a new asynchronous task in a worker thread.
+
+ Only function pointers with weakly isolated arguments are allowed to be
+ able to guarantee thread-safety.
+*/
+void runWorkerTask(FT, ARGS...)(FT func, auto ref ARGS args)
+ if (is(typeof(*func) == function))
+{
+ foreach (T; ARGS) static assert(isWeaklyIsolated!T, "Argument type "~T.stringof~" is not safe to pass between threads.");
+ runWorkerTask_unsafe(func, args);
+}
+
+/// ditto
+void runWorkerTask(alias method, T, ARGS...)(shared(T) object, auto ref ARGS args)
+ if (is(typeof(__traits(getMember, object, __traits(identifier, method)))))
+{
+ foreach (T; ARGS) static assert(isWeaklyIsolated!T, "Argument type "~T.stringof~" is not safe to pass between threads.");
+ auto func = &__traits(getMember, object, __traits(identifier, method));
+ runWorkerTask_unsafe(func, args);
+}
+
+/**
+ Runs a new asynchronous task in a worker thread, returning the task handle.
+
+ This function will yield and wait for the new task to be created and started
+ in the worker thread, then resume and return it.
+
+ Only function pointers with weakly isolated arguments are allowed to be
+ able to guarantee thread-safety.
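+
+ Minimal sketch of retrieving the handle and communicating with the task
+ (mirrors the unittests below):
+ ---
+ static void worker(Task owner) { owner.send(42); }
+ auto t = runWorkerTaskH(&worker, Task.getThis());
+ assert(receiveOnly!int() == 42);
+ ---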
+*/
+Task runWorkerTaskH(FT, ARGS...)(FT func, auto ref ARGS args)
+ if (is(typeof(*func) == function))
+{
+ foreach (T; ARGS) static assert(isWeaklyIsolated!T, "Argument type "~T.stringof~" is not safe to pass between threads.");
+
+ alias PrivateTask = Typedef!(Task, Task.init, __PRETTY_FUNCTION__);
+ Task caller = Task.getThis();
+
+ // workaround for runWorkerTaskH to work when called outside of a task
+ if (caller == Task.init) {
+ Task ret;
+ runTask({ ret = runWorkerTaskH(func, args); }).join();
+ return ret;
+ }
+
+ assert(caller != Task.init, "runWorkerTaskH can currently only be called from within a task.");
+ static void taskFun(Task caller, FT func, ARGS args) {
+ PrivateTask callee = Task.getThis();
+ caller.prioritySend(callee);
+ mixin(callWithMove!ARGS("func", "args"));
+ }
+ runWorkerTask_unsafe(&taskFun, caller, func, args);
+ return cast(Task)receiveOnly!PrivateTask();
+}
+/// ditto
+Task runWorkerTaskH(alias method, T, ARGS...)(shared(T) object, auto ref ARGS args)
+ if (is(typeof(__traits(getMember, object, __traits(identifier, method)))))
+{
+ foreach (T; ARGS) static assert(isWeaklyIsolated!T, "Argument type "~T.stringof~" is not safe to pass between threads.");
+
+ auto func = &__traits(getMember, object, __traits(identifier, method));
+ alias FT = typeof(func);
+
+ alias PrivateTask = Typedef!(Task, Task.init, __PRETTY_FUNCTION__);
+ Task caller = Task.getThis();
+
+ // workaround for runWorkerTaskH to work when called outside of a task
+ if (caller == Task.init) {
+ Task ret;
+ runTask({ ret = runWorkerTaskH!method(object, args); }).join();
+ return ret;
+ }
+
+ assert(caller != Task.init, "runWorkerTaskH can currently only be called from within a task.");
+ static void taskFun(Task caller, FT func, ARGS args) {
+ PrivateTask callee = Task.getThis();
+ caller.prioritySend(callee);
+ mixin(callWithMove!ARGS("func", "args"));
+ }
+ runWorkerTask_unsafe(&taskFun, caller, func, args);
+ return cast(Task)receiveOnly!PrivateTask();
+}
+
+/// Running a worker task using a function
+unittest {
+ static void workerFunc(int param)
+ {
+ logInfo("Param: %s", param);
+ }
+
+ static void test()
+ {
+ runWorkerTask(&workerFunc, 42);
+ runWorkerTask(&workerFunc, cast(ubyte)42); // implicit conversion #719
+ runWorkerTaskDist(&workerFunc, 42);
+ runWorkerTaskDist(&workerFunc, cast(ubyte)42); // implicit conversion #719
+ }
+}
+
+/// Running a worker task using a class method
+unittest {
+ static class Test {
+ void workerMethod(int param)
+ shared {
+ logInfo("Param: %s", param);
+ }
+ }
+
+ static void test()
+ {
+ auto cls = new shared Test;
+ runWorkerTask!(Test.workerMethod)(cls, 42);
+ runWorkerTask!(Test.workerMethod)(cls, cast(ubyte)42); // #719
+ runWorkerTaskDist!(Test.workerMethod)(cls, 42);
+ runWorkerTaskDist!(Test.workerMethod)(cls, cast(ubyte)42); // #719
+ }
+}
+
+/// Running a worker task using a function and communicating with it
+unittest {
+ static void workerFunc(Task caller)
+ {
+ int counter = 10;
+ while (receiveOnly!string() == "ping" && --counter) {
+ logInfo("pong");
+ caller.send("pong");
+ }
+ caller.send("goodbye");
+
+ }
+
+ static void test()
+ {
+ Task callee = runWorkerTaskH(&workerFunc, Task.getThis);
+ do {
+ logInfo("ping");
+ callee.send("ping");
+ } while (receiveOnly!string() == "pong");
+ }
+
+ static void work719(int) {}
+ static void test719() { runWorkerTaskH(&work719, cast(ubyte)42); }
+}
+
+/// Running a worker task using a class method and communicating with it
+unittest {
+ static class Test {
+ void workerMethod(Task caller)
shared { + int counter = 10; + while (receiveOnly!string() == "ping" && --counter) { + logInfo("pong"); + caller.send("pong"); + } + caller.send("goodbye"); + } + } + + static void test() + { + auto cls = new shared Test; + Task callee = runWorkerTaskH!(Test.workerMethod)(cls, Task.getThis()); + do { + logInfo("ping"); + callee.send("ping"); + } while (receiveOnly!string() == "pong"); + } + + static class Class719 { + void work(int) shared {} + } + static void test719() { + auto cls = new shared Class719; + runWorkerTaskH!(Class719.work)(cls, cast(ubyte)42); + } +} + +unittest { // run and join worker task from outside of a task + __gshared int i = 0; + auto t = runWorkerTaskH({ sleep(5.msecs); i = 1; }); + // FIXME: joining between threads not yet supported + //t.join(); + //assert(i == 1); +} + +private void runWorkerTask_unsafe(CALLABLE, ARGS...)(CALLABLE callable, ref ARGS args) +{ + import std.traits : ParameterTypeTuple; + import vibe.internal.meta.traits : areConvertibleTo; + import vibe.internal.meta.typetuple; + + alias FARGS = ParameterTypeTuple!CALLABLE; + static assert(areConvertibleTo!(Group!ARGS, Group!FARGS), + "Cannot convert arguments '"~ARGS.stringof~"' to function arguments '"~FARGS.stringof~"'."); + + setupWorkerThreads(); + + auto tfi = makeTaskFuncInfo(callable, args); + + synchronized (st_threadsMutex) st_workerTasks ~= tfi; + st_threadsSignal.emit(); +} + + +/** + Runs a new asynchronous task in all worker threads concurrently. + + This function is mainly useful for long-living tasks that distribute their + work across all CPU cores. Only function pointers with weakly isolated + arguments are allowed to be able to guarantee thread-safety. + + The number of tasks started is guaranteed to be equal to + `workerThreadCount`. +*/ +void runWorkerTaskDist(FT, ARGS...)(FT func, auto ref ARGS args) + if (is(typeof(*func) == function)) +{ + foreach (T; ARGS) static assert(isWeaklyIsolated!T, "Argument type "~T.stringof~" is not safe to pass between threads."); + runWorkerTaskDist_unsafe(func, args); +} +/// ditto +void runWorkerTaskDist(alias method, T, ARGS...)(shared(T) object, ARGS args) +{ + auto func = &__traits(getMember, object, __traits(identifier, method)); + foreach (T; ARGS) static assert(isWeaklyIsolated!T, "Argument type "~T.stringof~" is not safe to pass between threads."); + + runWorkerTaskDist_unsafe(func, args); +} + +private void runWorkerTaskDist_unsafe(CALLABLE, ARGS...)(ref CALLABLE callable, ref ARGS args) +{ + import std.traits : ParameterTypeTuple; + import vibe.internal.meta.traits : areConvertibleTo; + import vibe.internal.meta.typetuple; + + alias FARGS = ParameterTypeTuple!CALLABLE; + static assert(areConvertibleTo!(Group!ARGS, Group!FARGS), + "Cannot convert arguments '"~ARGS.stringof~"' to function arguments '"~FARGS.stringof~"'."); + + setupWorkerThreads(); + + auto tfi = makeTaskFuncInfo(callable, args); + + synchronized (st_threadsMutex) { + foreach (ref ctx; st_threads) + if (ctx.isWorker) + ctx.taskQueue ~= tfi; + } + st_threadsSignal.emit(); +} + +private TaskFuncInfo makeTaskFuncInfo(CALLABLE, ARGS...)(ref CALLABLE callable, ref ARGS args) +{ + import std.algorithm : move; + import std.traits : hasElaborateAssign; + + struct TARGS { ARGS expand; } + + static assert(CALLABLE.sizeof <= TaskFuncInfo.callable.length); + static assert(TARGS.sizeof <= maxTaskParameterSize, + "The arguments passed to run(Worker)Task must not exceed "~ + maxTaskParameterSize.to!string~" bytes in total size."); + + static void callDelegate(TaskFuncInfo* tfi) { + 
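// trampoline: recovers the statically typed callable and arguments that makeTaskFuncInfo stored in the type-erased buffers
+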
assert(tfi.func is &callDelegate);
+
+ // copy original call data to stack
+ CALLABLE c;
+ TARGS args;
+ move(*(cast(CALLABLE*)tfi.callable.ptr), c);
+ move(*(cast(TARGS*)tfi.args.ptr), args);
+
+ // reset the info
+ tfi.func = null;
+
+ // make the call
+ mixin(callWithMove!ARGS("c", "args.expand"));
+ }
+
+ TaskFuncInfo tfi;
+ tfi.func = &callDelegate;
+
+ () @trusted {
+ static if (hasElaborateAssign!CALLABLE) tfi.initCallable!CALLABLE();
+ static if (hasElaborateAssign!TARGS) tfi.initArgs!TARGS();
+ tfi.typedCallable!CALLABLE = callable;
+ foreach (i, A; ARGS) {
+ static if (needsMove!A) args[i].move(tfi.typedArgs!TARGS.expand[i]);
+ else tfi.typedArgs!TARGS.expand[i] = args[i];
+ }
+ } ();
+ return tfi;
+}
+
+import core.cpuid : threadsPerCPU;
+/**
+ Sets up num worker threads.
+
+ This function gives explicit control over the number of worker threads.
+ Note that, to have an effect, it must be called prior to any of the related
+ worker task functions, which otherwise set up the default number of worker
+ threads implicitly.
+
+ Params:
+ num = The number of worker threads to initialize. Defaults to
+ `logicalProcessorCount`.
+ See_also: `runWorkerTask`, `runWorkerTaskH`, `runWorkerTaskDist`
+*/
+public void setupWorkerThreads(uint num = logicalProcessorCount())
+{
+ static bool s_workerThreadsStarted = false;
+ if (s_workerThreadsStarted) return;
+ s_workerThreadsStarted = true;
+
+ synchronized (st_threadsMutex) {
+ if (st_threads.any!(t => t.isWorker))
+ return;
+
+ foreach (i; 0 .. num) {
+ auto thr = new Thread(&workerThreadFunc);
+ thr.name = format("Vibe Task Worker #%s", i);
+ st_threads ~= ThreadContext(thr, true);
+ thr.start();
+ }
+ }
+}
+
+
+/**
+ Determines the number of logical processors in the system.
+
+ This number includes virtual cores on hyper-threading enabled CPUs.
+*/
+public @property uint logicalProcessorCount()
+{
+ version (linux) {
+ import core.sys.linux.sys.sysinfo;
+ return get_nprocs();
+ } else version (OSX) {
+ int count;
+ size_t count_len = count.sizeof;
+ sysctlbyname("hw.logicalcpu", &count, &count_len, null, 0);
+ return cast(uint)count;
+ } else version (Windows) {
+ import core.sys.windows.windows;
+ SYSTEM_INFO sysinfo;
+ GetSystemInfo(&sysinfo);
+ return sysinfo.dwNumberOfProcessors;
+ } else static assert(false, "Unsupported OS!");
+}
+version (OSX) private extern(C) int sysctlbyname(const(char)* name, void* oldp, size_t* oldlen, void* newp, size_t newlen);
+
+/**
+ Suspends the execution of the calling task to let other tasks and events be
+ handled.
+
+ Calling this function in short intervals is recommended if long CPU
+ computations are carried out by a task. It can also be used in conjunction
+ with Signals to implement cross-fiber events with no polling.
+
+ Throws:
+ May throw an `InterruptException` if `Task.interrupt()` gets called on
+ the calling task.
+*/
+void yield()
+@safe {
+ // throw any deferred exceptions
+ processDeferredExceptions();
+
+ auto t = CoreTask.getThis();
+ if (t && t !is CoreTask.ms_coreTask) {
+ assert(!t.m_queue, "Calling yield() when already yielded!?");
+ if (!t.m_queue)
+ s_yieldedTasks.insertBack(t);
+ scope (exit) assert(t.m_queue is null, "Task not removed from yielders queue after being resumed.");
+ rawYield();
+ } else {
+ // Let yielded tasks execute
+ () @trusted { notifyIdle(); } ();
+ }
+}
+
+
+/**
+ Yields execution of this task until an event wakes it up again.
+
+ Beware that the task will starve if no event wakes it up.
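+
+ Unlike `yield`, the task is not rescheduled automatically. A sketch of the
+ intended pattern, where some other code later resumes the task:
+ ---
+ // park the calling task until an event handler wakes it up
+ rawYield();
+ ---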
+*/
+void rawYield()
+@safe {
+ yieldForEvent();
+}
+
+/**
+ Suspends the execution of the calling task for the specified amount of time.
+
+ Note that other tasks of the same thread will continue to run during the
+ wait time, in contrast to $(D core.thread.Thread.sleep), which shouldn't be
+ used in vibe.d applications.
+*/
+void sleep(Duration timeout)
+{
+ assert(timeout >= 0.seconds, "Argument to sleep must not be negative.");
+ if (timeout <= 0.seconds) return;
+ auto tm = setTimer(timeout, null);
+ tm.wait();
+}
+///
+unittest {
+ import vibe.core.core : sleep;
+ import vibe.core.log : logInfo;
+ import core.time : msecs;
+
+ void test()
+ {
+ logInfo("Sleeping for half a second...");
+ sleep(500.msecs);
+ logInfo("Done sleeping.");
+ }
+}
+
+
+/**
+ Returns a new armed timer.
+
+ Note that timers can only work if an event loop is running.
+
+ Params:
+ timeout = Determines the minimum amount of time that elapses before the timer fires.
+ callback = This delegate will be called when the timer fires.
+ periodic = Specifies if the timer fires repeatedly or only once.
+
+ Returns:
+ Returns a Timer object that can be used to identify and modify the timer.
+
+ See_also: createTimer
+*/
+Timer setTimer(Duration timeout, void delegate() nothrow @safe callback, bool periodic = false)
+{
+ auto tm = createTimer(callback);
+ tm.rearm(timeout, periodic);
+ return tm;
+}
+///
+unittest {
+ void printTime()
+ {
+ import std.datetime;
+ logInfo("The time is: %s", Clock.currTime());
+ }
+
+ void test()
+ {
+ import vibe.core.core;
+ // start a periodic timer that prints the time every second
+ setTimer(1.seconds, toDelegate(&printTime), true);
+ }
+}
+
+
+/**
+ Creates a new timer without arming it.
+
+ See_also: setTimer
+*/
+Timer createTimer(void delegate() nothrow @safe callback)
+{
+ void cb(TimerID tm)
+ nothrow @safe {
+ if (callback !is null)
+ callback();
+ }
+ return Timer(eventDriver.createTimer(&cb)); // FIXME: avoid heap closure!
+}
+
+
+/**
+ Creates an event to wait on an existing file descriptor.
+
+ The file descriptor usually needs to be a non-blocking socket for this to
+ work.
+
+ Params:
+ file_descriptor = The Posix file descriptor to watch
+ event_mask = Specifies which events will be listened for
+
+ Returns:
+ Returns a newly created FileDescriptorEvent associated with the given
+ file descriptor.
+*/
+FileDescriptorEvent createFileDescriptorEvent(int file_descriptor, FileDescriptorEvent.Trigger event_mask)
+{
+ return FileDescriptorEvent(file_descriptor, event_mask);
+}
+
+
+/**
+ Sets the stack size to use for tasks.
+
+ The default stack size is set to 512 KiB on 32-bit systems and to 16 MiB
+ on 64-bit systems, which is sufficient for most tasks. Tuning this value
+ can be used to reduce memory usage for large numbers of concurrent tasks
+ or to avoid stack overflows for applications with heavy stack use.
+
+ Note that this function must be called at initialization time, before any
+ task is started, to have an effect.
+
+ Also note that the stack will initially not consume actual physical memory -
+ it just reserves virtual address space. Only once the stack gets actually
+ filled up with data will physical memory then be reserved page by page. This
+ means that the stack can safely be set to large sizes on 64-bit systems
+ without having to worry about memory usage.
+*/
+void setTaskStackSize(size_t sz)
+{
+ s_taskStackSize = sz;
+}
+
+
+/**
+ The number of worker threads used for processing worker tasks.
+
+ Note that this function will cause the worker threads to be started,
+ if they haven't already.
+
+ See_also: `runWorkerTask`, `runWorkerTaskH`, `runWorkerTaskDist`,
+ `setupWorkerThreads`
+*/
+@property size_t workerThreadCount()
+ out(count) { assert(count > 0); }
+body {
+ setupWorkerThreads();
+ return st_threads.count!(c => c.isWorker);
+}
+
+
+/**
+ Sets the effective user and group ID to the ones configured for privilege lowering.
+
+ This function is useful for services run as root to give up the privileges that
+ they only need for initialization (such as listening on ports <= 1024 or opening
+ system log files).
+*/
+void lowerPrivileges(string uname, string gname)
+{
+ if (!isRoot()) return;
+ if (uname != "" || gname != "") {
+ static bool tryParse(T)(string s, out T n)
+ {
+ import std.conv, std.ascii;
+ if (!isDigit(s[0])) return false;
+ n = parse!T(s);
+ return s.length==0;
+ }
+ int uid = -1, gid = -1;
+ if (uname != "" && !tryParse(uname, uid)) uid = getUID(uname);
+ if (gname != "" && !tryParse(gname, gid)) gid = getGID(gname);
+ setUID(uid, gid);
+ } else logWarn("Vibe was run as root, and no user/group has been specified for privilege lowering. Running with full permissions.");
+}
+
+/// ditto
+void lowerPrivileges()
+{
+ lowerPrivileges(s_privilegeLoweringUserName, s_privilegeLoweringGroupName);
+}
+
+
+/**
+ Sets a callback that is invoked whenever a task changes its status.
+
+ This function is useful mostly for implementing debuggers that
+ analyze the lifetime of tasks, including task switches. Note that
+ the callback will only be called for debug builds.
+*/
+void setTaskEventCallback(TaskEventCb func)
+{
+ debug s_taskEventCallback = func;
+}
+
+
+/**
+ A version string representing the current vibe version
+*/
+enum vibeVersionString = "0.7.27";
+
+
+/**
+ The maximum combined size of all parameters passed to a task delegate
+
+ See_Also: runTask
+*/
+enum maxTaskParameterSize = 128;
+
+
+/**
+ Generic file descriptor event.
+
+ This kind of event can be used to wait for events on a non-blocking
+ file descriptor. Note that this can usually only be used on socket
+ based file descriptors.
+*/
+struct FileDescriptorEvent {
+ /** Event mask selecting the kind of events to listen for.
+ */
+ enum Trigger {
+ none = 0, /// Match no event (invalid value)
+ read = 1<<0, /// React on read-ready events
+ write = 1<<1, /// React on write-ready events
+ any = read|write /// Match any kind of event
+ }
+
+ private this(int fd, Trigger event_mask)
+ {
+ assert(false);
+ }
+
+ /** Waits for the selected event to occur.
+
+ Params:
+ which = Optional event mask to react only on certain events
+ timeout = Maximum time to wait for an event
+
+ Returns:
+ The overload taking the timeout parameter returns true if
+ an event was received on time and false otherwise.
+ */
+ void wait(Trigger which = Trigger.any)
+ {
+ wait(Duration.max, which);
+ }
+ /// ditto
+ bool wait(Duration timeout, Trigger which = Trigger.any)
+ {
+ assert(false);
+ }
+}
+
+
+/**
+ Represents a timer.
+*/
+struct Timer {
+ private {
+ EventDriver m_driver;
+ TimerID m_id;
+ debug uint m_magicNumber = 0x4d34f916;
+ }
+
+ private this(TimerID id)
+ {
+ m_driver = eventDriver;
+ m_id = id;
+ }
+
+ this(this)
+ {
+ debug assert(m_magicNumber == 0x4d34f916);
+ if (m_driver) m_driver.addRef(m_id);
+ }
+
+ ~this()
+ {
+ debug assert(m_magicNumber == 0x4d34f916);
+ if (m_driver) m_driver.releaseRef(m_id);
+ }
+
+ /// True if the timer is yet to fire.
+ @property bool pending() { return m_driver.isTimerPending(m_id); }
+
+ /// The internal ID of the timer.
+ @property size_t id() const { return m_id; }
+
+ bool opCast() const { return m_driver !is null; }
+
+ /** Resets the timer to the specified timeout
+ */
+ void rearm(Duration dur, bool periodic = false)
+ in { assert(dur > 0.seconds); }
+ body { m_driver.setTimer(m_id, dur, periodic ? dur : 0.seconds); }
+
+ /** Resets the timer and avoids any firing.
+ */
+ void stop() { m_driver.stopTimer(m_id); }
+
+ /** Waits until the timer fires.
+ */
+ void wait()
+ {
+ assert (!m_driver.isTimerPeriodic(m_id), "Cannot wait for a periodic timer.");
+ if (!this.pending) return;
+ m_driver.asyncAwait!"waitTimer"(m_id);
+ }
+}
+
+
+/**
+ Implements a task local storage variable.
+
+ Task local variables, similar to thread local variables, exist separately
+ in each task. Consequently, they do not need any form of synchronization
+ when accessing them.
+
+ Note, however, that each TaskLocal variable will increase the memory footprint
+ of any task that uses task local storage. There is also an overhead to access
+ TaskLocal variables, higher than for thread local variables, but generally
+ still O(1) (since actual storage acquisition is done lazily, the first access
+ can require a memory allocation with unknown computational costs).
+
+ Notice:
+ TaskLocal instances MUST be declared as static/global thread-local
+ variables. Defining them as a temporary/stack variable will cause
+ crashes or data corruption!
+
+ Examples:
+ ---
+ TaskLocal!string s_myString = "world";
+
+ void taskFunc()
+ {
+ assert(s_myString == "world");
+ s_myString = "hello";
+ assert(s_myString == "hello");
+ }
+
+ shared static this()
+ {
+ // both tasks will get independent storage for s_myString
+ runTask(&taskFunc);
+ runTask(&taskFunc);
+ }
+ ---
+*/
+struct TaskLocal(T)
+{
+ private {
+ size_t m_offset = size_t.max;
+ size_t m_id;
+ T m_initValue;
+ bool m_hasInitValue = false;
+ }
+
+ this(T init_val) { m_initValue = init_val; m_hasInitValue = true; }
+
+ @disable this(this);
+
+ void opAssign(T value) { this.storage = value; }
+
+ @property ref T storage()
+ {
+ auto fiber = CoreTask.getThis();
+
+ // lazily register in FLS storage
+ if (m_offset == size_t.max) {
+ static assert(T.alignof <= 8, "Unsupported alignment for type "~T.stringof);
+ assert(CoreTask.ms_flsFill % 8 == 0, "Misaligned fiber local storage pool.");
+ m_offset = CoreTask.ms_flsFill;
+ m_id = CoreTask.ms_flsCounter++;
+
+
+ CoreTask.ms_flsFill += T.sizeof;
+ while (CoreTask.ms_flsFill % 8 != 0)
+ CoreTask.ms_flsFill++;
+ }
+
+ // make sure the current fiber has enough FLS storage
+ if (fiber.m_fls.length < CoreTask.ms_flsFill) {
+ fiber.m_fls.length = CoreTask.ms_flsFill + 128;
+ fiber.m_flsInit.length = CoreTask.ms_flsCounter + 64;
+ }
+
+ // return (possibly default initialized) value
+ auto data = fiber.m_fls.ptr[m_offset .. m_offset+T.sizeof];
+ if (!fiber.m_flsInit[m_id]) {
+ fiber.m_flsInit[m_id] = true;
+ import std.traits : hasElaborateDestructor, hasAliasing;
+ static if (hasElaborateDestructor!T || hasAliasing!T) {
+ void function(void[], size_t) destructor = (void[] fls, size_t offset){
+ static if (hasElaborateDestructor!T) {
+ auto obj = cast(T*)&fls[offset];
+ // call the destructor on the object if a custom one is declared
+ obj.destroy();
+ }
+ else static if (hasAliasing!T) {
+ // zero the memory to avoid false pointers
+ foreach (size_t i; offset ..
offset + T.sizeof) { + ubyte* u = cast(ubyte*)&fls[i]; + *u = 0; + } + } + }; + FLSInfo fls_info; + fls_info.fct = destructor; + fls_info.offset = m_offset; + + // make sure flsInfo has enough space + if (fiber.ms_flsInfo.length <= m_id) + fiber.ms_flsInfo.length = m_id + 64; + + fiber.ms_flsInfo[m_id] = fls_info; + } + + if (m_hasInitValue) { + static if (__traits(compiles, emplace!T(data, m_initValue))) + emplace!T(data, m_initValue); + else assert(false, "Cannot emplace initialization value for type "~T.stringof); + } else emplace!T(data); + } + return (cast(T[])data)[0]; + } + + alias storage this; +} + +private struct FLSInfo { + void function(void[], size_t) fct; + size_t offset; + void destroy(void[] fls) { + fct(fls, offset); + } +} + +/** + High level state change events for a Task +*/ +enum TaskEvent { + preStart, /// Just about to invoke the fiber which starts execution + postStart, /// After the fiber has returned for the first time (by yield or exit) + start, /// Just about to start execution + yield, /// Temporarily paused + resume, /// Resumed from a prior yield + end, /// Ended normally + fail /// Ended with an exception +} + + +/**************************************************************************************************/ +/* private types */ +/**************************************************************************************************/ + +private class CoreTask : TaskFiber { + import std.bitmanip; + private { + static CoreTask ms_coreTask; + CoreTask m_nextInQueue; + CoreTaskQueue* m_queue; + TaskFuncInfo m_taskFunc; + Exception m_exception; + Task[] m_yielders; + + // task local storage + static FLSInfo[] ms_flsInfo; + static size_t ms_flsFill = 0; // thread-local + static size_t ms_flsCounter = 0; + BitArray m_flsInit; + void[] m_fls; + } + + static CoreTask getThis() + @safe nothrow { + auto f = () @trusted nothrow { + return Fiber.getThis(); + } (); + if (f) return cast(CoreTask)f; + if (!ms_coreTask) ms_coreTask = new CoreTask; + return ms_coreTask; + } + + this() + @trusted nothrow { + super(&run, s_taskStackSize); + } + + @property State state() + @trusted const nothrow { + return super.state; + } + + @property size_t taskCounter() const { return m_taskCounter; } + + private void run() + { + version (VibeDebugCatchAll) alias UncaughtException = Throwable; + else alias UncaughtException = Exception; + try { + while(true){ + while (!m_taskFunc.func) { + try { + Fiber.yield(); + } catch( Exception e ){ + logWarn("CoreTaskFiber was resumed with exception but without active task!"); + logDiagnostic("Full error: %s", e.toString().sanitize()); + } + } + + auto task = m_taskFunc; + m_taskFunc = TaskFuncInfo.init; + Task handle = this.task; + try { + m_running = true; + scope(exit) m_running = false; + + std.concurrency.thisTid; // force creation of a message box + + debug if (s_taskEventCallback) s_taskEventCallback(TaskEvent.start, handle); + if (!s_eventLoopRunning) { + logTrace("Event loop not running at task start - yielding."); + .yield(); + logTrace("Initial resume of task."); + } + task.func(&task); + debug if (s_taskEventCallback) s_taskEventCallback(TaskEvent.end, handle); + } catch( Exception e ){ + debug if (s_taskEventCallback) s_taskEventCallback(TaskEvent.fail, handle); + import std.encoding; + logCritical("Task terminated with uncaught exception: %s", e.msg); + logDebug("Full error: %s", e.toString().sanitize()); + } + + this.tidInfo.ident = Tid.init; // clear message box + + // check for any unhandled deferred exceptions + if (m_exception !is 
null) {
+ if (cast(InterruptException)m_exception) {
+ logDebug("InterruptException not handled by task before exit.");
+ } else {
+ logCritical("Deferred exception not handled by task before exit: %s", m_exception.msg);
+ logDebug("Full error: %s", m_exception.toString().sanitize());
+ }
+ }
+
+ foreach (t; m_yielders) s_yieldedTasks.insertBack(cast(CoreTask)t.fiber);
+ m_yielders.length = 0;
+
+ // make sure that the task does not get left behind in the yielder queue if terminated during yield()
+ if (m_queue) {
+ resumeYieldedTasks();
+ assert(m_queue is null, "Still in yielder queue at the end of task after resuming all yielders!?");
+ }
+
+ // zero the fls initialization BitArray for memory safety
+ foreach (size_t i, ref bool b; m_flsInit) {
+ if (b) {
+ if (ms_flsInfo !is null && ms_flsInfo.length > i && ms_flsInfo[i] != FLSInfo.init)
+ ms_flsInfo[i].destroy(m_fls);
+ b = false;
+ }
+ }
+
+ // make the fiber available for the next task
+ if (s_availableFibers.full)
+ s_availableFibers.capacity = 2 * s_availableFibers.capacity;
+
+ s_availableFibers.put(this);
+ }
+ } catch(UncaughtException th) {
+ logCritical("CoreTaskFiber was terminated unexpectedly: %s", th.msg);
+ logDiagnostic("Full error: %s", th.toString().sanitize());
+ s_fiberCount--;
+ }
+ }
+
+ override void join()
+ {
+ auto caller = Task.getThis();
+ if (!m_running) return;
+ if (caller != Task.init) {
+ assert(caller.fiber !is this, "A task cannot join itself.");
+ assert(caller.thread is this.thread, "Joining tasks in foreign threads is currently not supported.");
+ m_yielders ~= caller;
+ } else assert(Thread.getThis() is this.thread, "Joining tasks in different threads is not yet supported.");
+ auto run_count = m_taskCounter;
+ if (caller == Task.init) .yield(); // let the task continue (it must be yielded currently)
+ while (m_running && run_count == m_taskCounter) rawYield();
+ }
+
+ override void interrupt()
+ {
+ auto caller = Task.getThis();
+ if (caller != Task.init) {
+ assert(caller != this.task, "A task cannot interrupt itself.");
+ assert(caller.thread is this.thread, "Interrupting tasks in different threads is not yet supported.");
+ } else assert(Thread.getThis() is this.thread, "Interrupting tasks in different threads is not yet supported.");
+ yieldAndResumeTask(this.task, new InterruptException);
+ }
+
+ override void terminate()
+ {
+ assert(false, "Not implemented");
+ }
+}
+
+
+private void setupGcTimer()
+{
+ s_gcTimer = createTimer(() @trusted {
+ import core.memory;
+ logTrace("gc idle collect");
+ GC.collect();
+ GC.minimize();
+ s_ignoreIdleForGC = true;
+ });
+ s_gcCollectTimeout = dur!"seconds"(2);
+}
+
+package(vibe) void yieldForEventDeferThrow()
+@safe nothrow {
+ yieldForEventDeferThrow(Task.getThis());
+}
+
+package(vibe) void processDeferredExceptions()
+@safe {
+ processDeferredExceptions(Task.getThis());
+}
+
+package(vibe) void yieldForEvent()
+@safe {
+ auto task = Task.getThis();
+ processDeferredExceptions(task);
+ yieldForEventDeferThrow(task);
+ processDeferredExceptions(task);
+}
+
+package(vibe) void resumeTask(Task task, Exception event_exception = null)
+@safe nothrow {
+ assert(Task.getThis() == Task.init, "Calling resumeTask from another task.");
+ resumeTask(task, event_exception, false);
+}
+
+package(vibe) void yieldAndResumeTask(Task task, Exception event_exception = null)
+@safe {
+ auto thisct = CoreTask.getThis();
+
+ if (thisct is null || thisct is CoreTask.ms_coreTask) {
+ resumeTask(task, event_exception);
+ return;
+ }
+
+ auto otherct =
cast(CoreTask)task.fiber;
+ assert(!thisct || otherct.thread is thisct.thread, "Resuming task in foreign thread.");
+ assert(() @trusted { return otherct.state; } () == Fiber.State.HOLD, "Resuming fiber that is not on HOLD.");
+
+ if (event_exception) otherct.m_exception = event_exception;
+ if (!otherct.m_queue) s_yieldedTasks.insertBack(otherct);
+ yield();
+}
+
+package(vibe) void resumeTask(Task task, Exception event_exception, bool initial_resume)
+@safe nothrow {
+ assert(initial_resume || task.running, "Resuming terminated task.");
+ resumeCoreTask(cast(CoreTask)task.fiber, event_exception);
+}
+
+package(vibe) void resumeCoreTask(CoreTask ctask, Exception event_exception = null)
+nothrow @safe {
+ assert(ctask.thread is () @trusted { return Thread.getThis(); } (), "Resuming task in foreign thread.");
+ assert(() @trusted nothrow { return ctask.state; } () == Fiber.State.HOLD, "Resuming fiber that is not on HOLD.");
+ assert(ctask.m_queue is null, "Manually resuming task that is already scheduled to be resumed.");
+
+ if( event_exception ){
+ extrap();
+ ctask.m_exception = event_exception;
+ }
+
+ auto uncaught_exception = () @trusted nothrow { return ctask.call!(Fiber.Rethrow.no)(); } ();
+
+ if (uncaught_exception) {
+ auto th = cast(Throwable)uncaught_exception;
+ assert(th, "Fiber returned exception object that is not a Throwable!?");
+ extrap();
+
+ assert(() @trusted nothrow { return ctask.state; } () == Fiber.State.TERM);
+ logError("Task terminated with unhandled exception: %s", th.msg);
+ logDebug("Full error: %s", () @trusted { return th.toString().sanitize; } ());
+
+ // always pass Errors on
+ if (auto err = cast(Error)th) throw err;
+ }
+}
+
+package(vibe) void notifyIdle()
+{
+ bool again = !getExitFlag();
+ while (again) {
+ if (s_idleHandler)
+ again = s_idleHandler();
+ else again = false;
+
+ resumeYieldedTasks();
+
+ again = (again || !s_yieldedTasks.empty) && !getExitFlag();
+
+ if (again) {
+ auto er = eventDriver.processEvents(0.seconds);
+ if (er.among!(ExitReason.exited, ExitReason.idle)) {
+ logDebug("Setting exit flag due to driver signalling exit");
+ s_exitEventLoop = true;
+ return;
+ }
+ }
+ }
+ if (!s_yieldedTasks.empty) logDebug("Exiting from idle processing although there are still yielded tasks");
+
+ if (!s_ignoreIdleForGC && s_gcTimer) {
+ s_gcTimer.rearm(s_gcCollectTimeout);
+ } else s_ignoreIdleForGC = false;
+}
+
+private void resumeYieldedTasks()
+{
+ for (auto limit = s_yieldedTasks.length; limit > 0 && !s_yieldedTasks.empty; limit--) {
+ auto tf = s_yieldedTasks.front;
+ s_yieldedTasks.popFront();
+ if (tf.state == Fiber.State.HOLD) resumeCoreTask(tf);
+ }
+}
+
+private void yieldForEventDeferThrow(Task task)
+@safe nothrow {
+ if (task != Task.init) {
+ debug if (s_taskEventCallback) () @trusted { s_taskEventCallback(TaskEvent.yield, task); } ();
+ () @trusted { task.fiber.yield(); } ();
+ debug if (s_taskEventCallback) () @trusted { s_taskEventCallback(TaskEvent.resume, task); } ();
+ // leave fiber.m_exception untouched, so that it gets thrown on the next yieldForEvent call
+ } else {
+ assert(!s_eventLoopRunning, "Event processing outside of a fiber should only happen before the event loop is running!?");
+ s_eventException = null;
+ eventDriver.processEvents();
+ // leave m_eventException untouched, so that it gets thrown on the next yieldForEvent call
+ }
+}
+
+private void processDeferredExceptions(Task task)
+@safe {
+ if (task != Task.init) {
+ auto fiber = cast(CoreTask)task.fiber;
+ if (auto e = fiber.m_exception) {
+
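// take ownership of the deferred exception before rethrowing, so that it is delivered exactly once
+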
fiber.m_exception = null; + throw e; + } + } else { + if (auto e = s_eventException) { + s_eventException = null; + throw e; + } + } +} + +private struct ThreadContext { + Thread thread; + bool isWorker; + TaskFuncInfo[] taskQueue; + + this(Thread thr, bool worker) { this.thread = thr; this.isWorker = worker; } +} + +private struct TaskFuncInfo { + void function(TaskFuncInfo*) func; + void[2*size_t.sizeof] callable = void; + void[maxTaskParameterSize] args = void; + + @property ref C typedCallable(C)() + { + static assert(C.sizeof <= callable.sizeof); + return *cast(C*)callable.ptr; + } + + @property ref A typedArgs(A)() + { + static assert(A.sizeof <= args.sizeof); + return *cast(A*)args.ptr; + } + + void initCallable(C)() + { + C cinit; + this.callable[0 .. C.sizeof] = cast(void[])(&cinit)[0 .. 1]; + } + + void initArgs(A)() + { + A ainit; + this.args[0 .. A.sizeof] = cast(void[])(&ainit)[0 .. 1]; + } +} + +alias TaskArgsVariant = VariantN!maxTaskParameterSize; + +/**************************************************************************************************/ +/* private functions */ +/**************************************************************************************************/ + +private { + static if ((void*).sizeof >= 8) enum defaultTaskStackSize = 16*1024*1024; + else enum defaultTaskStackSize = 512*1024; + + __gshared size_t s_taskStackSize = defaultTaskStackSize; + Duration s_gcCollectTimeout; + Timer s_gcTimer; + bool s_ignoreIdleForGC = false; + Exception s_eventException; + + __gshared core.sync.mutex.Mutex st_threadsMutex; + __gshared ManualEvent st_threadsSignal; + __gshared ThreadContext[] st_threads; + __gshared TaskFuncInfo[] st_workerTasks; + __gshared Condition st_threadShutdownCondition; + __gshared debug TaskEventCb s_taskEventCallback; + shared bool st_term = false; + + bool s_exitEventLoop = false; + bool s_eventLoopRunning = false; + bool delegate() s_idleHandler; + CoreTaskQueue s_yieldedTasks; + Variant[string] s_taskLocalStorageGlobal; // for use outside of a task + FixedRingBuffer!CoreTask s_availableFibers; + size_t s_fiberCount; + + string s_privilegeLoweringUserName; + string s_privilegeLoweringGroupName; +} + +private bool getExitFlag() +{ + return s_exitEventLoop || atomicLoad(st_term); +} + +// per process setup +shared static this() +{ + version(Windows){ + version(VibeLibeventDriver) enum need_wsa = true; + else version(VibeWin32Driver) enum need_wsa = true; + else enum need_wsa = false; + static if (need_wsa) { + logTrace("init winsock"); + // initialize WinSock2 + import std.c.windows.winsock; + WSADATA data; + WSAStartup(0x0202, &data); + + } + } + + // COMPILER BUG: Must be some kind of module constructor order issue: + // without this, the stdout/stderr handles are not initialized before + // the log module is set up. + import std.stdio; File f; f.close(); + + initializeLogModule(); + + logTrace("create driver core"); + + st_threadsMutex = new Mutex; + st_threadShutdownCondition = new Condition(st_threadsMutex); + + version(Posix){ + logTrace("setup signal handler"); + // support proper shutdown using signals + sigset_t sigset; + sigemptyset(&sigset); + sigaction_t siginfo; + siginfo.sa_handler = &onSignal; + siginfo.sa_mask = sigset; + siginfo.sa_flags = SA_RESTART; + sigaction(SIGINT, &siginfo, null); + sigaction(SIGTERM, &siginfo, null); + + siginfo.sa_handler = &onBrokenPipe; + sigaction(SIGPIPE, &siginfo, null); + } + + version(Windows){ + // WORKAROUND: we don't care about viral @nogc attribute here! 
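+ // (the casts below coerce &onSignal to the exact handler type that signal() expects, including its attributes)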
+ import std.traits;
+ signal(SIGABRT, cast(ParameterTypeTuple!signal[1])&onSignal);
+ signal(SIGTERM, cast(ParameterTypeTuple!signal[1])&onSignal);
+ signal(SIGINT, cast(ParameterTypeTuple!signal[1])&onSignal);
+ }
+
+ auto thisthr = Thread.getThis();
+ thisthr.name = "Main";
+ assert(st_threads.length == 0, "Main thread not the first thread!?");
+ st_threads ~= ThreadContext(thisthr, false);
+
+ st_threadsSignal = createManualEvent();
+
+ version(VibeIdleCollect) {
+ logTrace("setup gc");
+ setupGcTimer();
+ }
+
+ version (VibeNoDefaultArgs) {}
+ else {
+ readOption("uid|user", &s_privilegeLoweringUserName, "Sets the user name or id used for privilege lowering.");
+ readOption("gid|group", &s_privilegeLoweringGroupName, "Sets the group name or id used for privilege lowering.");
+ }
+
+ import std.concurrency;
+ scheduler = new VibedScheduler;
+}
+
+shared static ~this()
+{
+ eventDriver.dispose();
+
+ size_t tasks_left;
+
+ synchronized (st_threadsMutex) {
+ if( !st_workerTasks.empty ) tasks_left = st_workerTasks.length;
+ }
+
+ if (!s_yieldedTasks.empty) tasks_left += s_yieldedTasks.length;
+ if (tasks_left > 0) {
+ logWarn("There were still %d tasks running at exit.", tasks_left);
+ }
+}
+
+// per thread setup
+static this()
+{
+ /// workaround for:
+ // object.Exception@src/rt/minfo.d(162): Aborting: Cycle detected between modules with ctors/dtors:
+ // vibe.core.core -> vibe.core.drivers.native -> vibe.core.drivers.libasync -> vibe.core.core
+ if (Thread.getThis().isDaemon && Thread.getThis().name == "CmdProcessor") return;
+
+ assert(st_threadsSignal);
+
+ auto thisthr = Thread.getThis();
+ synchronized (st_threadsMutex)
+ if (!st_threads.any!(c => c.thread is thisthr))
+ st_threads ~= ThreadContext(thisthr, false);
+
+ //CoreTask.ms_coreTask = new CoreTask;
+}
+
+static ~this()
+{
+ version(VibeLibasyncDriver) {
+ import vibe.core.drivers.libasync;
+ if (LibasyncDriver.isControlThread)
+ return;
+ }
+ auto thisthr = Thread.getThis();
+
+ bool is_main_thread = false;
+
+ synchronized (st_threadsMutex) {
+ auto idx = st_threads.countUntil!(c => c.thread is thisthr);
+
+ // if we are the main thread, wait for all others before terminating
+ is_main_thread = idx == 0;
+ if (is_main_thread) { // we are the main thread, wait for others
+ atomicStore(st_term, true);
+ st_threadsSignal.emit();
+ // wait for all non-daemon threads to shut down
+ while (st_threads[1 .. $].any!(th => !th.thread.isDaemon)) {
+ logDiagnostic("Main thread still waiting for other threads: %s",
+ st_threads[1 .. $].map!(t => t.thread.name ~ (t.isWorker ?
" (worker thread)" : "")).join(", ")); + st_threadShutdownCondition.wait(); + } + logDiagnostic("Main thread exiting"); + } + + assert(idx >= 0, "No more threads registered"); + if (idx >= 0) { + st_threads[idx] = st_threads[$-1]; + st_threads.length--; + } + } + + // delay deletion of the main event driver to "~shared static this()" + if (!is_main_thread) eventDriver.dispose(); + + st_threadShutdownCondition.notifyAll(); +} + +private void workerThreadFunc() +nothrow { + try { + assert(st_threadsSignal); + if (getExitFlag()) return; + logDebug("entering worker thread"); + runTask(toDelegate(&handleWorkerTasks)); + logDebug("running event loop"); + if (!getExitFlag()) runEventLoop(); + logDebug("Worker thread exit."); + } catch (Exception e) { + scope (failure) exit(-1); + logFatal("Worker thread terminated due to uncaught exception: %s", e.msg); + logDebug("Full error: %s", e.toString().sanitize()); + } catch (InvalidMemoryOperationError e) { + import std.stdio; + scope(failure) assert(false); + writeln("Error message: ", e.msg); + writeln("Full error: ", e.toString().sanitize()); + exit(-1); + } catch (Throwable th) { + logFatal("Worker thread terminated due to uncaught error: %s", th.msg); + logDebug("Full error: %s", th.toString().sanitize()); + exit(-1); + } +} + +private void handleWorkerTasks() +{ + logDebug("worker thread enter"); + + auto thisthr = Thread.getThis(); + + logDebug("worker thread loop enter"); + while (true) { + auto emit_count = st_threadsSignal.emitCount; + TaskFuncInfo task; + + synchronized (st_threadsMutex) { + auto idx = st_threads.countUntil!(c => c.thread is thisthr); + assert(idx >= 0); + logDebug("worker thread check"); + + if (getExitFlag()) { + if (st_threads[idx].taskQueue.length > 0) + logWarn("Worker thread shuts down with specific worker tasks left in its queue."); + if (st_threads.count!(c => c.isWorker) == 1 && st_workerTasks.length > 0) + logWarn("Worker threads shut down with worker tasks still left in the queue."); + break; + } + + if (!st_workerTasks.empty) { + logDebug("worker thread got task"); + task = st_workerTasks.front; + st_workerTasks.popFront(); + } else if (!st_threads[idx].taskQueue.empty) { + logDebug("worker thread got specific task"); + task = st_threads[idx].taskQueue.front; + st_threads[idx].taskQueue.popFront(); + } + } + + if (task.func !is null) runTask_internal(task); + else emit_count = st_threadsSignal.wait(emit_count); + } + + logDebug("worker thread exit"); + eventDriver.exit(); +} + +private void watchExitFlag() +{ + auto emit_count = st_threadsSignal.emitCount; + while (true) { + synchronized (st_threadsMutex) { + if (getExitFlag()) break; + } + + emit_count = st_threadsSignal.wait(emit_count); + } + + logDebug("main thread exit"); + eventDriver.exit(); +} + +private extern(C) void extrap() +@safe nothrow { + logTrace("exception trap"); +} + +private extern(C) void onSignal(int signal) +nothrow { + atomicStore(st_term, true); + try st_threadsSignal.emit(); catch (Throwable) {} + + logInfo("Received signal %d. 
Shutting down.", signal); +} + +private extern(C) void onBrokenPipe(int signal) +nothrow { + logTrace("Broken pipe."); +} + +version(Posix) +{ + private bool isRoot() { return geteuid() == 0; } + + private void setUID(int uid, int gid) + { + logInfo("Lowering privileges to uid=%d, gid=%d...", uid, gid); + if (gid >= 0) { + enforce(getgrgid(gid) !is null, "Invalid group id!"); + enforce(setegid(gid) == 0, "Error setting group id!"); + } + //if( initgroups(const char *user, gid_t group); + if (uid >= 0) { + enforce(getpwuid(uid) !is null, "Invalid user id!"); + enforce(seteuid(uid) == 0, "Error setting user id!"); + } + } + + private int getUID(string name) + { + auto pw = getpwnam(name.toStringz()); + enforce(pw !is null, "Unknown user name: "~name); + return pw.pw_uid; + } + + private int getGID(string name) + { + auto gr = getgrnam(name.toStringz()); + enforce(gr !is null, "Unknown group name: "~name); + return gr.gr_gid; + } +} else version(Windows){ + private bool isRoot() { return false; } + + private void setUID(int uid, int gid) + { + enforce(false, "UID/GID not supported on Windows."); + } + + private int getUID(string name) + { + enforce(false, "Privilege lowering not supported on Windows."); + assert(false); + } + + private int getGID(string name) + { + enforce(false, "Privilege lowering not supported on Windows."); + assert(false); + } +} + +private struct CoreTaskQueue { + @safe nothrow: + + CoreTask first, last; + size_t length; + + @disable this(this); + + @property bool empty() const { return first is null; } + + @property CoreTask front() { return first; } + + void insertBack(CoreTask task) + { + assert(task.m_queue == null, "Task is already scheduled to be resumed!"); + assert(task.m_nextInQueue is null, "Task has m_nextInQueue set without being in a queue!?"); + task.m_queue = &this; + if (empty) + first = task; + else + last.m_nextInQueue = task; + last = task; + length++; + } + + void popFront() + { + if (first is last) last = null; + assert(first && first.m_queue == &this); + auto next = first.m_nextInQueue; + first.m_nextInQueue = null; + first.m_queue = null; + first = next; + length--; + } +} + +// mixin string helper to call a function with arguments that potentially have +// to be moved +private string callWithMove(ARGS...)(string func, string args) +{ + import std.string; + string ret = func ~ "("; + foreach (i, T; ARGS) { + if (i > 0) ret ~= ", "; + ret ~= format("%s[%s]", args, i); + static if (needsMove!T) ret ~= ".move"; + } + return ret ~ ");"; +} + +private template needsMove(T) +{ + template isCopyable(T) + { + enum isCopyable = __traits(compiles, (T a) { return a; }); + } + + template isMoveable(T) + { + enum isMoveable = __traits(compiles, (T a) { return a.move; }); + } + + enum needsMove = !isCopyable!T; + + static assert(isCopyable!T || isMoveable!T, + "Non-copyable type "~T.stringof~" must be movable with a .move property."); +} + +unittest { + enum E { a, move } + static struct S { + @disable this(this); + @property S move() { return S.init; } + } + static struct T { @property T move() { return T.init; } } + static struct U { } + static struct V { + @disable this(); + @disable this(this); + @property V move() { return V.init; } + } + static struct W { @disable this(); } + + static assert(needsMove!S); + static assert(!needsMove!int); + static assert(!needsMove!string); + static assert(!needsMove!E); + static assert(!needsMove!T); + static assert(!needsMove!U); + static assert(needsMove!V); + static assert(!needsMove!W); +} diff --git 
a/source/vibe/core/file.d b/source/vibe/core/file.d new file mode 100644 index 0000000..4bfe806 --- /dev/null +++ b/source/vibe/core/file.d @@ -0,0 +1,638 @@ +/** + File handling functions and types. + + Copyright: © 2012-2016 RejectedSoftware e.K. + License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file. + Authors: Sönke Ludwig +*/ +module vibe.core.file; + +//public import vibe.core.stream; +//public import vibe.inet.url; +import vibe.core.path; + +import core.stdc.stdio; +import core.sys.posix.unistd; +import core.sys.posix.fcntl; +import core.sys.posix.sys.stat; +import std.conv : octal; +import vibe.core.log; +import std.datetime; +import std.exception; +import std.file; +import std.path; +import std.string; + + +version(Posix){ + private extern(C) int mkstemps(char* templ, int suffixlen); +} + + +/** + Opens a file stream with the specified mode. +*/ +FileStream openFile(Path path, FileMode mode = FileMode.read) +{ + assert(false); + //return eventDriver.openFile(path, mode); +} +/// ditto +FileStream openFile(string path, FileMode mode = FileMode.read) +{ + return openFile(Path(path), mode); +} + + +/** + Read a whole file into a buffer. + + If the supplied buffer is large enough, it will be used to store the + contents of the file. Otherwise, a new buffer will be allocated. + + Params: + path = The path of the file to read + buffer = An optional buffer to use for storing the file contents +*/ +ubyte[] readFile(Path path, ubyte[] buffer = null, size_t max_size = size_t.max) +{ + auto fil = openFile(path); + scope (exit) fil.close(); + enforce(fil.size <= max_size, "File is too big."); + auto sz = cast(size_t)fil.size; + auto ret = sz <= buffer.length ? buffer[0 .. sz] : new ubyte[sz]; + fil.read(ret); + return ret; +} +/// ditto +ubyte[] readFile(string path, ubyte[] buffer = null, size_t max_size = size_t.max) +{ + return readFile(Path(path), buffer, max_size); +} + + +/** + Write a whole file at once. +*/ +void writeFile(Path path, in ubyte[] contents) +{ + auto fil = openFile(path, FileMode.createTrunc); + scope (exit) fil.close(); + fil.write(contents); +} +/// ditto +void writeFile(string path, in ubyte[] contents) +{ + writeFile(Path(path), contents); +} + +/** + Convenience function to append to a file. +*/ +void appendToFile(Path path, string data) { + auto fil = openFile(path, FileMode.append); + scope(exit) fil.close(); + fil.write(data); +} +/// ditto +void appendToFile(string path, string data) +{ + appendToFile(Path(path), data); +} + +/** + Read a whole UTF-8 encoded file into a string. + + The resulting string will be sanitized and will have the + optional byte order mark (BOM) removed. +*/ +string readFileUTF8(Path path) +{ + import vibe.internal.string; + + return stripUTF8Bom(sanitizeUTF8(readFile(path))); +} +/// ditto +string readFileUTF8(string path) +{ + return readFileUTF8(Path(path)); +} + + +/** + Write a string into a UTF-8 encoded file. + + The file will have a byte order mark (BOM) prepended. +*/ +void writeFileUTF8(Path path, string contents) +{ + static immutable ubyte[] bom = [0xEF, 0xBB, 0xBF]; + auto fil = openFile(path, FileMode.createTrunc); + scope (exit) fil.close(); + fil.write(bom); + fil.write(contents); +} + +/** + Creates and opens a temporary file for writing. +*/ +FileStream createTempFile(string suffix = null) +{ + version(Windows){ + import std.conv : to; + char[L_tmpnam] tmp; + tmpnam(tmp.ptr); + auto tmpname = to!string(tmp.ptr); + if( tmpname.startsWith("\\") ) tmpname = tmpname[1 .. 
$];
+		tmpname ~= suffix;
+		return openFile(tmpname, FileMode.createTrunc);
+	} else {
+		enum pattern = "/tmp/vtmp.XXXXXX";
+		scope templ = new char[pattern.length+suffix.length+1];
+		templ[0 .. pattern.length] = pattern;
+		templ[pattern.length .. $-1] = suffix[];
+		templ[$-1] = '\0';
+		assert(suffix.length <= int.max);
+		auto fd = mkstemps(templ.ptr, cast(int)suffix.length);
+		enforce(fd >= 0, "Failed to create temporary file.");
+		assert(false);
+		//return eventDriver.adoptFile(fd, Path(templ[0 .. $-1].idup), FileMode.createTrunc);
+	}
+}
+
+/**
+	Moves or renames a file.
+
+	Params:
+		from = Path to the file/directory to move/rename.
+		to = The target path
+		copy_fallback = Determines if copy/remove should be used in case of the
+			source and destination path pointing to different devices.
+*/
+void moveFile(Path from, Path to, bool copy_fallback = false)
+{
+	moveFile(from.toNativeString(), to.toNativeString(), copy_fallback);
+}
+/// ditto
+void moveFile(string from, string to, bool copy_fallback = false)
+{
+	if (!copy_fallback) {
+		std.file.rename(from, to);
+	} else {
+		try {
+			std.file.rename(from, to);
+		} catch (FileException e) {
+			std.file.copy(from, to);
+			std.file.remove(from);
+		}
+	}
+}
+
+/**
+	Copies a file.
+
+	Note that attributes and time stamps are currently not retained.
+
+	Params:
+		from = Path of the source file
+		to = Path for the destination file
+		overwrite = If true, any file existing at the destination path will be
+			overwritten. If this is false, an exception will be thrown should
+			a file already exist at the destination path.
+
+	Throws:
+		An Exception if the copy operation fails for some reason.
+*/
+void copyFile(Path from, Path to, bool overwrite = false)
+{
+	{
+		auto src = openFile(from, FileMode.read);
+		scope(exit) src.close();
+		enforce(overwrite || !existsFile(to), "Destination file already exists.");
+		auto dst = openFile(to, FileMode.createTrunc);
+		scope(exit) dst.close();
+		dst.write(src);
+	}
+
+	// TODO: retain attributes and time stamps
+}
+/// ditto
+void copyFile(string from, string to, bool overwrite = false)
+{
+	copyFile(Path(from), Path(to), overwrite);
+}
+
+/**
+	Removes a file
+*/
+void removeFile(Path path)
+{
+	removeFile(path.toNativeString());
+}
+/// ditto
+void removeFile(string path)
+{
+	std.file.remove(path);
+}
+
+/**
+	Checks if a file exists
+*/
+bool existsFile(Path path) nothrow
+{
+	return existsFile(path.toNativeString());
+}
+/// ditto
+bool existsFile(string path) nothrow
+{
+	// std.file.exists was only annotated nothrow starting with 2.067
+	static if (__VERSION__ < 2067)
+		scope(failure) assert(0, "Error: existsFile should never throw");
+	return std.file.exists(path);
+}
+
+/** Returns information about the specified file/directory.
+
+	Throws: A `FileException` is thrown if the file does not exist.
+*/
+FileInfo getFileInfo(Path path)
+{
+	auto ent = DirEntry(path.toNativeString());
+	return makeFileInfo(ent);
+}
+/// ditto
+FileInfo getFileInfo(string path)
+{
+	return getFileInfo(Path(path));
+}
+
+/**
+	Creates a new directory.
+*/
+void createDirectory(Path path)
+{
+	mkdir(path.toNativeString());
+}
+/// ditto
+void createDirectory(string path)
+{
+	createDirectory(Path(path));
+}
+
+/**
+	Enumerates all files in the specified directory.
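+
+	A minimal usage sketch:
+	---
+	// print the name and size of each directory entry
+	listDirectory(getWorkingDirectory(), (info) {
+		logInfo("%s: %s bytes", info.name, info.size);
+		return true; // continue the enumeration
+	});
+	---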
+*/
+void listDirectory(Path path, scope bool delegate(FileInfo info) del)
+{
+	foreach( DirEntry ent; dirEntries(path.toNativeString(), SpanMode.shallow) )
+		if( !del(makeFileInfo(ent)) )
+			break;
+}
+/// ditto
+void listDirectory(string path, scope bool delegate(FileInfo info) del)
+{
+	listDirectory(Path(path), del);
+}
+/// ditto
+int delegate(scope int delegate(ref FileInfo)) iterateDirectory(Path path)
+{
+	int iterator(scope int delegate(ref FileInfo) del){
+		int ret = 0;
+		listDirectory(path, (fi){
+			ret = del(fi);
+			return ret == 0;
+		});
+		return ret;
+	}
+	return &iterator;
+}
+/// ditto
+int delegate(scope int delegate(ref FileInfo)) iterateDirectory(string path)
+{
+	return iterateDirectory(Path(path));
+}
+
+/**
+	Starts watching a directory for changes.
+*/
+DirectoryWatcher watchDirectory(Path path, bool recursive = true)
+{
+	assert(false);
+	//return eventDriver.watchDirectory(path, recursive);
+}
+/// ditto
+DirectoryWatcher watchDirectory(string path, bool recursive = true)
+{
+	return watchDirectory(Path(path), recursive);
+}
+
+/**
+	Returns the current working directory.
+*/
+Path getWorkingDirectory()
+{
+	return Path(std.file.getcwd());
+}
+
+
+/** Contains general information about a file.
+*/
+struct FileInfo {
+	/// Name of the file (not including the path)
+	string name;
+
+	/// Size of the file (zero for directories)
+	ulong size;
+
+	/// Time of the last modification
+	SysTime timeModified;
+
+	/// Time of creation (not available on all operating systems/file systems)
+	SysTime timeCreated;
+
+	/// True if this is a symlink to an actual file
+	bool isSymlink;
+
+	/// True if this is a directory or a symlink pointing to a directory
+	bool isDirectory;
+}
+
+/**
+	Specifies how a file is manipulated on disk.
+*/
+enum FileMode {
+	/// The file is opened read-only.
+	read,
+	/// The file is opened for read-write random access.
+	readWrite,
+	/// The file is truncated if it exists or created otherwise and then opened for read-write access.
+	createTrunc,
+	/// The file is opened for appending data to it and created if it does not exist.
+	append
+}
+
+/**
+	Accesses the contents of a file as a stream.
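+
+	A short read sketch (error handling omitted):
+	---
+	auto fil = openFile("test.dat", FileMode.read);
+	scope (exit) fil.close();
+	auto buf = new ubyte[cast(size_t)fil.size];
+	fil.read(buf);
+	---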
+*/ +struct FileStream { + import std.algorithm.comparison : min; + import vibe.core.core : yield; + import core.stdc.errno; + + version (Windows) {} else + { + enum O_BINARY = 0; + } + + private { + int m_fileDescriptor; + Path m_path; + ulong m_size; + ulong m_ptr = 0; + FileMode m_mode; + bool m_ownFD = true; + } + + this(Path path, FileMode mode) + { + auto pathstr = path.toNativeString(); + final switch(mode){ + case FileMode.read: + m_fileDescriptor = open(pathstr.toStringz(), O_RDONLY|O_BINARY); + break; + case FileMode.readWrite: + m_fileDescriptor = open(pathstr.toStringz(), O_RDWR|O_BINARY); + break; + case FileMode.createTrunc: + m_fileDescriptor = open(pathstr.toStringz(), O_RDWR|O_CREAT|O_TRUNC|O_BINARY, octal!644); + break; + case FileMode.append: + m_fileDescriptor = open(pathstr.toStringz(), O_WRONLY|O_CREAT|O_APPEND|O_BINARY, octal!644); + break; + } + if( m_fileDescriptor < 0 ) + //throw new Exception(format("Failed to open '%s' with %s: %d", pathstr, cast(int)mode, errno)); + throw new Exception("Failed to open file '"~pathstr~"'."); + + this(m_fileDescriptor, path, mode); + } + + this(int fd, Path path, FileMode mode) + { + assert(fd >= 0); + m_fileDescriptor = fd; + m_path = path; + m_mode = mode; + + version(linux){ + // stat_t seems to be defined wrong on linux/64 + m_size = lseek(m_fileDescriptor, 0, SEEK_END); + } else { + stat_t st; + fstat(m_fileDescriptor, &st); + m_size = st.st_size; + + // (at least) on windows, the created file is write protected + version(Windows){ + if( mode == FileMode.createTrunc ) + chmod(path.toNativeString().toStringz(), S_IREAD|S_IWRITE); + } + } + lseek(m_fileDescriptor, 0, SEEK_SET); + + logDebug("opened file %s with %d bytes as %d", path.toNativeString(), m_size, m_fileDescriptor); + } + + ~this() + { + close(); + } + + @property int fd() { return m_fileDescriptor; } + + /// The path of the file. + @property Path path() const { return m_path; } + + /// Determines if the file stream is still open + @property bool isOpen() const { return m_fileDescriptor >= 0; } + @property ulong size() const { return m_size; } + @property bool readable() const { return m_mode != FileMode.append; } + @property bool writable() const { return m_mode != FileMode.read; } + + void takeOwnershipOfFD() + { + enforce(m_ownFD); + m_ownFD = false; + } + + void seek(ulong offset) + { + version (Win32) { + enforce(offset <= off_t.max, "Cannot seek above 4GB on Windows x32."); + auto pos = lseek(m_fileDescriptor, cast(off_t)offset, SEEK_SET); + } else auto pos = lseek(m_fileDescriptor, offset, SEEK_SET); + enforce(pos == offset, "Failed to seek in file."); + m_ptr = offset; + } + + ulong tell() { return m_ptr; } + + /// Closes the file handle. + void close() + { + if( m_fileDescriptor != -1 && m_ownFD ){ + .close(m_fileDescriptor); + m_fileDescriptor = -1; + } + } + + @property bool empty() const { assert(this.readable); return m_ptr >= m_size; } + @property ulong leastSize() const { assert(this.readable); return m_size - m_ptr; } + @property bool dataAvailableForRead() { return true; } + + const(ubyte)[] peek() + { + return null; + } + + void read(ubyte[] dst) + { + assert(this.readable); + while (dst.length > 0) { + enforce(dst.length <= leastSize); + auto sz = min(dst.length, 4096); + enforce(.read(m_fileDescriptor, dst.ptr, cast(int)sz) == sz, "Failed to read data from disk."); + dst = dst[sz .. 
$];
+			m_ptr += sz;
+			yield();
+		}
+	}
+
+	void write(in ubyte[] bytes_)
+	{
+		const(ubyte)[] bytes = bytes_;
+		assert(this.writable);
+		while (bytes.length > 0) {
+			auto sz = min(bytes.length, 4096);
+			auto ret = .write(m_fileDescriptor, bytes.ptr, cast(int)sz);
+			import std.format : format;
+			enforce(ret == sz, format("Failed to write data to disk. %s %s %s %s", sz, errno, ret, m_fileDescriptor));
+			bytes = bytes[sz .. $];
+			m_ptr += sz;
+			yield();
+		}
+	}
+
+	void write(InputStream)(InputStream stream, ulong nbytes = 0)
+	{
+		writeDefault(stream, nbytes);
+	}
+
+	void flush()
+	{
+		assert(this.writable);
+	}
+
+	void finalize()
+	{
+		flush();
+	}
+}
+
+private void writeDefault(OutputStream, InputStream)(ref OutputStream dst, InputStream stream, ulong nbytes = 0)
+{
+	assert(false);
+	/*
+	static struct Buffer { ubyte[64*1024] bytes = void; }
+	auto bufferobj = FreeListRef!(Buffer, false)();
+	auto buffer = bufferobj.bytes[];
+
+	//logTrace("default write %d bytes, empty=%s", nbytes, stream.empty);
+	if (nbytes == 0) {
+		while (!stream.empty) {
+			size_t chunk = min(stream.leastSize, buffer.length);
+			assert(chunk > 0, "leastSize returned zero for non-empty stream.");
+			//logTrace("read pipe chunk %d", chunk);
+			stream.read(buffer[0 .. chunk]);
+			dst.write(buffer[0 .. chunk]);
+		}
+	} else {
+		while (nbytes > 0) {
+			size_t chunk = min(nbytes, buffer.length);
+			//logTrace("read pipe chunk %d", chunk);
+			stream.read(buffer[0 .. chunk]);
+			dst.write(buffer[0 .. chunk]);
+			nbytes -= chunk;
+		}
+	}
+	*/
+}
+
+
+/**
+	Interface for directory watcher implementations.
+
+	Directory watchers monitor the contents of a directory (either recursively or non-recursively)
+	for changes, such as file additions, deletions or modifications.
+*/
+interface DirectoryWatcher {
+	/// The path of the watched directory
+	@property Path path() const;
+
+	/// Indicates if the directory is watched recursively
+	@property bool recursive() const;
+
+	/** Fills the destination array with all changes that occurred since the last call.
+
+		The function will block until either directory changes have occurred or until the
+		timeout has elapsed. Specifying a negative duration will cause the function to
+		wait without a timeout.
+
+		Params:
+			dst = The destination array to which the changes will be appended
+			timeout = Optional timeout for the read operation
+
+		Returns:
+			If the call completed successfully, true is returned.
+	*/
+	bool readChanges(ref DirectoryChange[] dst, Duration timeout = dur!"seconds"(-1));
+}
+
+
+/** Specifies the kind of change in a watched directory.
+*/
+enum DirectoryChangeType {
+	/// A file or directory was added
+	added,
+	/// A file or directory was deleted
+	removed,
+	/// A file or directory was modified
+	modified
+}
+
+
+/** Describes a single change in a watched directory.
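+
+	Changes are typically consumed in a loop like the following sketch:
+	---
+	auto watcher = watchDirectory(getWorkingDirectory());
+	DirectoryChange[] changes;
+	while (watcher.readChanges(changes))
+		foreach (ch; changes)
+			logInfo("%s: %s", ch.type, ch.path.toNativeString());
+	---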
+*/
+struct DirectoryChange {
+	/// The type of change
+	DirectoryChangeType type;
+
+	/// Path of the file/directory that was changed
+	Path path;
+}
+
+
+private FileInfo makeFileInfo(DirEntry ent)
+{
+	FileInfo ret;
+	ret.name = baseName(ent.name);
+	if( ret.name.length == 0 ) ret.name = ent.name;
+	assert(ret.name.length > 0);
+	ret.size = ent.size;
+	ret.timeModified = ent.timeLastModified;
+	version(Windows) ret.timeCreated = ent.timeCreated;
+	else ret.timeCreated = ent.timeLastModified;
+	ret.isSymlink = ent.isSymlink;
+	ret.isDirectory = ent.isDir;
+	return ret;
+}
diff --git a/source/vibe/core/log.d b/source/vibe/core/log.d
new file mode 100644
index 0000000..f0c905d
--- /dev/null
+++ b/source/vibe/core/log.d
@@ -0,0 +1,879 @@
+/**
+	Central logging facility for vibe.
+
+	Copyright: © 2012-2014 RejectedSoftware e.K.
+	License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file.
+	Authors: Sönke Ludwig
+*/
+module vibe.core.log;
+
+import vibe.core.args;
+import vibe.core.concurrency : ScopedLock, lock;
+import vibe.core.sync;
+
+import std.algorithm;
+import std.array;
+import std.datetime;
+import std.format;
+import std.stdio;
+import core.atomic;
+import core.thread;
+
+import std.traits : isSomeString;
+import std.range.primitives : isInputRange, isOutputRange;
+
+/**
+	Sets the minimum log level to be printed using the default console logger.
+
+	This level applies to the default stdout/stderr logger only.
+*/
+void setLogLevel(LogLevel level)
+nothrow @safe {
+	if (ss_stdoutLogger)
+		ss_stdoutLogger.lock().minLevel = level;
+}
+
+
+/**
+	Sets the log format used for the default console logger.
+
+	This format applies to the default stdout/stderr logger only.
+
+	Params:
+		fmt = The log format for stderr (default is `FileLogger.Format.thread`)
+		infoFmt = The log format for stdout (default is `FileLogger.Format.plain`)
+*/
+void setLogFormat(FileLogger.Format fmt, FileLogger.Format infoFmt = FileLogger.Format.plain)
+nothrow @safe {
+	if (ss_stdoutLogger) {
+		auto l = ss_stdoutLogger.lock();
+		l.format = fmt;
+		l.infoFormat = infoFmt;
+	}
+}
+
+
+/**
+	Sets a log file for disk file logging.
+
+	Multiple calls to this function will register multiple log
+	files for output.
+*/
+void setLogFile(string filename, LogLevel min_level = LogLevel.error)
+{
+	auto logger = cast(shared)new FileLogger(filename);
+	{
+		auto l = logger.lock();
+		l.minLevel = min_level;
+		l.format = FileLogger.Format.threadTime;
+	}
+	registerLogger(logger);
+}
+
+
+/**
+	Registers a new logger instance.
+
+	The specified Logger will receive all log messages in its Logger.log
+	method after it has been registered.
+
+	Examples:
+	---
+	auto logger = cast(shared)new HTMLLogger("log.html");
+	logger.lock().minLogLevel = LogLevel.info;
+	registerLogger(logger);
+	---
+
+	See_Also: deregisterLogger
+*/
+void registerLogger(shared(Logger) logger)
+nothrow {
+	ss_loggers ~= logger;
+}
+
+
+/**
+	Deregisters an active logger instance.
+
+	See_Also: registerLogger
+*/
+void deregisterLogger(shared(Logger) logger)
+nothrow {
+	for (size_t i = 0; i < ss_loggers.length; ) {
+		if (ss_loggers[i] !is logger) i++;
+		else ss_loggers = ss_loggers[0 .. i] ~ ss_loggers[i+1 .. $];
+	}
+}
+
+
+/**
+	Logs a message.
+ + Params: + level = The log level for the logged message + fmt = See http://dlang.org/phobos/std_format.html#format-string + args = Any input values needed for formatting +*/ +void log(LogLevel level, /*string mod = __MODULE__, string func = __FUNCTION__,*/ string file = __FILE__, int line = __LINE__, S, T...)(S fmt, lazy T args) + nothrow if (isSomeString!S) +{ + static assert(level != LogLevel.none); + try { + foreach (l; getLoggers()) + if (l.minLevel <= level) { // WARNING: TYPE SYSTEM HOLE: accessing field of shared class! + auto ll = l.lock(); + auto rng = LogOutputRange(ll, file, line, level); + /*() @trusted {*/ rng.formattedWrite(fmt, args); //} (); // formattedWrite is not @safe at least up to 2.068.0 + rng.finalize(); + } + } catch(Exception e) debug assert(false, e.msg); +} +/// ditto +void logTrace(/*string mod = __MODULE__, string func = __FUNCTION__,*/ string file = __FILE__, int line = __LINE__, S, T...)(S fmt, lazy T args) nothrow { log!(LogLevel.trace/*, mod, func*/, file, line)(fmt, args); } +/// ditto +void logDebugV(/*string mod = __MODULE__, string func = __FUNCTION__,*/ string file = __FILE__, int line = __LINE__, S, T...)(S fmt, lazy T args) nothrow { log!(LogLevel.debugV/*, mod, func*/, file, line)(fmt, args); } +/// ditto +void logDebug(/*string mod = __MODULE__, string func = __FUNCTION__,*/ string file = __FILE__, int line = __LINE__, S, T...)(S fmt, lazy T args) nothrow { log!(LogLevel.debug_/*, mod, func*/, file, line)(fmt, args); } +/// ditto +void logDiagnostic(/*string mod = __MODULE__, string func = __FUNCTION__,*/ string file = __FILE__, int line = __LINE__, S, T...)(S fmt, lazy T args) nothrow { log!(LogLevel.diagnostic/*, mod, func*/, file, line)(fmt, args); } +/// ditto +void logInfo(/*string mod = __MODULE__, string func = __FUNCTION__,*/ string file = __FILE__, int line = __LINE__, S, T...)(S fmt, lazy T args) nothrow { log!(LogLevel.info/*, mod, func*/, file, line)(fmt, args); } +/// ditto +void logWarn(/*string mod = __MODULE__, string func = __FUNCTION__,*/ string file = __FILE__, int line = __LINE__, S, T...)(S fmt, lazy T args) nothrow { log!(LogLevel.warn/*, mod, func*/, file, line)(fmt, args); } +/// ditto +void logError(/*string mod = __MODULE__, string func = __FUNCTION__,*/ string file = __FILE__, int line = __LINE__, S, T...)(S fmt, lazy T args) nothrow { log!(LogLevel.error/*, mod, func*/, file, line)(fmt, args); } +/// ditto +void logCritical(/*string mod = __MODULE__, string func = __FUNCTION__,*/ string file = __FILE__, int line = __LINE__, S, T...)(S fmt, lazy T args) nothrow { log!(LogLevel.critical/*, mod, func*/, file, line)(fmt, args); } +/// ditto +void logFatal(string file = __FILE__, int line = __LINE__, S, T...)(S fmt, lazy T args) nothrow { log!(LogLevel.fatal, file, line)(fmt, args); } + +/// +@safe unittest { + void test() nothrow + { + logInfo("Hello, World!"); + logWarn("This may not be %s.", "good"); + log!(LogLevel.info)("This is a %s.", "test"); + } +} + +/// Specifies the log level for a particular log message. +enum LogLevel { + trace, /// Developer information for locating events when no useful stack traces are available + debugV, /// Developer information useful for algorithm debugging - for verbose output + debug_, /// Developer information useful for algorithm debugging + diagnostic, /// Extended user information (e.g. 
for more detailed error information) + info, /// Informational message for normal user education + warn, /// Unexpected condition that could indicate an error but has no direct consequences + error, /// Normal error that is handled gracefully + critical, /// Error that severely influences the execution of the application + fatal, /// Error that forces the application to terminate + none, /// Special value used to indicate no logging when set as the minimum log level + + verbose1 = diagnostic, /// Alias for diagnostic messages + verbose2 = debug_, /// Alias for debug messages + verbose3 = debugV, /// Alias for verbose debug messages + verbose4 = trace, /// Alias for trace messages +} + +/// Represents a single logged line +struct LogLine { + string mod; + string func; + string file; + int line; + LogLevel level; + Thread thread; + string threadName; + uint threadID; + Fiber fiber; + uint fiberID; + SysTime time; + string text; /// Legacy field used in `Logger.log` +} + +/// Abstract base class for all loggers +class Logger { + LogLevel minLevel = LogLevel.min; + + private { + LogLine m_curLine; + Appender!string m_curLineText; + } + + final bool acceptsLevel(LogLevel value) nothrow pure @safe { return value >= this.minLevel; } + + /** Legacy logging interface relying on dynamic memory allocation. + + Override `beginLine`, `put`, `endLine` instead for a more efficient and + possibly allocation-free implementation. + */ + void log(ref LogLine line) @safe {} + + /// Starts a new log line. + void beginLine(ref LogLine line_info) + @safe { + m_curLine = line_info; + m_curLineText = appender!string(); + } + + /// Writes part of a log line message. + void put(scope const(char)[] text) + @safe { + m_curLineText.put(text); + } + + /// Finalizes a log line. + void endLine() + @safe { + m_curLine.text = m_curLineText.data; + log(m_curLine); + m_curLine.text = null; + m_curLineText = Appender!string.init; + } +} + + +/** + Plain-text based logger for logging to regular files or stdout/stderr +*/ +final class FileLogger : Logger { + /// The log format used by the FileLogger + enum Format { + plain, /// Output only the plain log message + thread, /// Prefix "[thread-id:fiber-id loglevel]" + threadTime /// Prefix "[thread-id:fiber-id timestamp loglevel]" + } + + private { + File m_infoFile; + File m_diagFile; + File m_curFile; + } + + Format format = Format.thread; + Format infoFormat = Format.plain; + + this(File info_file, File diag_file) + { + m_infoFile = info_file; + m_diagFile = diag_file; + } + + this(string filename) + { + m_infoFile = File(filename, "ab"); + m_diagFile = m_infoFile; + } + + override void beginLine(ref LogLine msg) + @trusted // FILE isn't @safe (as of DMD 2.065) + { + string pref; + final switch (msg.level) { + case LogLevel.trace: pref = "trc"; m_curFile = m_diagFile; break; + case LogLevel.debugV: pref = "dbv"; m_curFile = m_diagFile; break; + case LogLevel.debug_: pref = "dbg"; m_curFile = m_diagFile; break; + case LogLevel.diagnostic: pref = "dia"; m_curFile = m_diagFile; break; + case LogLevel.info: pref = "INF"; m_curFile = m_infoFile; break; + case LogLevel.warn: pref = "WRN"; m_curFile = m_diagFile; break; + case LogLevel.error: pref = "ERR"; m_curFile = m_diagFile; break; + case LogLevel.critical: pref = "CRITICAL"; m_curFile = m_diagFile; break; + case LogLevel.fatal: pref = "FATAL"; m_curFile = m_diagFile; break; + case LogLevel.none: assert(false); + } + + auto fmt = (m_curFile is m_diagFile) ? 
this.format : this.infoFormat;
+
+		final switch (fmt) {
+			case Format.plain: break;
+			case Format.thread: m_curFile.writef("[%08X:%08X %s] ", msg.threadID, msg.fiberID, pref); break;
+			case Format.threadTime:
+				auto tm = msg.time;
+				static if (is(typeof(tm.fracSecs))) auto msecs = tm.fracSecs.total!"msecs"; // 2.069 has deprecated "fracSec"
+				else auto msecs = tm.fracSec.msecs;
+				m_curFile.writef("[%08X:%08X %d.%02d.%02d %02d:%02d:%02d.%03d %s] ",
+					msg.threadID, msg.fiberID,
+					tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second, msecs,
+					pref);
+				break;
+		}
+	}
+
+	override void put(scope const(char)[] text)
+	{
+		static if (__VERSION__ <= 2066)
+			() @trusted { m_curFile.write(text); } ();
+		else m_curFile.write(text);
+	}
+
+	override void endLine()
+	{
+		static if (__VERSION__ <= 2066)
+			() @trusted { m_curFile.writeln(); } ();
+		else m_curFile.writeln();
+		m_curFile.flush();
+	}
+}
+
+/**
+	Logger implementation for logging to an HTML file with dynamic filtering support.
+*/
+final class HTMLLogger : Logger {
+	private {
+		File m_logFile;
+	}
+
+	this(string filename = "log.html")
+	{
+		m_logFile = File(filename, "wt");
+		writeHeader();
+	}
+
+	~this()
+	{
+		//version(FinalizerDebug) writeln("HtmlLogWritet.~this");
+		writeFooter();
+		m_logFile.close();
+		//version(FinalizerDebug) writeln("HtmlLogWritet.~this out");
+	}
+
+	@property void minLogLevel(LogLevel value) pure nothrow @safe { this.minLevel = value; }
+
+	override void beginLine(ref LogLine msg)
+		@trusted // FILE isn't @safe (as of DMD 2.065)
+	{
+		if( !m_logFile.isOpen ) return;
+
+		final switch (msg.level) {
+			case LogLevel.none: assert(false);
+			case LogLevel.trace: m_logFile.write(`<div class="trace">`); break;
+			case LogLevel.debugV: m_logFile.write(`<div class="debugv">`); break;
+			case LogLevel.debug_: m_logFile.write(`<div class="debug">`); break;
+			case LogLevel.diagnostic: m_logFile.write(`<div class="diagnostic">`); break;
+			case LogLevel.info: m_logFile.write(`<div class="info">`); break;
+			case LogLevel.warn: m_logFile.write(`<div class="warn">`); break;
+			case LogLevel.error: m_logFile.write(`<div class="error">`); break;
+			case LogLevel.critical: m_logFile.write(`<div class="critical">`); break;
+			case LogLevel.fatal: m_logFile.write(`<div class="fatal">`); break;
+		}
+		m_logFile.writef(`<div class="timeStamp">%s</div>`, msg.time.toISOExtString());
+		if (msg.thread)
+			m_logFile.writef(`<div class="threadName">%s</div>`, msg.thread.name);
+		m_logFile.write(`<div class="message">`);
+	}
+
+	override void put(scope const(char)[] text)
+	{
+		auto dst = () @trusted { return m_logFile.lockingTextWriter(); } (); // LockingTextWriter not @safe for DMD 2.066
+		while (!text.empty && (text.front == ' ' || text.front == '\t')) {
+			foreach (i; 0 .. text.front == ' ' ? 1 : 4)
+				() @trusted { dst.put("&nbsp;"); } (); // LockingTextWriter not @safe for DMD 2.066
+			text.popFront();
+		}
+		() @trusted { filterHTMLEscape(dst, text); } (); // LockingTextWriter not @safe for DMD 2.066
+	}
+
+	override void endLine()
+	{
+		() @trusted { // not @safe for DMD 2.066
+			m_logFile.write(`</div>`);
+			m_logFile.writeln(`</div>`);
+		} ();
+		m_logFile.flush();
+	}
+
+	private void writeHeader(){
+		if( !m_logFile.isOpen ) return;
+
+		m_logFile.writeln(
+`<html>
+<head>
+	<title>HTML Log</title>
+</head>
+<body>
+	<form>
+		Minimum Log Level:
+		<select name="minLevel">
+		</select>
+	</form>
+	<div>`);
+		m_logFile.flush();
+	}
+
+	private void writeFooter(){
+		if( !m_logFile.isOpen ) return;
+
+		m_logFile.writeln(
+`	</div>
+</body>
+</html>`);
+		m_logFile.flush();
+	}
+}
+
+
+/** Helper stuff.
+*/
+/** Writes the HTML escaped version of a given string to an output range.
+*/
+void filterHTMLEscape(R, S)(ref R dst, S str, HTMLEscapeFlags flags = HTMLEscapeFlags.escapeNewline)
+	if (isOutputRange!(R, dchar) && isInputRange!S)
+{
+	for (;!str.empty;str.popFront())
+		filterHTMLEscape(dst, str.front, flags);
+}
+
+/**
+	Writes the HTML escaped version of a character to an output range.
+*/
+void filterHTMLEscape(R)(ref R dst, dchar ch, HTMLEscapeFlags flags = HTMLEscapeFlags.escapeNewline)
+{
+	switch (ch) {
+		default:
+			if (flags & HTMLEscapeFlags.escapeUnknown) {
+				dst.put("&#");
+				dst.put(to!string(cast(uint)ch));
+				dst.put(';');
+			} else dst.put(ch);
+			break;
+		case '"':
+			if (flags & HTMLEscapeFlags.escapeQuotes) dst.put("&quot;");
+			else dst.put('"');
+			break;
+		case '\'':
+			if (flags & HTMLEscapeFlags.escapeQuotes) dst.put("&#39;");
+			else dst.put('\'');
+			break;
+		case '\r', '\n':
+			if (flags & HTMLEscapeFlags.escapeNewline) {
+				dst.put("&#");
+				dst.put(to!string(cast(uint)ch));
+				dst.put(';');
+			} else dst.put(ch);
+			break;
+		case 'a': .. case 'z': goto case;
+		case 'A': .. case 'Z': goto case;
+		case '0': .. case '9': goto case;
+		case ' ', '\t', '-', '_', '.', ':', ',', ';',
+			'#', '+', '*', '?', '=', '(', ')', '/', '!',
+			'%' , '{', '}', '[', ']', '`', '´', '$', '^', '~':
+			dst.put(cast(char)ch);
+			break;
+		case '<': dst.put("&lt;"); break;
+		case '>': dst.put("&gt;"); break;
+		case '&': dst.put("&amp;"); break;
+	}
+}
+
+
+enum HTMLEscapeFlags {
+	escapeMinimal = 0,
+	escapeQuotes = 1<<0,
+	escapeNewline = 1<<1,
+	escapeUnknown = 1<<2
+}
+/*****************************
+*/
+
+import std.conv;
+/**
+	A logger that logs in syslog format according to RFC 5424.
+
+	Messages can be logged to files (via file streams) or over the network (via
+	TCP or SSL streams).
+
+	Standards: Conforms to RFC 5424.
+*/
+final class SyslogLogger(OutputStream) : Logger {
+	private {
+		string m_hostName;
+		string m_appName;
+		OutputStream m_ostream;
+		Facility m_facility;
+	}
+
+	/// Facilities
+	enum Facility {
+		kern,        /// kernel messages
+		user,        /// user-level messages
+		mail,        /// mail system
+		daemon,      /// system daemons
+		auth,        /// security/authorization messages
+		syslog,      /// messages generated internally by syslogd
+		lpr,         /// line printer subsystem
+		news,        /// network news subsystem
+		uucp,        /// UUCP subsystem
+		clockDaemon, /// clock daemon
+		authpriv,    /// security/authorization messages
+		ftp,         /// FTP daemon
+		ntp,         /// NTP subsystem
+		logAudit,    /// log audit
+		logAlert,    /// log alert
+		cron,        /// clock daemon
+		local0,      /// local use 0
+		local1,      /// local use 1
+		local2,      /// local use 2
+		local3,      /// local use 3
+		local4,      /// local use 4
+		local5,      /// local use 5
+		local6,      /// local use 6
+		local7,      /// local use 7
+	}
+
+	/// Severities
+	private enum Severity {
+		emergency, /// system is unusable
+		alert,     /// action must be taken immediately
+		critical,  /// critical conditions
+		error,     /// error conditions
+		warning,   /// warning conditions
+		notice,    /// normal but significant condition
+		info,      /// informational messages
+		debug_,    /// debug-level messages
+	}
+
+	/// syslog message format (version 1)
+	/// see section 6 in RFC 5424
+	private enum SYSLOG_MESSAGE_FORMAT_VERSION1 = "<%.3s>1 %s %.255s %.48s %.128s %.32s %s %s";
+	///
+	private enum NILVALUE = "-";
+	///
+	private enum BOM = x"EFBBBF";
+
+	/**
+		Construct a SyslogLogger.
+
+		The log messages are sent to the given OutputStream stream using the given
+		Facility facility. Optionally the appName and hostName can be set. The
+		appName defaults to null. The hostName defaults to hostName().
+
+		Note that the passed stream's write function must not itself log at a
+		level that this logger accepts, because the logger writes to the stream
+		on every log call and would thus recurse indefinitely.
+	*/
+	this(OutputStream stream, Facility facility, string appName = null, string hostName = hostName())
+	{
+		m_hostName = hostName != "" ? hostName : NILVALUE;
+		m_appName = appName != "" ? appName : NILVALUE;
+		m_ostream = stream;
+		m_facility = facility;
+		this.minLevel = LogLevel.debug_;
+	}
+
+	/**
+		Logs the given LogLine msg.
+
+		It uses the msg's time, level, and text field.
+	*/
+	override void beginLine(ref LogLine msg)
+	@trusted { // OutputStream isn't @safe
+		auto tm = msg.time;
+		import core.time;
+		// at most 6 digits for fractional seconds according to RFC
+		static if (is(typeof(tm.fracSecs))) tm.fracSecs = tm.fracSecs.total!"usecs".dur!"usecs";
+		else tm.fracSec = FracSec.from!"usecs"(tm.fracSec.usecs);
+		auto timestamp = tm.toISOExtString();
+
+		Severity syslogSeverity;
+		// map LogLevel to syslog's severity
+		final switch(msg.level) {
+			case LogLevel.none: assert(false);
+			case LogLevel.trace: return;
+			case LogLevel.debugV: return;
+			case LogLevel.debug_: syslogSeverity = Severity.debug_; break;
+			case LogLevel.diagnostic: syslogSeverity = Severity.info; break;
+			case LogLevel.info: syslogSeverity = Severity.notice; break;
+			case LogLevel.warn: syslogSeverity = Severity.warning; break;
+			case LogLevel.error: syslogSeverity = Severity.error; break;
+			case LogLevel.critical: syslogSeverity = Severity.critical; break;
+			case LogLevel.fatal: syslogSeverity = Severity.alert; break;
+		}
+
+		assert(msg.level >= LogLevel.debug_);
+		import std.conv : to; // temporary workaround for issue 1016 (DMD cross-module template overloads error out before second attempted module)
+		auto priVal = m_facility * 8 + syslogSeverity;
+
+		alias procId = NILVALUE;
+		alias msgId = NILVALUE;
+		alias structuredData = NILVALUE;
+
+		auto text = msg.text;
+		import std.format : formattedWrite;
+		import vibe.stream.wrapper : StreamOutputRange;
+		auto str = StreamOutputRange(m_ostream);
+		(&str).formattedWrite(SYSLOG_MESSAGE_FORMAT_VERSION1, priVal,
+			timestamp, m_hostName, BOM ~ m_appName, procId, msgId,
+			structuredData, BOM);
+	}
+
+	override void put(scope const(char)[] text)
+	@trusted {
+		m_ostream.write(text);
+	}
+
+	override void endLine()
+	@trusted {
+		m_ostream.write("\n");
+		m_ostream.flush();
+	}
+
+	unittest
+	{
+		import vibe.core.file;
+		auto fstream = createTempFile();
+		auto logger = new SyslogLogger!FileStream(fstream, Facility.local1, "appname", null);
+		LogLine msg;
+		import std.datetime;
+		import core.thread;
+		static if (is(typeof(SysTime.init.fracSecs))) auto fs = 1.dur!"usecs";
+		else auto fs = FracSec.from!"usecs"(1);
+		msg.time = SysTime(DateTime(0, 1, 1, 0, 0, 0), fs);
+
+		foreach (lvl; [LogLevel.debug_, LogLevel.diagnostic, LogLevel.info, LogLevel.warn, LogLevel.error, LogLevel.critical, LogLevel.fatal]) {
+			msg.level = lvl;
+			logger.beginLine(msg);
+			logger.put("αβγ");
+			logger.endLine();
+		}
+		fstream.close();
+
+		import std.file;
+		import std.string;
+		auto lines = splitLines(readText(fstream.path().toNativeString()), KeepTerminator.yes);
+		assert(lines.length == 7);
+		assert(lines[0] == "<143>1 0000-01-01T00:00:00.000001 - " ~ BOM ~
"appname - - - " ~ BOM ~ "αβγ\n"); + assert(lines[1] == "<142>1 0000-01-01T00:00:00.000001 - " ~ BOM ~ "appname - - - " ~ BOM ~ "αβγ\n"); + assert(lines[2] == "<141>1 0000-01-01T00:00:00.000001 - " ~ BOM ~ "appname - - - " ~ BOM ~ "αβγ\n"); + assert(lines[3] == "<140>1 0000-01-01T00:00:00.000001 - " ~ BOM ~ "appname - - - " ~ BOM ~ "αβγ\n"); + assert(lines[4] == "<139>1 0000-01-01T00:00:00.000001 - " ~ BOM ~ "appname - - - " ~ BOM ~ "αβγ\n"); + assert(lines[5] == "<138>1 0000-01-01T00:00:00.000001 - " ~ BOM ~ "appname - - - " ~ BOM ~ "αβγ\n"); + assert(lines[6] == "<137>1 0000-01-01T00:00:00.000001 - " ~ BOM ~ "appname - - - " ~ BOM ~ "αβγ\n"); + removeFile(fstream.path().toNativeString()); + } +} + +/// Returns: this host's host name. +/// +/// If the host name cannot be determined the function returns null. +private string hostName() +{ + string hostName; + + version (Posix) { + import core.sys.posix.sys.utsname; + utsname name; + if (uname(&name)) return hostName; + hostName = name.nodename.to!string(); + + import std.socket; + auto ih = new InternetHost; + if (!ih.getHostByName(hostName)) return hostName; + hostName = ih.name; + } + // TODO: determine proper host name on windows + + return hostName; +} + +private { + __gshared shared(Logger)[] ss_loggers; + shared(FileLogger) ss_stdoutLogger; +} + +private shared(Logger)[] getLoggers() nothrow @trusted { return ss_loggers; } + +package void initializeLogModule() +{ + version (Windows) { + version (VibeWinrtDriver) enum disable_stdout = true; + else { + enum disable_stdout = false; + if (!GetStdHandle(STD_OUTPUT_HANDLE) || !GetStdHandle(STD_ERROR_HANDLE)) return; + } + } else enum disable_stdout = false; + + static if (!disable_stdout) { + ss_stdoutLogger = cast(shared)new FileLogger(stdout, stderr); + { + auto l = ss_stdoutLogger.lock(); + l.minLevel = LogLevel.info; + l.format = FileLogger.Format.plain; + } + registerLogger(ss_stdoutLogger); + + bool[4] verbose; + version (VibeNoDefaultArgs) {} + else { + readOption("verbose|v" , &verbose[0], "Enables diagnostic messages (verbosity level 1)."); + readOption("vverbose|vv", &verbose[1], "Enables debugging output (verbosity level 2)."); + readOption("vvv" , &verbose[2], "Enables high frequency debugging output (verbosity level 3)."); + readOption("vvvv" , &verbose[3], "Enables high frequency trace output (verbosity level 4)."); + } + + foreach_reverse (i, v; verbose) + if (v) { + setLogFormat(FileLogger.Format.thread); + setLogLevel(cast(LogLevel)(LogLevel.diagnostic - i)); + break; + } + } +} + +private struct LogOutputRange { + LogLine info; + ScopedLock!Logger* logger; + + @safe: + + this(ref ScopedLock!Logger logger, string file, int line, LogLevel level) + { + () @trusted { this.logger = &logger; } (); + try { + () @trusted { this.info.time = Clock.currTime(UTC()); }(); // not @safe as of 2.065 + //this.info.mod = mod; + //this.info.func = func; + this.info.file = file; + this.info.line = line; + this.info.level = level; + this.info.thread = () @trusted { return Thread.getThis(); }(); // not @safe as of 2.065 + this.info.threadID = makeid(this.info.thread); + this.info.fiber = () @trusted { return Fiber.getThis(); }(); // not @safe as of 2.065 + this.info.fiberID = makeid(this.info.fiber); + } catch (Exception e) { + try { + () @trusted { writefln("Error during logging: %s", e.toString()); }(); // not @safe as of 2.065 + } catch(Exception) {} + assert(false, "Exception during logging: "~e.msg); + } + + this.logger.beginLine(info); + } + + void finalize() + { + logger.endLine(); + } 
+
+	void put(scope const(char)[] text)
+	{
+		import std.string : indexOf;
+		auto idx = text.indexOf('\n');
+		if (idx >= 0) {
+			logger.put(text[0 .. idx]);
+			logger.endLine();
+			logger.beginLine(info);
+			logger.put(text[idx+1 .. $]);
+		} else logger.put(text);
+	}
+
+	void put(char ch) @trusted { put((&ch)[0 .. 1]); }
+
+	void put(dchar ch)
+	{
+		if (ch < 128) put(cast(char)ch);
+		else {
+			char[4] buf;
+			auto len = std.utf.encode(buf, ch);
+			put(buf[0 .. len]);
+		}
+	}
+
+	private uint makeid(T)(T ptr) @trusted { return (cast(ulong)cast(void*)ptr & 0xFFFFFFFF) ^ (cast(ulong)cast(void*)ptr >> 32); }
+}
+
+private version (Windows) {
+	import core.sys.windows.windows;
+	enum STD_OUTPUT_HANDLE = cast(DWORD)-11;
+	enum STD_ERROR_HANDLE = cast(DWORD)-12;
+	extern(System) HANDLE GetStdHandle(DWORD nStdHandle);
+}
+
+unittest { // make sure the default logger doesn't allocate/is usable within finalizers
+	bool destroyed = false;
+
+	class Test {
+		~this()
+		{
+			logInfo("logInfo doesn't allocate.");
+			destroyed = true;
+		}
+	}
+
+	auto t = new Test;
+	destroy(t);
+	assert(destroyed);
+}
diff --git a/source/vibe/core/net.d b/source/vibe/core/net.d
new file mode 100644
index 0000000..3dc6e96
--- /dev/null
+++ b/source/vibe/core/net.d
@@ -0,0 +1,540 @@
+/**
+	TCP/UDP connection and server handling.
+
+	Copyright: © 2012-2016 RejectedSoftware e.K.
+	Authors: Sönke Ludwig
+	License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file.
+*/
+module vibe.core.net;
+
+import eventcore.core;
+import std.exception : enforce;
+import std.format : format;
+import std.functional : toDelegate;
+import std.socket : AddressFamily, UnknownAddress;
+import vibe.core.log;
+import vibe.internal.async;
+
+
+/**
+	Resolves the given host name/IP address string.
+
+	Setting use_dns to false will only allow IP address strings but also guarantees
+	that the call will not block.
+*/
+NetworkAddress resolveHost(string host, AddressFamily address_family = AddressFamily.UNSPEC, bool use_dns = true)
+{
+	return resolveHost(host, cast(ushort)address_family, use_dns);
+}
+/// ditto
+NetworkAddress resolveHost(string host, ushort address_family, bool use_dns = true)
+{
+	NetworkAddress ret;
+	ret.family = address_family;
+	if (host == "127.0.0.1") {
+		ret.family = AddressFamily.INET;
+		ret.sockAddrInet4.sin_addr.s_addr = 0x0100007F;
+	} else assert(false);
+	return ret;
+}
+
+
+/**
+	Starts listening on the specified port.
+
+	'connection_callback' will be called for each client that connects to the
+	server socket. Each new connection gets its own fiber. The stream parameter
+	then allows performing blocking I/O on the client socket.
+
+	The address parameter can be used to specify the network
+	interface on which the server socket is supposed to listen for connections.
+	By default, all IPv4 and IPv6 interfaces will be used.
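+
+	A minimal echo server sketch:
+	---
+	listenTCP(7, (conn) @safe nothrow {
+		try {
+			while (!conn.empty) {
+				auto data = conn.peek();
+				conn.write(data);
+				conn.skip(data.length);
+			}
+		} catch (Exception e) logError("Error echoing data: %s", e.msg);
+	});
+	---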
+*/
+TCPListener[] listenTCP(ushort port, TCPConnectionDelegate connection_callback, TCPListenOptions options = TCPListenOptions.defaults)
+{
+	TCPListener[] ret;
+	try ret ~= listenTCP(port, connection_callback, "::", options);
+	catch (Exception e) logDiagnostic("Failed to listen on \"::\": %s", e.msg);
+	try ret ~= listenTCP(port, connection_callback, "0.0.0.0", options);
+	catch (Exception e) logDiagnostic("Failed to listen on \"0.0.0.0\": %s", e.msg);
+	enforce(ret.length > 0, format("Failed to listen on all interfaces on port %s", port));
+	return ret;
+}
+/// ditto
+TCPListener listenTCP(ushort port, TCPConnectionDelegate connection_callback, string address, TCPListenOptions options = TCPListenOptions.defaults)
+{
+	auto addr = resolveHost(address);
+	addr.port = port;
+	auto sock = eventDriver.listenStream(addr.toUnknownAddress, (StreamListenSocketFD ls, StreamSocketFD s) @safe nothrow {
+		import vibe.core.core : runTask;
+		runTask(connection_callback, TCPConnection(s));
+	});
+	return TCPListener(sock);
+}
+
+/**
+	Starts listening on the specified port.
+
+	This function is the same as listenTCP but takes a function callback instead of a delegate.
+*/
+TCPListener[] listenTCP_s(ushort port, TCPConnectionFunction connection_callback, TCPListenOptions options = TCPListenOptions.defaults)
+{
+	return listenTCP(port, toDelegate(connection_callback), options);
+}
+/// ditto
+TCPListener listenTCP_s(ushort port, TCPConnectionFunction connection_callback, string address, TCPListenOptions options = TCPListenOptions.defaults)
+{
+	return listenTCP(port, toDelegate(connection_callback), address, options);
+}
+
+/**
+	Establishes a connection to the given host/port.
+*/
+TCPConnection connectTCP(string host, ushort port)
+{
+	NetworkAddress addr = resolveHost(host);
+	addr.port = port;
+	return connectTCP(addr);
+}
+/// ditto
+TCPConnection connectTCP(NetworkAddress addr)
+{
+	import std.conv : to;
+
+	scope uaddr = new UnknownAddress;
+	addr.toUnknownAddress(uaddr);
+	auto result = eventDriver.asyncAwait!"connectStream"(uaddr);
+	enforce(result[1] == ConnectStatus.connected, "Failed to connect to "~addr.toString()~": "~result[1].to!string);
+	return TCPConnection(result[0]);
+}
+
+
+/**
+	Creates a bound UDP socket suitable for sending and receiving packets.
+*/
+UDPConnection listenUDP(ushort port, string bind_address = "0.0.0.0")
+{
+	assert(false);
+}
+
+
+/// Callback invoked for incoming TCP connections.
+@safe nothrow alias TCPConnectionDelegate = void delegate(TCPConnection stream);
+/// ditto
+@safe nothrow alias TCPConnectionFunction = void function(TCPConnection stream);
+
+
+/**
+	Represents a network/socket address.
+*/
+struct NetworkAddress {
+	version (Windows) import std.c.windows.winsock;
+	else import core.sys.posix.netinet.in_;
+
+	@safe:
+
+	private union {
+		sockaddr addr;
+		sockaddr_in addr_ip4;
+		sockaddr_in6 addr_ip6;
+	}
+
+	/** Family of the socket address.
+	*/
+	@property ushort family() const pure nothrow { return addr.sa_family; }
+	/// ditto
+	@property void family(AddressFamily val) pure nothrow { addr.sa_family = cast(ubyte)val; }
+	/// ditto
+	@property void family(ushort val) pure nothrow { addr.sa_family = cast(ubyte)val; }
+
+	/** The port in host byte order.
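+
+		A short usage sketch:
+		---
+		auto addr = resolveHost("127.0.0.1");
+		addr.port = 8080;
+		logInfo("Address: %s", addr.toString()); // prints "127.0.0.1:8080"
+		---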
+ */ + @property ushort port() + const pure nothrow { + ushort nport; + switch (this.family) { + default: assert(false, "port() called for invalid address family."); + case AF_INET: nport = addr_ip4.sin_port; break; + case AF_INET6: nport = addr_ip6.sin6_port; break; + } + return () @trusted { return ntoh(nport); } (); + } + /// ditto + @property void port(ushort val) + pure nothrow { + auto nport = () @trusted { return hton(val); } (); + switch (this.family) { + default: assert(false, "port() called for invalid address family."); + case AF_INET: addr_ip4.sin_port = nport; break; + case AF_INET6: addr_ip6.sin6_port = nport; break; + } + } + + /** A pointer to a sockaddr struct suitable for passing to socket functions. + */ + @property inout(sockaddr)* sockAddr() inout pure nothrow { return &addr; } + + /** Size of the sockaddr struct that is returned by sockAddr(). + */ + @property int sockAddrLen() + const pure nothrow { + switch (this.family) { + default: assert(false, "sockAddrLen() called for invalid address family."); + case AF_INET: return addr_ip4.sizeof; + case AF_INET6: return addr_ip6.sizeof; + } + } + + @property inout(sockaddr_in)* sockAddrInet4() inout pure nothrow + in { assert (family == AF_INET); } + body { return &addr_ip4; } + + @property inout(sockaddr_in6)* sockAddrInet6() inout pure nothrow + in { assert (family == AF_INET6); } + body { return &addr_ip6; } + + /** Returns a string representation of the IP address + */ + string toAddressString() + const { + import std.array : appender; + import std.string : format; + import std.format : formattedWrite; + ubyte[2] _dummy = void; // Workaround for DMD regression in master + + switch (this.family) { + default: assert(false, "toAddressString() called for invalid address family."); + case AF_INET: + ubyte[4] ip = () @trusted { return (cast(ubyte*)&addr_ip4.sin_addr.s_addr)[0 .. 4]; } (); + return format("%d.%d.%d.%d", ip[0], ip[1], ip[2], ip[3]); + case AF_INET6: + ubyte[16] ip = addr_ip6.sin6_addr.s6_addr; + auto ret = appender!string(); + ret.reserve(40); + foreach (i; 0 .. 8) { + if (i > 0) ret.put(':'); + _dummy[] = ip[i*2 .. i*2+2]; + ret.formattedWrite("%x", bigEndianToNative!ushort(_dummy)); + } + return ret.data; + } + } + + /** Returns a full string representation of the address, including the port number. + */ + string toString() + const { + auto ret = toAddressString(); + switch (this.family) { + default: assert(false, "toString() called for invalid address family."); + case AF_INET: return ret ~ format(":%s", port); + case AF_INET6: return format("[%s]:%s", ret, port); + } + } + + UnknownAddress toUnknownAddress() + const { + auto ret = new UnknownAddress; + toUnknownAddress(ret); + return ret; + } + + void toUnknownAddress(scope UnknownAddress addr) + const { + *addr.name = *this.sockAddr; + } + + version(Have_libev) {} + else { + unittest { + void test(string ip) { + auto res = () @trusted { return resolveHost(ip, AF_UNSPEC, false); } ().toAddressString(); + assert(res == ip, + "IP "~ip~" yielded wrong string representation: "~res); + } + test("1.2.3.4"); + test("102:304:506:708:90a:b0c:d0e:f10"); + } + } +} + +/** + Represents a single TCP connection. 
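+
+	A short client-side sketch (assumes a server listening on localhost:8080):
+	---
+	auto conn = connectTCP("127.0.0.1", 8080);
+	conn.write(cast(const(ubyte)[])"hello\r\n");
+	while (!conn.empty) {
+		auto chunk = conn.peek();
+		// ... process chunk ...
+		conn.skip(chunk.length);
+	}
+	conn.close();
+	---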
+*/ +struct TCPConnection { + @safe: + + import core.time : seconds; + import vibe.internal.array : FixedRingBuffer; + //static assert(isConnectionStream!TCPConnection); + + struct Context { + FixedRingBuffer!ubyte readBuffer; + } + + private { + StreamSocketFD m_socket; + Context* m_context; + } + + private this(StreamSocketFD socket) + nothrow { + m_socket = socket; + m_context = &eventDriver.userData!Context(socket); + m_context.readBuffer.capacity = 4096; + } + + this(this) + nothrow { + if (m_socket != StreamSocketFD.invalid) + eventDriver.addRef(m_socket); + } + + ~this() + nothrow { + if (m_socket != StreamSocketFD.invalid) + eventDriver.releaseRef(m_socket); + } + + @property void tcpNoDelay(bool enabled) { eventDriver.setTCPNoDelay(m_socket, enabled); } + @property bool tcpNoDelay() const { assert(false); } + @property void keepAlive(bool enable) { assert(false); } + @property bool keepAlive() const { assert(false); } + @property void readTimeout(Duration duration) { } + @property Duration readTimeout() const { assert(false); } + @property string peerAddress() const { return ""; } + @property NetworkAddress localAddress() const { return NetworkAddress.init; } + @property NetworkAddress remoteAddress() const { return NetworkAddress.init; } + @property bool connected() + const { + if (m_socket == StreamSocketFD.invalid) return false; + auto s = eventDriver.getConnectionState(m_socket); + return s >= ConnectionState.connected && s < ConnectionState.activeClose; + } + @property bool empty() { return leastSize == 0; } + @property ulong leastSize() { waitForData(); return m_context.readBuffer.length; } + @property bool dataAvailableForRead() { return waitForData(0.seconds); } + + void close() + nothrow { + //logInfo("close %s", cast(int)m_fd); + if (m_socket != StreamSocketFD.invalid) { + eventDriver.shutdownSocket(m_socket); + eventDriver.releaseRef(m_socket); + m_socket = StreamSocketFD.invalid; + m_context = null; + } + } + + bool waitForData(Duration timeout = Duration.max) + { +mixin(tracer); + // TODO: timeout!! + if (m_context.readBuffer.length > 0) return true; + auto mode = timeout <= 0.seconds ? IOMode.immediate : IOMode.once; + auto res = eventDriver.asyncAwait!"readSocket"(m_socket, m_context.readBuffer.peekDst(), mode); + logTrace("Socket %s, read %s bytes: %s", res[0], res[2], res[1]); + + assert(m_context.readBuffer.length == 0); + m_context.readBuffer.putN(res[2]); + switch (res[1]) { + default: + logInfo("read status %s", res[1]); + throw new Exception("Error reading data from socket."); + case IOStatus.ok: break; + case IOStatus.wouldBlock: assert(mode == IOMode.immediate); break; + case IOStatus.disconnected: break; + } + + return m_context.readBuffer.length > 0; + } + + const(ubyte)[] peek() { return m_context.readBuffer.peek(); } + + void skip(ulong count) + { + import std.algorithm.comparison : min; + + while (count > 0) { + waitForData(); + auto n = min(count, m_context.readBuffer.length); + m_context.readBuffer.popFrontN(n); + if (m_context.readBuffer.empty) m_context.readBuffer.clear(); // start filling at index 0 again + count -= n; + } + } + + void read(ubyte[] dst) + { +mixin(tracer); + import std.algorithm.comparison : min; + while (dst.length > 0) { + enforce(waitForData(), "Reached end of stream while reading data."); + assert(m_context.readBuffer.length > 0); + auto l = min(dst.length, m_context.readBuffer.length); + m_context.readBuffer.read(dst[0 .. 
l]);
+			if (m_context.readBuffer.empty) m_context.readBuffer.clear(); // start filling at index 0 again
+			dst = dst[l .. $];
+		}
+	}
+
+	void write(in ubyte[] bytes)
+	{
+mixin(tracer);
+		if (bytes.length == 0) return;
+
+		auto res = eventDriver.asyncAwait!"writeSocket"(m_socket, bytes, IOMode.all);
+
+		switch (res[1]) {
+			default:
+				throw new Exception("Error writing data to socket.");
+			case IOStatus.ok: break;
+			case IOStatus.disconnected: break;
+		}
+	}
+
+	void flush() {
+mixin(tracer);
+	}
+	void finalize() {}
+	void write(InputStream)(InputStream stream, ulong nbytes = 0) { writeDefault(stream, nbytes); }
+
+	private void writeDefault(InputStream)(InputStream stream, ulong nbytes = 0)
+	{
+		import std.algorithm.comparison : min;
+
+		static struct Buffer { ubyte[64*1024 - 4*size_t.sizeof] bytes = void; }
+		scope bufferobj = new Buffer; // FIXME: use heap allocation
+		auto buffer = bufferobj.bytes[];
+
+		//logTrace("default write %d bytes, empty=%s", nbytes, stream.empty);
+		if( nbytes == 0 ){
+			while( !stream.empty ){
+				size_t chunk = min(stream.leastSize, buffer.length);
+				assert(chunk > 0, "leastSize returned zero for non-empty stream.");
+				//logTrace("read pipe chunk %d", chunk);
+				stream.read(buffer[0 .. chunk]);
+				write(buffer[0 .. chunk]);
+			}
+		} else {
+			while( nbytes > 0 ){
+				size_t chunk = min(nbytes, buffer.length);
+				//logTrace("read pipe chunk %d", chunk);
+				stream.read(buffer[0 .. chunk]);
+				write(buffer[0 .. chunk]);
+				nbytes -= chunk;
+			}
+		}
+	}
+}
+
+
+/**
+	Represents a listening TCP socket.
+*/
+struct TCPListener {
+	private {
+		StreamListenSocketFD m_socket;
+	}
+
+	this(StreamListenSocketFD socket)
+	{
+		m_socket = socket;
+	}
+
+	/// The local address at which TCP connections are accepted.
+	@property NetworkAddress bindAddress()
+	{
+		assert(false);
+	}
+
+	/// Stops listening and closes the socket.
+	void stopListening()
+	{
+		assert(false);
+	}
+}
+
+
+/**
+	Represents a bound and possibly 'connected' UDP socket.
+*/
+struct UDPConnection {
+	/** Returns the address to which the UDP socket is bound.
+	*/
+	@property string bindAddress() const { assert(false); }
+
+	/** Determines if the socket is allowed to send to broadcast addresses.
+	*/
+	@property bool canBroadcast() const { assert(false); }
+	/// ditto
+	@property void canBroadcast(bool val) { assert(false); }
+
+	/// The local/bind address of the underlying socket.
+	@property NetworkAddress localAddress() const { assert(false); }
+
+	/** Stops listening for datagrams and frees all resources.
+	*/
+	void close() { assert(false); }
+
+	/** Locks the UDP connection to a certain peer.
+
+		Once connected, the UDPConnection can only communicate with the specified peer.
+		Otherwise communication with any reachable peer is possible.
+	*/
+	void connect(string host, ushort port) { assert(false); }
+	/// ditto
+	void connect(NetworkAddress address) { assert(false); }
+
+	/** Sends a single packet.
+
+		If peer_address is given, the packet is sent to that address. Otherwise the packet
+		will be sent to the address specified by a call to connect().
+	*/
+	void send(in ubyte[] data, in NetworkAddress* peer_address = null) { assert(false); }
+
+	/** Receives a single packet.
+
+		If a buffer is given, it must be large enough to hold the full packet.
+
+		The timeout overload will throw an Exception if no data arrives before the
+		specified duration has elapsed.
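+
+		Example (API sketch; the UDP implementation is still a stub in this
+		commit):
+		---
+		auto udp = listenUDP(1234);
+		NetworkAddress peer;
+		auto pack = udp.recv(1.seconds, null, &peer);
+		udp.send(pack, &peer); // echo the datagram back to its sender
+		---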
+ */ + ubyte[] recv(ubyte[] buf = null, NetworkAddress* peer_address = null) { assert(false); } + /// ditto + ubyte[] recv(Duration timeout, ubyte[] buf = null, NetworkAddress* peer_address = null) { assert(false); } +} + + +/** + Flags to control the behavior of listenTCP. +*/ +enum TCPListenOptions { + /// Don't enable any particular option + defaults = 0, + /// Causes incoming connections to be distributed across the thread pool + distribute = 1<<0, + /// Disables automatic closing of the connection when the connection callback exits + disableAutoClose = 1<<1, +} + +private pure nothrow { + import std.bitmanip; + + ushort ntoh(ushort val) + { + version (LittleEndian) return swapEndian(val); + else version (BigEndian) return val; + else static assert(false, "Unknown endianness."); + } + + ushort hton(ushort val) + { + version (LittleEndian) return swapEndian(val); + else version (BigEndian) return val; + else static assert(false, "Unknown endianness."); + } +} + +private enum tracer = ""; \ No newline at end of file diff --git a/source/vibe/core/path.d b/source/vibe/core/path.d new file mode 100644 index 0000000..bcc7151 --- /dev/null +++ b/source/vibe/core/path.d @@ -0,0 +1,25 @@ +module vibe.core.path; + +struct Path { + nothrow: @safe: + private string m_path; + + this(string p) + { + m_path = p; + } + + string toString() const { return m_path; } + + string toNativeString() const { return m_path; } +} + +struct PathEntry { + nothrow: @safe: + private string m_name; + + this(string name) + { + m_name = name; + } +} diff --git a/source/vibe/core/sync.d b/source/vibe/core/sync.d new file mode 100644 index 0000000..8914193 --- /dev/null +++ b/source/vibe/core/sync.d @@ -0,0 +1,1346 @@ +/** + Interruptible Task synchronization facilities + + Copyright: © 2012-2016 RejectedSoftware e.K. + Authors: Leonid Kramer, Sönke Ludwig, Manuel Frischknecht + License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file. +*/ +module vibe.core.sync; + +import vibe.core.task; + +import core.atomic; +import core.sync.mutex; +import core.sync.condition; +import eventcore.core; +import std.exception; +import std.stdio; +import std.traits : ReturnType; + + +enum LockMode { + lock, + tryLock, + defer +} + +interface Lockable { + @safe: + void lock(); + void unlock(); + bool tryLock(); +} + +/** RAII lock for the Mutex class. +*/ +struct ScopedMutexLock +{ + @disable this(this); + private { + Mutex m_mutex; + bool m_locked; + LockMode m_mode; + } + + this(core.sync.mutex.Mutex mutex, LockMode mode = LockMode.lock) { + assert(mutex !is null); + m_mutex = mutex; + + final switch (mode) { + case LockMode.lock: lock(); break; + case LockMode.tryLock: tryLock(); break; + case LockMode.defer: break; + } + } + + ~this() + { + if( m_locked ) + m_mutex.unlock(); + } + + @property bool locked() const { return m_locked; } + + void unlock() + { + enforce(m_locked); + m_mutex.unlock(); + m_locked = false; + } + + bool tryLock() + { + enforce(!m_locked); + return m_locked = m_mutex.tryLock(); + } + + void lock() + { + enforce(!m_locked); + m_locked = true; + m_mutex.lock(); + } +} + + +/* + Only for internal use: + Ensures that a mutex is locked while executing the given procedure. + + This function works for all kinds of mutexes, in particular for + $(D core.sync.mutex.Mutex), $(D TaskMutex) and $(D InterruptibleTaskMutex). + + Returns: + Returns the value returned from $(D PROC), if any. 
+*/
+/// private
+ReturnType!PROC performLocked(alias PROC, MUTEX)(MUTEX mutex)
+{
+	mutex.lock();
+	scope (exit) mutex.unlock();
+	return PROC();
+}
+
+///
+unittest {
+	int protected_var = 0;
+	auto mtx = new TaskMutex;
+	mtx.performLocked!({
+		protected_var++;
+	});
+}
+
+
+/**
+	Thread-local semaphore implementation for tasks.
+
+	When the semaphore runs out of concurrent locks, additional calls to lock()
+	will suspend the calling task until other locks are given up. This class is
+	used in `vibe.core.connectionpool` to limit the number of concurrent
+	connections.
+*/
+class LocalTaskSemaphore
+{
+	// requires a queue
+	import std.container.binaryheap;
+	import std.container.array;
+	//import vibe.utils.memory;
+
+	private {
+		struct Waiter {
+			ManualEvent signal;
+			ubyte priority;
+			uint seq;
+		}
+
+		BinaryHeap!(Array!Waiter, asc) m_waiters;
+		uint m_maxLocks;
+		uint m_locks;
+		uint m_seq;
+	}
+
+	this(uint max_locks)
+	{
+		m_maxLocks = max_locks;
+	}
+
+	/// Maximum number of concurrent locks
+	@property void maxLocks(uint max_locks) { m_maxLocks = max_locks; }
+	/// ditto
+	@property uint maxLocks() const { return m_maxLocks; }
+
+	/// Number of concurrent locks still available
+	@property uint available() const { return m_maxLocks - m_locks; }
+
+	/** Try to acquire a lock.
+
+		If a lock cannot be acquired immediately, returns `false` and leaves the
+		semaphore in its previous state.
+
+		Returns:
+			`true` is returned $(I iff) the number of available locks is greater
+			than zero.
+	*/
+	bool tryLock()
+	{
+		if (available > 0)
+		{
+			m_locks++;
+			return true;
+		}
+		return false;
+	}
+
+	/** Acquires a lock.
+
+		Once the limit of concurrent locks is reached, this method will block
+		until the number of locks drops below the limit.
+	*/
+	void lock(ubyte priority = 0)
+	{
+		import std.algorithm.comparison : min;
+
+		if (tryLock())
+			return;
+
+		Waiter w;
+		w.signal = createManualEvent();
+		w.priority = priority;
+		w.seq = min(0, m_seq - w.priority);
+		if (++m_seq == uint.max)
+			rewindSeq();
+
+		m_waiters.insert(w);
+		do w.signal.wait(); while (!tryLock());
+		// on resume:
+		destroy(w.signal);
+	}
+
+	/** Gives up an existing lock.
+	*/
+	void unlock()
+	{
+		m_locks--;
+		if (m_waiters.length > 0 && available > 0) {
+			Waiter w = m_waiters.front();
+			w.signal.emit(); // resume one
+			m_waiters.removeFront();
+		}
+	}
+
+	// if true, a goes after b. ie. b comes out front()
+	/// private
+	static bool asc(ref Waiter a, ref Waiter b)
+	{
+		if (a.seq == b.seq) {
+			if (a.priority == b.priority) {
+				// resolve using the pointer address
+				return (cast(size_t)&a.signal) > (cast(size_t) &b.signal);
+			}
+			// resolve using priority
+			return a.priority < b.priority;
+		}
+		// resolve using seq number
+		return a.seq > b.seq;
+	}
+
+	private void rewindSeq()
+	{
+		Array!Waiter waiters = m_waiters.release();
+		ushort min_seq;
+		import std.algorithm : min;
+		foreach (ref waiter; waiters[])
+			min_seq = min(waiter.seq, min_seq);
+		foreach (ref waiter; waiters[])
+			waiter.seq -= min_seq;
+		m_waiters.assume(waiters);
+	}
+}
+
+
+/**
+	Mutex implementation for fibers.
+
+	This mutex type can be used in exchange for a core.sync.mutex.Mutex, but
+	does not block the event loop when contention happens. Note that this
+	mutex does not allow recursive locking.
+
+	Notice:
+		Because this class is annotated nothrow, it cannot be interrupted
+		using $(D vibe.core.task.Task.interrupt()). The corresponding
+		$(D InterruptException) will be deferred until the next blocking
+		operation yields the event loop.
+
+		Use $(D InterruptibleTaskMutex) as an alternative that can be
+		interrupted.
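+
+	A simple usage sketch:
+	---
+	auto mutex = new TaskMutex;
+	mutex.lock();
+	scope (exit) mutex.unlock();
+	// ... perform blocking operations; other tasks on this thread keep running ...
+	---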
+
+	See_Also: InterruptibleTaskMutex, RecursiveTaskMutex, core.sync.mutex.Mutex
+*/
+class TaskMutex : core.sync.mutex.Mutex, Lockable {
+	private TaskMutexImpl!false m_impl;
+
+	this(Object o) { m_impl.setup(); super(o); }
+	this() { m_impl.setup(); }
+
+	override bool tryLock() nothrow { return m_impl.tryLock(); }
+	override void lock() nothrow { m_impl.lock(); }
+	override void unlock() nothrow { m_impl.unlock(); }
+}
+
+unittest {
+	auto mutex = new TaskMutex;
+
+	{
+		auto lock = ScopedMutexLock(mutex);
+		assert(lock.locked);
+		assert(mutex.m_impl.m_locked);
+
+		auto lock2 = ScopedMutexLock(mutex, LockMode.tryLock);
+		assert(!lock2.locked);
+	}
+	assert(!mutex.m_impl.m_locked);
+
+	auto lock = ScopedMutexLock(mutex, LockMode.tryLock);
+	assert(lock.locked);
+	lock.unlock();
+	assert(!lock.locked);
+
+	synchronized(mutex){
+		assert(mutex.m_impl.m_locked);
+	}
+	assert(!mutex.m_impl.m_locked);
+
+	mutex.performLocked!({
+		assert(mutex.m_impl.m_locked);
+	});
+	assert(!mutex.m_impl.m_locked);
+
+	static if (__VERSION__ >= 2067) {
+		with(mutex.ScopedMutexLock) {
+			assert(mutex.m_impl.m_locked);
+		}
+	}
+}
+
+version (VibeLibevDriver) {} else // timers are not implemented for libev, yet
+unittest { // test deferred throwing
+	import vibe.core.core;
+
+	auto mutex = new TaskMutex;
+	auto t1 = runTask({
+		scope (failure) assert(false, "No exception expected in first task!");
+		mutex.lock();
+		scope (exit) mutex.unlock();
+		sleep(20.msecs);
+	});
+
+	auto t2 = runTask({
+		scope (failure) assert(false, "Only InterruptException supposed to be thrown!");
+		mutex.lock();
+		scope (exit) mutex.unlock();
+		try {
+			yield();
+			assert(false, "Yield is supposed to have thrown an InterruptException.");
+		} catch (InterruptException) {
+			// as expected!
+		}
+	});
+
+	runTask({
+		// mutex is now locked in first task for 20 ms
+		// the second task is waiting in lock()
+		t2.interrupt();
+		t1.join();
+		t2.join();
+		assert(!mutex.m_impl.m_locked); // ensure that the scope(exit) has been executed
+		exitEventLoop();
+	});
+
+	runEventLoop();
+}
+
+version (VibeLibevDriver) {} else // timers are not implemented for libev, yet
+unittest {
+	runMutexUnitTests!TaskMutex();
+}
+
+
+/**
+	Alternative to $(D TaskMutex) that supports interruption.
+
+	This class supports the use of $(D vibe.core.task.Task.interrupt()) while
+	waiting in the $(D lock()) method. However, because the interface is not
+	$(D nothrow), it cannot be used as an object monitor.
+
+	See_Also: $(D TaskMutex), $(D InterruptibleRecursiveTaskMutex)
+*/
+final class InterruptibleTaskMutex : Lockable {
+	private TaskMutexImpl!true m_impl;
+
+	this() { m_impl.setup(); }
+
+	bool tryLock() nothrow { return m_impl.tryLock(); }
+	void lock() { m_impl.lock(); }
+	void unlock() nothrow { m_impl.unlock(); }
+}
+
+version (VibeLibevDriver) {} else // timers are not implemented for libev, yet
+unittest {
+	runMutexUnitTests!InterruptibleTaskMutex();
+}
+
+
+
+/**
+	Recursive mutex implementation for tasks.
+
+	This mutex type can be used in exchange for a core.sync.mutex.Mutex, but
+	does not block the event loop when contention happens.
+
+	Notice:
+		Because this class is annotated nothrow, it cannot be interrupted
+		using $(D vibe.core.task.Task.interrupt()). The corresponding
+		$(D InterruptException) will be deferred until the next blocking
+		operation yields to the event loop.
+
+		Use $(D InterruptibleRecursiveTaskMutex) as an alternative that can be
+		interrupted.
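+
+	Example:
+		A minimal sketch of re-entrant locking (illustrative only):
+		---
+		auto mutex = new RecursiveTaskMutex;
+		mutex.lock();
+		mutex.lock(); // the owning task may lock again without blocking
+		mutex.unlock();
+		mutex.unlock(); // released once the lock count drops to zero
+		---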
+ + See_Also: TaskMutex, core.sync.mutex.Mutex +*/ +class RecursiveTaskMutex : core.sync.mutex.Mutex, Lockable { + private RecursiveTaskMutexImpl!false m_impl; + + this(Object o) { m_impl.setup(); super(o); } + this() { m_impl.setup(); } + + override bool tryLock() { return m_impl.tryLock(); } + override void lock() { m_impl.lock(); } + override void unlock() { m_impl.unlock(); } +} + +version (VibeLibevDriver) {} else // timers are not implemented for libev, yet +unittest { + runMutexUnitTests!RecursiveTaskMutex(); +} + + +/** + Alternative to $(D RecursiveTaskMutex) that supports interruption. + + This class supports the use of $(D vibe.core.task.Task.interrupt()) while + waiting in the $(D lock()) method. However, because the interface is not + $(D nothrow), it cannot be used as an object monitor. + + See_Also: $(D RecursiveTaskMutex), $(D InterruptibleTaskMutex) +*/ +final class InterruptibleRecursiveTaskMutex : Lockable { + private RecursiveTaskMutexImpl!true m_impl; + + this() { m_impl.setup(); } + + bool tryLock() { return m_impl.tryLock(); } + void lock() { m_impl.lock(); } + void unlock() { m_impl.unlock(); } +} + +version (VibeLibevDriver) {} else // timers are not implemented for libev, yet +unittest { + runMutexUnitTests!InterruptibleRecursiveTaskMutex(); +} + + +private void runMutexUnitTests(M)() +{ + import vibe.core.core; + + auto m = new M; + Task t1, t2; + void runContendedTasks(bool interrupt_t1, bool interrupt_t2) { + assert(!m.m_impl.m_locked); + + // t1 starts first and acquires the mutex for 20 ms + // t2 starts second and has to wait in m.lock() + t1 = runTask({ + assert(!m.m_impl.m_locked); + m.lock(); + assert(m.m_impl.m_locked); + if (interrupt_t1) assertThrown!InterruptException(sleep(100.msecs)); + else assertNotThrown(sleep(20.msecs)); + m.unlock(); + }); + t2 = runTask({ + assert(!m.tryLock()); + if (interrupt_t2) { + try m.lock(); + catch (InterruptException) return; + try yield(); // rethrows any deferred exceptions + catch (InterruptException) { + m.unlock(); + return; + } + assert(false, "Supposed to have thrown an InterruptException."); + } else assertNotThrown(m.lock()); + assert(m.m_impl.m_locked); + sleep(20.msecs); + m.unlock(); + assert(!m.m_impl.m_locked); + }); + } + + // basic lock test + m.performLocked!({ + assert(m.m_impl.m_locked); + }); + assert(!m.m_impl.m_locked); + + // basic contention test + runContendedTasks(false, false); + runTask({ + assert(t1.running && t2.running); + assert(m.m_impl.m_locked); + t1.join(); + assert(!t1.running && t2.running); + yield(); // give t2 a chance to take the lock + assert(m.m_impl.m_locked); + t2.join(); + assert(!t2.running); + assert(!m.m_impl.m_locked); + exitEventLoop(); + }); + runEventLoop(); + assert(!m.m_impl.m_locked); + + // interruption test #1 + runContendedTasks(true, false); + runTask({ + assert(t1.running && t2.running); + assert(m.m_impl.m_locked); + t1.interrupt(); + t1.join(); + assert(!t1.running && t2.running); + yield(); // give t2 a chance to take the lock + assert(m.m_impl.m_locked); + t2.join(); + assert(!t2.running); + assert(!m.m_impl.m_locked); + exitEventLoop(); + }); + runEventLoop(); + assert(!m.m_impl.m_locked); + + // interruption test #2 + runContendedTasks(false, true); + runTask({ + assert(t1.running && t2.running); + assert(m.m_impl.m_locked); + t2.interrupt(); + t2.join(); + assert(!t2.running); + static if (is(M == InterruptibleTaskMutex) || is (M == InterruptibleRecursiveTaskMutex)) + assert(t1.running && m.m_impl.m_locked); + t1.join(); + assert(!t1.running); + 
assert(!m.m_impl.m_locked); + exitEventLoop(); + }); + runEventLoop(); + assert(!m.m_impl.m_locked); +} + + +/** + Event loop based condition variable or "event" implementation. + + This class can be used in exchange for a $(D core.sync.condition.Condition) + to avoid blocking the event loop when waiting. + + Notice: + Because this class is annotated nothrow, it cannot be interrupted + using $(D vibe.core.task.Task.interrupt()). The corresponding + $(D InterruptException) will be deferred until the next blocking + operation yields to the event loop. + + Use $(D InterruptibleTaskCondition) as an alternative that can be + interrupted. + + Note that it is generally not safe to use a `TaskCondition` together with an + interruptible mutex type. + + See_Also: InterruptibleTaskCondition +*/ +class TaskCondition : core.sync.condition.Condition { + private TaskConditionImpl!(false, Mutex) m_impl; + + this(core.sync.mutex.Mutex mtx) { + m_impl.setup(mtx); + super(mtx); + } + override @property Mutex mutex() { return m_impl.mutex; } + override void wait() { m_impl.wait(); } + override bool wait(Duration timeout) { return m_impl.wait(timeout); } + override void notify() { m_impl.notify(); } + override void notifyAll() { m_impl.notifyAll(); } +} + +/** This example shows the typical usage pattern using a `while` loop to make + sure that the final condition is reached. +*/ +unittest { + import vibe.core.core; + + __gshared Mutex mutex; + __gshared TaskCondition condition; + __gshared int workers_still_running = 0; + + // setup the task condition + mutex = new Mutex; + condition = new TaskCondition(mutex); + + // start up the workers and count how many are running + foreach (i; 0 .. 4) { + workers_still_running++; + runWorkerTask({ + // simulate some work + sleep(100.msecs); + + // notify the waiter that we're finished + synchronized (mutex) + workers_still_running--; + condition.notify(); + }); + } + + // wait until all tasks have decremented the counter back to zero + synchronized (mutex) { + while (workers_still_running > 0) + condition.wait(); + } +} + + +/** + Alternative to `TaskCondition` that supports interruption. + + This class supports the use of `vibe.core.task.Task.interrupt()` while + waiting in the `wait()` method. + + See `TaskCondition` for an example. + + Notice: + Note that it is generally not safe to use an + `InterruptibleTaskCondition` together with an interruptible mutex type. + + See_Also: `TaskCondition` +*/ +final class InterruptibleTaskCondition { + private TaskConditionImpl!(true, Lockable) m_impl; + + this(core.sync.mutex.Mutex mtx) { m_impl.setup(mtx); } + this(Lockable mtx) { m_impl.setup(mtx); } + + @property Lockable mutex() { return m_impl.mutex; } + void wait() { m_impl.wait(); } + bool wait(Duration timeout) { return m_impl.wait(timeout); } + void notify() { m_impl.notify(); } + void notifyAll() { m_impl.notifyAll(); } +} + + +/** Creates a new signal that can be shared between fibers. +*/ +ManualEvent createManualEvent() +{ + return ManualEvent.init; +} + +/** A manually triggered cross-task event. + + Note: the ownership can be shared between multiple fibers and threads. 
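+
+	Example:
+		A sketch of the intended wait/emit protocol (note that most of the
+		methods are still unimplemented stubs in this initial version):
+		---
+		auto ev = createManualEvent();
+		auto ec = ev.emitCount;
+		// another task would call ev.emit() to wake this one up
+		ec = ev.wait(ec); // returns once the emit count has changed
+		---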
+*/ +struct ManualEvent { + bool opCast() const nothrow { return true; } + int emitCount() const nothrow { return 0; } + int emit() nothrow { return 0; } + int wait() { assert(false); } + int wait(int) { import vibe.core.core : sleep; sleep(30.seconds); assert(false); } + int wait(Duration, int) { assert(false); } + int waitUninterruptible() nothrow { assert(false); } + int waitUninterruptible(int) nothrow { assert(false); } + int waitUninterruptible(Duration, int) nothrow { assert(false); } +} +/+interface ManualEvent { + /// A counter that is increased with every emit() call + @property int emitCount() const nothrow; + + /// Emits the signal, waking up all owners of the signal. + void emit() nothrow; + + /** Acquires ownership and waits until the signal is emitted. + + Throws: + May throw an $(D InterruptException) if the task gets interrupted + using $(D Task.interrupt()). + */ + void wait(); + + /** Acquires ownership and waits until the emit count differs from the given one. + + Throws: + May throw an $(D InterruptException) if the task gets interrupted + using $(D Task.interrupt()). + */ + int wait(int reference_emit_count); + + /** Acquires ownership and waits until the emit count differs from the given one or until a timeout is reaced. + + Throws: + May throw an $(D InterruptException) if the task gets interrupted + using $(D Task.interrupt()). + */ + int wait(Duration timeout, int reference_emit_count); + + /** Same as $(D wait), but defers throwing any $(D InterruptException). + + This method is annotated $(D nothrow) at the expense that it cannot be + interrupted. + */ + int waitUninterruptible(int reference_emit_count) nothrow; + + /// ditto + int waitUninterruptible(Duration timeout, int reference_emit_count) nothrow; +}+/ + + +private struct TaskMutexImpl(bool INTERRUPTIBLE) { + import std.stdio; + private { + shared(bool) m_locked = false; + shared(uint) m_waiters = 0; + ManualEvent m_signal; + debug Task m_owner; + } + + void setup() + { + m_signal = createManualEvent(); + } + + + @trusted bool tryLock() + { + if (cas(&m_locked, false, true)) { + debug m_owner = Task.getThis(); + version(MutexPrint) writefln("mutex %s lock %s", cast(void*)this, atomicLoad(m_waiters)); + return true; + } + return false; + } + + @trusted void lock() + { + if (tryLock()) return; + debug assert(m_owner == Task() || m_owner != Task.getThis(), "Recursive mutex lock."); + atomicOp!"+="(m_waiters, 1); + version(MutexPrint) writefln("mutex %s wait %s", cast(void*)this, atomicLoad(m_waiters)); + scope(exit) atomicOp!"-="(m_waiters, 1); + auto ecnt = m_signal.emitCount(); + while (!tryLock()) { + static if (INTERRUPTIBLE) ecnt = m_signal.wait(ecnt); + else ecnt = m_signal.waitUninterruptible(ecnt); + } + } + + @trusted void unlock() + { + assert(m_locked); + debug { + assert(m_owner == Task.getThis()); + m_owner = Task(); + } + atomicStore!(MemoryOrder.rel)(m_locked, false); + version(MutexPrint) writefln("mutex %s unlock %s", cast(void*)this, atomicLoad(m_waiters)); + if (atomicLoad(m_waiters) > 0) + m_signal.emit(); + } +} + +private struct RecursiveTaskMutexImpl(bool INTERRUPTIBLE) { + import std.stdio; + private { + core.sync.mutex.Mutex m_mutex; + Task m_owner; + size_t m_recCount = 0; + shared(uint) m_waiters = 0; + ManualEvent m_signal; + @property bool m_locked() const { return m_recCount > 0; } + } + + void setup() + { + m_signal = createManualEvent(); + m_mutex = new core.sync.mutex.Mutex; + } + + @trusted bool tryLock() + { + auto self = Task.getThis(); + return m_mutex.performLocked!({ + 
if (!m_owner) { + assert(m_recCount == 0); + m_recCount = 1; + m_owner = self; + return true; + } else if (m_owner == self) { + m_recCount++; + return true; + } + return false; + }); + } + + @trusted void lock() + { + if (tryLock()) return; + atomicOp!"+="(m_waiters, 1); + version(MutexPrint) writefln("mutex %s wait %s", cast(void*)this, atomicLoad(m_waiters)); + scope(exit) atomicOp!"-="(m_waiters, 1); + auto ecnt = m_signal.emitCount(); + while (!tryLock()) { + static if (INTERRUPTIBLE) ecnt = m_signal.wait(ecnt); + else ecnt = m_signal.waitUninterruptible(ecnt); + } + } + + @trusted void unlock() + { + auto self = Task.getThis(); + m_mutex.performLocked!({ + assert(m_owner == self); + assert(m_recCount > 0); + m_recCount--; + if (m_recCount == 0) { + m_owner = Task.init; + } + }); + version(MutexPrint) writefln("mutex %s unlock %s", cast(void*)this, atomicLoad(m_waiters)); + if (atomicLoad(m_waiters) > 0) + m_signal.emit(); + } +} + +private struct TaskConditionImpl(bool INTERRUPTIBLE, LOCKABLE) { + private { + LOCKABLE m_mutex; + + ManualEvent m_signal; + } + + static if (is(LOCKABLE == Lockable)) { + final class MutexWrapper : Lockable { + private core.sync.mutex.Mutex m_mutex; + this(core.sync.mutex.Mutex mtx) { m_mutex = mtx; } + @trusted void lock() { m_mutex.lock(); } + @trusted void unlock() { m_mutex.unlock(); } + @trusted bool tryLock() { return m_mutex.tryLock(); } + } + + void setup(core.sync.mutex.Mutex mtx) + { + setup(new MutexWrapper(mtx)); + } + } + + void setup(LOCKABLE mtx) + { + m_mutex = mtx; + m_signal = createManualEvent(); + } + + @property LOCKABLE mutex() { return m_mutex; } + + @trusted void wait() + { + if (auto tm = cast(TaskMutex)m_mutex) { + assert(tm.m_impl.m_locked); + debug assert(tm.m_impl.m_owner == Task.getThis()); + } + + auto refcount = m_signal.emitCount; + m_mutex.unlock(); + scope(exit) m_mutex.lock(); + static if (INTERRUPTIBLE) m_signal.wait(refcount); + else m_signal.waitUninterruptible(refcount); + } + + @trusted bool wait(Duration timeout) + { + assert(!timeout.isNegative()); + if (auto tm = cast(TaskMutex)m_mutex) { + assert(tm.m_impl.m_locked); + debug assert(tm.m_impl.m_owner == Task.getThis()); + } + + auto refcount = m_signal.emitCount; + m_mutex.unlock(); + scope(exit) m_mutex.lock(); + + static if (INTERRUPTIBLE) return m_signal.wait(timeout, refcount) != refcount; + else return m_signal.waitUninterruptible(timeout, refcount) != refcount; + } + + @trusted void notify() + { + m_signal.emit(); + } + + @trusted void notifyAll() + { + m_signal.emit(); + } +} + +/** Contains the shared state of a $(D TaskReadWriteMutex). + * + * Since a $(D TaskReadWriteMutex) consists of two actual Mutex + * objects that rely on common memory, this class implements + * the actual functionality of their method calls. + * + * The method implementations are based on two static parameters + * ($(D INTERRUPTIBLE) and $(D INTENT)), which are configured through + * template arguments: + * + * - $(D INTERRUPTIBLE) determines whether the mutex implementation + * are interruptible by vibe.d's $(D vibe.core.task.Task.interrupt()) + * method or not. + * + * - $(D INTENT) describes the intent, with which a locking operation is + * performed (i.e. $(D READ_ONLY) or $(D READ_WRITE)). RO locking allows for + * multiple Tasks holding the mutex, whereas RW locking will cause + * a "bottleneck" so that only one Task can write to the protected + * data at once. 
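+ *
+ * A rough internal-usage sketch (illustrative only; applications should use
+ * $(D TaskReadWriteMutex) instead of this struct directly):
+ * ---
+ * alias State = ReadWriteMutexState!false;
+ * auto state = State(State.Policy.PREFER_WRITERS);
+ * state.lock!(State.LockingIntent.READ_ONLY)();   // shared reader lock
+ * state.unlock!(State.LockingIntent.READ_ONLY)();
+ * ---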
+ */ +private struct ReadWriteMutexState(bool INTERRUPTIBLE) +{ + /** The policy with which the mutex should operate. + * + * The policy determines how the acquisition of the locks is + * performed and can be used to tune the mutex according to the + * underlying algorithm in which it is used. + * + * According to the provided policy, the mutex will either favor + * reading or writing tasks and could potentially starve the + * respective opposite. + * + * cf. $(D core.sync.rwmutex.ReadWriteMutex.Policy) + */ + enum Policy : int + { + /** Readers are prioritized, writers may be starved as a result. */ + PREFER_READERS = 0, + /** Writers are prioritized, readers may be starved as a result. */ + PREFER_WRITERS + } + + /** The intent with which a locking operation is performed. + * + * Since both locks share the same underlying algorithms, the actual + * intent with which a lock operation is performed (i.e read/write) + * are passed as a template parameter to each method. + */ + enum LockingIntent : bool + { + /** Perform a read lock/unlock operation. Multiple reading locks can be + * active at a time. */ + READ_ONLY = 0, + /** Perform a write lock/unlock operation. Only a single writer can + * hold a lock at any given time. */ + READ_WRITE = 1 + } + + private { + //Queue counters + /** The number of reading tasks waiting for the lock to become available. */ + shared(uint) m_waitingForReadLock = 0; + /** The number of writing tasks waiting for the lock to become available. */ + shared(uint) m_waitingForWriteLock = 0; + + //Lock counters + /** The number of reading tasks that currently hold the lock. */ + uint m_activeReadLocks = 0; + /** The number of writing tasks that currently hold the lock (binary). */ + ubyte m_activeWriteLocks = 0; + + /** The policy determining the lock's behavior. */ + Policy m_policy; + + //Queue Events + /** The event used to wake reading tasks waiting for the lock while it is blocked. */ + ManualEvent m_readyForReadLock; + /** The event used to wake writing tasks waiting for the lock while it is blocked. */ + ManualEvent m_readyForWriteLock; + + /** The underlying mutex that gates the access to the shared state. */ + Mutex m_counterMutex; + } + + this(Policy policy) + { + m_policy = policy; + m_counterMutex = new Mutex(); + m_readyForReadLock = createManualEvent(); + m_readyForWriteLock = createManualEvent(); + } + + @disable this(this); + + /** The policy with which the lock has been created. */ + @property policy() const { return m_policy; } + + version(RWMutexPrint) + { + /** Print out debug information during lock operations. */ + void printInfo(string OP, LockingIntent INTENT)() nothrow + { + import std.string; + try + { + import std.stdio; + writefln("RWMutex: %s (%s), active: RO: %d, RW: %d; waiting: RO: %d, RW: %d", + OP.leftJustify(10,' '), + INTENT == LockingIntent.READ_ONLY ? "RO" : "RW", + m_activeReadLocks, m_activeWriteLocks, + m_waitingForReadLock, m_waitingForWriteLock + ); + } + catch (Throwable t){} + } + } + + /** An internal shortcut method to determine the queue event for a given intent. */ + @property ref auto queueEvent(LockingIntent INTENT)() + { + static if (INTENT == LockingIntent.READ_ONLY) + return m_readyForReadLock; + else + return m_readyForWriteLock; + } + + /** An internal shortcut method to determine the queue counter for a given intent. 
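+ * (READ_ONLY maps to $(D m_waitingForReadLock), READ_WRITE to
+ * $(D m_waitingForWriteLock), mirroring `queueEvent` above.)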
*/ + @property ref auto queueCounter(LockingIntent INTENT)() + { + static if (INTENT == LockingIntent.READ_ONLY) + return m_waitingForReadLock; + else + return m_waitingForWriteLock; + } + + /** An internal shortcut method to determine the current emitCount of the queue counter for a given intent. */ + int emitCount(LockingIntent INTENT)() + { + return queueEvent!INTENT.emitCount(); + } + + /** An internal shortcut method to determine the active counter for a given intent. */ + @property ref auto activeCounter(LockingIntent INTENT)() + { + static if (INTENT == LockingIntent.READ_ONLY) + return m_activeReadLocks; + else + return m_activeWriteLocks; + } + + /** An internal shortcut method to wait for the queue event for a given intent. + * + * This method is used during the `lock()` operation, after a + * `tryLock()` operation has been unsuccessfully finished. + * The active fiber will yield and be suspended until the queue event + * for the given intent will be fired. + */ + int wait(LockingIntent INTENT)(int count) + { + static if (INTERRUPTIBLE) + return queueEvent!INTENT.wait(count); + else + return queueEvent!INTENT.waitUninterruptible(count); + } + + /** An internal shortcut method to notify tasks waiting for the lock to become available again. + * + * This method is called whenever the number of owners of the mutex hits + * zero; this is basically the counterpart to `wait()`. + * It wakes any Task currently waiting for the mutex to be released. + */ + @trusted void notify(LockingIntent INTENT)() + { + static if (INTENT == LockingIntent.READ_ONLY) + { //If the last reader unlocks the mutex, notify all waiting writers + if (atomicLoad(m_waitingForWriteLock) > 0) + m_readyForWriteLock.emit(); + } + else + { //If a writer unlocks the mutex, notify both readers and writers + if (atomicLoad(m_waitingForReadLock) > 0) + m_readyForReadLock.emit(); + + if (atomicLoad(m_waitingForWriteLock) > 0) + m_readyForWriteLock.emit(); + } + } + + /** An internal method that performs the acquisition attempt in different variations. + * + * Since both locks rely on a common TaskMutex object which gates the access + * to their common data acquisition attempts for this lock are more complex + * than for simple mutex variants. This method will thus be performing the + * `tryLock()` operation in two variations, depending on the callee: + * + * If called from the outside ($(D WAIT_FOR_BLOCKING_MUTEX) = false), the method + * will instantly fail if the underlying mutex is locked (i.e. during another + * `tryLock()` or `unlock()` operation), in order to guarantee the fastest + * possible locking attempt. + * + * If used internally by the `lock()` method ($(D WAIT_FOR_BLOCKING_MUTEX) = true), + * the operation will wait for the mutex to be available before deciding if + * the lock can be acquired, since the attempt would anyway be repeated until + * it succeeds. This will prevent frequent retries under heavy loads and thus + * should ensure better performance. 
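+ *
+ * In short: the public `tryLock()` maps to `tryLock!(INTENT, false)`,
+ * while `lock()` retries via `tryLock!(INTENT, true)`.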
+ */ + @trusted bool tryLock(LockingIntent INTENT, bool WAIT_FOR_BLOCKING_MUTEX)() + { + //Log a debug statement for the attempt + version(RWMutexPrint) + printInfo!("tryLock",INTENT)(); + + //Try to acquire the lock + static if (!WAIT_FOR_BLOCKING_MUTEX) + { + if (!m_counterMutex.tryLock()) + return false; + } + else + m_counterMutex.lock(); + + scope(exit) + m_counterMutex.unlock(); + + //Log a debug statement for the attempt + version(RWMutexPrint) + printInfo!("checkCtrs",INTENT)(); + + //Check if there's already an active writer + if (m_activeWriteLocks > 0) + return false; + + //If writers are preferred over readers, check whether there + //currently is a writer in the waiting queue and abort if + //that's the case. + static if (INTENT == LockingIntent.READ_ONLY) + if (m_policy.PREFER_WRITERS && m_waitingForWriteLock > 0) + return false; + + //If we are locking the mutex for writing, make sure that + //there's no reader active. + static if (INTENT == LockingIntent.READ_WRITE) + if (m_activeReadLocks > 0) + return false; + + //We can successfully acquire the lock! + //Log a debug statement for the success. + version(RWMutexPrint) + printInfo!("lock",INTENT)(); + + //Increase the according counter + //(number of active readers/writers) + //and return a success code. + activeCounter!INTENT += 1; + return true; + } + + /** Attempt to acquire the lock for a given intent. + * + * Returns: + * `true`, if the lock was successfully acquired; + * `false` otherwise. + */ + @trusted bool tryLock(LockingIntent INTENT)() + { + //Try to lock this mutex without waiting for the underlying + //TaskMutex - fail if it is already blocked. + return tryLock!(INTENT,false)(); + } + + /** Acquire the lock for the given intent; yield and suspend until the lock has been acquired. */ + @trusted void lock(LockingIntent INTENT)() + { + //Prepare a waiting action before the first + //`tryLock()` call in order to avoid a race + //condition that could lead to the queue notification + //not being fired. + auto count = emitCount!INTENT; + atomicOp!"+="(queueCounter!INTENT,1); + scope(exit) + atomicOp!"-="(queueCounter!INTENT,1); + + //Try to lock the mutex + auto locked = tryLock!(INTENT,true)(); + if (locked) + return; + + //Retry until we successfully acquired the lock + while(!locked) + { + version(RWMutexPrint) + printInfo!("wait",INTENT)(); + + count = wait!INTENT(count); + locked = tryLock!(INTENT,true)(); + } + } + + /** Unlock the mutex after a successful acquisition. */ + @trusted void unlock(LockingIntent INTENT)() + { + version(RWMutexPrint) + printInfo!("unlock",INTENT)(); + + debug assert(activeCounter!INTENT > 0); + + synchronized(m_counterMutex) + { + //Decrement the counter of active lock holders. + //If the counter hits zero, notify waiting Tasks + activeCounter!INTENT -= 1; + if (activeCounter!INTENT == 0) + { + version(RWMutexPrint) + printInfo!("notify",INTENT)(); + + notify!INTENT(); + } + } + } +} + +/** A ReadWriteMutex implementation for fibers. + * + * This mutex can be used in exchange for a $(D core.sync.mutex.ReadWriteMutex), + * but does not block the event loop in contention situations. The `reader` and `writer` + * members are used for locking. Locking the `reader` mutex allows access to multiple + * readers at once, while the `writer` mutex only allows a single writer to lock it at + * any given time. Locks on `reader` and `writer` are mutually exclusive (i.e. whenever a + * writer is active, no readers can be active at the same time, and vice versa). 
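+ *
+ * Example:
+ * A short usage sketch (illustrative only):
+ * ---
+ * auto rwmutex = new TaskReadWriteMutex;
+ *
+ * rwmutex.reader.performLocked!({
+ *     // multiple reading tasks may execute this concurrently
+ * });
+ *
+ * rwmutex.writer.performLocked!({
+ *     // only a single writing task may execute this at a time
+ * });
+ * ---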
+ * + * Notice: + * Mutexes implemented by this class cannot be interrupted + * using $(D vibe.core.task.Task.interrupt()). The corresponding + * InterruptException will be deferred until the next blocking + * operation yields the event loop. + * + * Use $(D InterruptibleTaskReadWriteMutex) as an alternative that can be + * interrupted. + * + * cf. $(D core.sync.mutex.ReadWriteMutex) + */ +class TaskReadWriteMutex +{ + private { + alias State = ReadWriteMutexState!false; + alias LockingIntent = State.LockingIntent; + alias READ_ONLY = LockingIntent.READ_ONLY; + alias READ_WRITE = LockingIntent.READ_WRITE; + + /** The shared state used by the reader and writer mutexes. */ + State m_state; + } + + /** The policy with which the mutex should operate. + * + * The policy determines how the acquisition of the locks is + * performed and can be used to tune the mutex according to the + * underlying algorithm in which it is used. + * + * According to the provided policy, the mutex will either favor + * reading or writing tasks and could potentially starve the + * respective opposite. + * + * cf. $(D core.sync.rwmutex.ReadWriteMutex.Policy) + */ + alias Policy = State.Policy; + + /** A common baseclass for both of the provided mutexes. + * + * The intent for the according mutex is specified through the + * $(D INTENT) template argument, which determines if a mutex is + * used for read or write locking. + */ + final class Mutex(LockingIntent INTENT): core.sync.mutex.Mutex, Lockable + { + /** Try to lock the mutex. cf. $(D core.sync.mutex.Mutex) */ + override bool tryLock() { return m_state.tryLock!INTENT(); } + /** Lock the mutex. cf. $(D core.sync.mutex.Mutex) */ + override void lock() { m_state.lock!INTENT(); } + /** Unlock the mutex. cf. $(D core.sync.mutex.Mutex) */ + override void unlock() { m_state.unlock!INTENT(); } + } + alias Reader = Mutex!READ_ONLY; + alias Writer = Mutex!READ_WRITE; + + Reader reader; + Writer writer; + + this(Policy policy = Policy.PREFER_WRITERS) + { + m_state = State(policy); + reader = new Reader(); + writer = new Writer(); + } + + /** The policy with which the lock has been created. */ + @property Policy policy() const { return m_state.policy; } +} + +/** Alternative to $(D TaskReadWriteMutex) that supports interruption. + * + * This class supports the use of $(D vibe.core.task.Task.interrupt()) while + * waiting in the `lock()` method. + * + * cf. $(D core.sync.mutex.ReadWriteMutex) + */ +class InterruptibleTaskReadWriteMutex +{ + private { + alias State = ReadWriteMutexState!true; + alias LockingIntent = State.LockingIntent; + alias READ_ONLY = LockingIntent.READ_ONLY; + alias READ_WRITE = LockingIntent.READ_WRITE; + + /** The shared state used by the reader and writer mutexes. */ + State m_state; + } + + /** The policy with which the mutex should operate. + * + * The policy determines how the acquisition of the locks is + * performed and can be used to tune the mutex according to the + * underlying algorithm in which it is used. + * + * According to the provided policy, the mutex will either favor + * reading or writing tasks and could potentially starve the + * respective opposite. + * + * cf. $(D core.sync.rwmutex.ReadWriteMutex.Policy) + */ + alias Policy = State.Policy; + + /** A common baseclass for both of the provided mutexes. + * + * The intent for the according mutex is specified through the + * $(D INTENT) template argument, which determines if a mutex is + * used for read or write locking. 
+ *
+ */
+	final class Mutex(LockingIntent INTENT): core.sync.mutex.Mutex, Lockable
+	{
+		/** Try to lock the mutex. cf. $(D core.sync.mutex.Mutex) */
+		override bool tryLock() { return m_state.tryLock!INTENT(); }
+		/** Lock the mutex. cf. $(D core.sync.mutex.Mutex) */
+		override void lock() { m_state.lock!INTENT(); }
+		/** Unlock the mutex. cf. $(D core.sync.mutex.Mutex) */
+		override void unlock() { m_state.unlock!INTENT(); }
+	}
+	alias Reader = Mutex!READ_ONLY;
+	alias Writer = Mutex!READ_WRITE;
+
+	Reader reader;
+	Writer writer;
+
+	this(Policy policy = Policy.PREFER_WRITERS)
+	{
+		m_state = State(policy);
+		reader = new Reader();
+		writer = new Writer();
+	}
+
+	/** The policy with which the lock has been created. */
+	@property Policy policy() const { return m_state.policy; }
+}
\ No newline at end of file
diff --git a/source/vibe/core/task.d b/source/vibe/core/task.d
new file mode 100644
index 0000000..e746d6a
--- /dev/null
+++ b/source/vibe/core/task.d
@@ -0,0 +1,153 @@
+/**
+	Contains interfaces and enums for evented I/O drivers.
+
+	Copyright: © 2012-2016 RejectedSoftware e.K.
+	Authors: Sönke Ludwig
+	License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file.
+*/
+module vibe.core.task;
+
+import vibe.core.sync;
+import vibe.internal.array : FixedRingBuffer;
+
+import core.thread;
+import std.exception;
+import std.traits;
+import std.typecons;
+import std.variant;
+
+
+/** Represents a single task as started using vibe.core.runTask.
+
+	Note that the Task type is considered weakly isolated and thus can be
+	passed between threads using vibe.core.concurrency.send or by passing
+	it as a parameter to vibe.core.core.runWorkerTask.
+*/
+struct Task {
+	private {
+		shared(TaskFiber) m_fiber;
+		size_t m_taskCounter;
+		import std.concurrency : ThreadInfo, Tid;
+		static ThreadInfo s_tidInfo;
+	}
+
+	private this(TaskFiber fiber, size_t task_counter)
+	@safe nothrow {
+		() @trusted { m_fiber = cast(shared)fiber; } ();
+		m_taskCounter = task_counter;
+	}
+
+	this(in Task other) nothrow { m_fiber = cast(shared(TaskFiber))other.m_fiber; m_taskCounter = other.m_taskCounter; }
+
+	/** Returns the Task instance belonging to the calling task.
+	*/
+	static Task getThis() nothrow @safe
+	{
+		// In 2067, synchronized statements were annotated nothrow.
+		// DMD#4115, Druntime#1013, Druntime#1021, Phobos#2704
+		// However, they were "logically" nothrow before.
+		static if (__VERSION__ <= 2066)
+			scope (failure) assert(0, "Internal error: function should be nothrow");
+
+		auto fiber = () @trusted { return Fiber.getThis(); } ();
+		if (!fiber) return Task.init;
+		auto tfiber = cast(TaskFiber)fiber;
+		assert(tfiber !is null, "Invalid or null fiber used to construct Task handle.");
+		if (!tfiber.m_running) return Task.init;
+		return () @trusted { return Task(tfiber, tfiber.m_taskCounter); } ();
+	}
+
+	nothrow {
+		@property inout(TaskFiber) fiber() inout @trusted { return cast(inout(TaskFiber))m_fiber; }
+		@property size_t taskCounter() const @safe { return m_taskCounter; }
+		@property inout(Thread) thread() inout @safe { if (m_fiber) return this.fiber.thread; return null; }
+
+		/** Determines if the task is still running.
+		*/
+		@property bool running()
+		const @trusted {
+			assert(m_fiber !is null, "Invalid task handle");
+			try if (this.fiber.state == Fiber.State.TERM) return false; catch (Throwable) {}
+			return this.fiber.m_running && this.fiber.m_taskCounter == m_taskCounter;
+		}
+
+		// FIXME: this is not thread safe!
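+		// (the underlying fiber may be reused by a new task while the
+		// returned ThreadInfo reference is still in use)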
+		@property ref ThreadInfo tidInfo() { return m_fiber ? fiber.tidInfo : s_tidInfo; }
+		@property Tid tid() { return tidInfo.ident; }
+	}
+
+	T opCast(T)() const nothrow if (is(T == bool)) { return m_fiber !is null; }
+
+	void join() { if (running) fiber.join(); }
+	void interrupt() { if (running) fiber.interrupt(); }
+	void terminate() { if (running) fiber.terminate(); }
+
+	string toString() const { import std.string; return format("%s:%s", cast(void*)m_fiber, m_taskCounter); }
+
+	bool opEquals(in ref Task other) const nothrow @safe { return m_fiber is other.m_fiber && m_taskCounter == other.m_taskCounter; }
+	bool opEquals(in Task other) const nothrow @safe { return m_fiber is other.m_fiber && m_taskCounter == other.m_taskCounter; }
+}
+
+
+
+/** The base class for a task aka Fiber.
+
+	This class represents a single task that is executed concurrently
+	with other tasks. Each task is owned by a single thread.
+*/
+class TaskFiber : Fiber {
+	private {
+		Thread m_thread;
+		import std.concurrency : ThreadInfo;
+		ThreadInfo m_tidInfo;
+	}
+
+	protected {
+		shared size_t m_taskCounter;
+		shared bool m_running;
+	}
+
+	protected this(void delegate() fun, size_t stack_size)
+	nothrow {
+		super(fun, stack_size);
+		m_thread = Thread.getThis();
+	}
+
+	/** Returns the thread that owns this task.
+	*/
+	@property inout(Thread) thread() inout @safe nothrow { return m_thread; }
+
+	/** Returns the handle of the current Task running on this fiber.
+	*/
+	@property Task task() @safe nothrow { return Task(this, m_taskCounter); }
+
+	@property ref inout(ThreadInfo) tidInfo() inout nothrow { return m_tidInfo; }
+
+	/** Blocks until the task has ended.
+	*/
+	abstract void join();
+
+	/** Throws an InterruptException within the task as soon as it calls a blocking function.
+	*/
+	abstract void interrupt();
+
+	/** Terminates the task without notice as soon as it calls a blocking function.
+	*/
+	abstract void terminate();
+
+	void bumpTaskCounter()
+	@safe nothrow {
+		import core.atomic : atomicOp;
+		() @trusted { atomicOp!"+="(this.m_taskCounter, 1); } ();
+	}
+}
+
+
+/** Exception that is thrown by Task.interrupt.
+*/
+class InterruptException : Exception {
+	this()
+	{
+		super("Task interrupted.");
+	}
+}
diff --git a/source/vibe/internal/array.d b/source/vibe/internal/array.d
new file mode 100644
index 0000000..1e1ad72
--- /dev/null
+++ b/source/vibe/internal/array.d
@@ -0,0 +1,634 @@
+/**
+	Utility functions for array processing
+
+	Copyright: © 2012 RejectedSoftware e.K.
+	License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file.
+	Authors: Sönke Ludwig
*/
+module vibe.internal.array;
+
+import vibe.internal.memory;
+
+import std.algorithm;
+import std.range : isInputRange, isOutputRange;
+import std.traits;
+static import std.utf;
+
+
+void removeFromArray(T)(ref T[] array, T item)
+{
+	foreach( i; 0 .. array.length )
+		if( array[i] is item ){
+			removeFromArrayIdx(array, i);
+			return;
+		}
+}
+
+void removeFromArrayIdx(T)(ref T[] array, size_t idx)
+{
+	foreach( j; idx+1 ..
array.length)
+		array[j-1] = array[j];
+	array.length = array.length-1;
+}
+
+enum AppenderResetMode {
+	keepData,
+	freeData,
+	reuseData
+}
+
+struct AllocAppender(ArrayType : E[], E) {
+	alias ElemType = Unqual!E;
+
+	static assert(!hasIndirections!E && !hasElaborateDestructor!E);
+
+	private {
+		ElemType[] m_data;
+		ElemType[] m_remaining;
+		Allocator m_alloc;
+		bool m_allocatedBuffer = false;
+	}
+
+	this(Allocator alloc, ElemType[] initial_buffer = null)
+	{
+		m_alloc = alloc;
+		m_data = initial_buffer;
+		m_remaining = initial_buffer;
+	}
+
+	@disable this(this);
+
+	@property ArrayType data() { return cast(ArrayType)m_data[0 .. m_data.length - m_remaining.length]; }
+
+	void reset(AppenderResetMode reset_mode = AppenderResetMode.keepData)
+	{
+		if (reset_mode == AppenderResetMode.keepData) m_data = null;
+		else if (reset_mode == AppenderResetMode.freeData) { if (m_allocatedBuffer) m_alloc.free(m_data); m_data = null; }
+		m_remaining = m_data;
+	}
+
+	/** Grows the capacity of the internal buffer so that it can hold a minimum number of elements.
+
+		Params:
+			amount = The minimum number of elements that shall be appendable without
+				triggering a re-allocation.
+
+	*/
+	void reserve(size_t amount)
+	{
+		size_t nelems = m_data.length - m_remaining.length;
+		if (!m_data.length) {
+			m_data = cast(ElemType[])m_alloc.alloc(amount*E.sizeof);
+			m_remaining = m_data;
+			m_allocatedBuffer = true;
+		}
+		if (m_remaining.length < amount) {
+			debug {
+				import std.digest.crc;
+				auto checksum = crc32Of(m_data[0 .. nelems]);
+			}
+			if (m_allocatedBuffer) m_data = cast(ElemType[])m_alloc.realloc(m_data, (nelems+amount)*E.sizeof);
+			else {
+				auto newdata = cast(ElemType[])m_alloc.alloc((nelems+amount)*E.sizeof);
+				newdata[0 .. nelems] = m_data[0 .. nelems];
+				m_data = newdata;
+				m_allocatedBuffer = true;
+			}
+			debug assert(crc32Of(m_data[0 .. nelems]) == checksum);
+		}
+		m_remaining = m_data[nelems .. m_data.length];
+	}
+
+	void put(E el)
+	{
+		if( m_remaining.length == 0 ) grow(1);
+		m_remaining[0] = el;
+		m_remaining = m_remaining[1 .. $];
+	}
+
+	void put(ArrayType arr)
+	{
+		if (m_remaining.length < arr.length) grow(arr.length);
+		m_remaining[0 .. arr.length] = arr[];
+		m_remaining = m_remaining[arr.length .. $];
+	}
+
+	static if( !hasAliasing!E ){
+		void put(in ElemType[] arr){
+			put(cast(ArrayType)arr);
+		}
+	}
+
+	static if( is(ElemType == char) ){
+		void put(dchar el)
+		{
+			if( el < 128 ) put(cast(char)el);
+			else {
+				char[4] buf;
+				auto len = std.utf.encode(buf, el);
+				put(cast(ArrayType)buf[0 .. len]);
+			}
+		}
+	}
+
+	static if( is(ElemType == wchar) ){
+		void put(dchar el)
+		{
+			if( el < 128 ) put(cast(wchar)el);
+			else {
+				wchar[3] buf;
+				auto len = std.utf.encode(buf, el);
+				put(cast(ArrayType)buf[0 .. len]);
+			}
+		}
+	}
+
+	static if (!is(E == immutable) || !hasAliasing!E) {
+		/** Appends a number of bytes in-place.
+
+			The delegate will get a slice of the memory that follows
+			the already written data. Use `reserve` to ensure that this slice
+			has enough room. The delegate should overwrite as much of the
+			slice as desired and then has to return the number of elements
+			that should be appended (counting from the start of the slice).
+		*/
+		void append(scope size_t delegate(scope ElemType[] dst) del)
+		{
+			auto n = del(m_remaining);
+			assert(n <= m_remaining.length);
+			m_remaining = m_remaining[n ..
$]; + } + } + + void grow(size_t min_free) + { + if( !m_data.length && min_free < 16 ) min_free = 16; + + auto min_size = m_data.length + min_free - m_remaining.length; + auto new_size = max(m_data.length, 16); + while( new_size < min_size ) + new_size = (new_size * 3) / 2; + reserve(new_size - m_data.length + m_remaining.length); + } +} + +unittest { + auto a = AllocAppender!string(defaultAllocator()); + a.put("Hello"); + a.put(' '); + a.put("World"); + assert(a.data == "Hello World"); + a.reset(); + assert(a.data == ""); +} + +unittest { + char[4] buf; + auto a = AllocAppender!string(defaultAllocator(), buf); + a.put("He"); + assert(a.data == "He"); + assert(a.data.ptr == buf.ptr); + a.put("ll"); + assert(a.data == "Hell"); + assert(a.data.ptr == buf.ptr); + a.put('o'); + assert(a.data == "Hello"); + assert(a.data.ptr != buf.ptr); +} + +unittest { + char[4] buf; + auto a = AllocAppender!string(defaultAllocator(), buf); + a.put("Hello"); + assert(a.data == "Hello"); + assert(a.data.ptr != buf.ptr); +} + +unittest { + auto app = AllocAppender!(int[])(defaultAllocator); + app.reserve(2); + app.append((scope mem) { + assert(mem.length >= 2); + mem[0] = 1; + mem[1] = 2; + return 2; + }); + assert(app.data == [1, 2]); +} + +unittest { + auto app = AllocAppender!string(defaultAllocator); + app.reserve(3); + app.append((scope mem) { + assert(mem.length >= 3); + mem[0] = 'f'; + mem[1] = 'o'; + mem[2] = 'o'; + return 3; + }); + assert(app.data == "foo"); +} + + +struct FixedAppender(ArrayType : E[], size_t NELEM, E) { + alias ElemType = Unqual!E; + private { + ElemType[NELEM] m_data; + size_t m_fill; + } + + void clear() + { + m_fill = 0; + } + + void put(E el) + { + m_data[m_fill++] = el; + } + + static if( is(ElemType == char) ){ + void put(dchar el) + { + if( el < 128 ) put(cast(char)el); + else { + char[4] buf; + auto len = std.utf.encode(buf, el); + put(cast(ArrayType)buf[0 .. len]); + } + } + } + + static if( is(ElemType == wchar) ){ + void put(dchar el) + { + if( el < 128 ) put(cast(wchar)el); + else { + wchar[3] buf; + auto len = std.utf.encode(buf, el); + put(cast(ArrayType)buf[0 .. len]); + } + } + } + + void put(ArrayType arr) + { + m_data[m_fill .. m_fill+arr.length] = (cast(ElemType[])arr)[]; + m_fill += arr.length; + } + + @property ArrayType data() { return cast(ArrayType)m_data[0 .. m_fill]; } + + static if (!is(E == immutable)) { + void reset() { m_fill = 0; } + } +} + + +/** + TODO: clear ring buffer fields upon removal (to run struct destructors, if T is a struct) +*/ +struct FixedRingBuffer(T, size_t N = 0, bool INITIALIZE = true) { + private { + static if( N > 0 ) { + static if (INITIALIZE) T[N] m_buffer; + else T[N] m_buffer = void; + } else T[] m_buffer; + size_t m_start = 0; + size_t m_fill = 0; + } + + static if( N == 0 ){ + bool m_freeOnDestruct; + this(size_t capacity) { m_buffer = new T[capacity]; } + ~this() { if (m_freeOnDestruct && m_buffer.length > 0) delete m_buffer; } + } + + @property bool empty() const { return m_fill == 0; } + + @property bool full() const { return m_fill == m_buffer.length; } + + @property size_t length() const { return m_fill; } + + @property size_t freeSpace() const { return m_buffer.length - m_fill; } + + @property size_t capacity() const { return m_buffer.length; } + + static if( N == 0 ){ + deprecated @property void freeOnDestruct(bool b) { m_freeOnDestruct = b; } + + /// Resets the capacity to zero and explicitly frees the memory for the buffer. 
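+		/// Note that this is only available for the dynamically sized variant ($(D N == 0)).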
+ void dispose() + { + delete m_buffer; + m_buffer = null; + m_start = m_fill = 0; + } + + @property void capacity(size_t new_size) + { + if( m_buffer.length ){ + auto newbuffer = new T[new_size]; + auto dst = newbuffer; + auto newfill = min(m_fill, new_size); + read(dst[0 .. newfill]); + if (m_freeOnDestruct && m_buffer.length > 0) delete m_buffer; + m_buffer = newbuffer; + m_start = 0; + m_fill = newfill; + } else { + if (m_freeOnDestruct && m_buffer.length > 0) delete m_buffer; + m_buffer = new T[new_size]; + } + } + } + + @property ref inout(T) front() inout { assert(!empty); return m_buffer[m_start]; } + + @property ref inout(T) back() inout { assert(!empty); return m_buffer[mod(m_start+m_fill-1)]; } + + void clear() + { + popFrontN(length); + assert(m_fill == 0); + m_start = 0; + } + + void put()(T itm) { assert(m_fill < m_buffer.length); m_buffer[mod(m_start + m_fill++)] = itm; } + void put(TC : T)(TC[] itms) + { + if( !itms.length ) return; + assert(m_fill+itms.length <= m_buffer.length); + if( mod(m_start+m_fill) >= mod(m_start+m_fill+itms.length) ){ + size_t chunk1 = m_buffer.length - (m_start+m_fill); + size_t chunk2 = itms.length - chunk1; + m_buffer[m_start+m_fill .. m_buffer.length] = itms[0 .. chunk1]; + m_buffer[0 .. chunk2] = itms[chunk1 .. $]; + } else { + m_buffer[mod(m_start+m_fill) .. mod(m_start+m_fill)+itms.length] = itms[]; + } + m_fill += itms.length; + } + void putN(size_t n) { assert(m_fill+n <= m_buffer.length); m_fill += n; } + + void popFront() { assert(!empty); m_start = mod(m_start+1); m_fill--; } + void popFrontN(size_t n) { assert(length >= n); m_start = mod(m_start + n); m_fill -= n; } + + void popBack() { assert(!empty); m_fill--; } + void popBackN(size_t n) { assert(length >= n); m_fill -= n; } + + void removeAt(Range r) + { + assert(r.m_buffer is m_buffer); + if( m_start + m_fill > m_buffer.length ){ + assert(r.m_start >= m_start && r.m_start < m_buffer.length || r.m_start < mod(m_start+m_fill)); + if( r.m_start > m_start ){ + foreach(i; r.m_start .. m_buffer.length-1) + m_buffer[i] = m_buffer[i+1]; + m_buffer[$-1] = m_buffer[0]; + foreach(i; 0 .. mod(m_start + m_fill - 1)) + m_buffer[i] = m_buffer[i+1]; + } else { + foreach(i; r.m_start .. mod(m_start + m_fill - 1)) + m_buffer[i] = m_buffer[i+1]; + } + } else { + assert(r.m_start >= m_start && r.m_start < m_start+m_fill); + foreach(i; r.m_start .. m_start+m_fill-1) + m_buffer[i] = m_buffer[i+1]; + } + m_fill--; + destroy(m_buffer[mod(m_start+m_fill)]); // TODO: only call destroy for non-POD T + } + + inout(T)[] peek() inout { return m_buffer[m_start .. min(m_start+m_fill, m_buffer.length)]; } + T[] peekDst() { + if (!m_buffer.length) return null; + if( m_start + m_fill < m_buffer.length ) return m_buffer[m_start+m_fill .. $]; + else return m_buffer[mod(m_start+m_fill) .. m_start]; + } + + void read(T[] dst) + { + assert(dst.length <= length); + if( !dst.length ) return; + if( mod(m_start) >= mod(m_start+dst.length) ){ + size_t chunk1 = m_buffer.length - m_start; + size_t chunk2 = dst.length - chunk1; + dst[0 .. chunk1] = m_buffer[m_start .. $]; + dst[chunk1 .. $] = m_buffer[0 .. chunk2]; + } else { + dst[] = m_buffer[m_start .. m_start+dst.length]; + } + popFrontN(dst.length); + } + + int opApply(scope int delegate(ref T itm) del) + { + if( m_start+m_fill > m_buffer.length ){ + foreach(i; m_start .. m_buffer.length) + if( auto ret = del(m_buffer[i]) ) + return ret; + foreach(i; 0 .. mod(m_start+m_fill)) + if( auto ret = del(m_buffer[i]) ) + return ret; + } else { + foreach(i; m_start .. 
m_start+m_fill) + if( auto ret = del(m_buffer[i]) ) + return ret; + } + return 0; + } + + /// iterate through elements with index + int opApply(scope int delegate(size_t i, ref T itm) del) + { + if( m_start+m_fill > m_buffer.length ){ + foreach(i; m_start .. m_buffer.length) + if( auto ret = del(i - m_start, m_buffer[i]) ) + return ret; + foreach(i; 0 .. mod(m_start+m_fill)) + if( auto ret = del(i + m_buffer.length - m_start, m_buffer[i]) ) + return ret; + } else { + foreach(i; m_start .. m_start+m_fill) + if( auto ret = del(i - m_start, m_buffer[i]) ) + return ret; + } + return 0; + } + + ref inout(T) opIndex(size_t idx) inout { assert(idx < length); return m_buffer[mod(m_start+idx)]; } + + Range opSlice() { return Range(m_buffer, m_start, m_fill); } + + Range opSlice(size_t from, size_t to) + { + assert(from <= to); + assert(to <= m_fill); + return Range(m_buffer, mod(m_start+from), to-from); + } + + size_t opDollar(size_t dim)() const if(dim == 0) { return length; } + + private size_t mod(size_t n) + const { + static if( N == 0 ){ + /*static if(PotOnly){ + return x & (m_buffer.length-1); + } else {*/ + return n % m_buffer.length; + //} + } else static if( ((N - 1) & N) == 0 ){ + return n & (N - 1); + } else return n % N; + } + + static struct Range { + private { + T[] m_buffer; + size_t m_start; + size_t m_length; + } + + private this(T[] buffer, size_t start, size_t length) + { + m_buffer = buffer; + m_start = start; + m_length = length; + } + + @property bool empty() const { return m_length == 0; } + + @property inout(T) front() inout { assert(!empty); return m_buffer[m_start]; } + + void popFront() + { + assert(!empty); + m_start++; + m_length--; + if( m_start >= m_buffer.length ) + m_start = 0; + } + } +} + +unittest { + static assert(isInputRange!(FixedRingBuffer!int) && isOutputRange!(FixedRingBuffer!int, int)); + + FixedRingBuffer!(int, 5) buf; + assert(buf.length == 0 && buf.freeSpace == 5); buf.put(1); // |1 . . . . + assert(buf.length == 1 && buf.freeSpace == 4); buf.put(2); // |1 2 . . . + assert(buf.length == 2 && buf.freeSpace == 3); buf.put(3); // |1 2 3 . . + assert(buf.length == 3 && buf.freeSpace == 2); buf.put(4); // |1 2 3 4 . + assert(buf.length == 4 && buf.freeSpace == 1); buf.put(5); // |1 2 3 4 5 + assert(buf.length == 5 && buf.freeSpace == 0); + assert(buf.front == 1); + buf.popFront(); // .|2 3 4 5 + assert(buf.front == 2); + buf.popFrontN(2); // . . .|4 5 + assert(buf.front == 4); + assert(buf.length == 2 && buf.freeSpace == 3); + buf.put([6, 7, 8]); // 6 7 8|4 5 + assert(buf.length == 5 && buf.freeSpace == 0); + int[5] dst; + buf.read(dst); // . . .|. . + assert(dst == [4, 5, 6, 7, 8]); + assert(buf.length == 0 && buf.freeSpace == 5); + buf.put([1, 2]); // . . .|1 2 + assert(buf.length == 2 && buf.freeSpace == 3); + buf.read(dst[0 .. 2]); //|. . . . . + assert(dst[0 .. 2] == [1, 2]); + + buf.put([0, 0, 0, 1, 2]); //|0 0 0 1 2 + buf.popFrontN(2); //. 
.|0 1 2
+	buf.put([3, 4]); // 3 4|0 1 2
+	foreach(i, item; buf)
+	{
+		assert(i == item);
+	}
+}
+
+
+/// Write a single batch and drain
+struct BatchBuffer(T, size_t N = 0) {
+	private {
+		size_t m_fill;
+		size_t m_first;
+		static if (N == 0) T[] m_buffer;
+		else T[N] m_buffer;
+	}
+
+	static if (N == 0) {
+		@property void capacity(size_t n) { assert(n >= m_fill); m_buffer.length = n; }
+	}
+
+	@property bool empty() { return m_first >= m_fill; }
+	@property size_t capacity() const { return m_buffer.length; }
+	@property size_t length() { return m_fill - m_first; }
+	@property ref inout(T) front() inout { assert(!empty); return m_buffer[m_first]; }
+	void popFront() { assert(!empty); m_first++; }
+	void popFrontN(size_t n) { assert(n <= length); m_first += n; }
+	inout(T)[] peek() inout { return m_buffer[m_first .. m_fill]; }
+	T[] peekDst() { assert(empty); return m_buffer; }
+	void putN(size_t n) { assert(empty && n <= m_buffer.length); m_fill = n; }
+	void putN(T[] elems) { assert(empty && elems.length <= m_buffer.length); m_buffer[0 .. elems.length] = elems[]; m_fill = elems.length; }
+}
+
+
+struct ArraySet(Key)
+{
+	private {
+		Key[4] m_staticEntries;
+		Key[] m_entries;
+	}
+
+	@property ArraySet dup()
+	{
+		return ArraySet(m_staticEntries, m_entries.dup);
+	}
+
+	bool opBinaryRight(string op)(Key key) if (op == "in") { return contains(key); }
+
+	int opApply(int delegate(ref Key) del)
+	{
+		foreach (ref k; m_staticEntries)
+			if (k != Key.init)
+				if (auto ret = del(k))
+					return ret;
+		foreach (ref k; m_entries)
+			if (k != Key.init)
+				if (auto ret = del(k))
+					return ret;
+		return 0;
+	}
+
+	bool contains(Key key)
+	const {
+		foreach (ref k; m_staticEntries) if (k == key) return true;
+		foreach (ref k; m_entries) if (k == key) return true;
+		return false;
+	}
+
+	void insert(Key key)
+	{
+		if (contains(key)) return;
+		foreach (ref k; m_staticEntries)
+			if (k == Key.init) {
+				k = key;
+				return;
+			}
+		foreach (ref k; m_entries)
+			if (k == Key.init) {
+				k = key;
+				return;
+			}
+		m_entries ~= key;
+	}
+
+	void remove(Key key)
+	{
+		foreach (ref k; m_staticEntries) if (k == key) { k = Key.init; return; }
+		foreach (ref k; m_entries) if (k == key) { k = Key.init; return; }
+	}
+}
diff --git a/source/vibe/internal/async.d b/source/vibe/internal/async.d
new file mode 100644
index 0000000..cb3cbcb
--- /dev/null
+++ b/source/vibe/internal/async.d
@@ -0,0 +1,45 @@
+module vibe.internal.async;
+
+import std.traits : ParameterTypeTuple;
+import std.typecons : tuple;
+import vibe.core.core;
+import vibe.core.log;
+import core.time : Duration, seconds;
+
+
+auto asyncAwait(string method, Object, ARGS...)(Object object, ARGS args)
+{
+	alias CB = ParameterTypeTuple!(__traits(getMember, Object, method))[$-1];
+	alias CBTypes = ParameterTypeTuple!CB;
+
+	bool fired = false;
+	CBTypes ret;
+	Task t;
+
+	void callback(CBTypes params)
+	@safe nothrow {
+		logTrace("Got result.");
+		fired = true;
+		ret = params;
+		if (t != Task.init)
+			resumeTask(t);
+	}
+
+	logTrace("Calling %s...", method);
+	__traits(getMember, object, method)(args, &callback);
+	if (!fired) {
+		logTrace("Need to wait...");
+		t = Task.getThis();
+		do yieldForEvent();
+		while (!fired);
+	}
+	logTrace("Return result.");
+	return tuple(ret);
+}
+
+auto asyncAwait(string method, Object, ARGS...)(Duration timeout, Object object, ARGS args)
+{
+	assert(timeout >= 0.seconds);
+	if (timeout == Duration.max) return asyncAwait!method(object, args);
+	else assert(false, "TODO!");
+}
diff --git a/source/vibe/internal/hashmap.d
b/source/vibe/internal/hashmap.d new file mode 100644 index 0000000..c467770 --- /dev/null +++ b/source/vibe/internal/hashmap.d @@ -0,0 +1,375 @@ +/** + Internal hash map implementation. + + Copyright: © 2013 RejectedSoftware e.K. + License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file. + Authors: Sönke Ludwig +*/ +module vibe.internal.hashmap; + +import vibe.internal.memory; + +import std.conv : emplace; +import std.traits; + + +struct DefaultHashMapTraits(Key) { + enum clearValue = Key.init; + static bool equals(in Key a, in Key b) + { + static if (is(Key == class)) return a is b; + else return a == b; + } + static size_t hashOf(in ref Key k) + { + static if (is(Key == class) && &Key.toHash == &Object.toHash) + return cast(size_t)cast(void*)k; + else static if (__traits(compiles, Key.init.toHash())) + return k.toHash(); + else static if (__traits(compiles, Key.init.toHashShared())) + return k.toHashShared(); + else { + // evil casts to be able to get the most basic operations of + // HashMap nothrow and @nogc + static size_t hashWrapper(in ref Key k) { + static typeinfo = typeid(Key); + return typeinfo.getHash(&k); + } + static @nogc nothrow size_t properlyTypedWrapper(in ref Key k) { return 0; } + return (cast(typeof(&properlyTypedWrapper))&hashWrapper)(k); + } + } +} + +struct HashMap(TKey, TValue, Traits = DefaultHashMapTraits!TKey) +{ + import vibe.internal.traits : isOpApplyDg; + + alias Key = TKey; + alias Value = TValue; + + struct TableEntry { + UnConst!Key key = Traits.clearValue; + Value value; + + this(Key key, Value value) { this.key = cast(UnConst!Key)key; this.value = value; } + } + private { + TableEntry[] m_table; // NOTE: capacity is always POT + size_t m_length; + Allocator m_allocator; + bool m_resizing; + } + + this(Allocator allocator) + { + m_allocator = allocator; + } + + ~this() + { + clear(); + if (m_table.ptr !is null) freeArray(m_allocator, m_table); + } + + @disable this(this); + + @property size_t length() const { return m_length; } + + void remove(Key key) + { + auto idx = findIndex(key); + assert (idx != size_t.max, "Removing non-existent element."); + auto i = idx; + while (true) { + m_table[i].key = Traits.clearValue; + m_table[i].value = Value.init; + + size_t j = i, r; + do { + if (++i >= m_table.length) i -= m_table.length; + if (Traits.equals(m_table[i].key, Traits.clearValue)) { + m_length--; + return; + } + r = Traits.hashOf(m_table[i].key) & (m_table.length-1); + } while ((j= 1 && arity!del <= 2, + "isOpApplyDg should have prevented this"); + static if (arity!del == 1) { + if (int ret = del(m_table[i].value)) + return ret; + } else + if (int ret = del(m_table[i].key, m_table[i].value)) + return ret; + } + return 0; + } + + private size_t findIndex(Key key) + const { + if (m_length == 0) return size_t.max; + size_t start = Traits.hashOf(key) & (m_table.length-1); + auto i = start; + while (!Traits.equals(m_table[i].key, key)) { + if (Traits.equals(m_table[i].key, Traits.clearValue)) return size_t.max; + if (++i >= m_table.length) i -= m_table.length; + if (i == start) return size_t.max; + } + return i; + } + + private size_t findInsertIndex(Key key) + const { + auto hash = Traits.hashOf(key); + size_t target = hash & (m_table.length-1); + auto i = target; + while (!Traits.equals(m_table[i].key, Traits.clearValue) && !Traits.equals(m_table[i].key, key)) { + if (++i >= m_table.length) i -= m_table.length; + assert (i != target, "No free bucket found, HashMap full!?"); + } + return i; + } + + private void 
+
+unittest {
+ import std.conv;
+
+ HashMap!(string, string) map;
+
+ foreach (i; 0 .. 100) {
+ map[to!string(i)] = to!string(i) ~ "+";
+ assert(map.length == i+1);
+ }
+
+ foreach (i; 0 .. 100) {
+ auto str = to!string(i);
+ auto pe = str in map;
+ assert(pe !is null && *pe == str ~ "+");
+ assert(map[str] == str ~ "+");
+ }
+
+ foreach (i; 0 .. 50) {
+ map.remove(to!string(i));
+ assert(map.length == 100-i-1);
+ }
+
+ foreach (i; 50 .. 100) {
+ auto str = to!string(i);
+ auto pe = str in map;
+ assert(pe !is null && *pe == str ~ "+");
+ assert(map[str] == str ~ "+");
+ }
+}
+
+// test for nothrow/@nogc compliance
+static if (__VERSION__ >= 2066)
+nothrow unittest {
+ HashMap!(int, int) map1;
+ HashMap!(string, string) map2;
+ map1[1] = 2;
+ map2["1"] = "2";
+
+ @nogc nothrow void performNoGCOps()
+ {
+ foreach (int v; map1) {}
+ foreach (int k, int v; map1) {}
+ assert(1 in map1);
+ assert(map1.length == 1);
+ assert(map1[1] == 2);
+ assert(map1.getNothrow(1, -1) == 2);
+
+ foreach (string v; map2) {}
+ foreach (string k, string v; map2) {}
+ assert("1" in map2);
+ assert(map2.length == 1);
+ assert(map2["1"] == "2");
+ assert(map2.getNothrow("1", "") == "2");
+ }
+
+ performNoGCOps();
+}
+
+unittest { // test for proper use of constructor/post-blit/destructor
+ static struct Test {
+ static size_t constructedCounter = 0;
+ bool constructed = false;
+ this(int) { constructed = true; constructedCounter++; }
+ this(this) { if (constructed) constructedCounter++; }
+ ~this() { if (constructed) constructedCounter--; }
+ }
+
+ assert(Test.constructedCounter == 0);
+
+ { // sanity check
+ Test t;
+ assert(Test.constructedCounter == 0);
+ t = Test(1);
+ assert(Test.constructedCounter == 1);
+ auto u = t;
+ assert(Test.constructedCounter == 2);
+ t = Test.init;
+ assert(Test.constructedCounter == 1);
+ }
+ assert(Test.constructedCounter == 0);
+
+ { // basic insertion and hash map resizing
+ HashMap!(int, Test) map;
+ foreach (i; 1 .. 67) {
+ map[i] = Test(1);
+ assert(Test.constructedCounter == i);
+ }
+ }
+
+ assert(Test.constructedCounter == 0);
+
+ { // test clear() and overwriting existing entries
+ HashMap!(int, Test) map;
+ foreach (i; 1 .. 67) {
+ map[i] = Test(1);
+ assert(Test.constructedCounter == i);
+ }
+ map.clear();
+ foreach (i; 1 .. 67) {
+ map[i] = Test(1);
+ assert(Test.constructedCounter == i);
+ }
+ foreach (i; 1 .. 
67) {
+ map[i] = Test(1);
+ assert(Test.constructedCounter == 66);
+ }
+ }
+
+ assert(Test.constructedCounter == 0);
+
+ { // test removing entries and adding entries after remove
+ HashMap!(int, Test) map;
+ foreach (i; 1 .. 67) {
+ map[i] = Test(1);
+ assert(Test.constructedCounter == i);
+ }
+ foreach (i; 1 .. 33) {
+ map.remove(i);
+ assert(Test.constructedCounter == 66 - i);
+ }
+ foreach (i; 67 .. 130) {
+ map[i] = Test(1);
+ assert(Test.constructedCounter == i - 32);
+ }
+ }
+
+ assert(Test.constructedCounter == 0);
+}
+
+private template UnConst(T) {
+ static if (is(T U == const(U))) {
+ alias UnConst = U;
+ } else static if (is(T V == immutable(V))) {
+ alias UnConst = V;
+ } else alias UnConst = T;
+}
+
+static if (__VERSION__ < 2066) private static bool nogc() { return false; }
diff --git a/source/vibe/internal/memory.d b/source/vibe/internal/memory.d
new file mode 100644
index 0000000..c2f8524
--- /dev/null
+++ b/source/vibe/internal/memory.d
@@ -0,0 +1,872 @@
+/**
+ Utility functions for memory management
+
+ Note that this module currently is a big sandbox for testing allocation related stuff.
+ Nothing here, including the interfaces, is final; it is all still experimental.
+
+ Copyright: © 2012-2013 RejectedSoftware e.K.
+ License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file.
+ Authors: Sönke Ludwig
+*/
+module vibe.internal.memory;
+
+import vibe.internal.traits : synchronizedIsNothrow;
+
+import core.exception : OutOfMemoryError;
+import core.stdc.stdlib;
+import core.memory;
+import std.conv;
+import std.exception : enforceEx;
+import std.traits;
+import std.algorithm;
+
+Allocator defaultAllocator() nothrow
+{
+ version(VibeManualMemoryManagement){
+ return manualAllocator();
+ } else {
+ static __gshared Allocator alloc;
+ if (!alloc) {
+ alloc = new GCAllocator;
+ //alloc = new AutoFreeListAllocator(alloc);
+ //alloc = new DebugAllocator(alloc);
+ alloc = new LockAllocator(alloc);
+ }
+ return alloc;
+ }
+}
+
+Allocator manualAllocator() nothrow
+{
+ static __gshared Allocator alloc;
+ if( !alloc ){
+ alloc = new MallocAllocator;
+ alloc = new AutoFreeListAllocator(alloc);
+ //alloc = new DebugAllocator(alloc);
+ alloc = new LockAllocator(alloc);
+ }
+ return alloc;
+}
+
+Allocator threadLocalAllocator() nothrow
+{
+ static Allocator alloc;
+ if (!alloc) {
+ version(VibeManualMemoryManagement) alloc = new MallocAllocator;
+ else alloc = new GCAllocator;
+ alloc = new AutoFreeListAllocator(alloc);
+ // alloc = new DebugAllocator(alloc);
+ }
+ return alloc;
+}
+
+Allocator threadLocalManualAllocator() nothrow
+{
+ static Allocator alloc;
+ if (!alloc) {
+ alloc = new MallocAllocator;
+ alloc = new AutoFreeListAllocator(alloc);
+ // alloc = new DebugAllocator(alloc);
+ }
+ return alloc;
+}
+
+auto allocObject(T, bool MANAGED = true, ARGS...)(Allocator allocator, ARGS args)
+{
+ auto mem = allocator.alloc(AllocSize!T);
+ static if( MANAGED ){
+ static if( hasIndirections!T )
+ GC.addRange(mem.ptr, mem.length);
+ return internalEmplace!T(mem, args);
+ }
+ else static if( is(T == class) ) return cast(T)mem.ptr;
+ else return cast(T*)mem.ptr;
+}
+
+T[] allocArray(T, bool MANAGED = true)(Allocator allocator, size_t n)
+{
+ auto mem = allocator.alloc(T.sizeof * n);
+ auto ret = cast(T[])mem;
+ static if( MANAGED ){
+ static if( hasIndirections!T )
+ GC.addRange(mem.ptr, mem.length);
+ // TODO: use memset for class, pointers and scalars
+ foreach (ref el; ret) {
+ internalEmplace!T(cast(void[])((&el)[0 .. 1]));
+ }
+ }
+ return ret;
+}
+
+void freeArray(T, bool MANAGED = true)(Allocator allocator, ref T[] array, bool call_destructors = true)
+{
+ static if (MANAGED) {
+ static if (hasIndirections!T)
+ GC.removeRange(array.ptr);
+ static if (hasElaborateDestructor!T)
+ if (call_destructors)
+ foreach_reverse (ref el; array)
+ destroy(el);
+ }
+ allocator.free(cast(void[])array);
+ array = null;
+}
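A minimal usage sketch of the helpers above, pairing allocArray with freeArray on the default allocator (illustration only):

unittest {
	auto alloc = defaultAllocator();

	// 16 ints constructed in allocator-provided memory
	int[] arr = allocArray!int(alloc, 16);
	arr[] = 42;
	assert(arr[3] == 42);

	// releases the memory; destructors would run here for elaborate element types
	freeArray(alloc, arr);
	assert(arr is null);
}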
+
+
+interface Allocator {
+nothrow:
+ enum size_t alignment = 0x10;
+ enum size_t alignmentMask = alignment-1;
+
+ void[] alloc(size_t sz)
+ out { assert((cast(size_t)__result.ptr & alignmentMask) == 0, "alloc() returned misaligned data."); }
+
+ void[] realloc(void[] mem, size_t new_sz)
+ in {
+ assert(mem.ptr !is null, "realloc() called with null array.");
+ assert((cast(size_t)mem.ptr & alignmentMask) == 0, "misaligned pointer passed to realloc().");
+ }
+ out { assert((cast(size_t)__result.ptr & alignmentMask) == 0, "realloc() returned misaligned data."); }
+
+ void free(void[] mem)
+ in {
+ assert(mem.ptr !is null, "free() called with null array.");
+ assert((cast(size_t)mem.ptr & alignmentMask) == 0, "misaligned pointer passed to free().");
+ }
+}
+
+
+/**
+ Simple proxy allocator protecting its base allocator with a mutex.
+*/
+class LockAllocator : Allocator {
+ private {
+ Allocator m_base;
+ }
+ this(Allocator base) nothrow { m_base = base; }
+ void[] alloc(size_t sz) {
+ static if (!synchronizedIsNothrow)
+ scope (failure) assert(0, "Internal error: function should be nothrow");
+
+ synchronized (this)
+ return m_base.alloc(sz);
+ }
+ void[] realloc(void[] mem, size_t new_sz)
+ in {
+ assert(mem.ptr !is null, "realloc() called with null array.");
+ assert((cast(size_t)mem.ptr & alignmentMask) == 0, "misaligned pointer passed to realloc().");
+ }
+ body {
+ static if (!synchronizedIsNothrow)
+ scope (failure) assert(0, "Internal error: function should be nothrow");
+
+ synchronized(this)
+ return m_base.realloc(mem, new_sz);
+ }
+ void free(void[] mem)
+ in {
+ assert(mem.ptr !is null, "free() called with null array.");
+ assert((cast(size_t)mem.ptr & alignmentMask) == 0, "misaligned pointer passed to free().");
+ }
+ body {
+ static if (!synchronizedIsNothrow)
+ scope (failure) assert(0, "Internal error: function should be nothrow");
+ synchronized(this)
+ m_base.free(mem);
+ }
+}
+
+final class DebugAllocator : Allocator {
+ import vibe.internal.hashmap : HashMap;
+ private {
+ Allocator m_baseAlloc;
+ HashMap!(void*, size_t) m_blocks;
+ size_t m_bytes;
+ size_t m_maxBytes;
+ }
+
+ this(Allocator base_allocator) nothrow
+ {
+ m_baseAlloc = base_allocator;
+ m_blocks = HashMap!(void*, size_t)(manualAllocator());
+ }
+
+ @property size_t allocatedBlockCount() const { return m_blocks.length; }
+ @property size_t bytesAllocated() const { return m_bytes; }
+ @property size_t maxBytesAllocated() const { return m_maxBytes; }
+
+ void[] alloc(size_t sz)
+ {
+ auto ret = m_baseAlloc.alloc(sz);
+ assert(ret.length == sz, "base.alloc() returned block with wrong size.");
+ assert(m_blocks.getNothrow(ret.ptr, size_t.max) == size_t.max, "base.alloc() returned block that is already allocated.");
+ m_blocks[ret.ptr] = sz;
+ m_bytes += sz;
+ if( m_bytes > m_maxBytes ){
+ m_maxBytes = m_bytes;
+ logDebug_("New allocation maximum: %d (%d blocks)", m_maxBytes, m_blocks.length);
+ }
+ return ret;
+ }
+
+ void[] realloc(void[] mem, size_t new_size)
+ {
+ auto sz = m_blocks.getNothrow(mem.ptr, size_t.max);
+ assert(sz != size_t.max, "realloc() called with non-allocated pointer.");
+ assert(sz == mem.length, 
"realloc() called with block of wrong size."); + auto ret = m_baseAlloc.realloc(mem, new_size); + assert(ret.length == new_size, "base.realloc() returned block with wrong size."); + assert(ret.ptr is mem.ptr || m_blocks.getNothrow(ret.ptr, size_t.max) == size_t.max, "base.realloc() returned block that is already allocated."); + m_bytes -= sz; + m_blocks.remove(mem.ptr); + m_blocks[ret.ptr] = new_size; + m_bytes += new_size; + return ret; + } + void free(void[] mem) + { + auto sz = m_blocks.getNothrow(mem.ptr, size_t.max); + assert(sz != size_t.max, "free() called with non-allocated object."); + assert(sz == mem.length, "free() called with block of wrong size."); + m_baseAlloc.free(mem); + m_bytes -= sz; + m_blocks.remove(mem.ptr); + } +} + +final class MallocAllocator : Allocator { + void[] alloc(size_t sz) + { + static err = new immutable OutOfMemoryError; + auto ptr = .malloc(sz + Allocator.alignment); + if (ptr is null) throw err; + return adjustPointerAlignment(ptr)[0 .. sz]; + } + + void[] realloc(void[] mem, size_t new_size) + { + size_t csz = min(mem.length, new_size); + auto p = extractUnalignedPointer(mem.ptr); + size_t oldmisalign = mem.ptr - p; + + auto pn = cast(ubyte*).realloc(p, new_size+Allocator.alignment); + if (p == pn) return pn[oldmisalign .. new_size+oldmisalign]; + + auto pna = cast(ubyte*)adjustPointerAlignment(pn); + auto newmisalign = pna - pn; + + // account for changed alignment after realloc (move memory back to aligned position) + if (oldmisalign != newmisalign) { + if (newmisalign > oldmisalign) { + foreach_reverse (i; 0 .. csz) + pn[i + newmisalign] = pn[i + oldmisalign]; + } else { + foreach (i; 0 .. csz) + pn[i + newmisalign] = pn[i + oldmisalign]; + } + } + + return pna[0 .. new_size]; + } + + void free(void[] mem) + { + .free(extractUnalignedPointer(mem.ptr)); + } +} + +final class GCAllocator : Allocator { + void[] alloc(size_t sz) + { + auto mem = GC.malloc(sz+Allocator.alignment); + auto alignedmem = adjustPointerAlignment(mem); + assert(alignedmem - mem <= Allocator.alignment); + auto ret = alignedmem[0 .. sz]; + ensureValidMemory(ret); + return ret; + } + void[] realloc(void[] mem, size_t new_size) + { + size_t csz = min(mem.length, new_size); + + auto p = extractUnalignedPointer(mem.ptr); + size_t misalign = mem.ptr - p; + assert(misalign <= Allocator.alignment); + + void[] ret; + auto extended = GC.extend(p, new_size - mem.length, new_size - mem.length); + if (extended) { + assert(extended >= new_size+Allocator.alignment); + ret = p[misalign .. new_size+misalign]; + } else { + ret = alloc(new_size); + ret[0 .. csz] = mem[0 .. csz]; + } + ensureValidMemory(ret); + return ret; + } + void free(void[] mem) + { + // For safety reasons, the GCAllocator should never explicitly free memory. + //GC.free(extractUnalignedPointer(mem.ptr)); + } +} + +final class AutoFreeListAllocator : Allocator { + import std.typetuple; + + private { + enum minExponent = 5; + enum freeListCount = 14; + FreeListAlloc[freeListCount] m_freeLists; + Allocator m_baseAlloc; + } + + this(Allocator base_allocator) nothrow + { + m_baseAlloc = base_allocator; + foreach (i; iotaTuple!freeListCount) + m_freeLists[i] = new FreeListAlloc(nthFreeListSize!(i), m_baseAlloc); + } + + void[] alloc(size_t sz) + { + auto idx = getAllocatorIndex(sz); + return idx < freeListCount ? m_freeLists[idx].alloc()[0 .. 
sz] : m_baseAlloc.alloc(sz);
+ }
+
+ void[] realloc(void[] data, size_t sz)
+ {
+ auto curidx = getAllocatorIndex(data.length);
+ auto newidx = getAllocatorIndex(sz);
+
+ if (curidx == newidx) {
+ if (curidx == freeListCount) {
+ // forward large blocks to the base allocator
+ return m_baseAlloc.realloc(data, sz);
+ } else {
+ // just grow the slice if it still fits into the free list slot
+ return data.ptr[0 .. sz];
+ }
+ }
+
+ // otherwise re-allocate manually
+ auto newd = alloc(sz);
+ assert(newd.ptr+sz <= data.ptr || newd.ptr >= data.ptr+data.length, "New block overlaps old one!?");
+ auto len = min(data.length, sz);
+ newd[0 .. len] = data[0 .. len];
+ free(data);
+ return newd;
+ }
+
+ void free(void[] data)
+ {
+ //logTrace("AFL free %08X(%s)", data.ptr, data.length);
+ auto idx = getAllocatorIndex(data.length);
+ if (idx < freeListCount) m_freeLists[idx].free(data.ptr[0 .. 1 << (idx + minExponent)]);
+ else m_baseAlloc.free(data);
+ }
+
+ // does a CT optimized binary search for the right allocator
+ private int getAllocatorIndex(size_t sz)
+ @safe nothrow @nogc {
+ //pragma(msg, getAllocatorIndexStr!(0, freeListCount));
+ return mixin(getAllocatorIndexStr!(0, freeListCount));
+ }
+
+ private template getAllocatorIndexStr(int low, int high)
+ {
+ static if (__VERSION__ <= 2066) import std.string : format;
+ else import std.format : format;
+ static if (low == high) enum getAllocatorIndexStr = format("%s", low);
+ else {
+ enum mid = (low + high) / 2;
+ enum getAllocatorIndexStr =
+ "sz > nthFreeListSize!%s ? %s : %s"
+ .format(mid, getAllocatorIndexStr!(mid+1, high), getAllocatorIndexStr!(low, mid));
+ }
+ }
+
+ unittest {
+ auto a = new AutoFreeListAllocator(null);
+ assert(a.getAllocatorIndex(0) == 0);
+ foreach (i; iotaTuple!freeListCount) {
+ assert(a.getAllocatorIndex(nthFreeListSize!i-1) == i);
+ assert(a.getAllocatorIndex(nthFreeListSize!i) == i);
+ assert(a.getAllocatorIndex(nthFreeListSize!i+1) == i+1);
+ }
+ assert(a.getAllocatorIndex(size_t.max) == freeListCount);
+ }
+
+ private static pure size_t nthFreeListSize(size_t i)() { return 1 << (i + minExponent); }
+ private template iotaTuple(size_t i) {
+ static if (i > 1) alias iotaTuple = TypeTuple!(iotaTuple!(i-1), i-1);
+ else alias iotaTuple = TypeTuple!(0);
+ }
+}
+
+final class PoolAllocator : Allocator {
+ static struct Pool { Pool* next; void[] data; void[] remaining; }
+ static struct Destructor { Destructor* next; void function(void*) destructor; void* object; }
+ private {
+ Allocator m_baseAllocator;
+ Pool* m_freePools;
+ Pool* m_fullPools;
+ Destructor* m_destructors;
+ size_t m_poolSize;
+ }
+
+ this(size_t pool_size, Allocator base) nothrow
+ {
+ m_poolSize = pool_size;
+ m_baseAllocator = base;
+ }
+
+ @property size_t totalSize()
+ {
+ size_t amt = 0;
+ for (auto p = m_fullPools; p; p = p.next)
+ amt += p.data.length;
+ for (auto p = m_freePools; p; p = p.next)
+ amt += p.data.length;
+ return amt;
+ }
+
+ @property size_t allocatedSize()
+ {
+ size_t amt = 0;
+ for (auto p = m_fullPools; p; p = p.next)
+ amt += p.data.length;
+ for (auto p = m_freePools; p; p = p.next)
+ amt += p.data.length - p.remaining.length;
+ return amt;
+ }
+
+ void[] alloc(size_t sz)
+ {
+ auto aligned_sz = alignedSize(sz);
+
+ Pool* pprev = null;
+ Pool* p = cast(Pool*)m_freePools;
+ while( p && p.remaining.length < aligned_sz ){
+ pprev = p;
+ p = p.next;
+ }
+
+ if( !p ){
+ auto pmem = m_baseAllocator.alloc(AllocSize!Pool);
+
+ p = emplace!Pool(cast(Pool*)pmem.ptr);
+ p.data = m_baseAllocator.alloc(max(aligned_sz, 
m_poolSize)); + p.remaining = p.data; + p.next = cast(Pool*)m_freePools; + m_freePools = p; + pprev = null; + } + + auto ret = p.remaining[0 .. aligned_sz]; + p.remaining = p.remaining[aligned_sz .. $]; + if( !p.remaining.length ){ + if( pprev ){ + pprev.next = p.next; + } else { + m_freePools = p.next; + } + p.next = cast(Pool*)m_fullPools; + m_fullPools = p; + } + + return ret[0 .. sz]; + } + + void[] realloc(void[] arr, size_t newsize) + { + auto aligned_sz = alignedSize(arr.length); + auto aligned_newsz = alignedSize(newsize); + + if( aligned_newsz <= aligned_sz ) return arr[0 .. newsize]; // TODO: back up remaining + + auto pool = m_freePools; + bool last_in_pool = pool && arr.ptr+aligned_sz == pool.remaining.ptr; + if( last_in_pool && pool.remaining.length+aligned_sz >= aligned_newsz ){ + pool.remaining = pool.remaining[aligned_newsz-aligned_sz .. $]; + arr = arr.ptr[0 .. aligned_newsz]; + assert(arr.ptr+arr.length == pool.remaining.ptr, "Last block does not align with the remaining space!?"); + return arr[0 .. newsize]; + } else { + auto ret = alloc(newsize); + assert(ret.ptr >= arr.ptr+aligned_sz || ret.ptr+ret.length <= arr.ptr, "New block overlaps old one!?"); + ret[0 .. min(arr.length, newsize)] = arr[0 .. min(arr.length, newsize)]; + return ret; + } + } + + void free(void[] mem) + { + } + + void freeAll() + { + version(VibeManualMemoryManagement){ + // destroy all initialized objects + for (auto d = m_destructors; d; d = d.next) + d.destructor(cast(void*)d.object); + m_destructors = null; + + // put all full Pools into the free pools list + for (Pool* p = cast(Pool*)m_fullPools, pnext; p; p = pnext) { + pnext = p.next; + p.next = cast(Pool*)m_freePools; + m_freePools = cast(Pool*)p; + } + + // free up all pools + for (Pool* p = cast(Pool*)m_freePools; p; p = p.next) + p.remaining = p.data; + } + } + + void reset() + { + version(VibeManualMemoryManagement){ + freeAll(); + Pool* pnext; + for (auto p = cast(Pool*)m_freePools; p; p = pnext) { + pnext = p.next; + m_baseAllocator.free(p.data); + m_baseAllocator.free((cast(void*)p)[0 .. AllocSize!Pool]); + } + m_freePools = null; + } + } + + private static destroy(T)(void* ptr) + { + static if( is(T == class) ) .destroy(cast(T)ptr); + else .destroy(*cast(T*)ptr); + } +} + +final class FreeListAlloc : Allocator +{ +nothrow: + private static struct FreeListSlot { FreeListSlot* next; } + private { + FreeListSlot* m_firstFree = null; + size_t m_nalloc = 0; + size_t m_nfree = 0; + Allocator m_baseAlloc; + immutable size_t m_elemSize; + } + + this(size_t elem_size, Allocator base_allocator) + { + assert(elem_size >= size_t.sizeof); + m_elemSize = elem_size; + m_baseAlloc = base_allocator; + logDebug_("Create FreeListAlloc %d", m_elemSize); + } + + @property size_t elementSize() const { return m_elemSize; } + + void[] alloc(size_t sz) + { + assert(sz == m_elemSize, "Invalid allocation size."); + return alloc(); + } + + void[] alloc() + { + void[] mem; + if( m_firstFree ){ + auto slot = m_firstFree; + m_firstFree = slot.next; + slot.next = null; + mem = (cast(void*)slot)[0 .. 
m_elemSize]; + debug m_nfree--; + } else { + mem = m_baseAlloc.alloc(m_elemSize); + //logInfo("Alloc %d bytes: alloc: %d, free: %d", SZ, s_nalloc, s_nfree); + } + debug m_nalloc++; + //logInfo("Alloc %d bytes: alloc: %d, free: %d", SZ, s_nalloc, s_nfree); + return mem; + } + + void[] realloc(void[] mem, size_t sz) + { + assert(mem.length == m_elemSize); + assert(sz == m_elemSize); + return mem; + } + + void free(void[] mem) + { + assert(mem.length == m_elemSize, "Memory block passed to free has wrong size."); + auto s = cast(FreeListSlot*)mem.ptr; + s.next = m_firstFree; + m_firstFree = s; + m_nalloc--; + m_nfree++; + } +} + +struct FreeListObjectAlloc(T, bool USE_GC = true, bool INIT = true) +{ + enum ElemSize = AllocSize!T; + enum ElemSlotSize = max(AllocSize!T, Slot.sizeof); + + static if( is(T == class) ){ + alias TR = T; + } else { + alias TR = T*; + } + + struct Slot { Slot* next; } + + private static Slot* s_firstFree; + + static TR alloc(ARGS...)(ARGS args) + { + void[] mem; + if (s_firstFree !is null) { + auto ret = s_firstFree; + s_firstFree = s_firstFree.next; + ret.next = null; + mem = (cast(void*)ret)[0 .. ElemSize]; + } else { + //logInfo("alloc %s/%d", T.stringof, ElemSize); + mem = manualAllocator().alloc(ElemSlotSize); + static if( hasIndirections!T ) GC.addRange(mem.ptr, ElemSlotSize); + } + + static if (INIT) return cast(TR)internalEmplace!(Unqual!T)(mem, args); // FIXME: this emplace has issues with qualified types, but Unqual!T may result in the wrong constructor getting called. + else return cast(TR)mem.ptr; + } + + static void free(TR obj) + { + static if (INIT) { + scope (failure) assert(0, "You shouldn't throw in destructors"); + auto objc = obj; + static if (is(TR == T*)) .destroy(*objc);//typeid(T).destroy(cast(void*)obj); + else .destroy(objc); + } + + auto sl = cast(Slot*)obj; + sl.next = s_firstFree; + s_firstFree = sl; + //static if( hasIndirections!T ) GC.removeRange(cast(void*)obj); + //manualAllocator().free((cast(void*)obj)[0 .. ElemSlotSize]); + } +} + + +template AllocSize(T) +{ + static if (is(T == class)) { + // workaround for a strange bug where AllocSize!SSLStream == 0: TODO: dustmite! 
+ enum dummy = T.stringof ~ __traits(classInstanceSize, T).stringof; + enum AllocSize = __traits(classInstanceSize, T); + } else { + enum AllocSize = T.sizeof; + } +} + +struct FreeListRef(T, bool INIT = true) +{ + alias ObjAlloc = FreeListObjectAlloc!(T, true, INIT); + enum ElemSize = AllocSize!T; + + static if( is(T == class) ){ + alias TR = T; + } else { + alias TR = T*; + } + + private TR m_object; + private size_t m_magic = 0x1EE75817; // workaround for compiler bug + + static FreeListRef opCall(ARGS...)(ARGS args) + { + //logInfo("refalloc %s/%d", T.stringof, ElemSize); + FreeListRef ret; + ret.m_object = ObjAlloc.alloc(args); + ret.refCount = 1; + return ret; + } + + ~this() + { + //if( m_object ) logInfo("~this!%s(): %d", T.stringof, this.refCount); + //if( m_object ) logInfo("ref %s destructor %d", T.stringof, refCount); + //else logInfo("ref %s destructor %d", T.stringof, 0); + clear(); + m_magic = 0; + m_object = null; + } + + this(this) + { + checkInvariants(); + if( m_object ){ + //if( m_object ) logInfo("this!%s(this): %d", T.stringof, this.refCount); + this.refCount++; + } + } + + void opAssign(FreeListRef other) + { + clear(); + m_object = other.m_object; + if( m_object ){ + //logInfo("opAssign!%s(): %d", T.stringof, this.refCount); + refCount++; + } + } + + void clear() + { + checkInvariants(); + if (m_object) { + if (--this.refCount == 0) + ObjAlloc.free(m_object); + } + + m_object = null; + m_magic = 0x1EE75817; + } + + @property const(TR) get() const { checkInvariants(); return m_object; } + @property TR get() { checkInvariants(); return m_object; } + alias get this; + + private @property ref int refCount() + const { + auto ptr = cast(ubyte*)cast(void*)m_object; + ptr += ElemSize; + return *cast(int*)ptr; + } + + private void checkInvariants() + const { + assert(m_magic == 0x1EE75817); + assert(!m_object || refCount > 0); + } +} + +private void* extractUnalignedPointer(void* base) nothrow +{ + ubyte misalign = *(cast(ubyte*)base-1); + assert(misalign <= Allocator.alignment); + return base - misalign; +} + +private void* adjustPointerAlignment(void* base) nothrow +{ + ubyte misalign = Allocator.alignment - (cast(size_t)base & Allocator.alignmentMask); + base += misalign; + *(cast(ubyte*)base-1) = misalign; + return base; +} + +unittest { + void test_align(void* p, size_t adjustment) { + void* pa = adjustPointerAlignment(p); + assert((cast(size_t)pa & Allocator.alignmentMask) == 0, "Non-aligned pointer."); + assert(*(cast(ubyte*)pa-1) == adjustment, "Invalid adjustment "~to!string(p)~": "~to!string(*(cast(ubyte*)pa-1))); + void* pr = extractUnalignedPointer(pa); + assert(pr == p, "Recovered base != original"); + } + void* ptr = .malloc(0x40); + ptr += Allocator.alignment - (cast(size_t)ptr & Allocator.alignmentMask); + test_align(ptr++, 0x10); + test_align(ptr++, 0x0F); + test_align(ptr++, 0x0E); + test_align(ptr++, 0x0D); + test_align(ptr++, 0x0C); + test_align(ptr++, 0x0B); + test_align(ptr++, 0x0A); + test_align(ptr++, 0x09); + test_align(ptr++, 0x08); + test_align(ptr++, 0x07); + test_align(ptr++, 0x06); + test_align(ptr++, 0x05); + test_align(ptr++, 0x04); + test_align(ptr++, 0x03); + test_align(ptr++, 0x02); + test_align(ptr++, 0x01); + test_align(ptr++, 0x10); +} + +private size_t alignedSize(size_t sz) nothrow +{ + return ((sz + Allocator.alignment - 1) / Allocator.alignment) * Allocator.alignment; +} + +unittest { + foreach( i; 0 .. 
20 ){
+ auto ia = alignedSize(i);
+ assert(ia >= i);
+ assert((ia & Allocator.alignmentMask) == 0);
+ assert(ia < i+Allocator.alignment);
+ }
+}
+
+private void ensureValidMemory(void[] mem) nothrow
+{
+ auto bytes = cast(ubyte[])mem;
+ swap(bytes[0], bytes[$-1]);
+ swap(bytes[0], bytes[$-1]);
+}
+
+/// See issue #14194
+private T internalEmplace(T, Args...)(void[] chunk, auto ref Args args)
+ if (is(T == class))
+in {
+ import std.string, std.format;
+ assert(chunk.length >= T.sizeof,
+ format("emplace: Chunk size too small: %s < %s size = %s",
+ chunk.length, T.stringof, T.sizeof));
+ assert((cast(size_t) chunk.ptr) % T.alignof == 0,
+ format("emplace: Misaligned memory block (0x%X): it must be %s-byte aligned for type %s", chunk.ptr, T.alignof, T.stringof));
+
+} body {
+ enum classSize = __traits(classInstanceSize, T);
+ auto result = cast(T) chunk.ptr;
+
+ // Initialize the object in its pre-ctor state
+ chunk[0 .. classSize] = typeid(T).init[];
+
+ // Call the ctor if any
+ static if (is(typeof(result.__ctor(args))))
+ {
+ // T defines a genuine constructor accepting args
+ // Go the classic route: write .init first, then call ctor
+ result.__ctor(args);
+ }
+ else
+ {
+ static assert(args.length == 0 && !is(typeof(&T.__ctor)),
+ "Don't know how to initialize an object of type "
+ ~ T.stringof ~ " with arguments " ~ Args.stringof);
+ }
+ return result;
+}
+
+/// Ditto
+private auto internalEmplace(T, Args...)(void[] chunk, auto ref Args args)
+ if (!is(T == class))
+in {
+ import std.string, std.format;
+ assert(chunk.length >= T.sizeof,
+ format("emplace: Chunk size too small: %s < %s size = %s",
+ chunk.length, T.stringof, T.sizeof));
+ assert((cast(size_t) chunk.ptr) % T.alignof == 0,
+ format("emplace: Misaligned memory block (0x%X): it must be %s-byte aligned for type %s", chunk.ptr, T.alignof, T.stringof));
+
+} body {
+ return emplace(cast(T*)chunk.ptr, args);
+}
+
+private void logDebug_(ARGS...)(string msg, ARGS args) {}
diff --git a/source/vibe/internal/string.d b/source/vibe/internal/string.d
new file mode 100644
index 0000000..6de94ea
--- /dev/null
+++ b/source/vibe/internal/string.d
@@ -0,0 +1,235 @@
+/**
+ Utility functions for string processing
+
+ Copyright: © 2012-2014 RejectedSoftware e.K.
+ License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file.
+ Authors: Sönke Ludwig
+*/
+module vibe.internal.string;
+
+public import std.string;
+
+import vibe.internal.array;
+import vibe.internal.memory;
+
+import std.algorithm;
+import std.array;
+import std.ascii;
+import std.format;
+import std.uni;
+import std.utf;
+import core.exception;
+
+
+/**
+ Takes a string with possibly invalid UTF8 sequences and outputs a valid UTF8 string as near to
+ the original as possible.
+*/
+string sanitizeUTF8(in ubyte[] str)
+@safe pure {
+ import std.utf;
+ auto ret = appender!string();
+ ret.reserve(str.length);
+
+ size_t i = 0;
+ while (i < str.length) {
+ dchar ch = str[i];
+ try ch = std.utf.decode(cast(const(char[]))str, i);
+ catch( UTFException ){ i++; }
+ //catch( AssertError ){ i++; }
+ char[4] dst;
+ auto len = std.utf.encode(dst, ch);
+ ret.put(dst[0 .. len]);
+ }
+
+ return ret.data;
+}
+
+/**
+ Strips the byte order mark of a UTF-8 encoded string.
+ This is useful when the string is coming from a file.
+*/
+string stripUTF8Bom(string str)
+@safe pure nothrow {
+ if (str.length >= 3 && str[0 .. 3] == [0xEF, 0xBB, 0xBF])
+ return str[3 ..$];
+ return str;
+}
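A short usage sketch for the two functions above; the byte sequences are made-up examples:

unittest {
	import std.utf : validate;

	// An invalid byte (0xFF) is re-encoded instead of propagated.
	auto fixed = sanitizeUTF8(cast(const(ubyte)[])"gr\xFFn");
	validate(fixed); // does not throw

	// A leading UTF-8 BOM, e.g. from a file, is removed.
	assert(stripUTF8Bom("\xEF\xBB\xBFhello") == "hello");
}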
+
+
+/**
+ Checks if all characters in 'str' are contained in 'chars'.
+*/
+bool allOf(string str, string chars)
+@safe pure {
+ foreach (dchar ch; str)
+ if (!chars.canFind(ch))
+ return false;
+ return true;
+}
+
+ptrdiff_t indexOfCT(Char)(in Char[] s, dchar c, CaseSensitive cs = CaseSensitive.yes)
+@safe pure {
+ if (__ctfe) {
+ if (cs == CaseSensitive.yes) {
+ foreach (i, dchar ch; s)
+ if (ch == c)
+ return i;
+ } else {
+ c = std.uni.toLower(c);
+ foreach (i, dchar ch; s)
+ if (std.uni.toLower(ch) == c)
+ return i;
+ }
+ return -1;
+ } else return std.string.indexOf(s, c, cs);
+}
+ptrdiff_t indexOfCT(Char)(in Char[] s, in Char[] needle)
+{
+ if (__ctfe) {
+ if (s.length < needle.length) return -1;
+ foreach (i; 0 .. s.length - needle.length + 1)
+ if (s[i .. i+needle.length] == needle)
+ return i;
+ return -1;
+ } else return std.string.indexOf(s, needle);
+}
+
+/**
+ Checks if any character in 'str' is contained in 'chars'.
+*/
+bool anyOf(string str, string chars)
+@safe pure {
+ foreach (ch; str)
+ if (chars.canFind(ch))
+ return true;
+ return false;
+}
+
+
+/// ASCII whitespace trimming (space and tab)
+string stripLeftA(string s)
+@safe pure nothrow {
+ while (s.length > 0 && (s[0] == ' ' || s[0] == '\t'))
+ s = s[1 .. $];
+ return s;
+}
+
+/// ASCII whitespace trimming (space and tab)
+string stripRightA(string s)
+@safe pure nothrow {
+ while (s.length > 0 && (s[$-1] == ' ' || s[$-1] == '\t'))
+ s = s[0 .. $-1];
+ return s;
+}
+
+/// ASCII whitespace trimming (space and tab)
+string stripA(string s)
+@safe pure nothrow {
+ return stripLeftA(stripRightA(s));
+}
+
+/// Finds the first occurrence of any of the characters in `chars`
+sizediff_t indexOfAny(string str, string chars)
+@safe pure {
+ foreach (i, char ch; str)
+ if (chars.canFind(ch))
+ return i;
+ return -1;
+}
+alias countUntilAny = indexOfAny;
+
+/**
+ Finds the closing bracket (works with any of '[', '$(LPAREN)', '<', '{').
+
+ Params:
+ str = input string
+ nested = whether to skip nested brackets
+ Returns:
+ The index of the closing bracket or -1 for unbalanced strings
+ and strings that don't start with a bracket.
+*/
+sizediff_t matchBracket(string str, bool nested = true)
+@safe pure nothrow {
+ if (str.length < 2) return -1;
+
+ char open = str[0], close = void;
+ switch (str[0]) {
+ case '[': close = ']'; break;
+ case '(': close = ')'; break;
+ case '<': close = '>'; break;
+ case '{': close = '}'; break;
+ default: return -1;
+ }
+
+ size_t level = 1;
+ foreach (i, char c; str[1 .. $]) {
+ if (nested && c == open) ++level;
+ else if (c == close) --level;
+ if (level == 0) return i + 1;
+ }
+ return -1;
+}
+
+@safe unittest
+{
+ static struct Test { string str; sizediff_t res; }
+ enum tests = [
+ Test("[foo]", 4), Test("<bar>", 4), Test("{baz}", 4),
+ Test("[", -1), Test("[foo", -1), Test("ab[f]", -1),
+ Test("[foo[bar]]", 9), Test("[foo{bar]]", 8),
+ ];
+ foreach (test; tests)
+ assert(matchBracket(test.str) == test.res);
+ assert(matchBracket("[foo[bar]]", false) == 8);
+ static assert(matchBracket("[foo]") == 4);
+}
+
+/// Same as std.string.format, just using an allocator.
+string formatAlloc(ARGS...)(Allocator alloc, string fmt, ARGS args) +{ + auto app = AllocAppender!string(alloc); + formattedWrite(&app, fmt, args); + return app.data; +} + +/// Special version of icmp() with optimization for ASCII characters +int icmp2(string a, string b) +@safe pure { + size_t i = 0, j = 0; + + // fast skip equal prefix + size_t min_len = min(a.length, b.length); + while( i < min_len && a[i] == b[i] ) i++; + if( i > 0 && (a[i-1] & 0x80) ) i--; // don't stop half-way in a UTF-8 sequence + j = i; + + // compare the differing character and the rest of the string + while(i < a.length && j < b.length){ + uint ac = cast(uint)a[i]; + uint bc = cast(uint)b[j]; + if( !((ac | bc) & 0x80) ){ + i++; + j++; + if( ac >= 'A' && ac <= 'Z' ) ac += 'a' - 'A'; + if( bc >= 'A' && bc <= 'Z' ) bc += 'a' - 'A'; + if( ac < bc ) return -1; + else if( ac > bc ) return 1; + } else { + dchar acp = decode(a, i); + dchar bcp = decode(b, j); + if( acp != bcp ){ + acp = std.uni.toLower(acp); + bcp = std.uni.toLower(bcp); + if( acp < bcp ) return -1; + else if( acp > bcp ) return 1; + } + } + } + + if( i < a.length ) return 1; + else if( j < b.length ) return -1; + + assert(i == a.length || j == b.length, "Strings equal but we didn't fully compare them!?"); + return 0; +} diff --git a/source/vibe/internal/traits.d b/source/vibe/internal/traits.d new file mode 100644 index 0000000..b2449e4 --- /dev/null +++ b/source/vibe/internal/traits.d @@ -0,0 +1,384 @@ +/** + Extensions to `std.traits` module of Phobos. Some may eventually make it into Phobos, + some are dirty hacks that work only for vibe.d + + Copyright: © 2012 RejectedSoftware e.K. + License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file. + Authors: Sönke Ludwig, Михаил Страшун +*/ + +module vibe.internal.traits; + +import vibe.internal.typetuple; + + +/** + Checks if given type is a getter function type + + Returns: `true` if argument is a getter + */ +template isPropertyGetter(T...) + if (T.length == 1) +{ + import std.traits : functionAttributes, FunctionAttribute, ReturnType, + isSomeFunction; + static if (isSomeFunction!(T[0])) { + enum isPropertyGetter = + (functionAttributes!(T[0]) & FunctionAttribute.property) != 0 + && !is(ReturnType!T == void); + } + else + enum isPropertyGetter = false; +} + +/// +unittest +{ + interface Test + { + @property int getter(); + @property void setter(int); + int simple(); + } + + static assert(isPropertyGetter!(typeof(&Test.getter))); + static assert(!isPropertyGetter!(typeof(&Test.setter))); + static assert(!isPropertyGetter!(typeof(&Test.simple))); + static assert(!isPropertyGetter!int); +} + +/** + Checks if given type is a setter function type + + Returns: `true` if argument is a setter + */ +template isPropertySetter(T...) + if (T.length == 1) +{ + import std.traits : functionAttributes, FunctionAttribute, ReturnType, + isSomeFunction; + + static if (isSomeFunction!(T[0])) { + enum isPropertySetter = + (functionAttributes!(T) & FunctionAttribute.property) != 0 + && is(ReturnType!(T[0]) == void); + } + else + enum isPropertySetter = false; +} + +/// +unittest +{ + interface Test + { + @property int getter(); + @property void setter(int); + int simple(); + } + + static assert(isPropertySetter!(typeof(&Test.setter))); + static assert(!isPropertySetter!(typeof(&Test.getter))); + static assert(!isPropertySetter!(typeof(&Test.simple))); + static assert(!isPropertySetter!int); +} + +/** + Deduces single base interface for a type. 
Multiple interfaces
+ will result in a compile-time error.
+
+ Params:
+ T = interface or class type
+
+ Returns:
+ T if it is an interface. If T is a class, interface it implements.
+*/
+template baseInterface(T)
+ if (is(T == interface) || is(T == class))
+{
+ import std.traits : InterfacesTuple;
+
+ static if (is(T == interface)) {
+ alias baseInterface = T;
+ }
+ else
+ {
+ alias Ifaces = InterfacesTuple!T;
+ static assert (
+ Ifaces.length == 1,
+ "Type must be either provided as an interface or implement only one interface"
+ );
+ alias baseInterface = Ifaces[0];
+ }
+}
+
+///
+unittest
+{
+ interface I1 { }
+ class A : I1 { }
+ interface I2 { }
+ class B : I1, I2 { }
+
+ static assert (is(baseInterface!I1 == I1));
+ static assert (is(baseInterface!A == I1));
+ static assert (!is(typeof(baseInterface!B)));
+}
+
+
+/**
+ Determines if a member is a public, non-static data field.
+*/
+template isRWPlainField(T, string M)
+{
+ static if (!isRWField!(T, M)) enum isRWPlainField = false;
+ else {
+ //pragma(msg, T.stringof~"."~M~":"~typeof(__traits(getMember, T, M)).stringof);
+ enum isRWPlainField = __traits(compiles, *(&__traits(getMember, Tgen!T(), M)) = *(&__traits(getMember, Tgen!T(), M)));
+ }
+}
+
+/**
+ Determines if a member is a public, non-static, de-facto data field.
+
+ In addition to plain data fields, R/W properties are also accepted.
+*/
+template isRWField(T, string M)
+{
+ import std.traits;
+ import std.typetuple;
+
+ static void testAssign()() {
+ T t = void;
+ __traits(getMember, t, M) = __traits(getMember, t, M);
+ }
+
+ // reject type aliases
+ static if (is(TypeTuple!(__traits(getMember, T, M)))) enum isRWField = false;
+ // reject non-public members
+ else static if (!isPublicMember!(T, M)) enum isRWField = false;
+ // reject static members
+ else static if (!isNonStaticMember!(T, M)) enum isRWField = false;
+ // reject non-typed members
+ else static if (!is(typeof(__traits(getMember, T, M)))) enum isRWField = false;
+ // reject void typed members (includes templates)
+ else static if (is(typeof(__traits(getMember, T, M)) == void)) enum isRWField = false;
+ // reject non-assignable members
+ else static if (!__traits(compiles, testAssign!()())) enum isRWField = false;
+ else static if (anySatisfy!(isSomeFunction, __traits(getMember, T, M))) {
+ // If M is a function, reject if not @property or returns by ref
+ private enum FA = functionAttributes!(__traits(getMember, T, M));
+ enum isRWField = (FA & FunctionAttribute.property) != 0;
+ } else {
+ enum isRWField = true;
+ }
+}
+
+unittest {
+ import std.algorithm;
+
+ struct S {
+ alias a = int; // alias
+ int i; // plain RW field
+ enum j = 42; // manifest constant
+ static int k = 42; // static field
+ private int privateJ; // private RW field
+
+ this(Args...)(Args args) {}
+
+ // read-write property (OK)
+ @property int p1() { return privateJ; }
+ @property void p1(int j) { privateJ = j; }
+ // read-only property (NO)
+ @property int p2() { return privateJ; }
+ // write-only property (NO)
+ @property void p3(int value) { privateJ = value; }
+ // ref returning property (OK)
+ @property ref int p4() { return i; }
+ // parameter-less template property (OK)
+ @property ref int p5()() { return i; }
+ // not treated as a property by DMD, so not a field
+ @property int p6()() { return privateJ; }
+ @property void p6(int j)() { privateJ = j; }
+
+ static @property int p7() { return k; }
+ static @property void p7(int value) { k = value; }
+
+ ref int f1() { return i; } // ref returning function (no field)
+
+ int 
f2(Args...)(Args args) { return i; } + + ref int f3(Args...)(Args args) { return i; } + + void someMethod() {} + + ref int someTempl()() { return i; } + } + + enum plainFields = ["i"]; + enum fields = ["i", "p1", "p4", "p5"]; + + foreach (mem; __traits(allMembers, S)) { + static if (isRWField!(S, mem)) static assert(fields.canFind(mem), mem~" detected as field."); + else static assert(!fields.canFind(mem), mem~" not detected as field."); + + static if (isRWPlainField!(S, mem)) static assert(plainFields.canFind(mem), mem~" not detected as plain field."); + else static assert(!plainFields.canFind(mem), mem~" not detected as plain field."); + } +} + +package T Tgen(T)(){ return T.init; } + + +/** + Tests if the protection of a member is public. +*/ +template isPublicMember(T, string M) +{ + import std.algorithm, std.typetuple : TypeTuple; + + static if (!__traits(compiles, TypeTuple!(__traits(getMember, T, M)))) enum isPublicMember = false; + else { + alias MEM = TypeTuple!(__traits(getMember, T, M)); + enum isPublicMember = __traits(getProtection, MEM).among("public", "export"); + } +} + +unittest { + class C { + int a; + export int b; + protected int c; + private int d; + package int e; + void f() {} + static void g() {} + private void h() {} + private static void i() {} + } + + static assert (isPublicMember!(C, "a")); + static assert (isPublicMember!(C, "b")); + static assert (!isPublicMember!(C, "c")); + static assert (!isPublicMember!(C, "d")); + static assert (!isPublicMember!(C, "e")); + static assert (isPublicMember!(C, "f")); + static assert (isPublicMember!(C, "g")); + static assert (!isPublicMember!(C, "h")); + static assert (!isPublicMember!(C, "i")); + + struct S { + int a; + export int b; + private int d; + package int e; + } + static assert (isPublicMember!(S, "a")); + static assert (isPublicMember!(S, "b")); + static assert (!isPublicMember!(S, "d")); + static assert (!isPublicMember!(S, "e")); + + S s; + s.a = 21; + assert(s.a == 21); +} + +/** + Tests if a member requires $(D this) to be used. +*/ +template isNonStaticMember(T, string M) +{ + import std.typetuple; + import std.traits; + + alias MF = TypeTuple!(__traits(getMember, T, M)); + static if (M.length == 0) { + enum isNonStaticMember = false; + } else static if (anySatisfy!(isSomeFunction, MF)) { + enum isNonStaticMember = !__traits(isStaticFunction, MF); + } else { + enum isNonStaticMember = !__traits(compiles, (){ auto x = __traits(getMember, T, M); }()); + } +} + +unittest { // normal fields + struct S { + int a; + static int b; + enum c = 42; + void f(); + static void g(); + ref int h() { return a; } + static ref int i() { return b; } + } + static assert(isNonStaticMember!(S, "a")); + static assert(!isNonStaticMember!(S, "b")); + static assert(!isNonStaticMember!(S, "c")); + static assert(isNonStaticMember!(S, "f")); + static assert(!isNonStaticMember!(S, "g")); + static assert(isNonStaticMember!(S, "h")); + static assert(!isNonStaticMember!(S, "i")); +} + +unittest { // tuple fields + struct S(T...) { + T a; + static T b; + } + + alias T = S!(int, float); + auto p = T.b; + static assert(isNonStaticMember!(T, "a")); + static assert(!isNonStaticMember!(T, "b")); + + alias U = S!(); + static assert(!isNonStaticMember!(U, "a")); + static assert(!isNonStaticMember!(U, "b")); +} + + +/** + Tests if a Group of types is implicitly convertible to a Group of target types. 
+*/
+bool areConvertibleTo(alias TYPES, alias TARGET_TYPES)()
+ if (isGroup!TYPES && isGroup!TARGET_TYPES)
+{
+ static assert(TYPES.expand.length == TARGET_TYPES.expand.length);
+ foreach (i, V; TYPES.expand)
+ if (!is(V : TARGET_TYPES.expand[i]))
+ return false;
+ return true;
+}
+
+/// Test if the type $(D DG) is a correct delegate for an opApply where the
+/// key/index is of type $(D TKEY) and the value of type $(D TVALUE).
+template isOpApplyDg(DG, TKEY, TVALUE) {
+ import std.traits;
+ static if (is(DG == delegate) && is(ReturnType!DG : int)) {
+ private alias PTT = ParameterTypeTuple!(DG);
+ private alias PSCT = ParameterStorageClassTuple!(DG);
+ private alias STC = ParameterStorageClass;
+ // Just a value
+ static if (PTT.length == 1) {
+ enum isOpApplyDg = (is(PTT[0] == TVALUE));
+ } else static if (PTT.length == 2) {
+ enum isOpApplyDg = (is(PTT[0] == TKEY))
+ && (is(PTT[1] == TVALUE));
+ } else
+ enum isOpApplyDg = false;
+ } else {
+ enum isOpApplyDg = false;
+ }
+}
+
+unittest {
+ static assert(isOpApplyDg!(int delegate(int, string), int, string));
+ static assert(isOpApplyDg!(int delegate(ref int, ref string), int, string));
+ static assert(isOpApplyDg!(int delegate(int, ref string), int, string));
+ static assert(isOpApplyDg!(int delegate(ref int, string), int, string));
+}
+
+// Synchronized statements are logically nothrow but dmd still marks them as throwing.
+// DMD#4115, Druntime#1013, Druntime#1021, Phobos#2704
+import core.sync.mutex : Mutex;
+enum synchronizedIsNothrow = __traits(compiles, (Mutex m) nothrow { synchronized(m) {} });
diff --git a/source/vibe/internal/typetuple.d b/source/vibe/internal/typetuple.d
new file mode 100644
index 0000000..bee6a8b
--- /dev/null
+++ b/source/vibe/internal/typetuple.d
@@ -0,0 +1,123 @@
+/**
+ Additions to std.typetuple pending inclusion into Phobos.
+
+ Copyright: © 2013 RejectedSoftware e.K.
+ License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file.
+ Authors: Михаил Страшун
+*/
+
+module vibe.internal.typetuple;
+
+import std.typetuple;
+import std.traits;
+
+/**
+ TypeTuple which does not auto-expand.
+
+ Useful when you need to pass several type tuples as different template
+ argument list parameters, without merging them.
+*/
+template Group(T...)
+{
+ alias expand = T;
+}
+
+///
+unittest
+{
+ alias group = Group!(int, double, string);
+ static assert (!is(typeof(group.length)));
+ static assert (group.expand.length == 3);
+ static assert (is(group.expand[1] == double));
+}
+
+/**
+ Tests if a given argument list is an instance of `Group`.
+*/
+template isGroup(T...)
+{
+ static if (T.length != 1) enum isGroup = false;
+ else enum isGroup =
+ !is(T[0]) && is(typeof(T[0]) == void) // does not evaluate to something
+ && is(typeof(T[0].expand.length) : size_t) // expands to something with length
+ && !is(typeof(&(T[0].expand))); // expands to not addressable
+}
+
+version (unittest) // NOTE: GDC complains about template definitions in unittest blocks
+{
+ alias group = Group!(int, double, string);
+ alias group2 = Group!();
+
+ template Fake(T...)
+ {
+ int[] expand;
+ }
+ alias fake = Fake!(int, double, string);
+
+ alias fake2 = TypeTuple!(int, double, string);
+
+ static assert (isGroup!group);
+ static assert (isGroup!group2);
+ static assert (!isGroup!fake);
+ static assert (!isGroup!fake2);
+}
+
+/* Copied from Phobos as it is private there.
+ */
+private template isSame(ab...) 
+ if (ab.length == 2) +{ + static if (is(ab[0]) && is(ab[1])) + { + enum isSame = is(ab[0] == ab[1]); + } + else static if (!is(ab[0]) && + !is(ab[1]) && + is(typeof(ab[0] == ab[1]) == bool) && + (ab[0] == ab[1])) + { + static if (!__traits(compiles, &ab[0]) || + !__traits(compiles, &ab[1])) + enum isSame = (ab[0] == ab[1]); + else + enum isSame = __traits(isSame, ab[0], ab[1]); + } + else + { + enum isSame = __traits(isSame, ab[0], ab[1]); + } +} + +/** + Compares two groups for element identity + + Params: + Group1, Group2 = any instances of `Group` + + Returns: + `true` if each element of Group1 is identical to + the one of Group2 at the same index +*/ +template Compare(alias Group1, alias Group2) + if (isGroup!Group1 && isGroup!Group2) +{ + private template implementation(size_t index) + { + static if (Group1.expand.length != Group2.expand.length) enum implementation = false; + else static if (index >= Group1.expand.length) enum implementation = true; + else static if (!isSame!(Group1.expand[index], Group2.expand[index])) enum implementation = false; + else enum implementation = implementation!(index+1); + } + + enum Compare = implementation!0; +} + +/// +unittest +{ + alias one = Group!(int, double); + alias two = Group!(int, double); + alias three = Group!(double, int); + static assert (Compare!(one, two)); + static assert (!Compare!(one, three)); +}
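For context, a sketch of how such groups are consumed by areConvertibleTo from vibe.internal.traits (shown earlier in this patch); the unittest below is illustrative only:

unittest
{
	import vibe.internal.traits : areConvertibleTo;

	alias source = Group!(int, short);
	alias target = Group!(long, int);
	// each element of `source` implicitly converts to its counterpart in `target`
	static assert (areConvertibleTo!(source, target)());
	static assert (!areConvertibleTo!(Group!(string), Group!(int))());
}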