Merge pull request #21 from wilzbach/trailing-whitespace

Remove all trailing whitespace from the source tree and add a CI check that fails the build when new trailing whitespace is introduced.
Sönke Ludwig 2017-07-19 09:20:19 +02:00 committed by GitHub
commit 7f050af455
14 changed files with 109 additions and 106 deletions


@@ -1237,7 +1237,7 @@ package class VibedScheduler : Scheduler {
final switch (st_concurrencyPrimitive) with (ConcurrencyPrimitive) {
case task: runTask(op); break;
case workerTask:
case workerTask:
static void wrapper(shared(void delegate()) op) {
(cast(void delegate())op)();
}


@@ -133,7 +133,7 @@ unittest {
struct LockedConnection(Connection) {
import vibe.core.task : Task;
private {
ConnectionPool!Connection m_pool;
Task m_task;


@@ -668,7 +668,7 @@ void yield()
to call `switchToTask` will result in task starvation and resource leakage.
Params:
on_interrupt = If specified, is required to
on_interrupt = If specified, is required to
See_Also: `switchToTask`
*/
@@ -1064,7 +1064,7 @@ struct Timer {
/** Resets the timer to the specified timeout
*/
void rearm(Duration dur, bool periodic = false) nothrow
void rearm(Duration dur, bool periodic = false) nothrow
in { assert(dur > 0.seconds, "Negative timer duration specified."); }
body { m_driver.set(m_id, dur, periodic ? dur : 0.seconds); }
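For context, `rearm` is typically used to push a pending deadline back or to switch an existing timer to periodic mode. The following is a minimal sketch, not part of this commit, assuming the `vibe.core.core.setTimer` helper; the timeout values and the empty callback are purely illustrative.

import core.time : seconds;
import vibe.core.core : setTimer;

void keepConnectionAlive()
{
    // Arm a 30 second idle timeout (callback body omitted).
    auto idle = setTimer(30.seconds, () { /* close the idle connection */ });

    // Activity happened: push the deadline back without recreating the timer.
    idle.rearm(30.seconds);

    // Alternatively, turn the same timer into a periodic one-second tick.
    idle.rearm(1.seconds, true);
}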


@@ -410,7 +410,7 @@ struct FileStream {
if (m_fd != FileFD.invalid)
eventDriver.files.addRef(m_fd);
}
~this()
{
if (m_fd != FileFD.invalid)


@@ -252,7 +252,7 @@ final class FileLogger : Logger {
Format infoFormat = Format.thread;
/** Use escape sequences to color log output.
Note that the terminal must support 256-bit color codes.
*/
bool useColors = false;


@@ -391,7 +391,7 @@ struct GenericPath(F) {
/** Constructs a path from an input range of `Segment`s.
Throws:
Throws:
Since path segments are pre-validated, this constructor does not
throw an exception.
*/


@@ -158,8 +158,8 @@ class LocalTaskSemaphore
LocalManualEvent m_signal;
}
this(uint max_locks)
{
this(uint max_locks)
{
m_maxLocks = max_locks;
m_signal = createManualEvent();
}
@@ -182,10 +182,10 @@ class LocalTaskSemaphore
than one.
*/
bool tryLock()
{
if (available > 0)
{
if (available > 0)
{
m_locks++;
m_locks++;
return true;
}
return false;
@@ -202,13 +202,13 @@ class LocalTaskSemaphore
if (tryLock())
return;
ThreadWaiter w;
w.priority = priority;
w.seq = min(0, m_seq - w.priority);
if (++m_seq == uint.max)
rewindSeq();
() @trusted { m_waiters.insert(w); } ();
while (true) {
@@ -222,7 +222,7 @@ class LocalTaskSemaphore
/** Gives up an existing lock.
*/
void unlock()
void unlock()
{
assert(m_locks >= 1);
m_locks--;
@@ -232,7 +232,7 @@ class LocalTaskSemaphore
// if true, a goes after b. ie. b comes out front()
/// private
static bool asc(ref ThreadWaiter a, ref ThreadWaiter b)
static bool asc(ref ThreadWaiter a, ref ThreadWaiter b)
{
if (a.priority != b.priority)
return a.priority < b.priority;
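For context, `LocalTaskSemaphore` caps how many tasks may hold a lock at the same time, using the `lock`/`tryLock`/`unlock` members touched above. A minimal usage sketch, not part of this commit, assuming the constructor shown (`this(uint max_locks)`) and a default lock priority; the guarded resource is a placeholder.

import vibe.core.sync : LocalTaskSemaphore;

void useLimitedResource(LocalTaskSemaphore sem)
{
    // Blocking path: suspend the calling task until one of the
    // max_locks slots becomes free, then hold it for this scope.
    sem.lock();
    scope (exit) sem.unlock();
    // ... access the rate-limited resource here ...
}

void maybeUseLimitedResource(LocalTaskSemaphore sem)
{
    // Non-blocking path: give up immediately if no slot is free.
    if (!sem.tryLock())
        return;
    scope (exit) sem.unlock();
    // ... access the resource ...
}

// Construction, e.g. allowing at most four concurrent holders:
// auto sem = new LocalTaskSemaphore(4);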
@@ -735,7 +735,7 @@ struct LocalManualEvent {
int wait(int emit_count) { return doWait!true(Duration.max, emit_count); }
/// ditto
int wait(Duration timeout, int emit_count) { return doWait!true(timeout, emit_count); }
/** Same as $(D wait), but defers throwing any $(D InterruptException).
This method is annotated $(D nothrow) at the expense that it cannot be
@@ -905,7 +905,7 @@ struct ManualEvent {
int wait(int emit_count) shared { return doWaitShared!true(Duration.max, emit_count); }
/// ditto
int wait(Duration timeout, int emit_count) shared { return doWaitShared!true(timeout, emit_count); }
/** Same as $(D wait), but defers throwing any $(D InterruptException).
This method is annotated $(D nothrow) at the expense that it cannot be
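For context, the `wait(emit_count)` overloads shown above block until `emit` has been called more often than the passed count, which is the usual pattern for condition-style signaling between tasks. A minimal sketch, not part of this commit, assuming `createManualEvent`, the `emitCount` property, `emit`, `runTask` and `Task.join`; the `done` flag is purely illustrative.

import vibe.core.core : runTask;
import vibe.core.sync : createManualEvent;

void signalExample()
{
    auto ready = createManualEvent();
    bool done;

    auto consumer = runTask({
        auto count = ready.emitCount;   // snapshot before checking the condition
        while (!done)
            count = ready.wait(count);  // suspends until emit() bumps the count
    });

    done = true;
    ready.emit();                       // wakes all tasks waiting on this event
    consumer.join();
}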
@@ -1249,7 +1249,7 @@ private struct ThreadLocalWaiter {
} else {
asyncAwaitAny!interruptible(timeout, waitable);
}
if (waitable.cancelled) {
removeWaiter();
return false;
@@ -1548,7 +1548,7 @@ private struct TaskConditionImpl(bool INTERRUPTIBLE, LOCKABLE) {
* the actual functionality of their method calls.
*
* The method implementations are based on two static parameters
* ($(D INTERRUPTIBLE) and $(D INTENT)), which are configured through
* ($(D INTERRUPTIBLE) and $(D INTENT)), which are configured through
* template arguments:
*
* - $(D INTERRUPTIBLE) determines whether the mutex implementation
@@ -1565,12 +1565,12 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
{
/** The policy with which the mutex should operate.
*
* The policy determines how the acquisition of the locks is
* The policy determines how the acquisition of the locks is
* performed and can be used to tune the mutex according to the
* underlying algorithm in which it is used.
*
* According to the provided policy, the mutex will either favor
* reading or writing tasks and could potentially starve the
* reading or writing tasks and could potentially starve the
* respective opposite.
*
* cf. $(D core.sync.rwmutex.ReadWriteMutex.Policy)
@@ -1582,7 +1582,7 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
/** Writers are prioritized, readers may be starved as a result. */
PREFER_WRITERS
}
/** The intent with which a locking operation is performed.
*
* Since both locks share the same underlying algorithms, the actual
@@ -1598,23 +1598,23 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
* hold a lock at any given time. */
READ_WRITE = 1
}
private {
//Queue counters
/** The number of reading tasks waiting for the lock to become available. */
shared(uint) m_waitingForReadLock = 0;
/** The number of writing tasks waiting for the lock to become available. */
shared(uint) m_waitingForWriteLock = 0;
//Lock counters
/** The number of reading tasks that currently hold the lock. */
uint m_activeReadLocks = 0;
/** The number of writing tasks that currently hold the lock (binary). */
ubyte m_activeWriteLocks = 0;
/** The policy determining the lock's behavior. */
Policy m_policy;
//Queue Events
/** The event used to wake reading tasks waiting for the lock while it is blocked. */
shared(ManualEvent) m_readyForReadLock;
@@ -1624,7 +1624,7 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
/** The underlying mutex that gates the access to the shared state. */
Mutex m_counterMutex;
}
this(Policy policy)
{
m_policy = policy;
@@ -1634,10 +1634,10 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
}
@disable this(this);
/** The policy with which the lock has been created. */
@property policy() const { return m_policy; }
version(RWMutexPrint)
{
/** Print out debug information during lock operations. */
@@ -1647,17 +1647,17 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
try
{
import std.stdio;
writefln("RWMutex: %s (%s), active: RO: %d, RW: %d; waiting: RO: %d, RW: %d",
OP.leftJustify(10,' '),
INTENT == LockingIntent.READ_ONLY ? "RO" : "RW",
m_activeReadLocks, m_activeWriteLocks,
writefln("RWMutex: %s (%s), active: RO: %d, RW: %d; waiting: RO: %d, RW: %d",
OP.leftJustify(10,' '),
INTENT == LockingIntent.READ_ONLY ? "RO" : "RW",
m_activeReadLocks, m_activeWriteLocks,
m_waitingForReadLock, m_waitingForWriteLock
);
}
catch (Throwable t){}
}
}
/** An internal shortcut method to determine the queue event for a given intent. */
@property ref auto queueEvent(LockingIntent INTENT)()
{
@@ -1666,7 +1666,7 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
else
return m_readyForWriteLock;
}
/** An internal shortcut method to determine the queue counter for a given intent. */
@property ref auto queueCounter(LockingIntent INTENT)()
{
@@ -1675,13 +1675,13 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
else
return m_waitingForWriteLock;
}
/** An internal shortcut method to determine the current emitCount of the queue counter for a given intent. */
int emitCount(LockingIntent INTENT)()
{
return queueEvent!INTENT.emitCount();
}
/** An internal shortcut method to determine the active counter for a given intent. */
@property ref auto activeCounter(LockingIntent INTENT)()
{
@@ -1690,8 +1690,8 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
else
return m_activeWriteLocks;
}
/** An internal shortcut method to wait for the queue event for a given intent.
/** An internal shortcut method to wait for the queue event for a given intent.
*
* This method is used during the `lock()` operation, after a
* `tryLock()` operation has been unsuccessfully finished.
@@ -1705,8 +1705,8 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
else
return queueEvent!INTENT.waitUninterruptible(count);
}
/** An internal shortcut method to notify tasks waiting for the lock to become available again.
/** An internal shortcut method to notify tasks waiting for the lock to become available again.
*
* This method is called whenever the number of owners of the mutex hits
* zero; this is basically the counterpart to `wait()`.
@@ -1723,12 +1723,12 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
{ //If a writer unlocks the mutex, notify both readers and writers
if (atomicLoad(m_waitingForReadLock) > 0)
m_readyForReadLock.emit();
if (atomicLoad(m_waitingForWriteLock) > 0)
m_readyForWriteLock.emit();
}
}
/** An internal method that performs the acquisition attempt in different variations.
*
* Since both locks rely on a common TaskMutex object which gates the access
@@ -1736,15 +1736,15 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
* than for simple mutex variants. This method will thus be performing the
* `tryLock()` operation in two variations, depending on the callee:
*
* If called from the outside ($(D WAIT_FOR_BLOCKING_MUTEX) = false), the method
* will instantly fail if the underlying mutex is locked (i.e. during another
* `tryLock()` or `unlock()` operation), in order to guarantee the fastest
* If called from the outside ($(D WAIT_FOR_BLOCKING_MUTEX) = false), the method
* will instantly fail if the underlying mutex is locked (i.e. during another
* `tryLock()` or `unlock()` operation), in order to guarantee the fastest
* possible locking attempt.
*
* If used internally by the `lock()` method ($(D WAIT_FOR_BLOCKING_MUTEX) = true),
* If used internally by the `lock()` method ($(D WAIT_FOR_BLOCKING_MUTEX) = true),
* the operation will wait for the mutex to be available before deciding if
* the lock can be acquired, since the attempt would anyway be repeated until
* it succeeds. This will prevent frequent retries under heavy loads and thus
* it succeeds. This will prevent frequent retries under heavy loads and thus
* should ensure better performance.
*/
@trusted bool tryLock(LockingIntent INTENT, bool WAIT_FOR_BLOCKING_MUTEX)()
@@ -1752,7 +1752,7 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
//Log a debug statement for the attempt
version(RWMutexPrint)
printInfo!("tryLock",INTENT)();
//Try to acquire the lock
static if (!WAIT_FOR_BLOCKING_MUTEX)
{
@@ -1761,43 +1761,43 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
}
else
m_counterMutex.lock();
scope(exit)
m_counterMutex.unlock();
//Log a debug statement for the attempt
version(RWMutexPrint)
printInfo!("checkCtrs",INTENT)();
//Check if there's already an active writer
if (m_activeWriteLocks > 0)
return false;
//If writers are preferred over readers, check whether there
//currently is a writer in the waiting queue and abort if
//that's the case.
static if (INTENT == LockingIntent.READ_ONLY)
if (m_policy.PREFER_WRITERS && m_waitingForWriteLock > 0)
return false;
//If we are locking the mutex for writing, make sure that
//there's no reader active.
static if (INTENT == LockingIntent.READ_WRITE)
if (m_activeReadLocks > 0)
return false;
//We can successfully acquire the lock!
//Log a debug statement for the success.
version(RWMutexPrint)
printInfo!("lock",INTENT)();
//Increase the according counter
//Increase the according counter
//(number of active readers/writers)
//and return a success code.
activeCounter!INTENT += 1;
return true;
}
/** Attempt to acquire the lock for a given intent.
*
* Returns:
@@ -1810,7 +1810,7 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
//TaskMutex - fail if it is already blocked.
return tryLock!(INTENT,false)();
}
/** Acquire the lock for the given intent; yield and suspend until the lock has been acquired. */
@trusted void lock(LockingIntent INTENT)()
{
@@ -1822,29 +1822,29 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
atomicOp!"+="(queueCounter!INTENT,1);
scope(exit)
atomicOp!"-="(queueCounter!INTENT,1);
//Try to lock the mutex
auto locked = tryLock!(INTENT,true)();
if (locked)
return;
//Retry until we successfully acquired the lock
while(!locked)
{
version(RWMutexPrint)
printInfo!("wait",INTENT)();
count = wait!INTENT(count);
locked = tryLock!(INTENT,true)();
}
}
/** Unlock the mutex after a successful acquisition. */
@trusted void unlock(LockingIntent INTENT)()
{
version(RWMutexPrint)
printInfo!("unlock",INTENT)();
debug assert(activeCounter!INTENT > 0);
synchronized(m_counterMutex)
@@ -1856,7 +1856,7 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
{
version(RWMutexPrint)
printInfo!("notify",INTENT)();
notify!INTENT();
}
}
@@ -1867,20 +1867,20 @@ private struct ReadWriteMutexState(bool INTERRUPTIBLE)
*
* This mutex can be used in exchange for a $(D core.sync.mutex.ReadWriteMutex),
* but does not block the event loop in contention situations. The `reader` and `writer`
* members are used for locking. Locking the `reader` mutex allows access to multiple
* members are used for locking. Locking the `reader` mutex allows access to multiple
* readers at once, while the `writer` mutex only allows a single writer to lock it at
* any given time. Locks on `reader` and `writer` are mutually exclusive (i.e. whenever a
* any given time. Locks on `reader` and `writer` are mutually exclusive (i.e. whenever a
* writer is active, no readers can be active at the same time, and vice versa).
*
*
* Notice:
* Mutexes implemented by this class cannot be interrupted
* using $(D vibe.core.task.Task.interrupt()). The corresponding
* InterruptException will be deferred until the next blocking
* operation yields the event loop.
*
*
* Use $(D InterruptibleTaskReadWriteMutex) as an alternative that can be
* interrupted.
*
*
* cf. $(D core.sync.mutex.ReadWriteMutex)
*/
class TaskReadWriteMutex
@@ -1890,29 +1890,29 @@ class TaskReadWriteMutex
alias LockingIntent = State.LockingIntent;
alias READ_ONLY = LockingIntent.READ_ONLY;
alias READ_WRITE = LockingIntent.READ_WRITE;
/** The shared state used by the reader and writer mutexes. */
State m_state;
}
/** The policy with which the mutex should operate.
*
* The policy determines how the acquisition of the locks is
* The policy determines how the acquisition of the locks is
* performed and can be used to tune the mutex according to the
* underlying algorithm in which it is used.
*
* According to the provided policy, the mutex will either favor
* reading or writing tasks and could potentially starve the
* reading or writing tasks and could potentially starve the
* respective opposite.
*
* cf. $(D core.sync.rwmutex.ReadWriteMutex.Policy)
*/
alias Policy = State.Policy;
/** A common baseclass for both of the provided mutexes.
*
* The intent for the according mutex is specified through the
* $(D INTENT) template argument, which determines if a mutex is
* The intent for the according mutex is specified through the
* $(D INTENT) template argument, which determines if a mutex is
* used for read or write locking.
*/
final class Mutex(LockingIntent INTENT): core.sync.mutex.Mutex, Lockable
@@ -1926,17 +1926,17 @@ class TaskReadWriteMutex
}
alias Reader = Mutex!READ_ONLY;
alias Writer = Mutex!READ_WRITE;
Reader reader;
Writer writer;
this(Policy policy = Policy.PREFER_WRITERS)
{
m_state = State(policy);
reader = new Reader();
writer = new Writer();
}
/** The policy with which the lock has been created. */
@property Policy policy() const { return m_state.policy; }
}
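To make the `reader`/`writer` description above concrete, here is a minimal usage sketch, not part of this commit. It relies only on members shown in this diff (`reader`, `writer`, `Policy`, and the constructor); the cache payload and the method names are invented for illustration.

import vibe.core.sync : TaskReadWriteMutex;

final class SharedCache {
    private string[string] m_data;   // illustrative payload
    private TaskReadWriteMutex m_mutex;

    this()
    {
        m_mutex = new TaskReadWriteMutex(TaskReadWriteMutex.Policy.PREFER_WRITERS);
    }

    string lookup(string key)
    {
        m_mutex.reader.lock();           // shared: many reading tasks at once
        scope (exit) m_mutex.reader.unlock();
        if (auto entry = key in m_data)
            return *entry;
        return null;
    }

    void store(string key, string value)
    {
        m_mutex.writer.lock();           // exclusive: no readers or other writers
        scope (exit) m_mutex.writer.unlock();
        m_data[key] = value;
    }
}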
@@ -1945,7 +1945,7 @@ class TaskReadWriteMutex
*
* This class supports the use of $(D vibe.core.task.Task.interrupt()) while
* waiting in the `lock()` method.
*
*
* cf. $(D core.sync.mutex.ReadWriteMutex)
*/
class InterruptibleTaskReadWriteMutex
@@ -1957,31 +1957,31 @@ class InterruptibleTaskReadWriteMutex
alias LockingIntent = State.LockingIntent;
alias READ_ONLY = LockingIntent.READ_ONLY;
alias READ_WRITE = LockingIntent.READ_WRITE;
/** The shared state used by the reader and writer mutexes. */
State m_state;
}
/** The policy with which the mutex should operate.
*
* The policy determines how the acquisition of the locks is
* The policy determines how the acquisition of the locks is
* performed and can be used to tune the mutex according to the
* underlying algorithm in which it is used.
*
* According to the provided policy, the mutex will either favor
* reading or writing tasks and could potentially starve the
* reading or writing tasks and could potentially starve the
* respective opposite.
*
* cf. $(D core.sync.rwmutex.ReadWriteMutex.Policy)
*/
alias Policy = State.Policy;
/** A common baseclass for both of the provided mutexes.
*
* The intent for the according mutex is specified through the
* $(D INTENT) template argument, which determines if a mutex is
* The intent for the according mutex is specified through the
* $(D INTENT) template argument, which determines if a mutex is
* used for read or write locking.
*
*
*/
final class Mutex(LockingIntent INTENT): core.sync.mutex.Mutex, Lockable
{
@@ -1994,17 +1994,17 @@ class InterruptibleTaskReadWriteMutex
}
alias Reader = Mutex!READ_ONLY;
alias Writer = Mutex!READ_WRITE;
Reader reader;
Writer writer;
this(Policy policy = Policy.PREFER_WRITERS)
{
m_state = State(policy);
reader = new Reader();
writer = new Writer();
}
/** The policy with which the lock has been created. */
@property Policy policy() const { return m_state.policy; }
}


@@ -77,7 +77,7 @@ struct Task {
}
package @property ref ThreadInfo tidInfo() @system { return m_fiber ? taskFiber.tidInfo : s_tidInfo; } // FIXME: this is not thread safe!
@property Tid tid() @trusted { return tidInfo.ident; }
}
@@ -346,7 +346,7 @@ final package class TaskFiber : Fiber {
import std.concurrency : Tid, thisTid;
import std.encoding : sanitize;
import vibe.core.core : isEventLoopRunning, recycleFiber, taskScheduler, yield;
version (VibeDebugCatchAll) alias UncaughtException = Throwable;
else alias UncaughtException = Exception;
try {
@@ -521,7 +521,7 @@ package struct TaskFuncInfo {
import std.algorithm : move;
import std.traits : hasElaborateAssign;
import std.conv : to;
import std.conv : to;
static struct TARGS { ARGS expand; }
@@ -745,7 +745,7 @@ package struct TaskScheduler {
/** Holds execution until the task gets explicitly resumed.
*/
void hibernate()
{


@@ -47,7 +47,7 @@ shared class TaskPool {
threads.length = thread_count;
foreach (i; 0 .. thread_count) {
WorkerThread thr;
() @trusted {
() @trusted {
thr = new WorkerThread(this);
thr.name = format("vibe-%s", i);
thr.start();
@@ -338,7 +338,7 @@ nothrow @safe:
bool consume(ref TaskFuncInfo tfi)
{
import std.algorithm.mutation : swap;
if (m_queue.empty) return false;
swap(tfi, m_queue.front);
m_queue.popFront();


@@ -61,7 +61,7 @@ struct Waitable(CB, alias wait, alias cancel, on_result...)
bool cancelled;
auto waitCallback(Callback cb) nothrow { return wait(cb); }
static if (is(ReturnType!waitCallback == void))
void cancelCallback(Callback cb) nothrow { cancel(cb); }
else


@@ -440,7 +440,7 @@ template checkInterfaceConformance(T, I) {
}
alias checkMemberConformance = impl!0;
}
template impl(size_t i) {
static if (i < Members.length) {
static if (__traits(compiles, __traits(getMember, I, Members[i])))


@@ -70,13 +70,13 @@ void runTest()
remove(bar);
watcher = Path(dir).watchDirectory(Yes.recursive);
write(foo, null);
sleep(1.seconds);
sleep(1.seconds);
write(foo, [0, 1]);
sleep(100.msecs);
remove(foo);
write(bar, null);
sleep(1.seconds);
sleep(1.seconds);
write(bar, [0, 1]);
sleep(100.msecs);
remove(bar);


@@ -20,7 +20,7 @@ void test()
assert(gotit);
sleep(10.msecs);
});
t.tid.send(10);
t.tid.send(11); // never received
t.join();
@@ -43,9 +43,9 @@ void test()
t3.tid.send(13);
sleep(10.msecs);
logInfo("Success.");
exitEventLoop(true);
}


@@ -4,6 +4,9 @@ set -e -x -o pipefail
DUB_FLAGS=${DUB_FLAGS:-}
# Check for trailing whitespace
grep -nrI --include='*.d' '\s$' . && (echo "Trailing whitespace found"; exit 1)
# test for successful release build
dub build -b release --compiler=$DC -c $CONFIG $DUB_FLAGS