Initial commit
This commit is contained in:
commit
4a9cfda0bd
40 changed files with 2743 additions and 0 deletions
144
source/app.d
Normal file
144
source/app.d
Normal file
|
@ -0,0 +1,144 @@
|
|||
import std.experimental.logger;
|
||||
import std.range;
|
||||
import std.string;
|
||||
import std.stdio;
|
||||
import std.typecons;
|
||||
|
||||
import vibe.d;
|
||||
|
||||
import article;
|
||||
import page;
|
||||
import project;
|
||||
import watcher;
|
||||
|
||||
/**
 * Internal list of articles by slug.
 */
Article[string] articles;
// Pages and projects, likewise keyed by slug.
Page[string] pages;
Project[string] projects;

// Predicate string handed to std.algorithm.sort: newest article first.
immutable string articleSortPred = "a.firstPublished > b.firstPublished";
// Sorted view over `articles`; NOTE(review): filled by watcher.initPages —
// presumably pointers to the sorted values array; verify lifetime there.
Article*[] articleList;
// Predicate string handed to std.algorithm.sort: pages alphabetically by title.
immutable string pageSortedPred = "a.title < b.title";
Page*[] pageList;
Project[] projectList;

// Output formats a request can select via the slug's file extension.
enum OutputType {
    HTML,
    MARKDOWN
}

// MIME type used when serving the raw markdown source of a document.
const string MIME_MARKDOWN = "text/markdown";
|
||||
|
||||
|
||||
|
||||
/**
 * Determines the requested document type for the given slug based on its
 * file extension, and strips that extension from the slug in place.
 *
 * Params:
 *   slug = the raw slug from the URL; modified to drop a trailing
 *          ".md" or ".html" extension.
 * Returns: the output type the client asked for (HTML by default).
 */
private OutputType getOutputType(ref string slug) {
    if (slug.endsWith(".md")) {
        slug = slug[0 .. $ - 3];
        return OutputType.MARKDOWN;
    }
    if (slug.endsWith(".html")) {
        // If explicitly asking for HTML, we'll return HTML.
        slug = slug[0 .. $ - 5];
        return OutputType.HTML;
    }
    // If in the future, for any reason, we no longer use HTML, this
    // default allows us to keep the current urls with an option to
    // change the output in the future.
    return OutputType.HTML;
}
|
||||
|
||||
/**
 * Shared handler that serves a single document of type T looked up by the
 * ":slug" route parameter, either rendered through the given template or as
 * raw markdown when the slug carried a ".md" extension.
 *
 * Params:
 *   array = slug-to-document map to look the request up in.
 *   req   = incoming request; its "slug" parameter must be set.
 *   res   = response the document is written to.
 */
void getSingle(T, string templ)(ref T[string] array, HTTPServerRequest req, HTTPServerResponse res) {
    string slug = req.params["slug"];
    const OutputType outputType = getOutputType(slug);

    enforceHTTP(slug in array, HTTPStatus.notFound, "Page not found");
    T content = array[slug];
    res.headers["Cache-Control"] = "public";
    if (outputType == OutputType.MARKDOWN) {
        // Serve the raw source with its proper MIME type.
        res.writeBody(content.contentSource, MIME_MARKDOWN);
    } else {
        // HTML is the default output.
        res.render!(templ, content);
    }
}
|
||||
|
||||
/**
 * Generates the response for /posts/:slug and /palen/:slug by delegating
 * to the generic single-document handler with the article template.
 */
void articleGetSingle(HTTPServerRequest req, HTTPServerResponse res) {
    getSingle!(Article, "pages/article.dt")(articles, req, res);
}
|
||||
|
||||
/**
 * Generates the response for /posts/ and /palen/: the cached,
 * pre-sorted list of all articles rendered through the list template.
 */
void articleGetOverview(HTTPServerRequest req, HTTPServerResponse res) {
    res.headers["Cache-Control"] = "public";
    render!("pages/article-list.dt", articleList)(res);
}
|
||||
|
||||
/**
 * Generates the response for /projects/ and /projecten/: all known
 * projects rendered through the project list template.
 */
void projectGetOverview(HTTPServerRequest req, HTTPServerResponse res) {
    res.headers["Cache-Control"] = "public";
    render!("pages/project-list.dt", projectList)(res);
}
|
||||
|
||||
/**
 * Generates the response for a plain page.
 *
 * Requests to "/" carry no slug parameter, in which case the "index"
 * page is served instead.
 */
void pageGet(HTTPServerRequest req, HTTPServerResponse res) {
    if ("slug" !in req.params) {
        req.params.addField("slug", "index");
    }
    getSingle!(Page, "pages/page.dt")(pages, req, res);
}
|
||||
|
||||
/**
 * Generates the response whenever an error occurs, rendering the error
 * information through the dedicated error template.
 */
@safe
void errorPage(HTTPServerRequest req, HTTPServerResponse res, HTTPServerErrorInfo error) {
    render!("pages/error.dt", error)(res);
}
|
||||
|
||||
/**
 * Application entry point: configures the HTTP server, wires up all
 * routes and starts the background tasks that load and watch the
 * content directories.
 */
void main() {
    //articles["hello-world"] = new Article("hello-world.yamd");

    HTTPServerSettings serverSettings = new HTTPServerSettings;
    serverSettings.bindAddresses = ["0.0.0.0"];
    serverSettings.port = 3465;
    serverSettings.serverString = "zeg ik lekker niet";
    serverSettings.errorPageHandler = toDelegate(&errorPage);
    serverSettings.keepAliveTimeout = dur!"seconds"(60);
    debug {
        // Only log requests to the console in debug builds.
        serverSettings.accessLogToConsole = true;
    }

    URLRouter routes = new URLRouter;
    // Articles, with English and Dutch URL aliases.
    routes.get("/posts/:slug", &articleGetSingle);
    routes.get("/palen/:slug", &articleGetSingle);
    routes.get("/posts/", &articleGetOverview);
    routes.get("/palen/", &articleGetOverview);
    // Projects, likewise aliased.
    routes.get("/projects/", &projectGetOverview);
    routes.get("/projecten/", &projectGetOverview);
    // Static assets, then plain pages as the catch-all.
    routes.get("/static/*", serveStaticFiles("./public/"));
    routes.get("/:slug", &pageGet);
    routes.get("/", &pageGet);

    listenHTTP(serverSettings, routes);
    // Content loading runs in background tasks so the server is
    // reachable immediately.
    runTask({
        initPages!(Page, pageSortedPred)(pages, pageList, "pages");
    });
    runTask({
        initPages!(Article, articleSortPred)(articles, articleList, "articles");
    });
    runApplication();
}
|
85
source/article.d
Normal file
85
source/article.d
Normal file
|
@ -0,0 +1,85 @@
|
|||
import std.file;
|
||||
import std.stdio;
|
||||
import std.string;
|
||||
import std.datetime.date;
|
||||
import std.experimental.logger;
|
||||
|
||||
import dyaml;
|
||||
import page;
|
||||
import vibe.d;
|
||||
|
||||
|
||||
/**
 * Represents an article on the blog.
 *
 * Extends Page with author information, publication timestamps and an
 * excerpt used in listings and search results.
 */
class Article : Page {
    private string m_author;
    // NOTE: m_title and m_slug are inherited from Page. They were
    // previously redeclared here, which shadowed the base-class fields
    // that Page.loadHeader fills in — leaving this class' copies null
    // (warnings below then logged a null slug). Do not redeclare them.
    private DateTime m_firstPublished;
    // Excerpt shown in overviews: either the "excerpt" header field or
    // the content between the YAML separator and the first markdown header.
    private string m_excerpt;
    /**
     * Time that the file was last updated.
     */
    private DateTime m_updated;

    /**
     * Loads an article from a file.
     *
     * Params:
     *   file = path of the article source file.
     */
    this(string file) {
        // Articles shift headers one level down (H1 -> H2).
        this.m_headerShift = 1;
        super(file);
        // Find the first header and mark everything up to that as
        // an excerpt, used in search results — but only when the
        // header did not already provide one.
        if (m_excerpt is null) {
            const long seperatorIndex = indexOf(m_contentSource, "---\n");
            this.m_excerpt = this.m_contentSource[seperatorIndex + 4..$];
            long firstHeaderIndex = indexOf(this.m_excerpt, '#');
            if (firstHeaderIndex >= 0) {
                this.m_excerpt = this.m_excerpt[0..firstHeaderIndex];
            }
        }
    }

    /**
     * Loads the metadata specific to Articles.
     *
     * Params:
     *   headerNode = the YAML node holding the article header.
     */
    @safe
    override protected void loadHeader(Node headerNode) {
        super.loadHeader(headerNode);
        if (headerNode.containsKey("author")) {
            this.m_author = headerNode["author"].as!string;
        } else {
            this.m_author = "<unknown author>";
        }

        if ("excerpt" in headerNode) {
            this.m_excerpt = headerNode["excerpt"].as!string;
        }

        if ("firstPublished" in headerNode) {
            try {
                this.m_firstPublished = cast(DateTime) headerNode["firstPublished"].as!SysTime;
            } catch(DateTimeException e) {
                // m_slug was set by super.loadHeader above.
                warningf("%s: invalid date format", this.m_slug);
            }
        } else {
            // Epoch fallback keeps date-based sorting well-defined.
            this.m_firstPublished = DateTime.fromSimpleString("1970-Jan-01 00:00:00");
        }

        if ("updated" in headerNode) {
            try {
                this.m_updated = cast(DateTime) headerNode["updated"].as!SysTime();
            } catch(DateTimeException e) {
                warningf("%s: invalid date format", this.m_slug);
            }
        } else {
            this.m_updated = this.m_firstPublished;
        }
    }

    @property string excerpt() { return m_excerpt; }
    @property string author() { return m_author; }
    @property DateTime firstPublished() { return m_firstPublished; }
    @property DateTime updated() { return m_updated; }
}
|
119
source/page.d
Normal file
119
source/page.d
Normal file
|
@ -0,0 +1,119 @@
|
|||
import std.exception;
|
||||
import std.experimental.logger;
|
||||
import std.file;
|
||||
import std.process;
|
||||
import std.stdio;
|
||||
|
||||
import dyaml;
|
||||
import vibe.vibe;
|
||||
|
||||
|
||||
/**
 * Exception thrown when a page has syntax errors, e.g. when the
 * "---" separator between the YAML header and the body is missing.
 */
class ArticleParseException : Exception {
    // Generates the standard (msg, file, line, next) constructors.
    mixin basicExceptionCtors;
}
|
||||
|
||||
|
||||
/**
 * Represents a page on the blog.
 *
 * A page source file consists of a YAML header, a "---" separator line
 * and a markdown body which is converted to HTML through pandoc.
 */
class Page {
    /**
     * Internal name of the article. Usually the file name.
     */
    protected string m_name;

    /**
     * Slug either manually assigned or generated based on file name.
     * Only used in the url.
     */
    protected string m_slug;
    protected string m_title;
    // Rendered HTML body.
    protected string m_content;
    // Raw file contents, including the YAML header.
    protected string m_contentSource;

    /**
     * Option for the markdown parser: the amount of levels the header found in the markdown should
     * be shifted. For example, 0 means H1 -> H1, 1 means H1 -> H2, 2 means H1 -> H3 and so on.
     */
    protected int m_headerShift = 1;

    // Guards against subclasses overriding loadHeader without chaining up.
    private bool hasCalledSuper = false;

    /**
     * Creates a page from a file.
     *
     * Params:
     *   file = path of the page source to read.
     * Throws: ArticleParseException when the file contains no "---" separator.
     */
    this(string file) {
        this.m_name = file;
        this.m_contentSource = readText(file);
        // Find the seperator and split the string in two.
        const long seperatorIndex = indexOf(m_contentSource, "---\n");
        enforce!ArticleParseException(seperatorIndex >= 0);
        string header = m_contentSource[0..seperatorIndex];

        Node node = Loader.fromString(header).load();
        loadHeader(node);
        assert(hasCalledSuper);

        this.m_content = Page.parseMarkdown(m_contentSource[seperatorIndex + 4..$],
                                            this.m_headerShift);
    }


    /**
     * Parse metadata from the header.
     * Params:
     *   headerNode = the YAML node to parse the header metadata from.
     */
    @safe
    protected void loadHeader(Node headerNode){
        if (headerNode.containsKey("title")) {
            this.m_title = headerNode["title"].as!string;
        } else {
            warningf("%s does not contain a title", this.m_name);
        }
        if (headerNode.containsKey("slug")) {
            this.m_slug = headerNode["slug"].as!string;
        } else {
            // Fall back to the title (which may itself be missing).
            this.m_slug = this.m_title;
            infof("%s does not have a slug. Using %s", this.m_name, this.m_slug);
        }
        hasCalledSuper = true;
    }

    /**
     * Starts pandoc to convert MarkDown to HTML.
     * Params:
     *   source = The MarkDown source as a string (not a path!)
     *   shiftHeader = (Optional) The amount a header needs to be shifted. If for example, it
     *                 is set to 1, first level headings within MarkDown become second level
     *                 headers within HTML.
     * Returns: the HTML produced by pandoc.
     */
    public static string parseMarkdown(string source, int shiftHeader = 0) {
        string[] args = ["pandoc",
                         "-f", "markdown",
                         "-t", "html"];

        if (shiftHeader != 0) args ~= "--shift-heading-level-by=" ~ to!string(shiftHeader);

        ProcessPipes pandoc = pipeProcess(args);
        // FIX: reap the child process when done. Without wait() every
        // conversion left a zombie pandoc process behind for the
        // lifetime of the server.
        scope(exit) wait(pandoc.pid);
        pandoc.stdin.write(source);
        pandoc.stdin.writeln();
        pandoc.stdin.flush();
        pandoc.stdin.close();
        string result;
        string line;
        while ((line = pandoc.stdout.readln()) !is null) {
            result ~= line;
        }
        return result;
    }

    @property string name() { return m_name; }
    @property string title() { return m_title; }
    @property string slug() { return m_slug; }
    @property string content() { return m_content; }
    @property string contentSource() { return m_contentSource; }
}
|
46
source/project.d
Normal file
46
source/project.d
Normal file
|
@ -0,0 +1,46 @@
|
|||
import std.array;
|
||||
import std.algorithm;
|
||||
|
||||
import dyaml;
|
||||
import vibe.vibe;
|
||||
|
||||
import page;
|
||||
import utils;
|
||||
|
||||
/**
 * Represents a project, like an unfinished application.
 */
class Project : Page {
    protected string m_state;
    protected string[] m_platforms;
    protected string[] m_technologies;
    protected string m_icon;
    protected string[] m_images;

    /**
     * Creates a project from a file.
     */
    this(string file) {
        super(file);
    }

    /**
     * Reads the project-specific header fields, falling back to a
     * sensible default whenever a key is absent.
     */
    @safe
    override protected void loadHeader(Node headerNode) {
        super.loadHeader(headerNode);
        this.m_state = headerNode.getOr!string("state", "unknown");
        this.m_icon = headerNode.getOr!string("icon", "");
        this.m_platforms = stringsAt(headerNode, "platforms");
        this.m_technologies = stringsAt(headerNode, "technologies");
        this.m_images = stringsAt(headerNode, "images");
    }

    /// Reads a sequence of strings from the header, or [] when missing.
    @safe
    private static string[] stringsAt(Node node, string key) {
        return node.getOr!(Node[])(key, []).map!(x => x.get!string).array;
    }

    @property string state() { return m_state; }
    @property string[] platforms() { return m_platforms; }
    @property string[] technologies() { return m_technologies; }
    @property string icon() { return m_icon; }
    @property string[] images() { return m_images; }
}
|
16
source/utils.d
Normal file
16
source/utils.d
Normal file
|
@ -0,0 +1,16 @@
|
|||
import std.conv;
|
||||
import std.datetime;
|
||||
|
||||
import dyaml;
|
||||
|
||||
/**
 * Formats a DateTime as a short, unpadded "year-month-day" string,
 * e.g. 2023-1-5.
 *
 * Params:
 *   value = the timestamp to format (time of day is ignored).
 * Returns: the formatted date string.
 */
string toHumanString(DateTime value) {
    // The month enum is cast to its numeric value before conversion.
    return text(value.year, "-", cast(ubyte) value.month, "-", value.day);
}
|
||||
|
||||
/**
 * Looks up `key` in a YAML node, returning its value converted to T,
 * or the supplied fallback when the key is absent.
 *
 * Params:
 *   node = the YAML mapping to read from.
 *   key  = the key to look up.
 *   or   = value returned when the key is not present.
 */
T getOr(T)(Node node, string key, T or) {
    return (key in node) ? node[key].get!T : or;
}
|
115
source/watcher.d
Normal file
115
source/watcher.d
Normal file
|
@ -0,0 +1,115 @@
|
|||
import std.array;
|
||||
import std.algorithm;
|
||||
import std.experimental.logger;
|
||||
import std.file;
|
||||
import std.stdio;
|
||||
|
||||
import vibe.d;
|
||||
|
||||
import app;
|
||||
import page;
|
||||
|
||||
/**
 * Loads every document of type T from `directory` into `array` (keyed by
 * slug), builds the sorted view `sortedRange` using `sortPred`, and then
 * watches the directory, applying file additions, modifications and
 * removals as they happen.
 *
 * NOTE(review): the watch loop only ends when readChanges() reports the
 * watcher unusable, so this effectively runs forever — it is started in
 * its own task from app.d's main().
 */
void initPages(T, string sortPred)(ref T[string] array, ref T*[] sortedRange, const string directory) {

    // Parses a single file and registers it under its slug.
    // Returns false when the file cannot be parsed as a page.
    bool addPage(string path) {
        try {
            T newPage = new T(path);
            logf("Added %s", newPage.slug);
            array[newPage.slug] = newPage;
            return true;
        } catch (page.ArticleParseException e) {
            logf("Could not parse %s", path);
            return false;
        }
    }

    // Initial scan: recursively adds every file below `path`.
    void scan(NativePath path, int level = 0) {
        logf("Scanning %s", path.toString());
        foreach(file; iterateDirectory(path)) {
            if (file.isDirectory) {
                scan(path ~ file.name, level + 1);
            } else {
                addPage((path ~ file.name).toString());
            }
        }
    }

    // Rebuilds the sorted view. NOTE(review): map!"&a" takes addresses of
    // elements of the array returned by array.values — presumably kept
    // alive by these pointers via the GC; verify before refactoring.
    void sortThings() {
        sortedRange = sort!(sortPred)(array.values)
        .map!"&a".array;
        logf("sorted: %s", sortedRange);
    }

    // Make sure the content directory exists before scanning/watching it.
    if (!existsFile(getWorkingDirectory() ~ directory)) {
        createDirectory(getWorkingDirectory() ~ directory);
    }
    scan(getWorkingDirectory() ~ directory);
    sortThings();

    // Recursive watch over the content directory.
    DirectoryWatcher watcher = watchDirectory(getWorkingDirectory() ~ directory, true);
    //auto watcher = FileWatch((getWorkingDirectory() ~ directory).toString(), true);

    bool shouldStop = false;
    while (!shouldStop) {
        // Try to reduce changes to only one DirectoryChangeType per change
        DirectoryChange[] changes;
        // Blocks until changes arrive; false means the watcher died.
        shouldStop = !watcher.readChanges(changes);
        foreach(change; changes) {
            logf("=======[New changes]======");
            string[] changeTypes = ["added", "removed", "modified"];
            logf("Path: %s, type: %s", change.path.toString(), changeTypes[change.type]);
            // Ignore Kate editor swap files.
            if (endsWith(change.path.toString(), ".kate-swp")) continue;
            switch (change.type) with (DirectoryChangeType){
                case added:
                    try {
                        addPage(change.path.toString());
                    } catch(Exception e) {
                        warningf("Error while updating %s: %s", change.path.toString(), e.msg);
                    }
                    break;
                case modified:
                    T newPage;
                    try {
                        newPage = new T(change.path.toString());
                        log(newPage.slug);
                        if (newPage.slug in array) {
                            log("Slug not changed");
                            array[newPage.slug] = newPage;
                        } else {
                            log("Slug changed");
                            // The file's slug changed: remove the entry
                            // that still maps the old slug to this file.
                            foreach(item; array) {
                                if (item.name == change.path.toString()) {
                                    logf("Removed %s, which is the old slug of %s", newPage.slug, item.slug);
                                    array.remove(item.slug);
                                }
                            }
                        }
                        logf("Modified %s", newPage.slug);
                        array[newPage.slug] = newPage;
                    } catch(page.ArticleParseException e) {
                        warningf("Could not parse %s", change.path.toString());
                    }
                    break;
                case removed:
                    try {
                        // Find the entry whose source file matches the
                        // removed path and drop it from the map.
                        foreach(item; array.byValue) {
                            logf(" - %s", item.name);
                            if (item.name == change.path.toString()) {
                                logf("Removed %s", item.slug);
                                array.remove(item.slug);
                            }
                        }
                    } catch(Exception e) {}
                    break;
                default: break;
            }
            log("Current state:");
            //sortedRange = sort!("a."~sortProp~" < b."~sortProp)(array.values);
            sortThings();
            foreach (item; array) {
                logf("%s - %s", item.name, item.slug);
            }
        }
    }
}
|
Loading…
Add table
Add a link
Reference in a new issue