Refactor code
* The project now makes proper use of modules
* HTTP-specific logic was extracted from app.d into http.d
* Logic for managing files in memory was moved from watcher.d into cache.d
* Added a license
This commit is contained in:
parent 7e01e575fc
commit 35d5b02b5e

12 changed files with 799 additions and 89 deletions
source/nl/netsoj/chris/blog/cache.d (new file, 91 lines)
@@ -0,0 +1,91 @@
import std.experimental.logger;
import std.traits;

import article;
import page;
import project;


/**
 * Default caches, each with a sort order for its public listing.
 * Note: the sort-order string is D code that is compiled into the sort predicate.
 */
GenericCache!(Article, "a.firstPublished > b.firstPublished") articles;
GenericCache!(Page, "a.title < b.title") pages;
GenericCache!(Project, "a.title < b.title") projects;

/**
 * In-memory cache of T (where T is Page-like). Right now it simply holds everything in memory.
 *
 * At a later date, this cache might start unloading lesser-accessed items, and load them
 * again later if needed.
 */
struct GenericCache(T, string sortOrder)
        if (isImplicitlyConvertible!(T, Page)) {
public:

    void addItem(T item) {
        logf("Added %s '%s'", T.stringof, item.slug);
        m_map[item.slug] = item;
        sortItems();
    }

    void removeItem(T item) {
        logf("Removed %s '%s'", T.stringof, item.slug);
        m_map.remove(item.slug);
        sortItems();
    }

    void removeItemByName(string name) {
        foreach(item; m_map.byValue) {
            if (item.name == name) removeItem(item);
        }
    }

    void changeItem(T item) {
        import std.algorithm;
        import std.range;

        auto r = m_map.byValue.find!((a, b) => a.name == b.name)(item);
        if (r.empty()) {
            warningf("Could not find old %s with name %s", T.stringof, item.name);
            return;
        }
        T oldItem = r.front;
        if (oldItem.slug != item.slug) {
            logf("Slug of %s '%s' changed to '%s'", T.stringof, oldItem.slug, item.slug);
            m_map.remove(oldItem.slug);
        }
        m_map[item.slug] = item;
        sortItems();
    }

    /**
     * Overloads the []-operator to redirect to the internal map.
     */
    T opIndex(string key) {
        return m_map[key];
    }

    T* opBinaryRight(string op)(const scope string key) {
        static if (op == "in") {
            return key in m_map;
        } else static assert(false, "Operation " ~ op ~ " is not supported on this type");
    }

    T[] sortedList() {
        return m_publicItems;
    }

private:

    T[string] m_map;
    T[] m_publicItems;

    void sortItems() {
        import std.algorithm;
        import std.array;
        m_publicItems = sort!sortOrder(m_map.values)
            .filter!(x => !x.isHidden)
            .array;
    }
}
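
A quick usage sketch of the cache above (illustrative only, not part of the commit; the file path and slug are made up):

    // Hypothetical usage of the `articles` cache declared above.
    Article a = new Article("articles/hello.md");  // slug comes from the file's header
    articles.addItem(a);
    if ("hello" in articles) {                     // opBinaryRight!"in" forwards to the map
        Article found = articles["hello"];         // opIndex forwards to the map
    }
    Article[] visible = articles.sortedList();     // sorted by firstPublished, hidden items filtered out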
source/nl/netsoj/chris/blog/constants.d (new file, 8 lines)
@@ -0,0 +1,8 @@

/**
 * Constants which are passed to templates while rendering.
 */
class Constants {
    public static immutable string SITE_NAME = "Chris Josten's site";
    public static immutable string SITE_URL = "https://chris.netsoj.nl";
    public static immutable string COPYRIGHT = "© Chris Josten, 2020";
}
source/nl/netsoj/chris/blog/interfaces/http.d (new file, 174 lines)
@@ -0,0 +1,174 @@
import vibe.d;

import cache;
import article;
import page;
import project;

/**
 * Output types for the content.
 */
enum OutputType {
    HTML,
    MARKDOWN
}

immutable string MIME_MARKDOWN = "text/markdown";

immutable Duration CACHE_TIME = days(16);

/**
 * Determines the output type for the given slug based on its extension,
 * and strips that extension from the slug.
 */
private OutputType getOutputType(ref string slug) {
    if (slug.endsWith(".md")) {
        slug = chomp(slug, ".md");
        return OutputType.MARKDOWN;
    } else if (slug.endsWith(".html")) {
        // If explicitly asking for HTML, we'll return HTML.
        slug = chomp(slug, ".html");
        return OutputType.HTML;
    } else {
        // If in the future, for any reason, we no longer use HTML,
        // this allows us to keep the current URLs with an option
        // to change the output in the future.
        return OutputType.HTML;
    }
}

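For illustration (not part of the commit), getOutputType rewrites the slug in place while returning the negotiated type; the slug value here is made up:

    string slug = "my-first-post.md";
    OutputType type = getOutputType(slug);
    assert(type == OutputType.MARKDOWN);
    assert(slug == "my-first-post");  // the extension has been stripped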
void addCachingHeader(bool publicCache = true)(ref HTTPServerResponse res) {
    string header = "";
    static if (publicCache) {
        header ~= "public";
    } else {
        header ~= "private";
    }
    header ~= ", max-age=" ~ to!string(CACHE_TIME.total!"seconds");
    res.headers["Cache-Control"] = header;
}

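With the defaults above (a public cache and a CACHE_TIME of 16 days, i.e. 16 * 86400 = 1382400 seconds), a response passed through addCachingHeader carries, for illustration:

    Cache-Control: public, max-age=1382400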
struct TranslateContext {
    import std.typetuple;

    alias languages = TypeTuple!("en_GB", "nl_NL");
    mixin translationModule!"mijnblog";

    static string determineLanguage(scope HTTPServerRequest req) {
        // Default behaviour: use the "Accept-Language" header.
        if ("lang" !in req.query) return req.determineLanguageByHeader(languages);
        return req.query.get("lang", "en_GB");
    }
}

/**
 * Generates boilerplate code for a single response.
 * Params:
 *   arrayName = The name of the associative array to take the items from.
 *   templateName = The name of the template to render.
 */
string singleResponseMixin(string arrayName, string templateName) {
    return `string slug = req.params["slug"];
    OutputType outputType = getOutputType(slug);

    enforceHTTP(slug in ` ~ arrayName ~ `, HTTPStatus.notFound, "Page not found");
    auto content = ` ~ arrayName ~ `[slug];
    switch (outputType) with (OutputType) {
    case MARKDOWN:
        res.writeBody(content.contentSource, MIME_MARKDOWN);
        break;
    default:
    case HTML:
        render!("` ~ templateName ~ `", content);
        break;
    }`;
}

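As a sketch of what the string mixin expands to, mixin(singleResponseMixin("articles", "pages/article.dt")) pastes code equivalent to the following into the calling handler, which is why the handler must have req, res and the articles cache in scope:

    string slug = req.params["slug"];
    OutputType outputType = getOutputType(slug);

    enforceHTTP(slug in articles, HTTPStatus.notFound, "Page not found");
    auto content = articles[slug];
    switch (outputType) with (OutputType) {
    case MARKDOWN:
        res.writeBody(content.contentSource, MIME_MARKDOWN);
        break;
    default:
    case HTML:
        render!("pages/article.dt", content);
        break;
    }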
@translationContext!TranslateContext
class MijnBlog {

public:
    /**
     * Generates the response for /posts/:slug and /palen/:slug.
     */
    @path("/posts/:slug")
    void getArticleSingle(string _slug, HTTPServerRequest req, HTTPServerResponse res) {
        //getSingle!(Article, "pages/article.dt")(articles, req, res);
        mixin(singleResponseMixin("articles", "pages/article.dt"));
    }

    /**
     * Generates the response for /posts and /palen.
     */
    @path("/posts/")
    void getArticleOverview(HTTPServerRequest req, HTTPServerResponse res) {
        addCachingHeader(res);
        Article[] articleList = articles.sortedList;
        render!("pages/article-list.dt", articleList);
    }

    /**
     * Generates the response for /projects and /projecten.
     */
    @path("/projects/")
    void getProjectOverview(HTTPServerRequest req, HTTPServerResponse res) {
        addCachingHeader(res);
        Project[] projectList = projects.sortedList;
        render!("pages/project-list.dt", projectList);
    }

    /**
     * Generates the response for a project page.
     */
    @path("/projects/:slug")
    void getProject(HTTPServerRequest req, HTTPServerResponse res) {
        res.headers["Cache-Control"] = "public";
        mixin(singleResponseMixin("projects", "pages/project.dt"));
    }

    /**
     * Generates the response for a page.
     */
    @path("/:slug")
    void getPage(HTTPServerRequest req, HTTPServerResponse res) {
        addCachingHeader(res);
        mixin(singleResponseMixin("pages", "pages/page.dt"));
    }

    @path("/")
    void getIndexPage(HTTPServerRequest req, HTTPServerResponse res) {
        addCachingHeader(res);
        // If no slug is supplied, it is adjusted to "index".
        req.params.addField("slug", "index");
        mixin(singleResponseMixin("pages", "pages/page.dt"));
    }
}

/**
 * Generates the response whenever an error occurs.
 */
@safe
void errorPage(HTTPServerRequest req, HTTPServerResponse res, HTTPServerErrorInfo error) {
    render!("pages/error.dt", error)(res);
}

@trusted
void startHTTPServer() {
    HTTPServerSettings settings = new HTTPServerSettings;
    settings.bindAddresses = ["0.0.0.0"];
    settings.port = 3465;
    settings.serverString = "zeg ik lekker niet"; // Dutch for "not telling you"
    settings.errorPageHandler = toDelegate(&errorPage);
    settings.keepAliveTimeout = dur!"seconds"(60);
    debug {
        settings.accessLogToConsole = true;
    }
    HTTPFileServerSettings fSettings = new HTTPFileServerSettings;
    fSettings.maxAge = days(16);

    URLRouter router = new URLRouter;
    router.get("/static/*", serveStaticFiles("./public/", fSettings));
    router.registerWebInterface(new MijnBlog);

    listenHTTP(settings, router);
}
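
End to end, the extension-based negotiation means one handler serves two representations (hypothetical URLs for illustration):

    GET /posts/my-first-post       -> HTML rendered through pages/article.dt
    GET /posts/my-first-post.md    -> raw Markdown source, served as text/markdown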
source/nl/netsoj/chris/blog/main.d (new file, 27 lines)
@@ -0,0 +1,27 @@
import std.experimental.logger;
import vibe.d;

import article;
import page;
import project;

import cache;
import http;
import watcher;


void main() {
    startHTTPServer();

    // Start indexing pages.
    runTask({
        initPages!Page(&pages, "pages");
    });
    runTask({
        initPages!Article(&articles, "articles");
    });
    runTask({
        initPages!Project(&projects, "projects");
    });
    runApplication();
}
source/nl/netsoj/chris/blog/model/article.d (new file, 66 lines)
@@ -0,0 +1,66 @@
import std.file;
import std.stdio;
import std.string;
import std.datetime.date;
import std.experimental.logger;

import dyaml;
import vibe.d;

import page;
import utils;


/**
 * Represents an article on the blog.
 */
class Article : Page {
    private string m_author;
    private string m_title;
    private string m_slug;
    private DateTime m_firstPublished;
    private string m_excerpt;
    /**
     * Time that the file was last updated.
     */
    private DateTime m_updated;

    /**
     * Loads an article from a file.
     */
    this(string file) {
        this.m_headerShift = 1;
        super(file);
        // If no excerpt was given in the header, mark everything between the
        // end of the header and the first heading as the excerpt, which is
        // used in search results.
        if (m_excerpt is null) {
            const ptrdiff_t separatorIndex = indexOf(m_contentSource, "---\n");
            this.m_excerpt = this.m_contentSource[separatorIndex + 4 .. $];
            const ptrdiff_t firstHeaderIndex = indexOf(this.m_excerpt, '#');
            if (firstHeaderIndex >= 0) {
                this.m_excerpt = this.m_excerpt[0 .. firstHeaderIndex];
            }
        }
    }

    /**
     * Loads the metadata specific to articles.
     */
    @safe
    override protected void loadHeader(Node headerNode) {
        super.loadHeader(headerNode);
        this.m_author = headerNode.getOr!string("author", "<unknown author>");
        this.m_excerpt = headerNode.getOr!string("excerpt", null);

        SysTime firstPublished;
        firstPublished = headerNode.getOr!SysTime("firstPublished",
                SysTime(DateTime.fromSimpleString("0001-Jan-01 00:00:00")));
        this.m_firstPublished = cast(DateTime) firstPublished;
        this.m_updated = cast(DateTime) headerNode.getOr!SysTime("updated", firstPublished);
    }

    @property string excerpt() { return m_excerpt; }
    @property string author() { return m_author; }
    @property DateTime firstPublished() { return m_firstPublished; }
    @property DateTime updated() { return m_updated; }
    @property bool isModified() { return m_firstPublished != m_updated; }
}
source/nl/netsoj/chris/blog/model/page.d (new file, 143 lines)
@@ -0,0 +1,143 @@
import std.exception;
import std.experimental.logger;
import std.file;
import std.process;
import std.stdio;

import dyaml;
import vibe.vibe;

import utils;


/**
 * Exception thrown when a page cannot be parsed, e.g. because of syntax errors.
 */
class ArticleParseException : Exception {
    mixin basicExceptionCtors;
}


/**
 * Represents a page on the blog. Every other page type, including blog
 * articles, projects and so on, derives from this class.
 */
class Page {
    /**
     * Internal name of the article. Usually the file name.
     */
    protected string m_name;

    /**
     * Slug, either manually assigned or generated from the file name.
     * Only used in the URL.
     */
    protected string m_slug;
    protected string m_title;
    protected string m_content;
    protected string m_contentSource;
    protected bool m_hidden;
    protected string m_language;


    /**
     * Option for the Markdown parser: the number of levels the headers found
     * in the Markdown should be shifted. For example, 0 means H1 -> H1,
     * 1 means H1 -> H2, 2 means H1 -> H3 and so on.
     */
    protected int m_headerShift = 1;

    private bool hasCalledSuper = false;

    /**
     * Creates a page from a file. This will read from the file and parse it.
     */
    this(string file) {
        this.m_name = file;
        this.m_contentSource = readText(file);
        // Find the separator and split the string in two.
        const ptrdiff_t separatorIndex = lastIndexOf(m_contentSource, "---\n");
        enforce!ArticleParseException(separatorIndex >= 0);

        string header = m_contentSource[0 .. separatorIndex];

        Node node = Loader.fromString(header).load();
        loadHeader(node);
        assert(hasCalledSuper);

        this.m_content = Page.parseMarkdown(m_contentSource[separatorIndex + 4 .. $],
                this.m_headerShift);
    }


    /**
     * Parses metadata from the header. Subclasses should override this method
     * to parse their own metadata and call super.
     * Params:
     *   headerNode = the YAML node to parse the header metadata from.
     */
    @safe
    protected void loadHeader(Node headerNode) {
        this.m_hidden = headerNode.getOr!bool("hidden", false);
        if (headerNode.containsKey("title")) {
            this.m_title = headerNode["title"].as!string;
        } else {
            warningf("%s does not contain a title", this.m_name);
        }
        if (headerNode.containsKey("slug")) {
            this.m_slug = headerNode["slug"].as!string;
        } else {
            this.m_slug = this.m_title;
            infof("%s does not have a slug. Using %s", this.m_name, this.m_slug);
        }
        this.m_language = headerNode.getOr!string("language", "unknown");
        hasCalledSuper = true;
    }

    /**
     * Starts pandoc to convert Markdown to HTML.
     * Params:
     *   source = The Markdown source as a string (not a path!)
     *   shiftHeader = (Optional) The amount a header needs to be shifted. If, for example,
     *                 it is set to 1, first-level headings within Markdown become
     *                 second-level headings within HTML.
     */
    public static string parseMarkdown(string source, int shiftHeader = 0) {
        string[] args = ["pandoc",
                "-f", "markdown",
                "-t", "html",
                "--lua-filter", "defaultClasses.lua"];

        if (shiftHeader != 0) args ~= "--shift-heading-level-by=" ~ to!string(shiftHeader);

        ProcessPipes pandoc = pipeProcess(args);
        pandoc.stdin.write(source);
        pandoc.stdin.writeln();
        pandoc.stdin.flush();
        pandoc.stdin.close();
        pandoc.pid.wait();
        string result;
        string line;
        while ((line = pandoc.stdout.readln()) !is null) {
            result ~= line;
            debug {
                //logf("Pandoc stdout: %s", line);
            }
        }

        while ((line = pandoc.stderr.readln()) !is null) {
            debug {
                logf("Pandoc stderr: %s", line);
            }
        }

        return result;
    }

    @property string name() { return m_name; }
    @property string title() { return m_title; }
    @property string slug() { return m_slug; }
    @property string content() { return m_content; }
    @property string contentSource() { return m_contentSource; }
    @property bool isHidden() { return m_hidden; }
    @property string language() { return m_language; }
}
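
For reference, a page file as the constructor above expects it: a YAML header, the last "---" separator line, then the Markdown body. This example file is illustrative, not part of the commit:

    title: Hello world
    slug: hello-world
    language: en_GB
    hidden: false
    ---
    # Hello world

    This body is piped through pandoc and becomes the rendered HTML content.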
source/nl/netsoj/chris/blog/model/project.d (new file, 61 lines)
@@ -0,0 +1,61 @@
import std.array;
import std.algorithm;
import std.typecons;

import dyaml;
import vibe.vibe;

import page;
import utils;
import staticpaths;

/**
 * Represents a project, like an unfinished application.
 */
class Project : Page {
    alias Link = Tuple!(string, "name", string, "url");
    protected immutable string PROJECT_ICON_DIR = IMG_DIR ~ "projects/icons/";
    protected immutable string PROJECT_IMAGE_DIR = IMG_DIR ~ "projects/images/";
    protected string m_state;
    protected string[] m_platforms;
    protected string[] m_technologies;
    protected string m_icon;
    protected string[] m_images;
    protected string m_description;
    protected Link[] m_sourceCode = [];

    /**
     * Creates a project from a file.
     */
    this(string file) {
        super(file);
    }

    @safe
    override protected void loadHeader(Node headerNode) {
        super.loadHeader(headerNode);
        this.m_state = headerNode.getOr!string("state", "unknown");
        this.m_platforms = headerNode.getArray!string("platforms", []);
        this.m_technologies = headerNode.getArray!string("technologies", []);
        this.m_icon = PROJECT_ICON_DIR ~ headerNode.getOr!string("icon", "");
        this.m_images = headerNode.getArray!string("images", [])
                .map!(x => PROJECT_IMAGE_DIR ~ x).array;
        this.m_description = headerNode.getOr!string("description", "<no description>");
        if ("sourceCode" in headerNode) {
            m_sourceCode.reserve(headerNode["sourceCode"].length);
            foreach (Node node; headerNode["sourceCode"]) {
                m_sourceCode ~= tuple!("name", "url")(node.getOr!string("name", "link"),
                        node.getOr!string("url", "#"));
            }
        }
    }

    @property string state() { return m_state; }
    @property string[] platforms() { return m_platforms; }
    @property string[] technologies() { return m_technologies; }
    @property string icon() { return m_icon; }
    @property string[] images() { return m_images; }
    @property string description() { return m_description; }
    @property Link[] sourceCode() { return m_sourceCode; }

}
source/nl/netsoj/chris/blog/staticpaths.d (new file, 6 lines)
@@ -0,0 +1,6 @@
/**
 * Paths to static data.
 */
immutable string STATIC_DIR = "/static/";
immutable string IMG_DIR = STATIC_DIR ~ "img/";
immutable string SCRIPT_DIR = STATIC_DIR ~ "script/";
source/nl/netsoj/chris/blog/utils.d (new file, 31 lines)
@@ -0,0 +1,31 @@
import std.algorithm;
import std.array;
import std.conv;
import std.datetime;

import dyaml;

/**
 * Formats a date as "d-m-yyyy", e.g. "31-12-2020".
 */
string toHumanString(DateTime value) {
    return to!string(value.day) ~ "-" ~ to!string(ubyte(value.month)) ~ "-" ~ to!string(value.year);
}

/**
 * Returns node[key] as a T, or the fallback value if the key is missing
 * or cannot be converted.
 */
T getOr(T)(Node node, string key, T or) {
    if (key in node) {
        try {
            return node[key].get!T;
        } catch (Exception e) {
            return or;
        }
    } else {
        return or;
    }
}

/**
 * Returns node[key] as a T[], or the fallback array if the key is missing
 * or cannot be converted.
 */
T[] getArray(T)(Node node, string key, T[] or = []) {
    try {
        return node.getOr!(Node[])(key, [])
            .map!(x => x.as!T).array;
    } catch (NodeException e) {
        return or;
    }
}
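
A small usage sketch of getOr (illustrative only; the YAML snippet is made up):

    Node header = Loader.fromString("author: Chris").load();
    assert(header.getOr!string("author", "<unknown author>") == "Chris");
    assert(header.getOr!string("excerpt", null) is null);  // missing key falls back to the default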
source/nl/netsoj/chris/blog/watcher.d (new file, 92 lines)
@@ -0,0 +1,92 @@
import std.array;
import std.algorithm;
import std.experimental.logger;
import std.file;
import std.stdio;
import std.traits;

import vibe.d;

import cache;
import page;

/**
 * Loads pages into memory and sets up a "watcher" to watch a directory for file changes.
 */
void initPages(T, C)(C* cache, const string directory)
        if (isImplicitlyConvertible!(T, Page)) {

    bool addPage(string path) {
        try {
            T newPage = new T(path);
            logf("Added %s", newPage.slug);
            cache.addItem(newPage);
            return true;
        } catch (page.ArticleParseException e) {
            logf("Could not parse %s: %s", path, e);
            return false;
        } catch (Exception e) {
            logf("Other exception while parsing %s: %s", path, e);
            return false;
        }
    }

    // Initial scan
    void scan(NativePath path, int level = 0) {
        logf("Scanning %s", path.toString());
        foreach (file; iterateDirectory(path)) {
            if (file.isDirectory) {
                scan(path ~ file.name, level + 1);
            } else {
                addPage((path ~ file.name).toString());
            }
        }
    }

    if (!existsFile(getWorkingDirectory() ~ directory)) {
        createDirectory(getWorkingDirectory() ~ directory);
    }
    scan(getWorkingDirectory() ~ directory);
    DirectoryWatcher watcher = watchDirectory(getWorkingDirectory() ~ directory, true);

    bool shouldStop = false;
    while (!shouldStop) {
        // Try to reduce changes to only one DirectoryChangeType per change.
        DirectoryChange[] changes;
        shouldStop = !watcher.readChanges(changes);
        foreach (change; changes) {
            logf("=======[New changes]======");
            string[] changeTypes = ["added", "removed", "modified"];
            logf("Path: %s, type: %s", change.path.toString(), changeTypes[change.type]);
            // Ignore Kate's swap files.
            if (endsWith(change.path.toString(), ".kate-swp")) continue;
            switch (change.type) with (DirectoryChangeType) {
            case added:
                try {
                    addPage(change.path.toString());
                } catch (Exception e) {
                    warningf("Error while updating %s: %s", change.path.toString(), e.msg);
                }
                break;
            case modified:
                T newPage;
                try {
                    newPage = new T(change.path.toString());
                    cache.changeItem(newPage);
                } catch (page.ArticleParseException e) {
                    warningf("Could not parse %s", change.path.toString());
                } catch (Exception e) {
                    warningf("Error while updating %s: %s", change.path.toString(), e.msg);
                }
                break;
            case removed:
                try {
                    cache.removeItemByName(change.path.toString());
                } catch (Exception e) {
                    logf("Error while trying to remove %s: %s", T.stringof, e.msg);
                }
                break;
            default: break;
            }
        }
    }
}