mod: rename functions; move knowledge from lexer to parser
All checks were successful
Zig Project Action / Lint, Spell-check and test zig project (push) Successful in 56s

The lexer implementation shall remain dumb and not contain implicit
knowledge about the `smd` file format. That information shall belong to
the parser, which constructs the `Ast`. The tokenizer does not need to
know format details, which simplifies its implementation and tests accordingly.
This commit is contained in:
2025-11-09 22:44:24 +01:00
parent 27df340cad
commit b32c5ed96b
4 changed files with 426 additions and 387 deletions
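The effect is easiest to see on headings: the lexer no longer folds `#` prefixes into a single `.heading` token but emits plain `.hashtag` and `.text` tokens, and deriving the heading level becomes the parser's job (see the updated tests below). A minimal sketch of the new contract, using the `Tokenizer` API from this diff; the test name is ours:

```zig
const std = @import("std");
const lexer = @import("lexer.zig");

// "## Heading 2" used to lex to a single `.heading` token; after this
// commit it lexes to `.hashtag, .hashtag, .text` and the parser counts
// the hashtags itself.
test "heading level is parser knowledge (sketch)" {
    var tokenizer = lexer.Tokenizer.init("## Heading 2");
    var level: u8 = 0;
    var token = tokenizer.next();
    while (token.tag == .hashtag) : (token = tokenizer.next()) level += 1;
    try std.testing.expectEqual(2, level); // derived by the parser, not the lexer
    try std.testing.expectEqual(.text, token.tag); // the heading text follows
}
```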

View File

@@ -1,14 +1,16 @@
///! Abstract syntax tree implementation for `nf` file format.
///! Abstract syntax tree implementation for `smd` file format.
///! This is a simplified markdown-based file format; `md` files
///! may be parsed correctly too.
nodes: std.MultiArrayList(Node),
tokens: std.MultiArrayList(Token),
// TODO what would I need from the `Ast`?
// - parser checks if `Ast` is _correct_ when constructing from source using lexer / tokenizer
// - parser checks if `Ast` is *correct* when constructing from source using lexer / tokenizer
// -> use that for reporting through the lsp / syntax checker?
// - Ast holds representation for formatting
// - Ast holds information about contents and locations for lsp implementation
// - `Ast` holds representation for formatting
// - `Ast` holds information about contents and locations for lsp implementation
// TODO do I need more specific ast nodes? or do I even need some kind of interface?
// TODO do I need more specific `Ast` nodes? or do I even need some kind of interface?
// -> i.e. blocks should also tell me what kind of block it is
// -> i.e. headers should also tell me what level (and their corresponding children)
// -> i.e. links should also tell me what kind of link (file, web, note)
@@ -18,51 +20,49 @@ tokens: std.MultiArrayList(Token),
// -> have only specific children allowed for each kind?
pub const Tag = enum(u8) {
anchor,
block,
comment,
header,
link,
list,
paragraph,
reference,
table,
topic,
hashtag,
separator,
text,
styled_text,
quote,
/// invalid type, as the `Ast` may not yet be complete enough to derive the correct type
invalid,
};
pub const Type = union(Tag) {
anchor: Anchor,
block: Block,
comment: Comment,
header: Header,
link: Link,
list: List,
paragraph: Paragraph,
reference: Reference,
table: Table,
topic: Topic,
hashtag: Hashtag,
separator,
text: Text,
styled_text: Text,
quote: Quote,
invalid,
pub fn getNode(this: @This(), ast: Ast) Node {
return switch (this) {
.anchor => |anchor| ast.nodes.get(anchor.idx),
.block => |block| ast.nodes.get(block.idx),
.comment => |comment| ast.nodes.get(comment.idx),
.header => |header| ast.nodes.get(header.idx),
.link => |link| ast.nodes.get(link.idx),
.list => |list| ast.nodes.get(list.idx),
.paragraph => |paragraph| ast.nodes.get(paragraph.idx),
.reference => |reference| ast.nodes.get(reference.idx),
.table => |table| ast.nodes.get(table.idx),
.topic => |topic| ast.nodes.get(topic.idx),
.hashtag => |hashtag| ast.nodes.get(hashtag.idx),
.separator => .{
.tag = .separator,
.loc = .{
.start = 0,
.end = 0,
}, // TODO that information might be required!
.parent = .{
.start = 0,
.end = 0,
},
},
.text => |text| ast.nodes.get(text.idx),
.styled_text => |text| ast.nodes.get(text.idx),
.quote => |quote| ast.nodes.get(quote.idx),
.invalid => .{
.tag = .invalid,
.loc = .{
@@ -79,18 +79,14 @@ pub const Type = union(Tag) {
pub fn dump(this: @This()) void {
switch (this) {
.anchor => |anchor| anchor.dump(),
.block => |block| block.dump(),
.comment => |comment| comment.dump(),
.header => |header| header.dump(),
.link => |link| link.dump(),
.list => |list| list.dump(),
.paragraph => |paragraph| paragraph.dump(),
.reference => |reference| reference.dump(),
.table => |table| table.dump(),
.topic => |topic| topic.dump(),
.hashtag => |hashtag| hashtag.dump(),
.separator => print("---", .{}),
.text => |text| text.dump(),
.styled_text => |text| text.dump(),
.quote => |quote| quote.dump(),
.invalid => {},
}
}
@@ -125,33 +121,16 @@ pub const Node = struct {
}
};
pub const Anchor = struct {
idx: usize = undefined,
target: Token.Location,
pub fn dump(this: @This()) void {
print(".target: {any} ", .{this.target});
}
};
pub const Block = struct {
idx: usize = undefined,
kind: enum(u8) {
tldr,
info,
warn,
quote,
math,
@"fn",
code, // if not matched with one above 'code' is assumed
},
lang: Token.Location,
pub fn dump(this: @This()) void {
print(".kind: {any} ", .{this.kind});
print(".lang: {any} ", .{this.lang});
}
};
pub const Comment = struct {
pub const Quote = struct {
idx: usize = undefined,
pub fn dump(this: @This()) void {
@@ -170,10 +149,11 @@ pub const Header = struct {
pub const Link = struct {
idx: usize = undefined,
kind: enum(u2) { note, file, web },
name: Token.Location,
ref: ?Token.Location,
pub fn dump(this: @This()) void {
print(".kind: .{s} ", .{@tagName(this.kind)});
print(".name: .{any}, .ref: {any}", .{ this.name, this.ref });
}
};
@@ -187,7 +167,7 @@ pub const List = struct {
}
};
pub const Paragraph = struct {
pub const Hashtag = struct {
idx: usize = undefined,
pub fn dump(this: @This()) void {
@@ -195,36 +175,14 @@ pub const Paragraph = struct {
}
};
pub const Reference = struct {
idx: usize = undefined,
pub fn dump(this: @This()) void {
_ = this;
}
};
pub const Table = struct {
idx: usize = undefined,
cols: u8,
rows: u8,
pub fn dump(this: @This()) void {
_ = this;
}
};
pub const Topic = struct {
idx: usize = undefined,
topics: []const u8, // order here is important!
pub fn dump(this: @This()) void {
_ = this;
}
};
pub const Text = struct {
idx: usize = undefined,
styled: bool,
style: packed struct {
underlined: bool = false, // _text_
strong: bool = false, // **text**
italic: bool = false, // *text*
code: bool = false, // `text`
} = .{},
pub fn dump(this: @This()) void {
_ = this;
@@ -249,18 +207,14 @@ pub fn addNode(this: *Ast, gpa: Allocator, node: Node) !void {
var n = node;
switch (n.tag) {
.anchor => |*anchor| anchor.idx = idx,
.block => |*block| block.idx = idx,
.comment => |*comment| comment.idx = idx,
.header => |*header| header.idx = idx,
.link => |*link| link.idx = idx,
.list => |*list| list.idx = idx,
.paragraph => |*paragraph| paragraph.idx = idx,
.reference => |*reference| reference.idx = idx,
.table => |*table| table.idx = idx,
.topic => |*topic| topic.idx = idx,
.hashtag => |*hashtag| hashtag.idx = idx,
.separator => {},
.text => |*text| text.idx = idx,
.styled_text => |*text| text.idx = idx,
.quote => |*quote| quote.idx = idx,
.invalid => {},
}
this.nodes.appendAssumeCapacity(n);

View File

@@ -13,10 +13,13 @@ pub const Token = struct {
block,
eof,
hashtag,
heading,
tag,
indentation,
invalid,
link,
l_bracket,
r_bracket,
l_brace,
r_brace,
minus,
separator,
text,
@@ -27,9 +30,14 @@ pub const Token = struct {
pub fn lexeme(tag: Tag) ?[]const u8 {
return switch (tag) {
.asterisk => "*",
.hashtag => "#",
.minus => "-",
.tick => "`",
.underscore => "_",
.l_bracket => "[",
.r_bracket => "]",
.l_brace => "(",
.r_brace => ")",
else => null,
};
}
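For illustration, a small usage sketch of `lexeme()` with the newly added bracket tags (test name and imports are ours):

```zig
const std = @import("std");
const Token = @import("lexer.zig").Token;

test "bracket tags carry a fixed lexeme (sketch)" {
    try std.testing.expectEqualStrings("[", Token.Tag.l_bracket.lexeme().?);
    try std.testing.expectEqualStrings(")", Token.Tag.r_brace.lexeme().?);
    // word-like tags such as `.tag` and `.text` have no fixed spelling
    try std.testing.expect(Token.Tag.tag.lexeme() == null);
}
```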
@@ -38,8 +46,7 @@ pub const Token = struct {
return tag.lexeme() orelse switch (tag) {
.block => "block",
.eof => "EOF",
.hashtag => "#hashtag",
.heading => "heading",
.tag => "#tag",
.indentation => "indentation",
.invalid => "invalid token",
.separator => "---",
@@ -74,13 +81,10 @@ pub const Tokenizer = struct {
invalid,
hashtag,
tag,
heading,
minus,
tick,
block,
r_angle_bracket,
l_bracket,
l_brace,
};
/// state fsm (finite state machine) describing the syntax of `nf`
@@ -167,7 +171,26 @@ pub const Tokenizer = struct {
'[' => if (result.loc.start != index) {
result.tag = .text;
} else {
continue :state .l_bracket;
index += 1;
result.tag = .l_bracket;
},
'(' => if (result.loc.start != index) {
result.tag = .text;
} else {
index += 1;
result.tag = .l_brace;
},
']' => if (result.loc.start != index) {
result.tag = .text;
} else {
index += 1;
result.tag = .r_bracket;
},
')' => if (result.loc.start != index) {
result.tag = .text;
} else {
index += 1;
result.tag = .r_brace;
},
'-' => if (result.loc.start != index) {
result.tag = .text;
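With the dedicated `.l_bracket`/`.l_brace` states removed below, a link such as `[name](ref)` is no longer recognized by the lexer at all; it surfaces as plain punctuation and text tokens that the parser reassembles (see the `.l_bracket` arm in parser.zig further down). A sketch of the expected stream, reusing the `testTokenize` helper from the existing tests:

```zig
test "links are plain tokens now (sketch)" {
    try testTokenize("[name](ref)", &.{
        .l_bracket, .text, .r_bracket, .l_brace, .text, .r_brace,
    });
}
```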
@@ -210,31 +233,6 @@ pub const Tokenizer = struct {
else => continue :state .r_angle_bracket,
}
},
.l_bracket => {
index += 1;
switch (this.buffer[index]) {
']' => {
index += 1;
switch (this.buffer[index]) {
'(' => continue :state .l_brace,
else => result.tag = .link,
}
},
0, '\n' => result.tag = .invalid,
else => continue :state .l_bracket,
}
},
.l_brace => {
index += 1;
switch (this.buffer[index]) {
')' => {
index += 1;
result.tag = .link;
},
0, '\n' => result.tag = .invalid,
else => continue :state .l_brace,
}
},
.tick => {
index += 1;
switch (this.buffer[index]) {
@@ -263,24 +261,17 @@ pub const Tokenizer = struct {
.hashtag => {
index += 1;
switch (this.buffer[index]) {
'#', ' ' => continue :state .heading,
'#', ' ' => result.tag = .hashtag,
else => continue :state .tag,
}
},
.tag => {
index += 1;
switch (this.buffer[index]) {
' ', 0, '\n' => result.tag = .hashtag,
' ', 0, '\n' => result.tag = .tag,
else => continue :state .tag,
}
},
.heading => {
index += 1;
switch (this.buffer[index]) {
0, '\n' => result.tag = .heading,
else => continue :state .heading,
}
},
.minus => {
index += 1;
switch (this.buffer[index]) {
@@ -375,20 +366,20 @@ test "blocks" {
}
test "heading" {
try testTokenize("# Heading 1", &.{.heading});
try testTokenize("## Heading 2", &.{.heading});
try testTokenize("### Heading 3", &.{.heading});
try testTokenize("#### Heading 4", &.{.heading});
try testTokenize("##### Heading 5", &.{.heading});
try testTokenize("###### Heading 6", &.{.heading});
try testTokenize("# Heading 1", &.{ .hashtag, .text });
try testTokenize("## Heading 2", &.{ .hashtag, .hashtag, .text });
try testTokenize("### Heading 3", &.{ .hashtag, .hashtag, .hashtag, .text });
try testTokenize("#### Heading 4", &.{ .hashtag, .hashtag, .hashtag, .hashtag, .text });
try testTokenize("##### Heading 5", &.{ .hashtag, .hashtag, .hashtag, .hashtag, .hashtag, .text });
try testTokenize("###### Heading 6", &.{ .hashtag, .hashtag, .hashtag, .hashtag, .hashtag, .hashtag, .text });
try testTokenize(
\\# Heading With some paragraph
\\
\\Followed with some more paragraph for that heading.
, &.{ .heading, .text });
, &.{ .hashtag, .text });
try testTokenize("Some paragraph with some equal signs#Which is not a heading.", &.{ .text, .tag, .text });
try testTokenize("Some paragraph with some equal signs #Which is not a heading.", &.{ .text, .tag, .text });
try testTokenize("Some paragraph with some equal signs # Which is not a heading.", &.{ .text, .hashtag, .text });
try testTokenize("Some paragraph with some equal signs #Which is not a heading.", &.{ .text, .hashtag, .text });
try testTokenize("Some paragraph with some equal signs # Which is not a heading.", &.{ .text, .heading });
}
test "lists" {
@@ -406,7 +397,7 @@ test "tables are just text" {
try testTokenize(
\\| Build Mode | Runtime Safety | Optimizations |
\\| | | |
\\| Debug (default) | Yes | No |
\\| Debug =default | Yes | No |
\\| ReleaseSafe | Yes | Yes, Speed |
\\| ReleaseSmall | No | Yes, Size |
\\| ReleaseFast | No | Yes, Speed |

View File

@@ -1,4 +1,12 @@
fn parse(allocator: Allocator, content: [:0]const u8) !Ast {
// TODO planned features:
// - integrate better with the `std.Io.Reader` interface, providing more
// flexibility (i.e. even streaming the file contents in, etc.)
// - tests with increasing complexity
// - order the tests by increasing parsing functionality
// - what should be able to be parsed first?
// - which combinations shall be tested?
fn parse_sentinel_slice(allocator: Allocator, content: [:0]const u8) !Ast {
var ast: Ast = .init;
var tokenizer = Tokenizer.init(content);
var token = tokenizer.next();
@@ -11,90 +19,18 @@ fn parse(allocator: Allocator, content: [:0]const u8) !Ast {
var idx: u32 = 0; // keep track of how far we peeked ahead
// TODO keep track of the parent position too!
const node: Ast.Node = tag: switch (token.tag) {
// tracing
.anchor => break :tag .{
.tag = .{
.anchor = .{ .target = undefined },
},
.loc = token.loc,
.parent = last_loc,
},
.reference => break :tag .{
.tag = .{ .reference = .{} },
.loc = token.loc,
.parent = last_loc,
},
.at_sign => break :tag .{
.tag = .{ .text = .{ .styled = false } },
.loc = token.loc,
.parent = last_loc,
},
// TODO determine kind of block
// -> run lexer on block contents if necessary and inject results into this tree accordingly
// -> recursive lexer instance needs to update the found corresponding index with the starting index of the block contents!
// TODO maybe make the tag switch a bit different?
// - add interface to update the tokenizer to advance accordingly!
.block => break :tag .{
.tag = .{
.block = .{
.kind = .tldr, // TODO parse correctly from the contents
.lang = token.loc,
},
},
.loc = token.loc,
.parent = last_loc,
},
.colon => {
var loc: Location = token.loc;
idx += 1;
token = tokenizer.peek(idx);
colon: switch (token.tag) {
.colon => {
idx += 1;
token = tokenizer.peek(idx);
continue :colon token.tag;
},
.text => {
idx += 1;
const end = tokenizer.peek(idx);
if (end.tag == .colon) {
loc.end = end.loc.end;
idx += 1;
token = tokenizer.peek(idx);
continue :colon token.tag;
}
},
.newline => if (loc.start + 1 == loc.end) {
// only have ':\n' which is not a .tag
continue;
},
else => continue :tag token.tag,
}
for (0..idx) |_| _ = tokenizer.next();
break :tag .{
.tag = .{
.topic = .{ .topics = undefined },
},
.loc = loc,
.parent = last_loc,
};
},
.comment => break :tag .{
.tag = .{
.comment = .{},
},
.loc = token.loc,
.parent = last_loc,
},
.newline => {
idx = 0;
token = tokenizer.next();
continue :tag token.tag;
},
.l_angle_bracket, .r_angle_bracket, .l_bracket, .r_bracket, .hashtag => {
idx = 0;
token = tokenizer.next();
continue :tag token.tag;
},
// header
.equal => {
.hashtag => {
var loc: Location = token.loc;
idx += 1;
const next = tokenizer.peek(idx);
@@ -102,8 +38,9 @@ fn parse(allocator: Allocator, content: [:0]const u8) !Ast {
// invalid
continue :tag .invalid;
} else {
// FIX this will fail if the entire heading is not only a text, but contains a minus, slash, etc.
// FIX this will fail if the heading is not a single text token but contains a minus or other symbols that split the text; it may even overshoot!
loc.end = next.loc.end;
for (0..idx) |_| _ = tokenizer.next();
break :tag .{
.tag = .{
.header = .{ .level = undefined },
@@ -113,113 +50,28 @@ fn parse(allocator: Allocator, content: [:0]const u8) !Ast {
};
}
},
// link
.l_bracket_colon => {
var loc: Location = token.loc;
idx += 1;
var next = tokenizer.peek(idx);
link: switch (next.tag) {
.text, .reference, .pipe => {
idx += 1;
next = tokenizer.peek(idx);
continue :link next.tag;
.tag => break :tag .{
.tag = .{
.hashtag = .{},
},
.r_bracket => {
loc.end = next.loc.end;
idx += 1;
break :tag .{
.tag = .{ .link = .{ .kind = .note } },
.loc = loc,
.loc = token.loc,
.parent = last_loc,
};
},
else => {
// invalid
continue;
},
}
},
.l_bracket_minus => {
var loc: Location = token.loc;
idx += 1;
var next = tokenizer.peek(idx);
link: switch (next.tag) {
.text, .pipe => {
idx += 1;
next = tokenizer.peek(idx);
continue :link next.tag;
},
.r_bracket => {
loc.end = next.loc.end;
idx += 1;
break :tag .{
.tag = .{ .link = .{ .kind = .file } },
.loc = loc,
.parent = last_loc,
};
},
else => {
// invalid
continue;
},
}
},
.l_bracket_slash => {
var loc: Location = token.loc;
idx += 1;
var next = tokenizer.peek(idx);
link: switch (next.tag) {
.text, .reference, .pipe => {
idx += 1;
next = tokenizer.peek(idx);
continue :link next.tag;
},
.r_bracket => {
loc.end = next.loc.end;
idx += 1;
break :tag .{
.tag = .{ .link = .{ .kind = .web } },
.loc = loc,
.parent = last_loc,
};
},
else => {
// invalid
continue;
},
}
},
// list
.minus => break :tag .{
.tag = .{
.text = .{ .styled = false },
.text = .{ .style = .{} },
},
.loc = token.loc,
.parent = last_loc,
},
.plus => break :tag .{
.tag = .{
.text = .{ .styled = false },
},
.loc = token.loc,
.parent = last_loc,
},
// table
.pipe => break :tag .{
.tag = .{
.text = .{ .styled = false },
},
.loc = token.loc,
.parent = last_loc,
},
.pipe_equal => unreachable,
// text
.text => {
var loc: Location = token.loc;
idx += 1;
var next = tokenizer.peek(idx);
text: switch (next.tag) {
.plus, .minus, .text, .colon => {
.text => {
loc.end = next.loc.end;
idx += 2;
next = tokenizer.peek(idx);
@@ -227,7 +79,7 @@ fn parse(allocator: Allocator, content: [:0]const u8) !Ast {
},
else => break :tag .{
.tag = .{
.text = .{ .styled = false },
.text = .{ .style = .{} },
},
.loc = loc,
.parent = last_loc,
@@ -251,7 +103,11 @@ fn parse(allocator: Allocator, content: [:0]const u8) !Ast {
for (0..idx) |_| _ = tokenizer.next();
break :tag .{
.tag = .{
.text = .{ .styled = true },
.text = .{
.style = .{
.underlined = true,
},
},
},
.loc = loc,
.parent = last_loc,
@@ -267,6 +123,196 @@ fn parse(allocator: Allocator, content: [:0]const u8) !Ast {
}
}
},
.asterisk => {
var loc: Location = token.loc;
idx += 1;
var next = tokenizer.peek(idx);
if (next.tag != .text) {
// invalid
loc.end = token.loc.end;
continue :tag .invalid;
} else {
loc = next.loc;
idx += 1;
next = tokenizer.peek(idx);
if (next.tag == .asterisk) {
for (0..idx) |_| _ = tokenizer.next();
break :tag .{
.tag = .{
.text = .{
.style = .{
.italic = true,
},
},
},
.loc = loc,
.parent = last_loc,
};
} else {
// invalid
loc.end = next.loc.end;
break :tag .{
.tag = .invalid,
.loc = loc,
.parent = last_loc,
};
}
}
},
.tick => {
var loc: Location = token.loc;
idx += 1;
var next = tokenizer.peek(idx);
if (next.tag != .text) {
// invalid
loc.end = token.loc.end;
continue :tag .invalid;
} else {
loc = next.loc;
idx += 1;
next = tokenizer.peek(idx);
if (next.tag == .tick) {
for (0..idx) |_| _ = tokenizer.next();
break :tag .{
.tag = .{
.text = .{
.style = .{
.code = true,
},
},
},
.loc = loc,
.parent = last_loc,
};
} else {
// invalid
loc.end = next.loc.end;
break :tag .{
.tag = .invalid,
.loc = loc,
.parent = last_loc,
};
}
}
},
.l_bracket => {
var name = token.loc;
var loc = token.loc;
idx += 1;
var next = tokenizer.peek(idx);
if (next.tag != .text) {
// invalid
loc.end = token.loc.end;
continue :tag .invalid;
} else {
loc.end = next.loc.end;
idx += 1;
next = tokenizer.peek(idx);
if (next.tag == .r_bracket) {
name.end = next.loc.end;
loc.end = next.loc.end;
idx += 1;
next = tokenizer.peek(idx);
if (next.tag != .l_brace) {
for (0..idx - 1) |_| _ = tokenizer.next();
break :tag .{
.tag = .{
.text = .{
.style = .{},
},
},
.loc = loc,
.parent = last_loc,
};
} else {
var ref = next.loc;
idx += 1;
next = tokenizer.peek(idx);
// TODO this should use a loop (i.e. the link may contain *underscore*s, *minus*, etc.)
if (next.tag != .text) {
loc.end = next.loc.end;
continue :tag .invalid;
} else {
idx += 1;
next = tokenizer.peek(idx);
// this would be the end of the link! (for the while loop)
if (next.tag == .r_brace) {
loc.end = next.loc.end;
ref.end = next.loc.end;
for (0..idx) |_| _ = tokenizer.next();
break :tag .{
.tag = .{
.link = .{
.name = name,
.ref = ref,
},
},
.loc = loc,
.parent = last_loc,
};
}
}
}
}
break :tag .{
.tag = .{
.text = .{
.style = .{},
},
},
.loc = loc,
.parent = last_loc,
};
}
},
.l_brace => break :tag .{
.tag = .{
.text = .{
.style = .{},
},
},
.loc = token.loc,
.parent = last_loc,
},
.r_bracket => break :tag .{
.tag = .{
.text = .{
.style = .{},
},
},
.loc = token.loc,
.parent = last_loc,
},
.r_brace => break :tag .{
.tag = .{
.text = .{
.style = .{},
},
},
.loc = token.loc,
.parent = last_loc,
},
.quote => break :tag .{
.tag = .{
.quote = .{},
},
.loc = token.loc,
.parent = last_loc,
},
.indentation => {
// TODO currently not even emitted by the lexer
continue;
},
.separator => break :tag .{
.tag = .separator,
.loc = token.loc,
.parent = last_loc,
},
.eof => return ast,
.invalid => {
idx += 1;
@@ -281,7 +327,7 @@ fn parse(allocator: Allocator, content: [:0]const u8) !Ast {
try ast.addNode(allocator, node);
// TODO improve the parent node's location information!
switch (node.tag) {
.header, .link, .list, .table, .block => last_loc = node.loc,
.header, .list => last_loc = node.loc,
else => {},
}
}
@@ -289,16 +335,18 @@ fn parse(allocator: Allocator, content: [:0]const u8) !Ast {
return ast;
}
pub fn parse_reader(allocator: Allocator, reader: Reader) !Ast {
const contents = try reader.readAllAlloc(allocator, std.math.maxInt(usize));
defer allocator.free(contents);
return parse(allocator, contents[0..contents.len :0]);
pub fn parse(allocator: Allocator, reader: *Reader) !Ast {
const buffer = try reader.allocRemaining(allocator, .unlimited);
defer allocator.free(buffer);
// copy once more to gain the 0 sentinel `parse_sentinel_slice` expects
const contents = try allocator.dupeZ(u8, buffer);
defer allocator.free(contents);
return parse_sentinel_slice(allocator, contents);
}
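A usage sketch for the new reader-based entry point, relying on the imports just below; `std.Io.Reader.fixed` is assumed here as a convenient in-memory reader, but any `*Reader` should work:

```zig
test "parse from an in-memory reader (sketch)" {
    var reader: Reader = .fixed("# Heading 1\n\nSome paragraph.\n");
    var ast = try parse(testing.allocator, &reader);
    defer ast.deinit(testing.allocator);
}
```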
const std = @import("std");
const testing = std.testing;
const Allocator = std.mem.Allocator;
const Reader = std.io.AnyReader;
const Reader = std.Io.Reader;
const lexer = @import("lexer.zig");
const Tokenizer = lexer.Tokenizer;
const Location = lexer.Token.Location;
@@ -306,71 +354,114 @@ const Ast = @import("Ast.zig");
test "Example note with code snippets" {
const content: [:0]const u8 =
\\:code:zig:
\\:test:
\\:test:another:
\\#book #fleeting
\\
\\= Conditional Code <conditional-code>
\\Controlling not just the control flow of the code, but also which parts of the code base are actually compiled and used when shipping the application is very crucial and often done via conditionally enabling / disabling code. They are usually controlled via _feature toggles_ and can be implemented in zig via [:ly9j.n|comptime] (pre-processor statements in C/C++, etc.).
\\---
\\# Book: Building a Second Brain by Tiago Forte
\\
\\[:ly9j.n@comptime] even allows mixing build and runtime checks, see the following example:
\\> [!tldr]
\\> Overall summary of the book contents and the smaller summaries below.
\\
\\##zig
\\ fn myFunction() void {
\\ if (hasFeature()) {
\\ // Feature-specific code
\\ } else {
\\ // Default code
\\ }
\\ }
\\## Partial Summaries
\\
\\ inline fn hasFeature() bool {
\\ return (comptime comptimeCheck()) and runtimeCheck();
\\ }
\\##
\\### Part One: The Foundation
\\
\\Both the [:g0ic.n@inline] and [:ly9j.n@comptime] keywords are required, such that the `hasFeature` function call in `myFunction` will be [:msev.n|correctly] evaluated during build-time.
\\> Your mind is for having ideas, not holding them. [@4]
\\
\\Most commonly such conditional code is used to provide _platform specific_ implementations:
\\#### What is a Second Brain?
\\
\\##zig
\\ const builtin = @import("builtin");
\\A digital [commonplace book](https://en.wikipedia.org/wiki/Commonplace_book) is what I call a _Second Brain_. It describes the combination of a study notebook, a personal [journal](0lyw), and a [sketchbook](7we8) for new ideas. This dynamic tool can be used to take notes for studying, organize projects for work and private goals, help organizing your household, keep track of tasks, etc. Essentially it should help you lift the burden of having to remember everything and build a system that allows you (and your brain) to relax by [Externalizing Knowledge](fvhx).
\\
\\ fn myFunction() void {
\\ if (builtin.os.tag == .macos) {
\\ // This code will only be included if the target OS is macOS.
\\ return;
\\ }
\\> However you decide to use it, your Second Brain is a private knowledge collection designed to serve a lifetime of learning and growth, not just a single use case.
\\
\\ // This code will be included for all other operating systems.
\\ }
\\##
\\In the modern world the amount of content we consume is growing ever faster, but our brains cannot keep up. We end up spending too much time looking for misplaced notes, items or files; consume up to about 34 GB of information a day, robbing us of our attention (doom-scrolling, etc.); etc. Instead of spending our time and energy on consumption we should spend more time actually doing what our brains are good at: inventing new things, crafting stories, recognizing patterns, following our intuition, collaborating with others, investigating new subjects, making plans, testing theories, ...
\\
\\##fn [/https://mitchellh.com/writing/zig-comptime-conditional-disable|Conditionally Disabling Code with comptime in Zig - Mitchell Hashimoto]## <fn-1>
\\> However, there's a catch: every change in how we use technology also requires a change in how we think. To properly take advantage of the power of a Second Brain, we need a new relationship with information, to technology, and even to ourselves.
\\
\\We should shift towards a more patient, thoughtful approach to information that favors [rereading](3g3j), reformulating, and working through the implications of ideas over time, to take full advantage of the ever-growing amount of content we consume. This leads to more civil discussions about important topics (maybe even with just yourself) and could preserve our mental health and heal splintered attention. This also includes reducing content consumption and interacting with the contents in a more focused and controlled way.
\\
\\So what do you write in your digital commonplace book (aka your Second Brain)? This differs completely from what we are used to from school, since in the professional world:
\\
\\ - It's not at all clear what you should be taking notes on.
\\ - No one tells you when or how your notes will be used.
\\ - The "test" can come at any time and in any form.
\\ - You're allowed to reference your notes at any time, provided you took them in the first place.
\\ - You are expected to take action on your notes, not just regurgitate them.
\\
\\This results in notes becoming [Building-Block of Knowledge](h8pi), discrete units of information which stand on their own and have intrinsic value. They become even more useful (and provide more insight) when combined.
\\
\\> [..] a note could include a passage from a book or article that you were inspired by; a photo or image from the web with your annotations; or a bullet-point list of your meandering thoughts on a topic, among many other examples. A note could include a single quote from a film that really stuck with you, all the way to thousands of words you saved from an in-depth book.
\\
\\The important aspect is not the length or format, but rather the process of the information going through your [head and then back](mn0e) to the [note](8mdf) containing corresponding information and a unique perspective of your own stored outside of your head. With that you also generate _value_.
\\
\\#### How a Second Brain Works
\\
\\A second brain usually provides four capabilities to aid you in your everyday routines:
\\
\\ 1. Make ideas concrete
\\ 2. Reveal new associations between ideas
\\ > [..] creativity is about connecting ideas together, especially ideas that don't seem to be connected.
\\ 3. Incubate ideas over time
\\ 4. Sharpen your unique perspective
\\
\\For me personally the third aspect is where I retrieve the most benefit from my second brain. When you need to come up with something on the spot (i.e. brainstorm, [Mindmap](svru), etc.), what are the chances that your most innovative approaches, ideas and thoughts come to you right in that moment? We are heavily influenced by the [Recency Bias](anya).
\\
\\The last aspect describes a hint system for yourself if you find yourself out of creative juice or if you feel stuck with your creative pursuits. You most likely don't have enough material to work with (or not everything in your direct reach - maybe you don't remember it anymore or cannot find it); instead a second brain can organize these for you and show you clearly if you have enough materials to work with or if there are things missing.
\\
\\The [Paradox of Choice](17gq) shows that we tend to have a difficult time to decide what resource to use or what should be done when we have too many options to choose from.
\\
\\> The solution is to _keep only what resonates_ in a trusted place that you control, and leave the rest aside. [..] Don't make it an analytical decision, and don't worry about why exactly it resonates - just look inside for a feeling of pleasure, curiosity, wonder, or excitement, and let that be your signal for when it's time to capture a passage, an image, a quote, or a fact.
\\
\\A second brain has three main stages of progress (applies to every individual item not the entire thing):
\\
\\ 1. Remembering - _Memory Aid_:
\\ Use [digital notes](h8pi) to save facts and ideas that are otherwise difficult to recall, that may include takeaways from meetings, quotes from interviews, project details, etc.
\\ 2. Connecting - _Thinking Aid_:
\\ Evolve ideas from the memory aid to a thinking aid by elaborating the fact that you don't have to remember all the ideas and can focus purely on working with the ideas.
\\ 3. Creating - _Create new things_:
\\ With the collected knowledge through facts, ideas and resulted thinking about the contents you are able to create further concrete ideas and outcomes (maybe even worth sharing with others).
\\
\\Tiago Forte further then describes a four step #framework on how to work with a second brain:
\\
\\ 1. *C*apture: Keep what resonates
\\ 2. *O*rganize: Save for actionability
\\ 3. *D*istill: Find the essence
\\ 4. *E*xpress: Show your work
\\
\\When organizing I use [hashtags](gipf) to group by corresponding _subjects_. However, these are not broad _subjects_, but rather focus on given aspects of my [Core Questions](t7lc) I want to pursue for myself. They are actionable information as they help me to answer my personal questions and [goals](leur).
\\
\\The most important step however is the distillation of the notes down to their _essence_. Notes can have arbitrary length, but you only have so much time to re-read through your notes. It is up to you to summarize and reduce the contents to the heart and soul of what they are trying to communicate (and what they mean to you). This [simplicity leads to rememberability](lldi) and also shows that you understand a given item well enough to extract its _essence_ in as few words as possible.
\\
\\> Your notes will be useless if you can't decipher them in the future, or if they're so long that you don't even try.
\\
\\Similar to [comments in code](0ad3), [notes](8mdf) are meant to be read more often than written.
\\
\\### Part two: The Method
\\
\\---
\\1. [How to Read](vobk)
\\2. [Read faster](3f5i)
\\3. [Reading Technique](3g3j)
\\4. Getting Things Done by David Allen
\\5. [Commonplace Book | Wikipedia](https://en.wikipedia.org/wiki/Commonplace_book)
;
var ast = try parse(testing.allocator, content);
var ast = try parse_sentinel_slice(testing.allocator, content);
defer ast.deinit(testing.allocator);
var idx: usize = 0;
while (idx < ast.nodes.len) : (idx += 1) ast.nodes.get(idx).dump(content);
// access specific tags and then their associated node
for (ast.nodes.items(.tag)) |tag| switch (tag) {
.anchor => |anchor| {
std.debug.print("found anchor {any}\n", .{anchor});
const node = ast.nodes.get(anchor.idx);
std.debug.print("\tassociated node: {any}: ", .{node});
node.dump(content);
const parent = node.getParent(ast);
std.debug.print("\tassociated parent: {any}: ", .{parent});
parent.dump(content);
},
// .reference => |reference| {
// std.debug.print("found reference {any}\n", .{reference});
// const node = ast.nodes.get(reference.idx);
// std.debug.print("associated node: {any}: ", .{node});
// for (ast.nodes.items(.tag)) |tag| switch (tag) {
// .link => |link| {
// std.debug.print("found link {any}\n", .{link});
// const node = ast.nodes.get(link.idx);
// std.debug.print("\tassociated node: {any}: ", .{node});
// node.dump(content);
// const parent = node.getParent(ast);
// std.debug.print("\tassociated parent: {any}: ", .{parent});
// parent.dump(content);
// },
else => {},
};
// else => {},
// };
}

View File

@@ -1,9 +1,12 @@
///! Parser for nf file contents. This library provides a lexer and parser and
///! emits an AST of valid nf file contents. In case of invalid files,
///! corresponding errors are returned. For detailed error messages refer to
///! `errorMessage()`
///! Parser for smd file contents. This library provides a lexer and parser and
///! emits an AST (Abstract Syntax Tree) of valid smd file contents. In case of
///! invalid files, corresponding errors are returned.
///!
///! As the smd file format is based on a stripped down markdown format, markdown
///! files which comply with the smd file format limitations are expected to
///! work.
pub const Ast = @import("Ast.zig");
// pub const parser = @import("parser.zig");
pub const parser = @import("parser.zig");
pub const lexer = @import("lexer.zig");
test {