const std = @import("std");
const allocator = std.heap.page_allocator;
const Map = @import("./map.zig").Map;
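// Advent of Code 2019 day 18, part 2 (going by the repo path): the maze read
// from stdin is split into four quadrant maps around the centre; doors whose
// matching key is not in the same quadrant are removed, and the shortest walk
// of each quadrant is summed.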
pub fn main() !void {
var maps: [4]Map = undefined;
var j: usize = 0;
while (j < 4) : (j += 1) {
maps[j] = Map.init();
}
var r: usize = 0;
var first: []const u8 = undefined;
const inp = std.io.getStdIn().reader();
var buf: [20480]u8 = undefined;
while (try inp.readUntilDelimiterOrEof(&buf, '\n')) |line| {
if (r == 0) {
first = line;
}
if (r < 39) {
maps[0].parse(line[0..41]);
maps[1].parse(line[40..]);
} else if (r == 39) {
line[39] = '@';
line[40] = '#';
line[41] = '@';
maps[0].parse(line[0..41]);
maps[1].parse(line[40..]);
} else if (r == 41) {
line[39] = '@';
line[40] = '#';
line[41] = '@';
maps[2].parse(line[0..41]);
maps[3].parse(line[40..]);
} else if (r > 41) {
maps[2].parse(line[0..41]);
maps[3].parse(line[40..]);
} else {
maps[0].parse(first[0..41]);
maps[1].parse(first[40..]);
maps[2].parse(first[0..41]);
maps[3].parse(first[40..]);
}
r += 1;
}
var sum: usize = 0;
j = 0;
while (j < 4) : (j += 1) {
var mk = std.AutoHashMap(u8, void).init(allocator);
defer mk.deinit();
var ik = maps[j].keys.iterator();
while (ik.next()) |kv| {
const k = kv.value_ptr.*;
_ = mk.put(k, {}) catch unreachable;
std.debug.warn("MAP {}: mapping key {c}\n", .{ j, k });
}
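// Doors whose matching key lives in another quadrant are treated as already
// open: they are replaced with empty tiles before walking this quadrant.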
var id = maps[j].doors.iterator();
while (id.next()) |kv| {
const d = kv.value_ptr.*;
const k = d - 'A' + 'a';
std.debug.warn("MAP {}: checking door {c}\n", .{ j, d });
if (mk.contains(k)) continue;
const p = kv.key_ptr.*;
std.debug.warn("MAP {}: removing door {c} without key {c}\n", .{ j, d, k });
maps[j].set_pos(p, Map.Tile.Empty);
}
// maps[j].show();
maps[j].walk_map();
const dist = maps[j].walk_graph();
sum += dist;
}
std.debug.warn("TOTAL {}\n", .{sum});
j = 0;
while (j < 4) : (j += 1) {
maps[j].deinit();
}
} | 2019/p18/p18b.zig |
const std = @import("std");
const assert = std.debug.assert;
/// Provides a FIFO (First-In First-Out) queue. `push_back` and `pop_front` are O(1) amortized.
pub fn ArrayDeque(comptime T: type) type {
return struct {
alloc: *std.mem.Allocator,
buffer: []T,
head: usize,
tail: usize,
pub fn init(alloc: *std.mem.Allocator) @This() {
return .{
.alloc = alloc,
.buffer = &[0]T{},
.head = 0,
.tail = 0,
};
}
pub fn deinit(self: *@This()) void {
self.alloc.free(self.buffer);
}
pub fn push_back(self: *@This(), data: T) !void {
try self.ensureCapacity(self.len() + 1);
var next = self.head + 1;
if (next >= self.buffer.len) {
next = 0;
}
if (next == self.tail) {
return error.BufferOverflow;
}
self.buffer[self.head] = data;
self.head = next;
}
pub fn pop_front(self: *@This()) ?T {
if (self.head == self.tail) {
return null;
}
var next = self.tail + 1;
if (next >= self.buffer.len) {
next = 0;
}
defer self.tail = next;
return self.buffer[self.tail];
}
pub fn discard_front(self: *@This(), amount_wanted: usize) void {
if (self.head == self.tail) {
return;
}
const amount = std.math.min(self.len(), amount_wanted);
var next = self.tail + amount;
if (next >= self.buffer.len) {
next -= self.buffer.len;
}
self.tail = next;
}
pub fn idx(self: *const @This(), i: usize) ?T {
// Only indices inside the currently used region are valid.
const used = if (self.head >= self.tail) self.head - self.tail else self.buffer.len - self.tail + self.head;
if (i >= used) {
return null;
}
return self.buffer[(self.tail + i) % self.buffer.len];
}
pub fn idxMut(self: *const @This(), i: usize) ?*T {
const used = if (self.head >= self.tail) self.head - self.tail else self.buffer.len - self.tail + self.head;
if (i >= used) {
return null;
}
return &self.buffer[(self.tail + i) % self.buffer.len];
}
pub fn len(self: *@This()) usize {
if (self.head == self.tail) {
return 0;
} else if (self.head > self.tail) {
return self.head - self.tail;
} else {
return self.buffer.len - self.tail + self.head;
}
}
pub fn capacity(self: *@This()) usize {
if (self.buffer.len == 0) {
return 0;
} else {
return self.buffer.len - 1;
}
}
pub fn ensureCapacity(self: *@This(), new_capacity: usize) !void {
var better_capacity = self.capacity();
if (better_capacity >= new_capacity) return;
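// Grow by roughly 1.5x plus a small constant until the requested capacity fits.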
while (true) {
better_capacity += better_capacity / 2 + 8;
if (better_capacity >= new_capacity) break;
}
if (self.head < self.tail) {
// The buffer is split, we need to copy each half to the new buffer
const old_buffer = self.buffer;
defer self.alloc.free(old_buffer);
const tail = self.tail;
const head = self.head;
const end = old_buffer.len;
self.buffer = try self.alloc.alloc(T, better_capacity);
std.mem.copy(T, self.buffer[0 .. end - tail], old_buffer[tail..end]);
std.mem.copy(T, self.buffer[end - tail ..], old_buffer[0..head]);
self.head = head + end - tail;
self.tail = 0;
} else {
self.buffer = try self.alloc.realloc(self.buffer, better_capacity);
}
}
};
}
test "pop_front() gives back results in FIFO order" {
var ring = ArrayDeque(i32).init(std.testing.allocator);
defer ring.deinit();
try ring.push_back(1);
try ring.push_back(2);
try ring.push_back(3);
assert(ring.head == 3);
assert(ring.idx(0).? == 1);
assert(ring.idx(1).? == 2);
assert(ring.idx(2).? == 3);
assert(ring.pop_front().? == 1);
assert(ring.pop_front().? == 2);
assert(ring.pop_front().? == 3);
assert(ring.pop_front() == null);
}
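// A minimal extra sanity check (a sketch, not part of the original tests):
// discard_front clamps the requested amount to the current length.
test "discard_front clamps to the current length" {
var ring = ArrayDeque(i32).init(std.testing.allocator);
defer ring.deinit();
try ring.push_back(1);
try ring.push_back(2);
try ring.push_back(3);
// Asking for more elements than are stored simply empties the deque.
ring.discard_front(10);
assert(ring.len() == 0);
assert(ring.pop_front() == null);
}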
test "buffer wraps around" {
var ring = ArrayDeque(i32).init(std.testing.allocator);
defer ring.deinit();
try ring.ensureCapacity(1);
const amt = ring.capacity() - 1;
var i: i32 = 0;
while (i < amt) : (i += 1) {
try ring.push_back(i);
}
i = 0;
while (i < amt) : (i += 1) {
assert(std.meta.eql(ring.pop_front(), i));
}
i = 0;
while (i < amt) : (i += 1) {
try ring.push_back(i);
}
assert(ring.head == (2 * amt % ring.capacity()) - 1);
i = 0;
while (i < amt) : (i += 1) {
std.debug.assert(std.meta.eql(ring.pop_front(), i));
}
assert(ring.pop_front() == null);
}
test "dynamic allocation does not mess up split array" {
var ring = ArrayDeque(usize).init(std.testing.allocator);
defer ring.deinit();
try ring.ensureCapacity(1);
const amt = ring.capacity() + 1;
// Set head and tail to middle of array
ring.head = @divFloor(ring.capacity(), 2);
ring.tail = @divFloor(ring.capacity(), 2);
var i: usize = 0;
while (i < amt) : (i += 1) {
try ring.push_back(i);
}
assert(ring.len() == amt);
i = 0;
while (i < amt) : (i += 1) {
assert(std.meta.eql(ring.idx(i), i));
}
} | src/array_deque.zig |
const length_shift = 22;
const offset_mask = (1 << length_shift) - 1; // 4_194_303
const literal_type = 0 << 30; // 0
pub const match_type = 1 << 30; // 1_073_741_824
// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
// is length_codes[length - MIN_MATCH_LENGTH]
var length_codes = [_]u32{
0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 28,
};
var offset_codes = [_]u32{
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
};
pub const Token = u32;
// Convert a literal into a literal token.
pub fn literalToken(lit: u32) Token {
return literal_type + lit;
}
// Convert a < xlength, xoffset > pair into a match token.
pub fn matchToken(xlength: u32, xoffset: u32) Token {
return match_type + (xlength << length_shift) + xoffset;
}
// Returns the literal of a literal token
pub fn literal(t: Token) u32 {
return @intCast(u32, t - literal_type);
}
// Returns the extra offset of a match token
pub fn offset(t: Token) u32 {
return @intCast(u32, t) & offset_mask;
}
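// Returns the length of a match token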
pub fn length(t: Token) u32 {
return @intCast(u32, (t - match_type) >> length_shift);
}
pub fn lengthCode(len: u32) u32 {
return length_codes[len];
}
// Returns the offset code corresponding to a specific offset
pub fn offsetCode(off: u32) u32 {
if (off < @intCast(u32, offset_codes.len)) {
return offset_codes[off];
}
if (off >> 7 < @intCast(u32, offset_codes.len)) {
return offset_codes[off >> 7] + 14;
}
return offset_codes[off >> 14] + 28;
}
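// A minimal round-trip sketch of the packing above: a match token keeps its
// offset in the low length_shift bits and its length above them; a literal
// token stores the literal value directly.
test "token packing round-trips literal, length and offset" {
const std = @import("std");
const expect = std.testing.expect;
try expect(literal(literalToken('A')) == 'A');
const t = matchToken(12, 300);
try expect(length(t) == 12);
try expect(offset(t) == 300);
}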
test {
const std = @import("std");
const expect = std.testing.expect;
try expect(matchToken(555, 555) == 3_401_581_099);
} | lib/std/compress/deflate/token.zig |
pub const TestCase = struct {
start_line: u32 = 0,
end_line: u32 = 0,
example: u32,
sec: []const u8,
html: []const u8,
markdown: []const u8,
};
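// Each entry below mirrors one example from the CommonMark spec: `markdown` is
// the input document, `html` the expected rendering, and `sec` the spec section.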
pub const all_cases = [_]TestCase{
TestCase{
.example = 1,
.sec = "Tabs",
.html = "<pre><code>foo\tbaz\t\tbim\n</code></pre>\n",
.markdown = "\tfoo\tbaz\t\tbim\n",
},
TestCase{
.example = 2,
.sec = "Tabs",
.html = "<pre><code>foo\tbaz\t\tbim\n</code></pre>\n",
.markdown = " \tfoo\tbaz\t\tbim\n",
},
TestCase{
.example = 3,
.sec = "Tabs",
.html = "<pre><code>a\ta\nὐ\ta\n</code></pre>\n",
.markdown = " a\ta\n ὐ\ta\n",
},
TestCase{
.example = 4,
.sec = "Tabs",
.html = "<ul>\n<li>\n<p>foo</p>\n<p>bar</p>\n</li>\n</ul>\n",
.markdown = " - foo\n\n\tbar\n",
},
TestCase{
.example = 5,
.sec = "Tabs",
.html = "<ul>\n<li>\n<p>foo</p>\n<pre><code> bar\n</code></pre>\n</li>\n</ul>\n",
.markdown = "- foo\n\n\t\tbar\n",
},
TestCase{
.example = 6,
.sec = "Tabs",
.html = "<blockquote>\n<pre><code> foo\n</code></pre>\n</blockquote>\n",
.markdown = ">\t\tfoo\n",
},
TestCase{
.example = 7,
.sec = "Tabs",
.html = "<ul>\n<li>\n<pre><code> foo\n</code></pre>\n</li>\n</ul>\n",
.markdown = "-\t\tfoo\n",
},
TestCase{
.example = 8,
.sec = "Tabs",
.html = "<pre><code>foo\nbar\n</code></pre>\n",
.markdown = " foo\n\tbar\n",
},
TestCase{
.example = 9,
.sec = "Tabs",
.html = "<ul>\n<li>foo\n<ul>\n<li>bar\n<ul>\n<li>baz</li>\n</ul>\n</li>\n</ul>\n</li>\n</ul>\n",
.markdown = " - foo\n - bar\n\t - baz\n",
},
TestCase{
.example = 10,
.sec = "Tabs",
.html = "<h1>Foo</h1>\n",
.markdown = "#\tFoo\n",
},
TestCase{
.example = 11,
.sec = "Tabs",
.html = "<hr />\n",
.markdown = "*\t*\t*\t\n",
},
TestCase{
.example = 12,
.sec = "Precedence",
.html = "<ul>\n<li>`one</li>\n<li>two`</li>\n</ul>\n",
.markdown = "- `one\n- two`\n",
},
TestCase{
.example = 13,
.sec = "Thematic breaks",
.html = "<hr />\n<hr />\n<hr />\n",
.markdown = "***\n---\n___\n",
},
TestCase{
.example = 14,
.sec = "Thematic breaks",
.html = "<p>+++</p>\n",
.markdown = "+++\n",
},
TestCase{
.example = 15,
.sec = "Thematic breaks",
.html = "<p>===</p>\n",
.markdown = "===\n",
},
TestCase{
.example = 16,
.sec = "Thematic breaks",
.html = "<p>--\n**\n__</p>\n",
.markdown = "--\n**\n__\n",
},
TestCase{
.example = 17,
.sec = "Thematic breaks",
.html = "<hr />\n<hr />\n<hr />\n",
.markdown = " ***\n ***\n ***\n",
},
TestCase{
.example = 18,
.sec = "Thematic breaks",
.html = "<pre><code>***\n</code></pre>\n",
.markdown = " ***\n",
},
TestCase{
.example = 19,
.sec = "Thematic breaks",
.html = "<p>Foo\n***</p>\n",
.markdown = "Foo\n ***\n",
},
TestCase{
.example = 20,
.sec = "Thematic breaks",
.html = "<hr />\n",
.markdown = "_____________________________________\n",
},
TestCase{
.example = 21,
.sec = "Thematic breaks",
.html = "<hr />\n",
.markdown = " - - -\n",
},
TestCase{
.example = 22,
.sec = "Thematic breaks",
.html = "<hr />\n",
.markdown = " ** * ** * ** * **\n",
},
TestCase{
.example = 23,
.sec = "Thematic breaks",
.html = "<hr />\n",
.markdown = "- - - -\n",
},
TestCase{
.example = 24,
.sec = "Thematic breaks",
.html = "<hr />\n",
.markdown = "- - - - \n",
},
TestCase{
.example = 25,
.sec = "Thematic breaks",
.html = "<p>_ _ _ _ a</p>\n<p>a------</p>\n<p>---a---</p>\n",
.markdown = "_ _ _ _ a\n\na------\n\n---a---\n",
},
TestCase{
.example = 26,
.sec = "Thematic breaks",
.html = "<p><em>-</em></p>\n",
.markdown = " *-*\n",
},
TestCase{
.example = 27,
.sec = "Thematic breaks",
.html = "<ul>\n<li>foo</li>\n</ul>\n<hr />\n<ul>\n<li>bar</li>\n</ul>\n",
.markdown = "- foo\n***\n- bar\n",
},
TestCase{
.example = 28,
.sec = "Thematic breaks",
.html = "<p>Foo</p>\n<hr />\n<p>bar</p>\n",
.markdown = "Foo\n***\nbar\n",
},
TestCase{
.example = 29,
.sec = "Thematic breaks",
.html = "<h2>Foo</h2>\n<p>bar</p>\n",
.markdown = "Foo\n---\nbar\n",
},
TestCase{
.example = 30,
.sec = "Thematic breaks",
.html = "<ul>\n<li>Foo</li>\n</ul>\n<hr />\n<ul>\n<li>Bar</li>\n</ul>\n",
.markdown = "* Foo\n* * *\n* Bar\n",
},
TestCase{
.example = 31,
.sec = "Thematic breaks",
.html = "<ul>\n<li>Foo</li>\n<li>\n<hr />\n</li>\n</ul>\n",
.markdown = "- Foo\n- * * *\n",
},
TestCase{
.example = 32,
.sec = "ATX headings",
.html = "<h1>foo</h1>\n<h2>foo</h2>\n<h3>foo</h3>\n<h4>foo</h4>\n<h5>foo</h5>\n<h6>foo</h6>\n",
.markdown = "# foo\n## foo\n### foo\n#### foo\n##### foo\n###### foo\n",
},
TestCase{
.example = 33,
.sec = "ATX headings",
.html = "<p>####### foo</p>\n",
.markdown = "####### foo\n",
},
TestCase{
.example = 34,
.sec = "ATX headings",
.html = "<p>#5 bolt</p>\n<p>#hashtag</p>\n",
.markdown = "#5 bolt\n\n#hashtag\n",
},
TestCase{
.example = 35,
.sec = "ATX headings",
.html = "<p>## foo</p>\n",
.markdown = "\\## foo\n",
},
TestCase{
.example = 36,
.sec = "ATX headings",
.html = "<h1>foo <em>bar</em> *baz*</h1>\n",
.markdown = "# foo *bar* \\*baz\\*\n",
},
TestCase{
.example = 37,
.sec = "ATX headings",
.html = "<h1>foo</h1>\n",
.markdown = "# foo \n",
},
TestCase{
.example = 38,
.sec = "ATX headings",
.html = "<h3>foo</h3>\n<h2>foo</h2>\n<h1>foo</h1>\n",
.markdown = " ### foo\n ## foo\n # foo\n",
},
TestCase{
.example = 39,
.sec = "ATX headings",
.html = "<pre><code># foo\n</code></pre>\n",
.markdown = " # foo\n",
},
TestCase{
.example = 40,
.sec = "ATX headings",
.html = "<p>foo\n# bar</p>\n",
.markdown = "foo\n # bar\n",
},
TestCase{
.example = 41,
.sec = "ATX headings",
.html = "<h2>foo</h2>\n<h3>bar</h3>\n",
.markdown = "## foo ##\n ### bar ###\n",
},
TestCase{
.example = 42,
.sec = "ATX headings",
.html = "<h1>foo</h1>\n<h5>foo</h5>\n",
.markdown = "# foo ##################################\n##### foo ##\n",
},
TestCase{
.example = 43,
.sec = "ATX headings",
.html = "<h3>foo</h3>\n",
.markdown = "### foo ### \n",
},
TestCase{
.example = 44,
.sec = "ATX headings",
.html = "<h3>foo ### b</h3>\n",
.markdown = "### foo ### b\n",
},
TestCase{
.example = 45,
.sec = "ATX headings",
.html = "<h1>foo#</h1>\n",
.markdown = "# foo#\n",
},
TestCase{
.example = 46,
.sec = "ATX headings",
.html = "<h3>foo ###</h3>\n<h2>foo ###</h2>\n<h1>foo #</h1>\n",
.markdown = "### foo \\###\n## foo #\\##\n# foo \\#\n",
},
TestCase{
.example = 47,
.sec = "ATX headings",
.html = "<hr />\n<h2>foo</h2>\n<hr />\n",
.markdown = "****\n## foo\n****\n",
},
TestCase{
.example = 48,
.sec = "ATX headings",
.html = "<p>Foo bar</p>\n<h1>baz</h1>\n<p>Bar foo</p>\n",
.markdown = "Foo bar\n# baz\nBar foo\n",
},
TestCase{
.example = 49,
.sec = "ATX headings",
.html = "<h2></h2>\n<h1></h1>\n<h3></h3>\n",
.markdown = "## \n#\n### ###\n",
},
TestCase{
.example = 50,
.sec = "Setext headings",
.html = "<h1>Foo <em>bar</em></h1>\n<h2>Foo <em>bar</em></h2>\n",
.markdown = "Foo *bar*\n=========\n\nFoo *bar*\n---------\n",
},
TestCase{
.example = 51,
.sec = "Setext headings",
.html = "<h1>Foo <em>bar\nbaz</em></h1>\n",
.markdown = "Foo *bar\nbaz*\n====\n",
},
TestCase{
.example = 52,
.sec = "Setext headings",
.html = "<h2>Foo</h2>\n<h1>Foo</h1>\n",
.markdown = "Foo\n-------------------------\n\nFoo\n=\n",
},
TestCase{
.example = 53,
.sec = "Setext headings",
.html = "<h2>Foo</h2>\n<h2>Foo</h2>\n<h1>Foo</h1>\n",
.markdown = " Foo\n---\n\n Foo\n-----\n\n Foo\n ===\n",
},
TestCase{
.example = 54,
.sec = "Setext headings",
.html = "<pre><code>Foo\n---\n\nFoo\n</code></pre>\n<hr />\n",
.markdown = " Foo\n ---\n\n Foo\n---\n",
},
TestCase{
.example = 55,
.sec = "Setext headings",
.html = "<h2>Foo</h2>\n",
.markdown = "Foo\n ---- \n",
},
TestCase{
.example = 56,
.sec = "Setext headings",
.html = "<p>Foo\n---</p>\n",
.markdown = "Foo\n ---\n",
},
TestCase{
.example = 57,
.sec = "Setext headings",
.html = "<p>Foo\n= =</p>\n<p>Foo</p>\n<hr />\n",
.markdown = "Foo\n= =\n\nFoo\n--- -\n",
},
TestCase{
.example = 58,
.sec = "Setext headings",
.html = "<h2>Foo</h2>\n",
.markdown = "Foo \n-----\n",
},
TestCase{
.example = 59,
.sec = "Setext headings",
.html = "<h2>Foo\\</h2>\n",
.markdown = "Foo\\\n----\n",
},
TestCase{
.example = 60,
.sec = "Setext headings",
.html = "<h2>`Foo</h2>\n<p>`</p>\n<h2>&lt;a title=&quot;a lot</h2>\n<p>of dashes&quot;/&gt;</p>\n",
.markdown = "`Foo\n----\n`\n\n<a title=\"a lot\n---\nof dashes\"/>\n",
},
TestCase{
.example = 61,
.sec = "Setext headings",
.html = "<blockquote>\n<p>Foo</p>\n</blockquote>\n<hr />\n",
.markdown = "> Foo\n---\n",
},
TestCase{
.example = 62,
.sec = "Setext headings",
.html = "<blockquote>\n<p>foo\nbar\n===</p>\n</blockquote>\n",
.markdown = "> foo\nbar\n===\n",
},
TestCase{
.example = 63,
.sec = "Setext headings",
.html = "<ul>\n<li>Foo</li>\n</ul>\n<hr />\n",
.markdown = "- Foo\n---\n",
},
TestCase{
.example = 64,
.sec = "Setext headings",
.html = "<h2>Foo\nBar</h2>\n",
.markdown = "Foo\nBar\n---\n",
},
TestCase{
.example = 65,
.sec = "Setext headings",
.html = "<hr />\n<h2>Foo</h2>\n<h2>Bar</h2>\n<p>Baz</p>\n",
.markdown = "---\nFoo\n---\nBar\n---\nBaz\n",
},
TestCase{
.example = 66,
.sec = "Setext headings",
.html = "<p>====</p>\n",
.markdown = "\n====\n",
},
TestCase{
.example = 67,
.sec = "Setext headings",
.html = "<hr />\n<hr />\n",
.markdown = "---\n---\n",
},
TestCase{
.example = 68,
.sec = "Setext headings",
.html = "<ul>\n<li>foo</li>\n</ul>\n<hr />\n",
.markdown = "- foo\n-----\n",
},
TestCase{
.example = 69,
.sec = "Setext headings",
.html = "<pre><code>foo\n</code></pre>\n<hr />\n",
.markdown = " foo\n---\n",
},
TestCase{
.example = 70,
.sec = "Setext headings",
.html = "<blockquote>\n<p>foo</p>\n</blockquote>\n<hr />\n",
.markdown = "> foo\n-----\n",
},
TestCase{
.example = 71,
.sec = "Setext headings",
.html = "<h2>> foo</h2>\n",
.markdown = "\\> foo\n------\n",
},
TestCase{
.example = 72,
.sec = "Setext headings",
.html = "<p>Foo</p>\n<h2>bar</h2>\n<p>baz</p>\n",
.markdown = "Foo\n\nbar\n---\nbaz\n",
},
TestCase{
.example = 73,
.sec = "Setext headings",
.html = "<p>Foo\nbar</p>\n<hr />\n<p>baz</p>\n",
.markdown = "Foo\nbar\n\n---\n\nbaz\n",
},
TestCase{
.example = 74,
.sec = "Setext headings",
.html = "<p>Foo\nbar</p>\n<hr />\n<p>baz</p>\n",
.markdown = "Foo\nbar\n* * *\nbaz\n",
},
TestCase{
.example = 75,
.sec = "Setext headings",
.html = "<p>Foo\nbar\n---\nbaz</p>\n",
.markdown = "Foo\nbar\n\\---\nbaz\n",
},
TestCase{
.example = 76,
.sec = "Indented code blocks",
.html = "<pre><code>a simple\n indented code block\n</code></pre>\n",
.markdown = " a simple\n indented code block\n",
},
TestCase{
.example = 77,
.sec = "Indented code blocks",
.html = "<ul>\n<li>\n<p>foo</p>\n<p>bar</p>\n</li>\n</ul>\n",
.markdown = " - foo\n\n bar\n",
},
TestCase{
.example = 78,
.sec = "Indented code blocks",
.html = "<ol>\n<li>\n<p>foo</p>\n<ul>\n<li>bar</li>\n</ul>\n</li>\n</ol>\n",
.markdown = "1. foo\n\n - bar\n",
},
TestCase{
.example = 79,
.sec = "Indented code blocks",
.html = "<pre><code>&lt;a/&gt;\n*hi*\n\n- one\n</code></pre>\n",
.markdown = " <a/>\n *hi*\n\n - one\n",
},
TestCase{
.example = 80,
.sec = "Indented code blocks",
.html = "<pre><code>chunk1\n\nchunk2\n\n\n\nchunk3\n</code></pre>\n",
.markdown = " chunk1\n\n chunk2\n \n \n \n chunk3\n",
},
TestCase{
.example = 81,
.sec = "Indented code blocks",
.html = "<pre><code>chunk1\n \n chunk2\n</code></pre>\n",
.markdown = " chunk1\n \n chunk2\n",
},
TestCase{
.example = 82,
.sec = "Indented code blocks",
.html = "<p>Foo\nbar</p>\n",
.markdown = "Foo\n bar\n\n",
},
TestCase{
.example = 83,
.sec = "Indented code blocks",
.html = "<pre><code>foo\n</code></pre>\n<p>bar</p>\n",
.markdown = " foo\nbar\n",
},
TestCase{
.example = 84,
.sec = "Indented code blocks",
.html = "<h1>Heading</h1>\n<pre><code>foo\n</code></pre>\n<h2>Heading</h2>\n<pre><code>foo\n</code></pre>\n<hr />\n",
.markdown = "# Heading\n foo\nHeading\n------\n foo\n----\n",
},
TestCase{
.example = 85,
.sec = "Indented code blocks",
.html = "<pre><code> foo\nbar\n</code></pre>\n",
.markdown = " foo\n bar\n",
},
TestCase{
.example = 86,
.sec = "Indented code blocks",
.html = "<pre><code>foo\n</code></pre>\n",
.markdown = "\n \n foo\n \n\n",
},
TestCase{
.example = 87,
.sec = "Indented code blocks",
.html = "<pre><code>foo \n</code></pre>\n",
.markdown = " foo \n",
},
TestCase{
.example = 88,
.sec = "Fenced code blocks",
.html = "<pre><code>&lt;\n &gt;\n</code></pre>\n",
.markdown = "```\n<\n >\n```\n",
},
TestCase{
.example = 89,
.sec = "Fenced code blocks",
.html = "<pre><code>&lt;\n &gt;\n</code></pre>\n",
.markdown = "~~~\n<\n >\n~~~\n",
},
TestCase{
.example = 90,
.sec = "Fenced code blocks",
.html = "<p><code>foo</code></p>\n",
.markdown = "``\nfoo\n``\n",
},
TestCase{
.example = 91,
.sec = "Fenced code blocks",
.html = "<pre><code>aaa\n~~~\n</code></pre>\n",
.markdown = "```\naaa\n~~~\n```\n",
},
TestCase{
.example = 92,
.sec = "Fenced code blocks",
.html = "<pre><code>aaa\n```\n</code></pre>\n",
.markdown = "~~~\naaa\n```\n~~~\n",
},
TestCase{
.example = 93,
.sec = "Fenced code blocks",
.html = "<pre><code>aaa\n```\n</code></pre>\n",
.markdown = "````\naaa\n```\n``````\n",
},
TestCase{
.example = 94,
.sec = "Fenced code blocks",
.html = "<pre><code>aaa\n~~~\n</code></pre>\n",
.markdown = "~~~~\naaa\n~~~\n~~~~\n",
},
TestCase{
.example = 95,
.sec = "Fenced code blocks",
.html = "<pre><code></code></pre>\n",
.markdown = "```\n",
},
TestCase{
.example = 96,
.sec = "Fenced code blocks",
.html = "<pre><code>\n```\naaa\n</code></pre>\n",
.markdown = "`````\n\n```\naaa\n",
},
TestCase{
.example = 97,
.sec = "Fenced code blocks",
.html = "<blockquote>\n<pre><code>aaa\n</code></pre>\n</blockquote>\n<p>bbb</p>\n",
.markdown = "> ```\n> aaa\n\nbbb\n",
},
TestCase{
.example = 98,
.sec = "Fenced code blocks",
.html = "<pre><code>\n \n</code></pre>\n",
.markdown = "```\n\n \n```\n",
},
TestCase{
.example = 99,
.sec = "Fenced code blocks",
.html = "<pre><code></code></pre>\n",
.markdown = "```\n```\n",
},
TestCase{
.example = 100,
.sec = "Fenced code blocks",
.html = "<pre><code>aaa\naaa\n</code></pre>\n",
.markdown = " ```\n aaa\naaa\n```\n",
},
TestCase{
.example = 101,
.sec = "Fenced code blocks",
.html = "<pre><code>aaa\naaa\naaa\n</code></pre>\n",
.markdown = " ```\naaa\n aaa\naaa\n ```\n",
},
TestCase{
.example = 102,
.sec = "Fenced code blocks",
.html = "<pre><code>aaa\n aaa\naaa\n</code></pre>\n",
.markdown = " ```\n aaa\n aaa\n aaa\n ```\n",
},
TestCase{
.example = 103,
.sec = "Fenced code blocks",
.html = "<pre><code>```\naaa\n```\n</code></pre>\n",
.markdown = " ```\n aaa\n ```\n",
},
TestCase{
.example = 104,
.sec = "Fenced code blocks",
.html = "<pre><code>aaa\n</code></pre>\n",
.markdown = "```\naaa\n ```\n",
},
TestCase{
.example = 105,
.sec = "Fenced code blocks",
.html = "<pre><code>aaa\n</code></pre>\n",
.markdown = " ```\naaa\n ```\n",
},
TestCase{
.example = 106,
.sec = "Fenced code blocks",
.html = "<pre><code>aaa\n ```\n</code></pre>\n",
.markdown = "```\naaa\n ```\n",
},
TestCase{
.example = 107,
.sec = "Fenced code blocks",
.html = "<p><code></code>\naaa</p>\n",
.markdown = "``` ```\naaa\n",
},
TestCase{
.example = 108,
.sec = "Fenced code blocks",
.html = "<pre><code>aaa\n~~~ ~~\n</code></pre>\n",
.markdown = "~~~~~~\naaa\n~~~ ~~\n",
},
TestCase{
.example = 109,
.sec = "Fenced code blocks",
.html = "<p>foo</p>\n<pre><code>bar\n</code></pre>\n<p>baz</p>\n",
.markdown = "foo\n```\nbar\n```\nbaz\n",
},
TestCase{
.example = 110,
.sec = "Fenced code blocks",
.html = "<h2>foo</h2>\n<pre><code>bar\n</code></pre>\n<h1>baz</h1>\n",
.markdown = "foo\n---\n~~~\nbar\n~~~\n# baz\n",
},
TestCase{
.example = 111,
.sec = "Fenced code blocks",
.html = "<pre><code class=\"language-ruby\">def foo(x)\n return 3\nend\n</code></pre>\n",
.markdown = "```ruby\ndef foo(x)\n return 3\nend\n```\n",
},
TestCase{
.example = 112,
.sec = "Fenced code blocks",
.html = "<pre><code class=\"language-ruby\">def foo(x)\n return 3\nend\n</code></pre>\n",
.markdown = "~~~~ ruby startline=3 $%@#$\ndef foo(x)\n return 3\nend\n~~~~~~~\n",
},
TestCase{
.example = 113,
.sec = "Fenced code blocks",
.html = "<pre><code class=\"language-;\"></code></pre>\n",
.markdown = "````;\n````\n",
},
TestCase{
.example = 114,
.sec = "Fenced code blocks",
.html = "<p><code>aa</code>\nfoo</p>\n",
.markdown = "``` aa ```\nfoo\n",
},
TestCase{
.example = 115,
.sec = "Fenced code blocks",
.html = "<pre><code>``` aaa\n</code></pre>\n",
.markdown = "```\n``` aaa\n```\n",
},
TestCase{
.example = 116,
.sec = "HTML blocks",
.html = "<table><tr><td>\n<pre>\n**Hello**,\n<p><em>world</em>.\n</pre></p>\n</td></tr></table>\n",
.markdown = "<table><tr><td>\n<pre>\n**Hello**,\n\n_world_.\n</pre>\n</td></tr></table>\n",
},
TestCase{
.example = 117,
.sec = "HTML blocks",
.html = "<table>\n <tr>\n <td>\n hi\n </td>\n </tr>\n</table>\n<p>okay.</p>\n",
.markdown = "<table>\n <tr>\n <td>\n hi\n </td>\n </tr>\n</table>\n\nokay.\n",
},
TestCase{
.example = 118,
.sec = "HTML blocks",
.html = " <div>\n *hello*\n <foo><a>\n",
.markdown = " <div>\n *hello*\n <foo><a>\n",
},
TestCase{
.example = 119,
.sec = "HTML blocks",
.html = "</div>\n*foo*\n",
.markdown = "</div>\n*foo*\n",
},
TestCase{
.example = 120,
.sec = "HTML blocks",
.html = "<DIV CLASS=\"foo\">\n<p><em>Markdown</em></p>\n</DIV>\n",
.markdown = "<DIV CLASS=\"foo\">\n\n*Markdown*\n\n</DIV>\n",
},
TestCase{
.example = 121,
.sec = "HTML blocks",
.html = "<div id=\"foo\"\n class=\"bar\">\n</div>\n",
.markdown = "<div id=\"foo\"\n class=\"bar\">\n</div>\n",
},
TestCase{
.example = 122,
.sec = "HTML blocks",
.html = "<div id=\"foo\" class=\"bar\n baz\">\n</div>\n",
.markdown = "<div id=\"foo\" class=\"bar\n baz\">\n</div>\n",
},
TestCase{
.example = 123,
.sec = "HTML blocks",
.html = "<div>\n*foo*\n<p><em>bar</em></p>\n",
.markdown = "<div>\n*foo*\n\n*bar*\n",
},
TestCase{
.example = 124,
.sec = "HTML blocks",
.html = "<div id=\"foo\"\n*hi*\n",
.markdown = "<div id=\"foo\"\n*hi*\n",
},
TestCase{
.example = 125,
.sec = "HTML blocks",
.html = "<div class\nfoo\n",
.markdown = "<div class\nfoo\n",
},
TestCase{
.example = 126,
.sec = "HTML blocks",
.html = "<div *???-&&&-<---\n*foo*\n",
.markdown = "<div *???-&&&-<---\n*foo*\n",
},
TestCase{
.example = 127,
.sec = "HTML blocks",
.html = "<div><a href=\"bar\">*foo*</a></div>\n",
.markdown = "<div><a href=\"bar\">*foo*</a></div>\n",
},
TestCase{
.example = 128,
.sec = "HTML blocks",
.html = "<table><tr><td>\nfoo\n</td></tr></table>\n",
.markdown = "<table><tr><td>\nfoo\n</td></tr></table>\n",
},
TestCase{
.example = 129,
.sec = "HTML blocks",
.html = "<div></div>\n``` c\nint x = 33;\n```\n",
.markdown = "<div></div>\n``` c\nint x = 33;\n```\n",
},
TestCase{
.example = 130,
.sec = "HTML blocks",
.html = "<a href=\"foo\">\n*bar*\n</a>\n",
.markdown = "<a href=\"foo\">\n*bar*\n</a>\n",
},
TestCase{
.example = 131,
.sec = "HTML blocks",
.html = "<Warning>\n*bar*\n</Warning>\n",
.markdown = "<Warning>\n*bar*\n</Warning>\n",
},
TestCase{
.example = 132,
.sec = "HTML blocks",
.html = "<i class=\"foo\">\n*bar*\n</i>\n",
.markdown = "<i class=\"foo\">\n*bar*\n</i>\n",
},
TestCase{
.example = 133,
.sec = "HTML blocks",
.html = "</ins>\n*bar*\n",
.markdown = "</ins>\n*bar*\n",
},
TestCase{
.example = 134,
.sec = "HTML blocks",
.html = "<del>\n*foo*\n</del>\n",
.markdown = "<del>\n*foo*\n</del>\n",
},
TestCase{
.example = 135,
.sec = "HTML blocks",
.html = "<del>\n<p><em>foo</em></p>\n</del>\n",
.markdown = "<del>\n\n*foo*\n\n</del>\n",
},
TestCase{
.example = 136,
.sec = "HTML blocks",
.html = "<p><del><em>foo</em></del></p>\n",
.markdown = "<del>*foo*</del>\n",
},
TestCase{
.example = 137,
.sec = "HTML blocks",
.html = "<pre language=\"haskell\"><code>\nimport Text.HTML.TagSoup\n\nmain :: IO ()\nmain = print $ parseTags tags\n</code></pre>\n<p>okay</p>\n",
.markdown = "<pre language=\"haskell\"><code>\nimport Text.HTML.TagSoup\n\nmain :: IO ()\nmain = print $ parseTags tags\n</code></pre>\nokay\n",
},
TestCase{
.example = 138,
.sec = "HTML blocks",
.html = "<script type=\"text/javascript\">\n// JavaScript example\n\ndocument.getElementById(\"demo\").innerHTML = \"Hello JavaScript!\";\n</script>\n<p>okay</p>\n",
.markdown = "<script type=\"text/javascript\">\n// JavaScript example\n\ndocument.getElementById(\"demo\").innerHTML = \"Hello JavaScript!\";\n</script>\nokay\n",
},
TestCase{
.example = 139,
.sec = "HTML blocks",
.html = "<style\n type=\"text/css\">\nh1 {color:red;}\n\np {color:blue;}\n</style>\n<p>okay</p>\n",
.markdown = "<style\n type=\"text/css\">\nh1 {color:red;}\n\np {color:blue;}\n</style>\nokay\n",
},
TestCase{
.example = 140,
.sec = "HTML blocks",
.html = "<style\n type=\"text/css\">\n\nfoo\n",
.markdown = "<style\n type=\"text/css\">\n\nfoo\n",
},
TestCase{
.example = 141,
.sec = "HTML blocks",
.html = "<blockquote>\n<div>\nfoo\n</blockquote>\n<p>bar</p>\n",
.markdown = "> <div>\n> foo\n\nbar\n",
},
TestCase{
.example = 142,
.sec = "HTML blocks",
.html = "<ul>\n<li>\n<div>\n</li>\n<li>foo</li>\n</ul>\n",
.markdown = "- <div>\n- foo\n",
},
TestCase{
.example = 143,
.sec = "HTML blocks",
.html = "<style>p{color:red;}</style>\n<p><em>foo</em></p>\n",
.markdown = "<style>p{color:red;}</style>\n*foo*\n",
},
TestCase{
.example = 144,
.sec = "HTML blocks",
.html = "<!-- foo -->*bar*\n<p><em>baz</em></p>\n",
.markdown = "<!-- foo -->*bar*\n*baz*\n",
},
TestCase{
.example = 145,
.sec = "HTML blocks",
.html = "<script>\nfoo\n</script>1. *bar*\n",
.markdown = "<script>\nfoo\n</script>1. *bar*\n",
},
TestCase{
.example = 146,
.sec = "HTML blocks",
.html = "<!-- Foo\n\nbar\n baz -->\n<p>okay</p>\n",
.markdown = "<!-- Foo\n\nbar\n baz -->\nokay\n",
},
TestCase{
.example = 147,
.sec = "HTML blocks",
.html = "<?php\n\n echo '>';\n\n?>\n<p>okay</p>\n",
.markdown = "<?php\n\n echo '>';\n\n?>\nokay\n",
},
TestCase{
.example = 148,
.sec = "HTML blocks",
.html = "<!DOCTYPE html>\n",
.markdown = "<!DOCTYPE html>\n",
},
TestCase{
.example = 149,
.sec = "HTML blocks",
.html = "<![CDATA[\nfunction matchwo(a,b)\n{\n if (a < b && a < 0) then {\n return 1;\n\n } else {\n\n return 0;\n }\n}\n]]>\n<p>okay</p>\n",
.markdown = "<![CDATA[\nfunction matchwo(a,b)\n{\n if (a < b && a < 0) then {\n return 1;\n\n } else {\n\n return 0;\n }\n}\n]]>\nokay\n",
},
TestCase{
.example = 150,
.sec = "HTML blocks",
.html = "  <!-- foo -->\n<pre><code>&lt;!-- foo --&gt;\n</code></pre>\n",
.markdown = " <!-- foo -->\n\n <!-- foo -->\n",
},
TestCase{
.example = 151,
.sec = "HTML blocks",
.html = "  <div>\n<pre><code>&lt;div&gt;\n</code></pre>\n",
.markdown = " <div>\n\n <div>\n",
},
TestCase{
.example = 152,
.sec = "HTML blocks",
.html = "<p>Foo</p>\n<div>\nbar\n</div>\n",
.markdown = "Foo\n<div>\nbar\n</div>\n",
},
TestCase{
.example = 153,
.sec = "HTML blocks",
.html = "<div>\nbar\n</div>\n*foo*\n",
.markdown = "<div>\nbar\n</div>\n*foo*\n",
},
TestCase{
.example = 154,
.sec = "HTML blocks",
.html = "<p>Foo\n<a href=\"bar\">\nbaz</p>\n",
.markdown = "Foo\n<a href=\"bar\">\nbaz\n",
},
TestCase{
.example = 155,
.sec = "HTML blocks",
.html = "<div>\n<p><em>Emphasized</em> text.</p>\n</div>\n",
.markdown = "<div>\n\n*Emphasized* text.\n\n</div>\n",
},
TestCase{
.example = 156,
.sec = "HTML blocks",
.html = "<div>\n*Emphasized* text.\n</div>\n",
.markdown = "<div>\n*Emphasized* text.\n</div>\n",
},
TestCase{
.example = 157,
.sec = "HTML blocks",
.html = "<table>\n<tr>\n<td>\nHi\n</td>\n</tr>\n</table>\n",
.markdown = "<table>\n\n<tr>\n\n<td>\nHi\n</td>\n\n</tr>\n\n</table>\n",
},
TestCase{
.example = 158,
.sec = "HTML blocks",
.html = "<table>\n  <tr>\n<pre><code>&lt;td&gt;\n  Hi\n&lt;/td&gt;\n</code></pre>\n  </tr>\n</table>\n",
.markdown = "<table>\n\n <tr>\n\n <td>\n Hi\n </td>\n\n </tr>\n\n</table>\n",
},
TestCase{
.example = 159,
.sec = "Link reference definitions",
.html = "<p><a href=\"/url\" title=\"title\">foo</a></p>\n",
.markdown = "[foo]: /url \"title\"\n\n[foo]\n",
},
TestCase{
.example = 160,
.sec = "Link reference definitions",
.html = "<p><a href=\"/url\" title=\"the title\">foo</a></p>\n",
.markdown = " [foo]: \n /url \n 'the title' \n\n[foo]\n",
},
TestCase{
.example = 161,
.sec = "Link reference definitions",
.html = "<p><a href=\"my_(url)\" title=\"title (with parens)\">Foo*bar]</a></p>\n",
.markdown = "[Foo*bar\\]]:my_(url) 'title (with parens)'\n\n[Foo*bar\\]]\n",
},
TestCase{
.example = 162,
.sec = "Link reference definitions",
.html = "<p><a href=\"my%20url\" title=\"title\">Foo bar</a></p>\n",
.markdown = "[Foo bar]:\n<my%20url>\n'title'\n\n[Foo bar]\n",
},
TestCase{
.example = 163,
.sec = "Link reference definitions",
.html = "<p><a href=\"/url\" title=\"\ntitle\nline1\nline2\n\">foo</a></p>\n",
.markdown = "[foo]: /url '\ntitle\nline1\nline2\n'\n\n[foo]\n",
},
TestCase{
.example = 164,
.sec = "Link reference definitions",
.html = "<p>[foo]: /url 'title</p>\n<p>with blank line'</p>\n<p>[foo]</p>\n",
.markdown = "[foo]: /url 'title\n\nwith blank line'\n\n[foo]\n",
},
TestCase{
.example = 165,
.sec = "Link reference definitions",
.html = "<p><a href=\"/url\">foo</a></p>\n",
.markdown = "[foo]:\n/url\n\n[foo]\n",
},
TestCase{
.example = 166,
.sec = "Link reference definitions",
.html = "<p>[foo]:</p>\n<p>[foo]</p>\n",
.markdown = "[foo]:\n\n[foo]\n",
},
TestCase{
.example = 167,
.sec = "Link reference definitions",
.html = "<p><a href=\"/url%5Cbar*baz\" title=\"foo&quot;bar\\baz\">foo</a></p>\n",
.markdown = "[foo]: /url\\bar\\*baz \"foo\\\"bar\\baz\"\n\n[foo]\n",
},
TestCase{
.example = 168,
.sec = "Link reference definitions",
.html = "<p><a href=\"url\">foo</a></p>\n",
.markdown = "[foo]\n\n[foo]: url\n",
},
TestCase{
.example = 169,
.sec = "Link reference definitions",
.html = "<p><a href=\"first\">foo</a></p>\n",
.markdown = "[foo]\n\n[foo]: first\n[foo]: second\n",
},
TestCase{
.example = 170,
.sec = "Link reference definitions",
.html = "<p><a href=\"/url\">Foo</a></p>\n",
.markdown = "[FOO]: /url\n\n[Foo]\n",
},
TestCase{
.example = 171,
.sec = "Link reference definitions",
.html = "<p><a href=\"/%CF%86%CE%BF%CF%85\">αγω</a></p>\n",
.markdown = "[ΑΓΩ]: /φου\n\n[αγω]\n",
},
TestCase{
.example = 172,
.sec = "Link reference definitions",
.html = "",
.markdown = "[foo]: /url\n",
},
TestCase{
.example = 173,
.sec = "Link reference definitions",
.html = "<p>bar</p>\n",
.markdown = "[\nfoo\n]: /url\nbar\n",
},
TestCase{
.example = 174,
.sec = "Link reference definitions",
.html = "<p>[foo]: /url &quot;title&quot; ok</p>\n",
.markdown = "[foo]: /url \"title\" ok\n",
},
TestCase{
.example = 175,
.sec = "Link reference definitions",
.html = "<p>&quot;title&quot; ok</p>\n",
.markdown = "[foo]: /url\n\"title\" ok\n",
},
TestCase{
.example = 176,
.sec = "Link reference definitions",
.html = "<pre><code>[foo]: /url &quot;title&quot;\n</code></pre>\n<p>[foo]</p>\n",
.markdown = " [foo]: /url \"title\"\n\n[foo]\n",
},
TestCase{
.example = 177,
.sec = "Link reference definitions",
.html = "<pre><code>[foo]: /url\n</code></pre>\n<p>[foo]</p>\n",
.markdown = "```\n[foo]: /url\n```\n\n[foo]\n",
},
TestCase{
.example = 178,
.sec = "Link reference definitions",
.html = "<p>Foo\n[bar]: /baz</p>\n<p>[bar]</p>\n",
.markdown = "Foo\n[bar]: /baz\n\n[bar]\n",
},
TestCase{
.example = 179,
.sec = "Link reference definitions",
.html = "<h1><a href=\"/url\">Foo</a></h1>\n<blockquote>\n<p>bar</p>\n</blockquote>\n",
.markdown = "# [Foo]\n[foo]: /url\n> bar\n",
},
TestCase{
.example = 180,
.sec = "Link reference definitions",
.html = "<p><a href=\"/foo-url\" title=\"foo\">foo</a>,\n<a href=\"/bar-url\" title=\"bar\">bar</a>,\n<a href=\"/baz-url\">baz</a></p>\n",
.markdown = "[foo]: /foo-url \"foo\"\n[bar]: /bar-url\n \"bar\"\n[baz]: /baz-url\n\n[foo],\n[bar],\n[baz]\n",
},
TestCase{
.example = 181,
.sec = "Link reference definitions",
.html = "<p><a href=\"/url\">foo</a></p>\n<blockquote>\n</blockquote>\n",
.markdown = "[foo]\n\n> [foo]: /url\n",
},
TestCase{
.example = 182,
.sec = "Paragraphs",
.html = "<p>aaa</p>\n<p>bbb</p>\n",
.markdown = "aaa\n\nbbb\n",
},
TestCase{
.example = 183,
.sec = "Paragraphs",
.html = "<p>aaa\nbbb</p>\n<p>ccc\nddd</p>\n",
.markdown = "aaa\nbbb\n\nccc\nddd\n",
},
TestCase{
.example = 184,
.sec = "Paragraphs",
.html = "<p>aaa</p>\n<p>bbb</p>\n",
.markdown = "aaa\n\n\nbbb\n",
},
TestCase{
.example = 185,
.sec = "Paragraphs",
.html = "<p>aaa\nbbb</p>\n",
.markdown = " aaa\n bbb\n",
},
TestCase{
.example = 186,
.sec = "Paragraphs",
.html = "<p>aaa\nbbb\nccc</p>\n",
.markdown = "aaa\n bbb\n ccc\n",
},
TestCase{
.example = 187,
.sec = "Paragraphs",
.html = "<p>aaa\nbbb</p>\n",
.markdown = " aaa\nbbb\n",
},
TestCase{
.example = 188,
.sec = "Paragraphs",
.html = "<pre><code>aaa\n</code></pre>\n<p>bbb</p>\n",
.markdown = " aaa\nbbb\n",
},
TestCase{
.example = 189,
.sec = "Paragraphs",
.html = "<p>aaa<br />\nbbb</p>\n",
.markdown = "aaa \nbbb \n",
},
TestCase{
.example = 190,
.sec = "Blank lines",
.html = "<p>aaa</p>\n<h1>aaa</h1>\n",
.markdown = " \n\naaa\n \n\n# aaa\n\n \n",
},
TestCase{
.example = 191,
.sec = "Block quotes",
.html = "<blockquote>\n<h1>Foo</h1>\n<p>bar\nbaz</p>\n</blockquote>\n",
.markdown = "> # Foo\n> bar\n> baz\n",
},
TestCase{
.example = 192,
.sec = "Block quotes",
.html = "<blockquote>\n<h1>Foo</h1>\n<p>bar\nbaz</p>\n</blockquote>\n",
.markdown = "># Foo\n>bar\n> baz\n",
},
TestCase{
.example = 193,
.sec = "Block quotes",
.html = "<blockquote>\n<h1>Foo</h1>\n<p>bar\nbaz</p>\n</blockquote>\n",
.markdown = " > # Foo\n > bar\n > baz\n",
},
TestCase{
.example = 194,
.sec = "Block quotes",
.html = "<pre><code>&gt; # Foo\n&gt; bar\n&gt; baz\n</code></pre>\n",
.markdown = " > # Foo\n > bar\n > baz\n",
},
TestCase{
.example = 195,
.sec = "Block quotes",
.html = "<blockquote>\n<h1>Foo</h1>\n<p>bar\nbaz</p>\n</blockquote>\n",
.markdown = "> # Foo\n> bar\nbaz\n",
},
TestCase{
.example = 196,
.sec = "Block quotes",
.html = "<blockquote>\n<p>bar\nbaz\nfoo</p>\n</blockquote>\n",
.markdown = "> bar\nbaz\n> foo\n",
},
TestCase{
.example = 197,
.sec = "Block quotes",
.html = "<blockquote>\n<p>foo</p>\n</blockquote>\n<hr />\n",
.markdown = "> foo\n---\n",
},
TestCase{
.example = 198,
.sec = "Block quotes",
.html = "<blockquote>\n<ul>\n<li>foo</li>\n</ul>\n</blockquote>\n<ul>\n<li>bar</li>\n</ul>\n",
.markdown = "> - foo\n- bar\n",
},
TestCase{
.example = 199,
.sec = "Block quotes",
.html = "<blockquote>\n<pre><code>foo\n</code></pre>\n</blockquote>\n<pre><code>bar\n</code></pre>\n",
.markdown = "> foo\n bar\n",
},
TestCase{
.example = 200,
.sec = "Block quotes",
.html = "<blockquote>\n<pre><code></code></pre>\n</blockquote>\n<p>foo</p>\n<pre><code></code></pre>\n",
.markdown = "> ```\nfoo\n```\n",
},
TestCase{
.example = 201,
.sec = "Block quotes",
.html = "<blockquote>\n<p>foo\n- bar</p>\n</blockquote>\n",
.markdown = "> foo\n - bar\n",
},
TestCase{
.example = 202,
.sec = "Block quotes",
.html = "<blockquote>\n</blockquote>\n",
.markdown = ">\n",
},
TestCase{
.example = 203,
.sec = "Block quotes",
.html = "<blockquote>\n</blockquote>\n",
.markdown = ">\n> \n> \n",
},
TestCase{
.example = 204,
.sec = "Block quotes",
.html = "<blockquote>\n<p>foo</p>\n</blockquote>\n",
.markdown = ">\n> foo\n> \n",
},
TestCase{
.example = 205,
.sec = "Block quotes",
.html = "<blockquote>\n<p>foo</p>\n</blockquote>\n<blockquote>\n<p>bar</p>\n</blockquote>\n",
.markdown = "> foo\n\n> bar\n",
},
TestCase{
.example = 206,
.sec = "Block quotes",
.html = "<blockquote>\n<p>foo\nbar</p>\n</blockquote>\n",
.markdown = "> foo\n> bar\n",
},
TestCase{
.example = 207,
.sec = "Block quotes",
.html = "<blockquote>\n<p>foo</p>\n<p>bar</p>\n</blockquote>\n",
.markdown = "> foo\n>\n> bar\n",
},
TestCase{
.example = 208,
.sec = "Block quotes",
.html = "<p>foo</p>\n<blockquote>\n<p>bar</p>\n</blockquote>\n",
.markdown = "foo\n> bar\n",
},
TestCase{
.example = 209,
.sec = "Block quotes",
.html = "<blockquote>\n<p>aaa</p>\n</blockquote>\n<hr />\n<blockquote>\n<p>bbb</p>\n</blockquote>\n",
.markdown = "> aaa\n***\n> bbb\n",
},
TestCase{
.example = 210,
.sec = "Block quotes",
.html = "<blockquote>\n<p>bar\nbaz</p>\n</blockquote>\n",
.markdown = "> bar\nbaz\n",
},
TestCase{
.example = 211,
.sec = "Block quotes",
.html = "<blockquote>\n<p>bar</p>\n</blockquote>\n<p>baz</p>\n",
.markdown = "> bar\n\nbaz\n",
},
TestCase{
.example = 212,
.sec = "Block quotes",
.html = "<blockquote>\n<p>bar</p>\n</blockquote>\n<p>baz</p>\n",
.markdown = "> bar\n>\nbaz\n",
},
TestCase{
.example = 213,
.sec = "Block quotes",
.html = "<blockquote>\n<blockquote>\n<blockquote>\n<p>foo\nbar</p>\n</blockquote>\n</blockquote>\n</blockquote>\n",
.markdown = "> > > foo\nbar\n",
},
TestCase{
.example = 214,
.sec = "Block quotes",
.html = "<blockquote>\n<blockquote>\n<blockquote>\n<p>foo\nbar\nbaz</p>\n</blockquote>\n</blockquote>\n</blockquote>\n",
.markdown = ">>> foo\n> bar\n>>baz\n",
},
TestCase{
.example = 215,
.sec = "Block quotes",
.html = "<blockquote>\n<pre><code>code\n</code></pre>\n</blockquote>\n<blockquote>\n<p>not code</p>\n</blockquote>\n",
.markdown = "> code\n\n> not code\n",
},
TestCase{
.example = 216,
.sec = "List items",
.html = "<p>A paragraph\nwith two lines.</p>\n<pre><code>indented code\n</code></pre>\n<blockquote>\n<p>A block quote.</p>\n</blockquote>\n",
.markdown = "A paragraph\nwith two lines.\n\n indented code\n\n> A block quote.\n",
},
TestCase{
.example = 217,
.sec = "List items",
.html = "<ol>\n<li>\n<p>A paragraph\nwith two lines.</p>\n<pre><code>indented code\n</code></pre>\n<blockquote>\n<p>A block quote.</p>\n</blockquote>\n</li>\n</ol>\n",
.markdown = "1. A paragraph\n with two lines.\n\n indented code\n\n > A block quote.\n",
},
TestCase{
.example = 218,
.sec = "List items",
.html = "<ul>\n<li>one</li>\n</ul>\n<p>two</p>\n",
.markdown = "- one\n\n two\n",
},
TestCase{
.example = 219,
.sec = "List items",
.html = "<ul>\n<li>\n<p>one</p>\n<p>two</p>\n</li>\n</ul>\n",
.markdown = "- one\n\n two\n",
},
TestCase{
.example = 220,
.sec = "List items",
.html = "<ul>\n<li>one</li>\n</ul>\n<pre><code> two\n</code></pre>\n",
.markdown = " - one\n\n two\n",
},
TestCase{
.example = 221,
.sec = "List items",
.html = "<ul>\n<li>\n<p>one</p>\n<p>two</p>\n</li>\n</ul>\n",
.markdown = " - one\n\n two\n",
},
TestCase{
.example = 222,
.sec = "List items",
.html = "<blockquote>\n<blockquote>\n<ol>\n<li>\n<p>one</p>\n<p>two</p>\n</li>\n</ol>\n</blockquote>\n</blockquote>\n",
.markdown = " > > 1. one\n>>\n>> two\n",
},
TestCase{
.example = 223,
.sec = "List items",
.html = "<blockquote>\n<blockquote>\n<ul>\n<li>one</li>\n</ul>\n<p>two</p>\n</blockquote>\n</blockquote>\n",
.markdown = ">>- one\n>>\n > > two\n",
},
TestCase{
.example = 224,
.sec = "List items",
.html = "<p>-one</p>\n<p>2.two</p>\n",
.markdown = "-one\n\n2.two\n",
},
TestCase{
.example = 225,
.sec = "List items",
.html = "<ul>\n<li>\n<p>foo</p>\n<p>bar</p>\n</li>\n</ul>\n",
.markdown = "- foo\n\n\n bar\n",
},
TestCase{
.example = 226,
.sec = "List items",
.html = "<ol>\n<li>\n<p>foo</p>\n<pre><code>bar\n</code></pre>\n<p>baz</p>\n<blockquote>\n<p>bam</p>\n</blockquote>\n</li>\n</ol>\n",
.markdown = "1. foo\n\n ```\n bar\n ```\n\n baz\n\n > bam\n",
},
TestCase{
.example = 227,
.sec = "List items",
.html = "<ul>\n<li>\n<p>Foo</p>\n<pre><code>bar\n\n\nbaz\n</code></pre>\n</li>\n</ul>\n",
.markdown = "- Foo\n\n bar\n\n\n baz\n",
},
TestCase{
.example = 228,
.sec = "List items",
.html = "<ol start=\"123456789\">\n<li>ok</li>\n</ol>\n",
.markdown = "123456789. ok\n",
},
TestCase{
.example = 229,
.sec = "List items",
.html = "<p>1234567890. not ok</p>\n",
.markdown = "1234567890. not ok\n",
},
TestCase{
.example = 230,
.sec = "List items",
.html = "<ol start=\"0\">\n<li>ok</li>\n</ol>\n",
.markdown = "0. ok\n",
},
TestCase{
.example = 231,
.sec = "List items",
.html = "<ol start=\"3\">\n<li>ok</li>\n</ol>\n",
.markdown = "003. ok\n",
},
TestCase{
.example = 232,
.sec = "List items",
.html = "<p>-1. not ok</p>\n",
.markdown = "-1. not ok\n",
},
TestCase{
.example = 233,
.sec = "List items",
.html = "<ul>\n<li>\n<p>foo</p>\n<pre><code>bar\n</code></pre>\n</li>\n</ul>\n",
.markdown = "- foo\n\n bar\n",
},
TestCase{
.example = 234,
.sec = "List items",
.html = "<ol start=\"10\">\n<li>\n<p>foo</p>\n<pre><code>bar\n</code></pre>\n</li>\n</ol>\n",
.markdown = " 10. foo\n\n bar\n",
},
TestCase{
.example = 235,
.sec = "List items",
.html = "<pre><code>indented code\n</code></pre>\n<p>paragraph</p>\n<pre><code>more code\n</code></pre>\n",
.markdown = " indented code\n\nparagraph\n\n more code\n",
},
TestCase{
.example = 236,
.sec = "List items",
.html = "<ol>\n<li>\n<pre><code>indented code\n</code></pre>\n<p>paragraph</p>\n<pre><code>more code\n</code></pre>\n</li>\n</ol>\n",
.markdown = "1. indented code\n\n paragraph\n\n more code\n",
},
TestCase{
.example = 237,
.sec = "List items",
.html = "<ol>\n<li>\n<pre><code> indented code\n</code></pre>\n<p>paragraph</p>\n<pre><code>more code\n</code></pre>\n</li>\n</ol>\n",
.markdown = "1. indented code\n\n paragraph\n\n more code\n",
},
TestCase{
.example = 238,
.sec = "List items",
.html = "<p>foo</p>\n<p>bar</p>\n",
.markdown = " foo\n\nbar\n",
},
TestCase{
.example = 239,
.sec = "List items",
.html = "<ul>\n<li>foo</li>\n</ul>\n<p>bar</p>\n",
.markdown = "- foo\n\n bar\n",
},
TestCase{
.example = 240,
.sec = "List items",
.html = "<ul>\n<li>\n<p>foo</p>\n<p>bar</p>\n</li>\n</ul>\n",
.markdown = "- foo\n\n bar\n",
},
TestCase{
.example = 241,
.sec = "List items",
.html = "<ul>\n<li>foo</li>\n<li>\n<pre><code>bar\n</code></pre>\n</li>\n<li>\n<pre><code>baz\n</code></pre>\n</li>\n</ul>\n",
.markdown = "-\n foo\n-\n ```\n bar\n ```\n-\n baz\n",
},
TestCase{
.example = 242,
.sec = "List items",
.html = "<ul>\n<li>foo</li>\n</ul>\n",
.markdown = "- \n foo\n",
},
TestCase{
.example = 243,
.sec = "List items",
.html = "<ul>\n<li></li>\n</ul>\n<p>foo</p>\n",
.markdown = "-\n\n foo\n",
},
TestCase{
.example = 244,
.sec = "List items",
.html = "<ul>\n<li>foo</li>\n<li></li>\n<li>bar</li>\n</ul>\n",
.markdown = "- foo\n-\n- bar\n",
},
TestCase{
.example = 245,
.sec = "List items",
.html = "<ul>\n<li>foo</li>\n<li></li>\n<li>bar</li>\n</ul>\n",
.markdown = "- foo\n- \n- bar\n",
},
TestCase{
.example = 246,
.sec = "List items",
.html = "<ol>\n<li>foo</li>\n<li></li>\n<li>bar</li>\n</ol>\n",
.markdown = "1. foo\n2.\n3. bar\n",
},
TestCase{
.example = 247,
.sec = "List items",
.html = "<ul>\n<li></li>\n</ul>\n",
.markdown = "*\n",
},
TestCase{
.example = 248,
.sec = "List items",
.html = "<p>foo\n*</p>\n<p>foo\n1.</p>\n",
.markdown = "foo\n*\n\nfoo\n1.\n",
},
TestCase{
.example = 249,
.sec = "List items",
.html = "<ol>\n<li>\n<p>A paragraph\nwith two lines.</p>\n<pre><code>indented code\n</code></pre>\n<blockquote>\n<p>A block quote.</p>\n</blockquote>\n</li>\n</ol>\n",
.markdown = " 1. A paragraph\n with two lines.\n\n indented code\n\n > A block quote.\n",
},
TestCase{
.example = 250,
.sec = "List items",
.html = "<ol>\n<li>\n<p>A paragraph\nwith two lines.</p>\n<pre><code>indented code\n</code></pre>\n<blockquote>\n<p>A block quote.</p>\n</blockquote>\n</li>\n</ol>\n",
.markdown = " 1. A paragraph\n with two lines.\n\n indented code\n\n > A block quote.\n",
},
TestCase{
.example = 251,
.sec = "List items",
.html = "<ol>\n<li>\n<p>A paragraph\nwith two lines.</p>\n<pre><code>indented code\n</code></pre>\n<blockquote>\n<p>A block quote.</p>\n</blockquote>\n</li>\n</ol>\n",
.markdown = " 1. A paragraph\n with two lines.\n\n indented code\n\n > A block quote.\n",
},
TestCase{
.example = 252,
.sec = "List items",
.html = "<pre><code>1. A paragraph\n   with two lines.\n\n      indented code\n\n   &gt; A block quote.\n</code></pre>\n",
.markdown = " 1. A paragraph\n with two lines.\n\n indented code\n\n > A block quote.\n",
},
TestCase{
.example = 253,
.sec = "List items",
.html = "<ol>\n<li>\n<p>A paragraph\nwith two lines.</p>\n<pre><code>indented code\n</code></pre>\n<blockquote>\n<p>A block quote.</p>\n</blockquote>\n</li>\n</ol>\n",
.markdown = " 1. A paragraph\nwith two lines.\n\n indented code\n\n > A block quote.\n",
},
TestCase{
.example = 254,
.sec = "List items",
.html = "<ol>\n<li>A paragraph\nwith two lines.</li>\n</ol>\n",
.markdown = " 1. A paragraph\n with two lines.\n",
},
TestCase{
.example = 255,
.sec = "List items",
.html = "<blockquote>\n<ol>\n<li>\n<blockquote>\n<p>Blockquote\ncontinued here.</p>\n</blockquote>\n</li>\n</ol>\n</blockquote>\n",
.markdown = "> 1. > Blockquote\ncontinued here.\n",
},
TestCase{
.example = 256,
.sec = "List items",
.html = "<blockquote>\n<ol>\n<li>\n<blockquote>\n<p>Blockquote\ncontinued here.</p>\n</blockquote>\n</li>\n</ol>\n</blockquote>\n",
.markdown = "> 1. > Blockquote\n> continued here.\n",
},
TestCase{
.example = 257,
.sec = "List items",
.html = "<ul>\n<li>foo\n<ul>\n<li>bar\n<ul>\n<li>baz\n<ul>\n<li>boo</li>\n</ul>\n</li>\n</ul>\n</li>\n</ul>\n</li>\n</ul>\n",
.markdown = "- foo\n - bar\n - baz\n - boo\n",
},
TestCase{
.example = 258,
.sec = "List items",
.html = "<ul>\n<li>foo</li>\n<li>bar</li>\n<li>baz</li>\n<li>boo</li>\n</ul>\n",
.markdown = "- foo\n - bar\n - baz\n - boo\n",
},
TestCase{
.example = 259,
.sec = "List items",
.html = "<ol start=\"10\">\n<li>foo\n<ul>\n<li>bar</li>\n</ul>\n</li>\n</ol>\n",
.markdown = "10) foo\n - bar\n",
},
TestCase{
.example = 260,
.sec = "List items",
.html = "<ol start=\"10\">\n<li>foo</li>\n</ol>\n<ul>\n<li>bar</li>\n</ul>\n",
.markdown = "10) foo\n - bar\n",
},
TestCase{
.example = 261,
.sec = "List items",
.html = "<ul>\n<li>\n<ul>\n<li>foo</li>\n</ul>\n</li>\n</ul>\n",
.markdown = "- - foo\n",
},
TestCase{
.example = 262,
.sec = "List items",
.html = "<ol>\n<li>\n<ul>\n<li>\n<ol start=\"2\">\n<li>foo</li>\n</ol>\n</li>\n</ul>\n</li>\n</ol>\n",
.markdown = "1. - 2. foo\n",
},
TestCase{
.example = 263,
.sec = "List items",
.html = "<ul>\n<li>\n<h1>Foo</h1>\n</li>\n<li>\n<h2>Bar</h2>\nbaz</li>\n</ul>\n",
.markdown = "- # Foo\n- Bar\n ---\n baz\n",
},
TestCase{
.example = 264,
.sec = "Lists",
.html = "<ul>\n<li>foo</li>\n<li>bar</li>\n</ul>\n<ul>\n<li>baz</li>\n</ul>\n",
.markdown = "- foo\n- bar\n+ baz\n",
},
TestCase{
.example = 265,
.sec = "Lists",
.html = "<ol>\n<li>foo</li>\n<li>bar</li>\n</ol>\n<ol start=\"3\">\n<li>baz</li>\n</ol>\n",
.markdown = "1. foo\n2. bar\n3) baz\n",
},
TestCase{
.example = 266,
.sec = "Lists",
.html = "<p>Foo</p>\n<ul>\n<li>bar</li>\n<li>baz</li>\n</ul>\n",
.markdown = "Foo\n- bar\n- baz\n",
},
TestCase{
.example = 267,
.sec = "Lists",
.html = "<p>The number of windows in my house is\n14. The number of doors is 6.</p>\n",
.markdown = "The number of windows in my house is\n14. The number of doors is 6.\n",
},
TestCase{
.example = 268,
.sec = "Lists",
.html = "<p>The number of windows in my house is</p>\n<ol>\n<li>The number of doors is 6.</li>\n</ol>\n",
.markdown = "The number of windows in my house is\n1. The number of doors is 6.\n",
},
TestCase{
.example = 269,
.sec = "Lists",
.html = "<ul>\n<li>\n<p>foo</p>\n</li>\n<li>\n<p>bar</p>\n</li>\n<li>\n<p>baz</p>\n</li>\n</ul>\n",
.markdown = "- foo\n\n- bar\n\n\n- baz\n",
},
TestCase{
.example = 270,
.sec = "Lists",
.html = "<ul>\n<li>foo\n<ul>\n<li>bar\n<ul>\n<li>\n<p>baz</p>\n<p>bim</p>\n</li>\n</ul>\n</li>\n</ul>\n</li>\n</ul>\n",
.markdown = "- foo\n - bar\n - baz\n\n\n bim\n",
},
TestCase{
.example = 271,
.sec = "Lists",
.html = "<ul>\n<li>foo</li>\n<li>bar</li>\n</ul>\n<!-- -->\n<ul>\n<li>baz</li>\n<li>bim</li>\n</ul>\n",
.markdown = "- foo\n- bar\n\n<!-- -->\n\n- baz\n- bim\n",
},
TestCase{
.example = 272,
.sec = "Lists",
.html = "<ul>\n<li>\n<p>foo</p>\n<p>notcode</p>\n</li>\n<li>\n<p>foo</p>\n</li>\n</ul>\n<!-- -->\n<pre><code>code\n</code></pre>\n",
.markdown = "- foo\n\n notcode\n\n- foo\n\n<!-- -->\n\n code\n",
},
TestCase{
.example = 273,
.sec = "Lists",
.html = "<ul>\n<li>a</li>\n<li>b</li>\n<li>c</li>\n<li>d</li>\n<li>e</li>\n<li>f</li>\n<li>g</li>\n<li>h</li>\n<li>i</li>\n</ul>\n",
.markdown = "- a\n - b\n - c\n - d\n - e\n - f\n - g\n - h\n- i\n",
},
TestCase{
.example = 274,
.sec = "Lists",
.html = "<ol>\n<li>\n<p>a</p>\n</li>\n<li>\n<p>b</p>\n</li>\n<li>\n<p>c</p>\n</li>\n</ol>\n",
.markdown = "1. a\n\n 2. b\n\n 3. c\n",
},
TestCase{
.example = 275,
.sec = "Lists",
.html = "<ul>\n<li>\n<p>a</p>\n</li>\n<li>\n<p>b</p>\n</li>\n<li>\n<p>c</p>\n</li>\n</ul>\n",
.markdown = "- a\n- b\n\n- c\n",
},
TestCase{
.example = 276,
.sec = "Lists",
.html = "<ul>\n<li>\n<p>a</p>\n</li>\n<li></li>\n<li>\n<p>c</p>\n</li>\n</ul>\n",
.markdown = "* a\n*\n\n* c\n",
},
TestCase{
.example = 277,
.sec = "Lists",
.html = "<ul>\n<li>\n<p>a</p>\n</li>\n<li>\n<p>b</p>\n<p>c</p>\n</li>\n<li>\n<p>d</p>\n</li>\n</ul>\n",
.markdown = "- a\n- b\n\n c\n- d\n",
},
TestCase{
.example = 278,
.sec = "Lists",
.html = "<ul>\n<li>\n<p>a</p>\n</li>\n<li>\n<p>b</p>\n</li>\n<li>\n<p>d</p>\n</li>\n</ul>\n",
.markdown = "- a\n- b\n\n [ref]: /url\n- d\n",
},
TestCase{
.example = 279,
.sec = "Lists",
.html = "<ul>\n<li>a</li>\n<li>\n<pre><code>b\n\n\n</code></pre>\n</li>\n<li>c</li>\n</ul>\n",
.markdown = "- a\n- ```\n b\n\n\n ```\n- c\n",
},
TestCase{
.example = 280,
.sec = "Lists",
.html = "<ul>\n<li>a\n<ul>\n<li>\n<p>b</p>\n<p>c</p>\n</li>\n</ul>\n</li>\n<li>d</li>\n</ul>\n",
.markdown = "- a\n - b\n\n c\n- d\n",
},
TestCase{
.example = 281,
.sec = "Lists",
.html = "<ul>\n<li>a\n<blockquote>\n<p>b</p>\n</blockquote>\n</li>\n<li>c</li>\n</ul>\n",
.markdown = "* a\n > b\n >\n* c\n",
},
TestCase{
.example = 282,
.sec = "Lists",
.html = "<ul>\n<li>a\n<blockquote>\n<p>b</p>\n</blockquote>\n<pre><code>c\n</code></pre>\n</li>\n<li>d</li>\n</ul>\n",
.markdown = "- a\n > b\n ```\n c\n ```\n- d\n",
},
TestCase{
.example = 283,
.sec = "Lists",
.html = "<ul>\n<li>a</li>\n</ul>\n",
.markdown = "- a\n",
},
TestCase{
.example = 284,
.sec = "Lists",
.html = "<ul>\n<li>a\n<ul>\n<li>b</li>\n</ul>\n</li>\n</ul>\n",
.markdown = "- a\n - b\n",
},
TestCase{
.example = 285,
.sec = "Lists",
.html = "<ol>\n<li>\n<pre><code>foo\n</code></pre>\n<p>bar</p>\n</li>\n</ol>\n",
.markdown = "1. ```\n foo\n ```\n\n bar\n",
},
TestCase{
.example = 286,
.sec = "Lists",
.html = "<ul>\n<li>\n<p>foo</p>\n<ul>\n<li>bar</li>\n</ul>\n<p>baz</p>\n</li>\n</ul>\n",
.markdown = "* foo\n * bar\n\n baz\n",
},
TestCase{
.example = 287,
.sec = "Lists",
.html = "<ul>\n<li>\n<p>a</p>\n<ul>\n<li>b</li>\n<li>c</li>\n</ul>\n</li>\n<li>\n<p>d</p>\n<ul>\n<li>e</li>\n<li>f</li>\n</ul>\n</li>\n</ul>\n",
.markdown = "- a\n - b\n - c\n\n- d\n - e\n - f\n",
},
TestCase{
.example = 288,
.sec = "Inlines",
.html = "<p><code>hi</code>lo`</p>\n",
.markdown = "`hi`lo`\n",
},
TestCase{
.example = 289,
.sec = "Backslash escapes",
.html = "<p>!&quot;#$%&amp;'()*+,-./:;&lt;=&gt;?@[\\]^_`{|}~</p>\n",
.markdown = "\\!\\\"\\#\\$\\%\\&\\'\\(\\)\\*\\+\\,\\-\\.\\/\\:\\;\\<\\=\\>\\?\\@\\[\\\\\\]\\^\\_\\`\\{\\|\\}\\~\n",
},
TestCase{
.example = 290,
.sec = "Backslash escapes",
.html = "<p>\\\t\\A\\a\\ \\3\\φ\\«</p>\n",
.markdown = "\\\t\\A\\a\\ \\3\\φ\\«\n",
},
TestCase{
.example = 291,
.sec = "Backslash escapes",
.html = "<p>*not emphasized*\n&lt;br/&gt; not a tag\n[not a link](/foo)\n`not code`\n1. not a list\n* not a list\n# not a heading\n[foo]: /url &quot;not a reference&quot;</p>\n",
.markdown = "\\*not emphasized*\n\\<br/> not a tag\n\\[not a link](/foo)\n\\`not code`\n1\\. not a list\n\\* not a list\n\\# not a heading\n\\[foo]: /url \"not a reference\"\n",
},
TestCase{
.example = 292,
.sec = "Backslash escapes",
.html = "<p>\\<em>emphasis</em></p>\n",
.markdown = "\\\\*emphasis*\n",
},
TestCase{
.example = 293,
.sec = "Backslash escapes",
.html = "<p>foo<br />\nbar</p>\n",
.markdown = "foo\\\nbar\n",
},
TestCase{
.example = 294,
.sec = "Backslash escapes",
.html = "<p><code>\\[\\`</code></p>\n",
.markdown = "`` \\[\\` ``\n",
},
TestCase{
.example = 295,
.sec = "Backslash escapes",
.html = "<pre><code>\\[\\]\n</code></pre>\n",
.markdown = " \\[\\]\n",
},
TestCase{
.example = 296,
.sec = "Backslash escapes",
.html = "<pre><code>\\[\\]\n</code></pre>\n",
.markdown = "~~~\n\\[\\]\n~~~\n",
},
TestCase{
.example = 297,
.sec = "Backslash escapes",
.html = "<p><a href=\"http://example.com?find=%5C*\">http://example.com?find=\\*</a></p>\n",
.markdown = "<http://example.com?find=\\*>\n",
},
TestCase{
.example = 298,
.sec = "Backslash escapes",
.html = "<a href=\"/bar\\/)\">\n",
.markdown = "<a href=\"/bar\\/)\">\n",
},
TestCase{
.example = 299,
.sec = "Backslash escapes",
.html = "<p><a href=\"/bar*\" title=\"ti*tle\">foo</a></p>\n",
.markdown = "[foo](/bar\\* \"ti\\*tle\")\n",
},
TestCase{
.example = 300,
.sec = "Backslash escapes",
.html = "<p><a href=\"/bar*\" title=\"ti*tle\">foo</a></p>\n",
.markdown = "[foo]\n\n[foo]: /bar\\* \"ti\\*tle\"\n",
},
TestCase{
.example = 301,
.sec = "Backslash escapes",
.html = "<pre><code class=\"language-foo+bar\">foo\n</code></pre>\n",
.markdown = "``` foo\\+bar\nfoo\n```\n",
},
TestCase{
.example = 302,
.sec = "Entity and numeric character references",
.html = "<p>\u00a0 & © Æ Ď\n¾ ℋ ⅆ\n∲ ≧̸</p>\n",
.markdown = " & © Æ Ď\n¾ ℋ ⅆ\n∲ ≧̸\n",
},
TestCase{
.example = 303,
.sec = "Entity and numeric character references",
.html = "<p># Ӓ Ϡ � �</p>\n",
.markdown = "# Ӓ Ϡ � �\n",
},
TestCase{
.example = 304,
.sec = "Entity and numeric character references",
.html = "<p>" ആ ಫ</p>\n",
.markdown = "" ആ ಫ\n",
},
TestCase{
.example = 305,
.sec = "Entity and numeric character references",
.html = "<p>&nbsp &x; &#; &#x;\n&ThisIsNotDefined; &hi?;</p>\n",
.markdown = "  &x; &#; &#x;\n&ThisIsNotDefined; &hi?;\n",
},
TestCase{
.example = 306,
.sec = "Entity and numeric character references",
.html = "<p>&copy</p>\n",
.markdown = "©\n",
},
TestCase{
.example = 307,
.sec = "Entity and numeric character references",
.html = "<p>&MadeUpEntity;</p>\n",
.markdown = "&MadeUpEntity;\n",
},
TestCase{
.example = 308,
.sec = "Entity and numeric character references",
.html = "<a href=\"öö.html\">\n",
.markdown = "<a href=\"öö.html\">\n",
},
TestCase{
.example = 309,
.sec = "Entity and numeric character references",
.html = "<p><a href=\"/f%C3%B6%C3%B6\" title=\"föö\">foo</a></p>\n",
.markdown = "[foo](/föö \"föö\")\n",
},
TestCase{
.example = 310,
.sec = "Entity and numeric character references",
.html = "<p><a href=\"/f%C3%B6%C3%B6\" title=\"föö\">foo</a></p>\n",
.markdown = "[foo]\n\n[foo]: /föö \"föö\"\n",
},
TestCase{
.example = 311,
.sec = "Entity and numeric character references",
.html = "<pre><code class=\"language-föö\">foo\n</code></pre>\n",
.markdown = "``` föö\nfoo\n```\n",
},
TestCase{
.example = 312,
.sec = "Entity and numeric character references",
.html = "<p><code>f&ouml;&ouml;</code></p>\n",
.markdown = "`föö`\n",
},
TestCase{
.example = 313,
.sec = "Entity and numeric character references",
.html = "<pre><code>f&ouml;f&ouml;\n</code></pre>\n",
.markdown = " föfö\n",
},
TestCase{
.example = 314,
.sec = "Code spans",
.html = "<p><code>foo</code></p>\n",
.markdown = "`foo`\n",
},
TestCase{
.example = 315,
.sec = "Code spans",
.html = "<p><code>foo ` bar</code></p>\n",
.markdown = "`` foo ` bar ``\n",
},
TestCase{
.example = 316,
.sec = "Code spans",
.html = "<p><code>``</code></p>\n",
.markdown = "` `` `\n",
},
TestCase{
.example = 317,
.sec = "Code spans",
.html = "<p><code>foo</code></p>\n",
.markdown = "``\nfoo\n``\n",
},
TestCase{
.example = 318,
.sec = "Code spans",
.html = "<p><code>foo bar baz</code></p>\n",
.markdown = "`foo bar\n baz`\n",
},
TestCase{
.example = 319,
.sec = "Code spans",
.html = "<p><code>a\u00a0\u00a0b</code></p>\n",
.markdown = "`a\u00a0\u00a0b`\n",
},
TestCase{
.example = 320,
.sec = "Code spans",
.html = "<p><code>foo `` bar</code></p>\n",
.markdown = "`foo `` bar`\n",
},
TestCase{
.example = 321,
.sec = "Code spans",
.html = "<p><code>foo\\</code>bar`</p>\n",
.markdown = "`foo\\`bar`\n",
},
TestCase{
.example = 322,
.sec = "Code spans",
.html = "<p>*foo<code>*</code></p>\n",
.markdown = "*foo`*`\n",
},
TestCase{
.example = 323,
.sec = "Code spans",
.html = "<p>[not a <code>link](/foo</code>)</p>\n",
.markdown = "[not a `link](/foo`)\n",
},
TestCase{
.example = 324,
.sec = "Code spans",
.html = "<p><code><a href="</code>">`</p>\n",
.markdown = "`<a href=\"`\">`\n",
},
TestCase{
.example = 325,
.sec = "Code spans",
.html = "<p><a href=\"`\">`</p>\n",
.markdown = "<a href=\"`\">`\n",
},
TestCase{
.example = 326,
.sec = "Code spans",
.html = "<p><code><http://foo.bar.</code>baz>`</p>\n",
.markdown = "`<http://foo.bar.`baz>`\n",
},
TestCase{
.example = 327,
.sec = "Code spans",
.html = "<p><a href=\"http://foo.bar.%60baz\">http://foo.bar.`baz</a>`</p>\n",
.markdown = "<http://foo.bar.`baz>`\n",
},
TestCase{
.example = 328,
.sec = "Code spans",
.html = "<p>```foo``</p>\n",
.markdown = "```foo``\n",
},
TestCase{
.example = 329,
.sec = "Code spans",
.html = "<p>`foo</p>\n",
.markdown = "`foo\n",
},
TestCase{
.example = 330,
.sec = "Code spans",
.html = "<p>`foo<code>bar</code></p>\n",
.markdown = "`foo``bar``\n",
},
TestCase{
.example = 331,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo bar</em></p>\n",
.markdown = "*foo bar*\n",
},
TestCase{
.example = 332,
.sec = "Emphasis and strong emphasis",
.html = "<p>a * foo bar*</p>\n",
.markdown = "a * foo bar*\n",
},
TestCase{
.example = 333,
.sec = "Emphasis and strong emphasis",
.html = "<p>a*"foo"*</p>\n",
.markdown = "a*\"foo\"*\n",
},
TestCase{
.example = 334,
.sec = "Emphasis and strong emphasis",
.html = "<p>*\u00a0a\u00a0*</p>\n",
.markdown = "*\u00a0a\u00a0*\n",
},
TestCase{
.example = 335,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo<em>bar</em></p>\n",
.markdown = "foo*bar*\n",
},
TestCase{
.example = 336,
.sec = "Emphasis and strong emphasis",
.html = "<p>5<em>6</em>78</p>\n",
.markdown = "5*6*78\n",
},
TestCase{
.example = 337,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo bar</em></p>\n",
.markdown = "_foo bar_\n",
},
TestCase{
.example = 338,
.sec = "Emphasis and strong emphasis",
.html = "<p>_ foo bar_</p>\n",
.markdown = "_ foo bar_\n",
},
TestCase{
.example = 339,
.sec = "Emphasis and strong emphasis",
.html = "<p>a_"foo"_</p>\n",
.markdown = "a_\"foo\"_\n",
},
TestCase{
.example = 340,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo_bar_</p>\n",
.markdown = "foo_bar_\n",
},
TestCase{
.example = 341,
.sec = "Emphasis and strong emphasis",
.html = "<p>5_6_78</p>\n",
.markdown = "5_6_78\n",
},
TestCase{
.example = 342,
.sec = "Emphasis and strong emphasis",
.html = "<p>пристаням_стремятся_</p>\n",
.markdown = "пристаням_стремятся_\n",
},
TestCase{
.example = 343,
.sec = "Emphasis and strong emphasis",
.html = "<p>aa_"bb"_cc</p>\n",
.markdown = "aa_\"bb\"_cc\n",
},
TestCase{
.example = 344,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo-<em>(bar)</em></p>\n",
.markdown = "foo-_(bar)_\n",
},
TestCase{
.example = 345,
.sec = "Emphasis and strong emphasis",
.html = "<p>_foo*</p>\n",
.markdown = "_foo*\n",
},
TestCase{
.example = 346,
.sec = "Emphasis and strong emphasis",
.html = "<p>*foo bar *</p>\n",
.markdown = "*foo bar *\n",
},
TestCase{
.example = 347,
.sec = "Emphasis and strong emphasis",
.html = "<p>*foo bar\n*</p>\n",
.markdown = "*foo bar\n*\n",
},
TestCase{
.example = 348,
.sec = "Emphasis and strong emphasis",
.html = "<p>*(*foo)</p>\n",
.markdown = "*(*foo)\n",
},
TestCase{
.example = 349,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>(<em>foo</em>)</em></p>\n",
.markdown = "*(*foo*)*\n",
},
TestCase{
.example = 350,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo</em>bar</p>\n",
.markdown = "*foo*bar\n",
},
TestCase{
.example = 351,
.sec = "Emphasis and strong emphasis",
.html = "<p>_foo bar _</p>\n",
.markdown = "_foo bar _\n",
},
TestCase{
.example = 352,
.sec = "Emphasis and strong emphasis",
.html = "<p>_(_foo)</p>\n",
.markdown = "_(_foo)\n",
},
TestCase{
.example = 353,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>(<em>foo</em>)</em></p>\n",
.markdown = "_(_foo_)_\n",
},
TestCase{
.example = 354,
.sec = "Emphasis and strong emphasis",
.html = "<p>_foo_bar</p>\n",
.markdown = "_foo_bar\n",
},
TestCase{
.example = 355,
.sec = "Emphasis and strong emphasis",
.html = "<p>_пристаням_стремятся</p>\n",
.markdown = "_пристаням_стремятся\n",
},
TestCase{
.example = 356,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo_bar_baz</em></p>\n",
.markdown = "_foo_bar_baz_\n",
},
TestCase{
.example = 357,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>(bar)</em>.</p>\n",
.markdown = "_(bar)_.\n",
},
TestCase{
.example = 358,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo bar</strong></p>\n",
.markdown = "**foo bar**\n",
},
TestCase{
.example = 359,
.sec = "Emphasis and strong emphasis",
.html = "<p>** foo bar**</p>\n",
.markdown = "** foo bar**\n",
},
TestCase{
.example = 360,
.sec = "Emphasis and strong emphasis",
.html = "<p>a**"foo"**</p>\n",
.markdown = "a**\"foo\"**\n",
},
TestCase{
.example = 361,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo<strong>bar</strong></p>\n",
.markdown = "foo**bar**\n",
},
TestCase{
.example = 362,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo bar</strong></p>\n",
.markdown = "__foo bar__\n",
},
TestCase{
.example = 363,
.sec = "Emphasis and strong emphasis",
.html = "<p>__ foo bar__</p>\n",
.markdown = "__ foo bar__\n",
},
TestCase{
.example = 364,
.sec = "Emphasis and strong emphasis",
.html = "<p>__\nfoo bar__</p>\n",
.markdown = "__\nfoo bar__\n",
},
TestCase{
.example = 365,
.sec = "Emphasis and strong emphasis",
.html = "<p>a__"foo"__</p>\n",
.markdown = "a__\"foo\"__\n",
},
TestCase{
.example = 366,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo__bar__</p>\n",
.markdown = "foo__bar__\n",
},
TestCase{
.example = 367,
.sec = "Emphasis and strong emphasis",
.html = "<p>5__6__78</p>\n",
.markdown = "5__6__78\n",
},
TestCase{
.example = 368,
.sec = "Emphasis and strong emphasis",
.html = "<p>пристаням__стремятся__</p>\n",
.markdown = "пристаням__стремятся__\n",
},
TestCase{
.example = 369,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo, <strong>bar</strong>, baz</strong></p>\n",
.markdown = "__foo, __bar__, baz__\n",
},
TestCase{
.example = 370,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo-<strong>(bar)</strong></p>\n",
.markdown = "foo-__(bar)__\n",
},
TestCase{
.example = 371,
.sec = "Emphasis and strong emphasis",
.html = "<p>**foo bar **</p>\n",
.markdown = "**foo bar **\n",
},
TestCase{
.example = 372,
.sec = "Emphasis and strong emphasis",
.html = "<p>**(**foo)</p>\n",
.markdown = "**(**foo)\n",
},
TestCase{
.example = 373,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>(<strong>foo</strong>)</em></p>\n",
.markdown = "*(**foo**)*\n",
},
TestCase{
.example = 374,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>Gomphocarpus (<em>Gomphocarpus physocarpus</em>, syn.\n<em>Asclepias physocarpa</em>)</strong></p>\n",
.markdown = "**Gomphocarpus (*Gomphocarpus physocarpus*, syn.\n*Asclepias physocarpa*)**\n",
},
TestCase{
.example = 375,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo "<em>bar</em>" foo</strong></p>\n",
.markdown = "**foo \"*bar*\" foo**\n",
},
TestCase{
.example = 376,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo</strong>bar</p>\n",
.markdown = "**foo**bar\n",
},
TestCase{
.example = 377,
.sec = "Emphasis and strong emphasis",
.html = "<p>__foo bar __</p>\n",
.markdown = "__foo bar __\n",
},
TestCase{
.example = 378,
.sec = "Emphasis and strong emphasis",
.html = "<p>__(__foo)</p>\n",
.markdown = "__(__foo)\n",
},
TestCase{
.example = 379,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>(<strong>foo</strong>)</em></p>\n",
.markdown = "_(__foo__)_\n",
},
TestCase{
.example = 380,
.sec = "Emphasis and strong emphasis",
.html = "<p>__foo__bar</p>\n",
.markdown = "__foo__bar\n",
},
TestCase{
.example = 381,
.sec = "Emphasis and strong emphasis",
.html = "<p>__пристаням__стремятся</p>\n",
.markdown = "__пристаням__стремятся\n",
},
TestCase{
.example = 382,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo__bar__baz</strong></p>\n",
.markdown = "__foo__bar__baz__\n",
},
TestCase{
.example = 383,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>(bar)</strong>.</p>\n",
.markdown = "__(bar)__.\n",
},
TestCase{
.example = 384,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo <a href=\"/url\">bar</a></em></p>\n",
.markdown = "*foo [bar](/url)*\n",
},
TestCase{
.example = 385,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo\nbar</em></p>\n",
.markdown = "*foo\nbar*\n",
},
TestCase{
.example = 386,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo <strong>bar</strong> baz</em></p>\n",
.markdown = "_foo __bar__ baz_\n",
},
TestCase{
.example = 387,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo <em>bar</em> baz</em></p>\n",
.markdown = "_foo _bar_ baz_\n",
},
TestCase{
.example = 388,
.sec = "Emphasis and strong emphasis",
.html = "<p><em><em>foo</em> bar</em></p>\n",
.markdown = "__foo_ bar_\n",
},
TestCase{
.example = 389,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo <em>bar</em></em></p>\n",
.markdown = "*foo *bar**\n",
},
TestCase{
.example = 390,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo <strong>bar</strong> baz</em></p>\n",
.markdown = "*foo **bar** baz*\n",
},
TestCase{
.example = 391,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo<strong>bar</strong>baz</em></p>\n",
.markdown = "*foo**bar**baz*\n",
},
TestCase{
.example = 392,
.sec = "Emphasis and strong emphasis",
.html = "<p><em><strong>foo</strong> bar</em></p>\n",
.markdown = "***foo** bar*\n",
},
TestCase{
.example = 393,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo <strong>bar</strong></em></p>\n",
.markdown = "*foo **bar***\n",
},
TestCase{
.example = 394,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo<strong>bar</strong></em></p>\n",
.markdown = "*foo**bar***\n",
},
TestCase{
.example = 395,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo <strong>bar <em>baz</em> bim</strong> bop</em></p>\n",
.markdown = "*foo **bar *baz* bim** bop*\n",
},
TestCase{
.example = 396,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo <a href=\"/url\"><em>bar</em></a></em></p>\n",
.markdown = "*foo [*bar*](/url)*\n",
},
TestCase{
.example = 397,
.sec = "Emphasis and strong emphasis",
.html = "<p>** is not an empty emphasis</p>\n",
.markdown = "** is not an empty emphasis\n",
},
TestCase{
.example = 398,
.sec = "Emphasis and strong emphasis",
.html = "<p>**** is not an empty strong emphasis</p>\n",
.markdown = "**** is not an empty strong emphasis\n",
},
TestCase{
.example = 399,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo <a href=\"/url\">bar</a></strong></p>\n",
.markdown = "**foo [bar](/url)**\n",
},
TestCase{
.example = 400,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo\nbar</strong></p>\n",
.markdown = "**foo\nbar**\n",
},
TestCase{
.example = 401,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo <em>bar</em> baz</strong></p>\n",
.markdown = "__foo _bar_ baz__\n",
},
TestCase{
.example = 402,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo <strong>bar</strong> baz</strong></p>\n",
.markdown = "__foo __bar__ baz__\n",
},
TestCase{
.example = 403,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong><strong>foo</strong> bar</strong></p>\n",
.markdown = "____foo__ bar__\n",
},
TestCase{
.example = 404,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo <strong>bar</strong></strong></p>\n",
.markdown = "**foo **bar****\n",
},
TestCase{
.example = 405,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo <em>bar</em> baz</strong></p>\n",
.markdown = "**foo *bar* baz**\n",
},
TestCase{
.example = 406,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo<em>bar</em>baz</strong></p>\n",
.markdown = "**foo*bar*baz**\n",
},
TestCase{
.example = 407,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong><em>foo</em> bar</strong></p>\n",
.markdown = "***foo* bar**\n",
},
TestCase{
.example = 408,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo <em>bar</em></strong></p>\n",
.markdown = "**foo *bar***\n",
},
TestCase{
.example = 409,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo <em>bar <strong>baz</strong>\nbim</em> bop</strong></p>\n",
.markdown = "**foo *bar **baz**\nbim* bop**\n",
},
TestCase{
.example = 410,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo <a href=\"/url\"><em>bar</em></a></strong></p>\n",
.markdown = "**foo [*bar*](/url)**\n",
},
TestCase{
.example = 411,
.sec = "Emphasis and strong emphasis",
.html = "<p>__ is not an empty emphasis</p>\n",
.markdown = "__ is not an empty emphasis\n",
},
TestCase{
.example = 412,
.sec = "Emphasis and strong emphasis",
.html = "<p>____ is not an empty strong emphasis</p>\n",
.markdown = "____ is not an empty strong emphasis\n",
},
TestCase{
.example = 413,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo ***</p>\n",
.markdown = "foo ***\n",
},
TestCase{
.example = 414,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo <em>*</em></p>\n",
.markdown = "foo *\\**\n",
},
TestCase{
.example = 415,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo <em>_</em></p>\n",
.markdown = "foo *_*\n",
},
TestCase{
.example = 416,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo *****</p>\n",
.markdown = "foo *****\n",
},
TestCase{
.example = 417,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo <strong>*</strong></p>\n",
.markdown = "foo **\\***\n",
},
TestCase{
.example = 418,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo <strong>_</strong></p>\n",
.markdown = "foo **_**\n",
},
TestCase{
.example = 419,
.sec = "Emphasis and strong emphasis",
.html = "<p>*<em>foo</em></p>\n",
.markdown = "**foo*\n",
},
TestCase{
.example = 420,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo</em>*</p>\n",
.markdown = "*foo**\n",
},
TestCase{
.example = 421,
.sec = "Emphasis and strong emphasis",
.html = "<p>*<strong>foo</strong></p>\n",
.markdown = "***foo**\n",
},
TestCase{
.example = 422,
.sec = "Emphasis and strong emphasis",
.html = "<p>***<em>foo</em></p>\n",
.markdown = "****foo*\n",
},
TestCase{
.example = 423,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo</strong>*</p>\n",
.markdown = "**foo***\n",
},
TestCase{
.example = 424,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo</em>***</p>\n",
.markdown = "*foo****\n",
},
TestCase{
.example = 425,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo ___</p>\n",
.markdown = "foo ___\n",
},
TestCase{
.example = 426,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo <em>_</em></p>\n",
.markdown = "foo _\\__\n",
},
TestCase{
.example = 427,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo <em>*</em></p>\n",
.markdown = "foo _*_\n",
},
TestCase{
.example = 428,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo _____</p>\n",
.markdown = "foo _____\n",
},
TestCase{
.example = 429,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo <strong>_</strong></p>\n",
.markdown = "foo __\\___\n",
},
TestCase{
.example = 430,
.sec = "Emphasis and strong emphasis",
.html = "<p>foo <strong>*</strong></p>\n",
.markdown = "foo __*__\n",
},
TestCase{
.example = 431,
.sec = "Emphasis and strong emphasis",
.html = "<p>_<em>foo</em></p>\n",
.markdown = "__foo_\n",
},
TestCase{
.example = 432,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo</em>_</p>\n",
.markdown = "_foo__\n",
},
TestCase{
.example = 433,
.sec = "Emphasis and strong emphasis",
.html = "<p>_<strong>foo</strong></p>\n",
.markdown = "___foo__\n",
},
TestCase{
.example = 434,
.sec = "Emphasis and strong emphasis",
.html = "<p>___<em>foo</em></p>\n",
.markdown = "____foo_\n",
},
TestCase{
.example = 435,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo</strong>_</p>\n",
.markdown = "__foo___\n",
},
TestCase{
.example = 436,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo</em>___</p>\n",
.markdown = "_foo____\n",
},
TestCase{
.example = 437,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo</strong></p>\n",
.markdown = "**foo**\n",
},
TestCase{
.example = 438,
.sec = "Emphasis and strong emphasis",
.html = "<p><em><em>foo</em></em></p>\n",
.markdown = "*_foo_*\n",
},
TestCase{
.example = 439,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong>foo</strong></p>\n",
.markdown = "__foo__\n",
},
TestCase{
.example = 440,
.sec = "Emphasis and strong emphasis",
.html = "<p><em><em>foo</em></em></p>\n",
.markdown = "_*foo*_\n",
},
TestCase{
.example = 441,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong><strong>foo</strong></strong></p>\n",
.markdown = "****foo****\n",
},
TestCase{
.example = 442,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong><strong>foo</strong></strong></p>\n",
.markdown = "____foo____\n",
},
TestCase{
.example = 443,
.sec = "Emphasis and strong emphasis",
.html = "<p><strong><strong><strong>foo</strong></strong></strong></p>\n",
.markdown = "******foo******\n",
},
TestCase{
.example = 444,
.sec = "Emphasis and strong emphasis",
.html = "<p><em><strong>foo</strong></em></p>\n",
.markdown = "***foo***\n",
},
TestCase{
.example = 445,
.sec = "Emphasis and strong emphasis",
.html = "<p><em><strong><strong>foo</strong></strong></em></p>\n",
.markdown = "_____foo_____\n",
},
TestCase{
.example = 446,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo _bar</em> baz_</p>\n",
.markdown = "*foo _bar* baz_\n",
},
TestCase{
.example = 447,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>foo <strong>bar *baz bim</strong> bam</em></p>\n",
.markdown = "*foo __bar *baz bim__ bam*\n",
},
TestCase{
.example = 448,
.sec = "Emphasis and strong emphasis",
.html = "<p>**foo <strong>bar baz</strong></p>\n",
.markdown = "**foo **bar baz**\n",
},
TestCase{
.example = 449,
.sec = "Emphasis and strong emphasis",
.html = "<p>*foo <em>bar baz</em></p>\n",
.markdown = "*foo *bar baz*\n",
},
TestCase{
.example = 450,
.sec = "Emphasis and strong emphasis",
.html = "<p>*<a href=\"/url\">bar*</a></p>\n",
.markdown = "*[bar*](/url)\n",
},
TestCase{
.example = 451,
.sec = "Emphasis and strong emphasis",
.html = "<p>_foo <a href=\"/url\">bar_</a></p>\n",
.markdown = "_foo [bar_](/url)\n",
},
TestCase{
.example = 452,
.sec = "Emphasis and strong emphasis",
.html = "<p>*<img src=\"foo\" title=\"*\"/></p>\n",
.markdown = "*<img src=\"foo\" title=\"*\"/>\n",
},
TestCase{
.example = 453,
.sec = "Emphasis and strong emphasis",
.html = "<p>**<a href=\"**\"></p>\n",
.markdown = "**<a href=\"**\">\n",
},
TestCase{
.example = 454,
.sec = "Emphasis and strong emphasis",
.html = "<p>__<a href=\"__\"></p>\n",
.markdown = "__<a href=\"__\">\n",
},
TestCase{
.example = 455,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>a <code>*</code></em></p>\n",
.markdown = "*a `*`*\n",
},
TestCase{
.example = 456,
.sec = "Emphasis and strong emphasis",
.html = "<p><em>a <code>_</code></em></p>\n",
.markdown = "_a `_`_\n",
},
TestCase{
.example = 457,
.sec = "Emphasis and strong emphasis",
.html = "<p>**a<a href=\"http://foo.bar/?q=**\">http://foo.bar/?q=**</a></p>\n",
.markdown = "**a<http://foo.bar/?q=**>\n",
},
TestCase{
.example = 458,
.sec = "Emphasis and strong emphasis",
.html = "<p>__a<a href=\"http://foo.bar/?q=__\">http://foo.bar/?q=__</a></p>\n",
.markdown = "__a<http://foo.bar/?q=__>\n",
},
TestCase{
.example = 459,
.sec = "Links",
.html = "<p><a href=\"/uri\" title=\"title\">link</a></p>\n",
.markdown = "[link](/uri \"title\")\n",
},
TestCase{
.example = 460,
.sec = "Links",
.html = "<p><a href=\"/uri\">link</a></p>\n",
.markdown = "[link](/uri)\n",
},
TestCase{
.example = 461,
.sec = "Links",
.html = "<p><a href=\"\">link</a></p>\n",
.markdown = "[link]()\n",
},
TestCase{
.example = 462,
.sec = "Links",
.html = "<p><a href=\"\">link</a></p>\n",
.markdown = "[link](<>)\n",
},
TestCase{
.example = 463,
.sec = "Links",
.html = "<p>[link](/my uri)</p>\n",
.markdown = "[link](/my uri)\n",
},
TestCase{
.example = 464,
.sec = "Links",
.html = "<p>[link](</my uri>)</p>\n",
.markdown = "[link](</my uri>)\n",
},
TestCase{
.example = 465,
.sec = "Links",
.html = "<p>[link](foo\nbar)</p>\n",
.markdown = "[link](foo\nbar)\n",
},
TestCase{
.example = 466,
.sec = "Links",
.html = "<p>[link](<foo\nbar>)</p>\n",
.markdown = "[link](<foo\nbar>)\n",
},
TestCase{
.example = 467,
.sec = "Links",
.html = "<p><a href=\"(foo)\">link</a></p>\n",
.markdown = "[link](\\(foo\\))\n",
},
TestCase{
.example = 468,
.sec = "Links",
.html = "<p><a href=\"foo(and(bar))\">link</a></p>\n",
.markdown = "[link](foo(and(bar)))\n",
},
TestCase{
.example = 469,
.sec = "Links",
.html = "<p><a href=\"foo(and(bar)\">link</a></p>\n",
.markdown = "[link](foo\\(and\\(bar\\))\n",
},
TestCase{
.example = 470,
.sec = "Links",
.html = "<p><a href=\"foo(and(bar)\">link</a></p>\n",
.markdown = "[link](<foo(and(bar)>)\n",
},
TestCase{
.example = 471,
.sec = "Links",
.html = "<p><a href=\"foo):\">link</a></p>\n",
.markdown = "[link](foo\\)\\:)\n",
},
TestCase{
.example = 472,
.sec = "Links",
.html = "<p><a href=\"#fragment\">link</a></p>\n<p><a href=\"http://example.com#fragment\">link</a></p>\n<p><a href=\"http://example.com?foo=3#frag\">link</a></p>\n",
.markdown = "[link](#fragment)\n\n[link](http://example.com#fragment)\n\n[link](http://example.com?foo=3#frag)\n",
},
TestCase{
.example = 473,
.sec = "Links",
.html = "<p><a href=\"foo%5Cbar\">link</a></p>\n",
.markdown = "[link](foo\\bar)\n",
},
TestCase{
.example = 474,
.sec = "Links",
.html = "<p><a href=\"foo%20b%C3%A4\">link</a></p>\n",
.markdown = "[link](foo%20bä)\n",
},
TestCase{
.example = 475,
.sec = "Links",
.html = "<p><a href=\"%22title%22\">link</a></p>\n",
.markdown = "[link](\"title\")\n",
},
TestCase{
.example = 476,
.sec = "Links",
.html = "<p><a href=\"/url\" title=\"title\">link</a>\n<a href=\"/url\" title=\"title\">link</a>\n<a href=\"/url\" title=\"title\">link</a></p>\n",
.markdown = "[link](/url \"title\")\n[link](/url 'title')\n[link](/url (title))\n",
},
TestCase{
.example = 477,
.sec = "Links",
.html = "<p><a href=\"/url\" title=\"title ""\">link</a></p>\n",
.markdown = "[link](/url \"title \\\""\")\n",
},
TestCase{
.example = 478,
.sec = "Links",
.html = "<p><a href=\"/url%C2%A0%22title%22\">link</a></p>\n",
.markdown = "[link](/url\u00a0\"title\")\n",
},
TestCase{
.example = 479,
.sec = "Links",
.html = "<p>[link](/url "title "and" title")</p>\n",
.markdown = "[link](/url \"title \"and\" title\")\n",
},
TestCase{
.example = 480,
.sec = "Links",
.html = "<p><a href=\"/url\" title=\"title "and" title\">link</a></p>\n",
.markdown = "[link](/url 'title \"and\" title')\n",
},
TestCase{
.example = 481,
.sec = "Links",
.html = "<p><a href=\"/uri\" title=\"title\">link</a></p>\n",
.markdown = "[link]( /uri\n \"title\" )\n",
},
TestCase{
.example = 482,
.sec = "Links",
.html = "<p>[link] (/uri)</p>\n",
.markdown = "[link] (/uri)\n",
},
TestCase{
.example = 483,
.sec = "Links",
.html = "<p><a href=\"/uri\">link [foo [bar]]</a></p>\n",
.markdown = "[link [foo [bar]]](/uri)\n",
},
TestCase{
.example = 484,
.sec = "Links",
.html = "<p>[link] bar](/uri)</p>\n",
.markdown = "[link] bar](/uri)\n",
},
TestCase{
.example = 485,
.sec = "Links",
.html = "<p>[link <a href=\"/uri\">bar</a></p>\n",
.markdown = "[link [bar](/uri)\n",
},
TestCase{
.example = 486,
.sec = "Links",
.html = "<p><a href=\"/uri\">link [bar</a></p>\n",
.markdown = "[link \\[bar](/uri)\n",
},
TestCase{
.example = 487,
.sec = "Links",
.html = "<p><a href=\"/uri\">link <em>foo <strong>bar</strong> <code>#</code></em></a></p>\n",
.markdown = "[link *foo **bar** `#`*](/uri)\n",
},
TestCase{
.example = 488,
.sec = "Links",
.html = "<p><a href=\"/uri\"><img src=\"moon.jpg\" alt=\"moon\" /></a></p>\n",
.markdown = "[](/uri)\n",
},
TestCase{
.example = 489,
.sec = "Links",
.html = "<p>[foo <a href=\"/uri\">bar</a>](/uri)</p>\n",
.markdown = "[foo [bar](/uri)](/uri)\n",
},
TestCase{
.example = 490,
.sec = "Links",
.html = "<p>[foo <em>[bar <a href=\"/uri\">baz</a>](/uri)</em>](/uri)</p>\n",
.markdown = "[foo *[bar [baz](/uri)](/uri)*](/uri)\n",
},
TestCase{
.example = 491,
.sec = "Links",
.html = "<p><img src=\"uri3\" alt=\"[foo](uri2)\" /></p>\n",
.markdown = "](uri2)](uri3)\n",
},
TestCase{
.example = 492,
.sec = "Links",
.html = "<p>*<a href=\"/uri\">foo*</a></p>\n",
.markdown = "*[foo*](/uri)\n",
},
TestCase{
.example = 493,
.sec = "Links",
.html = "<p><a href=\"baz*\">foo *bar</a></p>\n",
.markdown = "[foo *bar](baz*)\n",
},
TestCase{
.example = 494,
.sec = "Links",
.html = "<p><em>foo [bar</em> baz]</p>\n",
.markdown = "*foo [bar* baz]\n",
},
TestCase{
.example = 495,
.sec = "Links",
.html = "<p>[foo <bar attr=\"](baz)\"></p>\n",
.markdown = "[foo <bar attr=\"](baz)\">\n",
},
TestCase{
.example = 496,
.sec = "Links",
.html = "<p>[foo<code>](/uri)</code></p>\n",
.markdown = "[foo`](/uri)`\n",
},
TestCase{
.example = 497,
.sec = "Links",
.html = "<p>[foo<a href=\"http://example.com/?search=%5D(uri)\">http://example.com/?search=](uri)</a></p>\n",
.markdown = "[foo<http://example.com/?search=](uri)>\n",
},
TestCase{
.example = 498,
.sec = "Links",
.html = "<p><a href=\"/url\" title=\"title\">foo</a></p>\n",
.markdown = "[foo][bar]\n\n[bar]: /url \"title\"\n",
},
TestCase{
.example = 499,
.sec = "Links",
.html = "<p><a href=\"/uri\">link [foo [bar]]</a></p>\n",
.markdown = "[link [foo [bar]]][ref]\n\n[ref]: /uri\n",
},
TestCase{
.example = 500,
.sec = "Links",
.html = "<p><a href=\"/uri\">link [bar</a></p>\n",
.markdown = "[link \\[bar][ref]\n\n[ref]: /uri\n",
},
TestCase{
.example = 501,
.sec = "Links",
.html = "<p><a href=\"/uri\">link <em>foo <strong>bar</strong> <code>#</code></em></a></p>\n",
.markdown = "[link *foo **bar** `#`*][ref]\n\n[ref]: /uri\n",
},
TestCase{
.example = 502,
.sec = "Links",
.html = "<p><a href=\"/uri\"><img src=\"moon.jpg\" alt=\"moon\" /></a></p>\n",
.markdown = "[][ref]\n\n[ref]: /uri\n",
},
TestCase{
.example = 503,
.sec = "Links",
.html = "<p>[foo <a href=\"/uri\">bar</a>]<a href=\"/uri\">ref</a></p>\n",
.markdown = "[foo [bar](/uri)][ref]\n\n[ref]: /uri\n",
},
TestCase{
.example = 504,
.sec = "Links",
.html = "<p>[foo <em>bar <a href=\"/uri\">baz</a></em>]<a href=\"/uri\">ref</a></p>\n",
.markdown = "[foo *bar [baz][ref]*][ref]\n\n[ref]: /uri\n",
},
TestCase{
.example = 505,
.sec = "Links",
.html = "<p>*<a href=\"/uri\">foo*</a></p>\n",
.markdown = "*[foo*][ref]\n\n[ref]: /uri\n",
},
TestCase{
.example = 506,
.sec = "Links",
.html = "<p><a href=\"/uri\">foo *bar</a></p>\n",
.markdown = "[foo *bar][ref]\n\n[ref]: /uri\n",
},
TestCase{
.example = 507,
.sec = "Links",
.html = "<p>[foo <bar attr=\"][ref]\"></p>\n",
.markdown = "[foo <bar attr=\"][ref]\">\n\n[ref]: /uri\n",
},
TestCase{
.example = 508,
.sec = "Links",
.html = "<p>[foo<code>][ref]</code></p>\n",
.markdown = "[foo`][ref]`\n\n[ref]: /uri\n",
},
TestCase{
.example = 509,
.sec = "Links",
.html = "<p>[foo<a href=\"http://example.com/?search=%5D%5Bref%5D\">http://example.com/?search=][ref]</a></p>\n",
.markdown = "[foo<http://example.com/?search=][ref]>\n\n[ref]: /uri\n",
},
TestCase{
.example = 510,
.sec = "Links",
.html = "<p><a href=\"/url\" title=\"title\">foo</a></p>\n",
.markdown = "[foo][BaR]\n\n[bar]: /url \"title\"\n",
},
TestCase{
.example = 511,
.sec = "Links",
.html = "<p><a href=\"/url\">Толпой</a> is a Russian word.</p>\n",
.markdown = "[Толпой][Толпой] is a Russian word.\n\n[ТОЛПОЙ]: /url\n",
},
TestCase{
.example = 512,
.sec = "Links",
.html = "<p><a href=\"/url\">Baz</a></p>\n",
.markdown = "[Foo\n bar]: /url\n\n[Baz][Foo bar]\n",
},
TestCase{
.example = 513,
.sec = "Links",
.html = "<p>[foo] <a href=\"/url\" title=\"title\">bar</a></p>\n",
.markdown = "[foo] [bar]\n\n[bar]: /url \"title\"\n",
},
TestCase{
.example = 514,
.sec = "Links",
.html = "<p>[foo]\n<a href=\"/url\" title=\"title\">bar</a></p>\n",
.markdown = "[foo]\n[bar]\n\n[bar]: /url \"title\"\n",
},
TestCase{
.example = 515,
.sec = "Links",
.html = "<p><a href=\"/url1\">bar</a></p>\n",
.markdown = "[foo]: /url1\n\n[foo]: /url2\n\n[bar][foo]\n",
},
TestCase{
.example = 516,
.sec = "Links",
.html = "<p>[bar][foo!]</p>\n",
.markdown = "[bar][foo\\!]\n\n[foo!]: /url\n",
},
TestCase{
.example = 517,
.sec = "Links",
.html = "<p>[foo][ref[]</p>\n<p>[ref[]: /uri</p>\n",
.markdown = "[foo][ref[]\n\n[ref[]: /uri\n",
},
TestCase{
.example = 518,
.sec = "Links",
.html = "<p>[foo][ref[bar]]</p>\n<p>[ref[bar]]: /uri</p>\n",
.markdown = "[foo][ref[bar]]\n\n[ref[bar]]: /uri\n",
},
TestCase{
.example = 519,
.sec = "Links",
.html = "<p>[[[foo]]]</p>\n<p>[[[foo]]]: /url</p>\n",
.markdown = "[[[foo]]]\n\n[[[foo]]]: /url\n",
},
TestCase{
.example = 520,
.sec = "Links",
.html = "<p><a href=\"/uri\">foo</a></p>\n",
.markdown = "[foo][ref\\[]\n\n[ref\\[]: /uri\n",
},
TestCase{
.example = 521,
.sec = "Links",
.html = "<p><a href=\"/uri\">bar\\</a></p>\n",
.markdown = "[bar\\\\]: /uri\n\n[bar\\\\]\n",
},
TestCase{
.example = 522,
.sec = "Links",
.html = "<p>[]</p>\n<p>[]: /uri</p>\n",
.markdown = "[]\n\n[]: /uri\n",
},
TestCase{
.example = 523,
.sec = "Links",
.html = "<p>[\n]</p>\n<p>[\n]: /uri</p>\n",
.markdown = "[\n ]\n\n[\n ]: /uri\n",
},
TestCase{
.example = 524,
.sec = "Links",
.html = "<p><a href=\"/url\" title=\"title\">foo</a></p>\n",
.markdown = "[foo][]\n\n[foo]: /url \"title\"\n",
},
TestCase{
.example = 525,
.sec = "Links",
.html = "<p><a href=\"/url\" title=\"title\"><em>foo</em> bar</a></p>\n",
.markdown = "[*foo* bar][]\n\n[*foo* bar]: /url \"title\"\n",
},
TestCase{
.example = 526,
.sec = "Links",
.html = "<p><a href=\"/url\" title=\"title\">Foo</a></p>\n",
.markdown = "[Foo][]\n\n[foo]: /url \"title\"\n",
},
TestCase{
.example = 527,
.sec = "Links",
.html = "<p><a href=\"/url\" title=\"title\">foo</a>\n[]</p>\n",
.markdown = "[foo] \n[]\n\n[foo]: /url \"title\"\n",
},
TestCase{
.example = 528,
.sec = "Links",
.html = "<p><a href=\"/url\" title=\"title\">foo</a></p>\n",
.markdown = "[foo]\n\n[foo]: /url \"title\"\n",
},
TestCase{
.example = 529,
.sec = "Links",
.html = "<p><a href=\"/url\" title=\"title\"><em>foo</em> bar</a></p>\n",
.markdown = "[*foo* bar]\n\n[*foo* bar]: /url \"title\"\n",
},
TestCase{
.example = 530,
.sec = "Links",
.html = "<p>[<a href=\"/url\" title=\"title\"><em>foo</em> bar</a>]</p>\n",
.markdown = "[[*foo* bar]]\n\n[*foo* bar]: /url \"title\"\n",
},
TestCase{
.example = 531,
.sec = "Links",
.html = "<p>[[bar <a href=\"/url\">foo</a></p>\n",
.markdown = "[[bar [foo]\n\n[foo]: /url\n",
},
TestCase{
.example = 532,
.sec = "Links",
.html = "<p><a href=\"/url\" title=\"title\">Foo</a></p>\n",
.markdown = "[Foo]\n\n[foo]: /url \"title\"\n",
},
TestCase{
.example = 533,
.sec = "Links",
.html = "<p><a href=\"/url\">foo</a> bar</p>\n",
.markdown = "[foo] bar\n\n[foo]: /url\n",
},
TestCase{
.example = 534,
.sec = "Links",
.html = "<p>[foo]</p>\n",
.markdown = "\\[foo]\n\n[foo]: /url \"title\"\n",
},
TestCase{
.example = 535,
.sec = "Links",
.html = "<p>*<a href=\"/url\">foo*</a></p>\n",
.markdown = "[foo*]: /url\n\n*[foo*]\n",
},
TestCase{
.example = 536,
.sec = "Links",
.html = "<p><a href=\"/url2\">foo</a></p>\n",
.markdown = "[foo][bar]\n\n[foo]: /url1\n[bar]: /url2\n",
},
TestCase{
.example = 537,
.sec = "Links",
.html = "<p><a href=\"/url1\">foo</a></p>\n",
.markdown = "[foo][]\n\n[foo]: /url1\n",
},
TestCase{
.example = 538,
.sec = "Links",
.html = "<p><a href=\"\">foo</a></p>\n",
.markdown = "[foo]()\n\n[foo]: /url1\n",
},
TestCase{
.example = 539,
.sec = "Links",
.html = "<p><a href=\"/url1\">foo</a>(not a link)</p>\n",
.markdown = "[foo](not a link)\n\n[foo]: /url1\n",
},
TestCase{
.example = 540,
.sec = "Links",
.html = "<p>[foo]<a href=\"/url\">bar</a></p>\n",
.markdown = "[foo][bar][baz]\n\n[baz]: /url\n",
},
TestCase{
.example = 541,
.sec = "Links",
.html = "<p><a href=\"/url2\">foo</a><a href=\"/url1\">baz</a></p>\n",
.markdown = "[foo][bar][baz]\n\n[baz]: /url1\n[bar]: /url2\n",
},
TestCase{
.example = 542,
.sec = "Links",
.html = "<p>[foo]<a href=\"/url1\">bar</a></p>\n",
.markdown = "[foo][bar][baz]\n\n[baz]: /url1\n[foo]: /url2\n",
},
TestCase{
.example = 543,
.sec = "Images",
.html = "<p><img src=\"/url\" alt=\"foo\" title=\"title\" /></p>\n",
.markdown = "\n",
},
TestCase{
.example = 544,
.sec = "Images",
.html = "<p><img src=\"train.jpg\" alt=\"foo bar\" title=\"train & tracks\" /></p>\n",
.markdown = "![foo *bar*]\n\n[foo *bar*]: train.jpg \"train & tracks\"\n",
},
TestCase{
.example = 545,
.sec = "Images",
.html = "<p><img src=\"/url2\" alt=\"foo bar\" /></p>\n",
.markdown = "](/url2)\n",
},
TestCase{
.example = 546,
.sec = "Images",
.html = "<p><img src=\"/url2\" alt=\"foo bar\" /></p>\n",
.markdown = "](/url2)\n",
},
TestCase{
.example = 547,
.sec = "Images",
.html = "<p><img src=\"train.jpg\" alt=\"foo bar\" title=\"train & tracks\" /></p>\n",
.markdown = "![foo *bar*][]\n\n[foo *bar*]: train.jpg \"train & tracks\"\n",
},
TestCase{
.example = 548,
.sec = "Images",
.html = "<p><img src=\"train.jpg\" alt=\"foo bar\" title=\"train & tracks\" /></p>\n",
.markdown = "![foo *bar*][foobar]\n\n[FOOBAR]: train.jpg \"train & tracks\"\n",
},
TestCase{
.example = 549,
.sec = "Images",
.html = "<p><img src=\"train.jpg\" alt=\"foo\" /></p>\n",
.markdown = "\n",
},
TestCase{
.example = 550,
.sec = "Images",
.html = "<p>My <img src=\"/path/to/train.jpg\" alt=\"foo bar\" title=\"title\" /></p>\n",
.markdown = "My \n",
},
TestCase{
.example = 551,
.sec = "Images",
.html = "<p><img src=\"url\" alt=\"foo\" /></p>\n",
.markdown = "\n",
},
TestCase{
.example = 552,
.sec = "Images",
.html = "<p><img src=\"/url\" alt=\"\" /></p>\n",
.markdown = "\n",
},
TestCase{
.example = 553,
.sec = "Images",
.html = "<p><img src=\"/url\" alt=\"foo\" /></p>\n",
.markdown = "![foo][bar]\n\n[bar]: /url\n",
},
TestCase{
.example = 554,
.sec = "Images",
.html = "<p><img src=\"/url\" alt=\"foo\" /></p>\n",
.markdown = "![foo][bar]\n\n[BAR]: /url\n",
},
TestCase{
.example = 555,
.sec = "Images",
.html = "<p><img src=\"/url\" alt=\"foo\" title=\"title\" /></p>\n",
.markdown = "![foo][]\n\n[foo]: /url \"title\"\n",
},
TestCase{
.example = 556,
.sec = "Images",
.html = "<p><img src=\"/url\" alt=\"foo bar\" title=\"title\" /></p>\n",
.markdown = "![*foo* bar][]\n\n[*foo* bar]: /url \"title\"\n",
},
TestCase{
.example = 557,
.sec = "Images",
.html = "<p><img src=\"/url\" alt=\"Foo\" title=\"title\" /></p>\n",
.markdown = "![Foo][]\n\n[foo]: /url \"title\"\n",
},
TestCase{
.example = 558,
.sec = "Images",
.html = "<p><img src=\"/url\" alt=\"foo\" title=\"title\" />\n[]</p>\n",
.markdown = "![foo] \n[]\n\n[foo]: /url \"title\"\n",
},
TestCase{
.example = 559,
.sec = "Images",
.html = "<p><img src=\"/url\" alt=\"foo\" title=\"title\" /></p>\n",
.markdown = "![foo]\n\n[foo]: /url \"title\"\n",
},
TestCase{
.example = 560,
.sec = "Images",
.html = "<p><img src=\"/url\" alt=\"foo bar\" title=\"title\" /></p>\n",
.markdown = "![*foo* bar]\n\n[*foo* bar]: /url \"title\"\n",
},
TestCase{
.example = 561,
.sec = "Images",
.html = "<p>![[foo]]</p>\n<p>[[foo]]: /url "title"</p>\n",
.markdown = "![[foo]]\n\n[[foo]]: /url \"title\"\n",
},
TestCase{
.example = 562,
.sec = "Images",
.html = "<p><img src=\"/url\" alt=\"Foo\" title=\"title\" /></p>\n",
.markdown = "![Foo]\n\n[foo]: /url \"title\"\n",
},
TestCase{
.example = 563,
.sec = "Images",
.html = "<p>![foo]</p>\n",
.markdown = "!\\[foo]\n\n[foo]: /url \"title\"\n",
},
TestCase{
.example = 564,
.sec = "Images",
.html = "<p>!<a href=\"/url\" title=\"title\">foo</a></p>\n",
.markdown = "\\![foo]\n\n[foo]: /url \"title\"\n",
},
TestCase{
.example = 565,
.sec = "Autolinks",
.html = "<p><a href=\"http://foo.bar.baz\">http://foo.bar.baz</a></p>\n",
.markdown = "<http://foo.bar.baz>\n",
},
TestCase{
.example = 566,
.sec = "Autolinks",
.html = "<p><a href=\"http://foo.bar.baz/test?q=hello&id=22&boolean\">http://foo.bar.baz/test?q=hello&id=22&boolean</a></p>\n",
.markdown = "<http://foo.bar.baz/test?q=hello&id=22&boolean>\n",
},
TestCase{
.example = 567,
.sec = "Autolinks",
.html = "<p><a href=\"irc://foo.bar:2233/baz\">irc://foo.bar:2233/baz</a></p>\n",
.markdown = "<irc://foo.bar:2233/baz>\n",
},
TestCase{
.example = 568,
.sec = "Autolinks",
.html = "<p><a href=\"MAILTO:<EMAIL>\">MAILTO:<EMAIL></a></p>\n",
.markdown = "<MAILTO:<EMAIL>>\n",
},
TestCase{
.example = 569,
.sec = "Autolinks",
.html = "<p><a href=\"a+b+c:d\">a+b+c:d</a></p>\n",
.markdown = "<a+b+c:d>\n",
},
TestCase{
.example = 570,
.sec = "Autolinks",
.html = "<p><a href=\"made-up-scheme://foo,bar\">made-up-scheme://foo,bar</a></p>\n",
.markdown = "<made-up-scheme://foo,bar>\n",
},
TestCase{
.example = 571,
.sec = "Autolinks",
.html = "<p><a href=\"http://../\">http://../</a></p>\n",
.markdown = "<http://../>\n",
},
TestCase{
.example = 572,
.sec = "Autolinks",
.html = "<p><a href=\"localhost:5001/foo\">localhost:5001/foo</a></p>\n",
.markdown = "<localhost:5001/foo>\n",
},
TestCase{
.example = 573,
.sec = "Autolinks",
.html = "<p><http://foo.bar/baz bim></p>\n",
.markdown = "<http://foo.bar/baz bim>\n",
},
TestCase{
.example = 574,
.sec = "Autolinks",
.html = "<p><a href=\"http://example.com/%5C%5B%5C\">http://example.com/\\[\\</a></p>\n",
.markdown = "<http://example.com/\\[\\>\n",
},
TestCase{
.example = 575,
.sec = "Autolinks",
.html = "<p><a href=\"mailto:<EMAIL>\"><EMAIL></a></p>\n",
.markdown = "<<EMAIL>>\n",
},
TestCase{
.example = 576,
.sec = "Autolinks",
.html = "<p><a href=\"mailto:<EMAIL>\"><EMAIL></a></p>\n",
.markdown = "<<EMAIL>>\n",
},
TestCase{
.example = 577,
.sec = "Autolinks",
.html = "<p><<EMAIL>></p>\n",
.markdown = "<<EMAIL>>\n",
},
TestCase{
.example = 578,
.sec = "Autolinks",
.html = "<p><></p>\n",
.markdown = "<>\n",
},
TestCase{
.example = 579,
.sec = "Autolinks",
.html = "<p>< http://foo.bar ></p>\n",
.markdown = "< http://foo.bar >\n",
},
TestCase{
.example = 580,
.sec = "Autolinks",
.html = "<p><m:abc></p>\n",
.markdown = "<m:abc>\n",
},
TestCase{
.example = 581,
.sec = "Autolinks",
.html = "<p><foo.bar.baz></p>\n",
.markdown = "<foo.bar.baz>\n",
},
TestCase{
.example = 582,
.sec = "Autolinks",
.html = "<p>http://example.com</p>\n",
.markdown = "http://example.com\n",
},
TestCase{
.example = 583,
.sec = "Autolinks",
.html = "<p><EMAIL></p>\n",
.markdown = "<EMAIL>\n",
},
TestCase{
.example = 584,
.sec = "Raw HTML",
.html = "<p><a><bab><c2c></p>\n",
.markdown = "<a><bab><c2c>\n",
},
TestCase{
.example = 585,
.sec = "Raw HTML",
.html = "<p><a/><b2/></p>\n",
.markdown = "<a/><b2/>\n",
},
TestCase{
.example = 586,
.sec = "Raw HTML",
.html = "<p><a /><b2\ndata=\"foo\" ></p>\n",
.markdown = "<a /><b2\ndata=\"foo\" >\n",
},
TestCase{
.example = 587,
.sec = "Raw HTML",
.html = "<p><a foo=\"bar\" bam = 'baz <em>\"</em>'\n_boolean zoop:33=zoop:33 /></p>\n",
.markdown = "<a foo=\"bar\" bam = 'baz <em>\"</em>'\n_boolean zoop:33=zoop:33 />\n",
},
TestCase{
.example = 588,
.sec = "Raw HTML",
.html = "<p>Foo <responsive-image src=\"foo.jpg\" /></p>\n",
.markdown = "Foo <responsive-image src=\"foo.jpg\" />\n",
},
TestCase{
.example = 589,
.sec = "Raw HTML",
.html = "<p><33> <__></p>\n",
.markdown = "<33> <__>\n",
},
TestCase{
.example = 590,
.sec = "Raw HTML",
.html = "<p><a h*#ref="hi"></p>\n",
.markdown = "<a h*#ref=\"hi\">\n",
},
TestCase{
.example = 591,
.sec = "Raw HTML",
.html = "<p><a href="hi'> <a href=hi'></p>\n",
.markdown = "<a href=\"hi'> <a href=hi'>\n",
},
TestCase{
.example = 592,
.sec = "Raw HTML",
.html = "<p>< a><\nfoo><bar/ ></p>\n",
.markdown = "< a><\nfoo><bar/ >\n",
},
TestCase{
.example = 593,
.sec = "Raw HTML",
.html = "<p><a href='bar'title=title></p>\n",
.markdown = "<a href='bar'title=title>\n",
},
TestCase{
.example = 594,
.sec = "Raw HTML",
.html = "<p></a></foo ></p>\n",
.markdown = "</a></foo >\n",
},
TestCase{
.example = 595,
.sec = "Raw HTML",
.html = "<p></a href="foo"></p>\n",
.markdown = "</a href=\"foo\">\n",
},
TestCase{
.example = 596,
.sec = "Raw HTML",
.html = "<p>foo <!-- this is a\ncomment - with hyphen --></p>\n",
.markdown = "foo <!-- this is a\ncomment - with hyphen -->\n",
},
TestCase{
.example = 597,
.sec = "Raw HTML",
.html = "<p>foo <!-- not a comment -- two hyphens --></p>\n",
.markdown = "foo <!-- not a comment -- two hyphens -->\n",
},
TestCase{
.example = 598,
.sec = "Raw HTML",
.html = "<p>foo <!--> foo --></p>\n<p>foo <!-- foo---></p>\n",
.markdown = "foo <!--> foo -->\n\nfoo <!-- foo--->\n",
},
TestCase{
.example = 599,
.sec = "Raw HTML",
.html = "<p>foo <?php echo $a; ?></p>\n",
.markdown = "foo <?php echo $a; ?>\n",
},
TestCase{
.example = 600,
.sec = "Raw HTML",
.html = "<p>foo <!ELEMENT br EMPTY></p>\n",
.markdown = "foo <!ELEMENT br EMPTY>\n",
},
TestCase{
.example = 601,
.sec = "Raw HTML",
.html = "<p>foo <![CDATA[>&<]]></p>\n",
.markdown = "foo <![CDATA[>&<]]>\n",
},
TestCase{
.example = 602,
.sec = "Raw HTML",
.html = "<p>foo <a href=\"ö\"></p>\n",
.markdown = "foo <a href=\"ö\">\n",
},
TestCase{
.example = 603,
.sec = "Raw HTML",
.html = "<p>foo <a href=\"\\*\"></p>\n",
.markdown = "foo <a href=\"\\*\">\n",
},
TestCase{
.example = 604,
.sec = "Raw HTML",
.html = "<p><a href="""></p>\n",
.markdown = "<a href=\"\\\"\">\n",
},
TestCase{
.example = 605,
.sec = "Hard line breaks",
.html = "<p>foo<br />\nbaz</p>\n",
.markdown = "foo \nbaz\n",
},
TestCase{
.example = 606,
.sec = "Hard line breaks",
.html = "<p>foo<br />\nbaz</p>\n",
.markdown = "foo\\\nbaz\n",
},
TestCase{
.example = 607,
.sec = "Hard line breaks",
.html = "<p>foo<br />\nbaz</p>\n",
.markdown = "foo \nbaz\n",
},
TestCase{
.example = 608,
.sec = "Hard line breaks",
.html = "<p>foo<br />\nbar</p>\n",
.markdown = "foo \n bar\n",
},
TestCase{
.example = 609,
.sec = "Hard line breaks",
.html = "<p>foo<br />\nbar</p>\n",
.markdown = "foo\\\n bar\n",
},
TestCase{
.example = 610,
.sec = "Hard line breaks",
.html = "<p><em>foo<br />\nbar</em></p>\n",
.markdown = "*foo \nbar*\n",
},
TestCase{
.example = 611,
.sec = "Hard line breaks",
.html = "<p><em>foo<br />\nbar</em></p>\n",
.markdown = "*foo\\\nbar*\n",
},
TestCase{
.example = 612,
.sec = "Hard line breaks",
.html = "<p><code>code span</code></p>\n",
.markdown = "`code \nspan`\n",
},
TestCase{
.example = 613,
.sec = "Hard line breaks",
.html = "<p><code>code\\ span</code></p>\n",
.markdown = "`code\\\nspan`\n",
},
TestCase{
.example = 614,
.sec = "Hard line breaks",
.html = "<p><a href=\"foo \nbar\"></p>\n",
.markdown = "<a href=\"foo \nbar\">\n",
},
TestCase{
.example = 615,
.sec = "Hard line breaks",
.html = "<p><a href=\"foo\\\nbar\"></p>\n",
.markdown = "<a href=\"foo\\\nbar\">\n",
},
TestCase{
.example = 616,
.sec = "Hard line breaks",
.html = "<p>foo\\</p>\n",
.markdown = "foo\\\n",
},
TestCase{
.example = 617,
.sec = "Hard line breaks",
.html = "<p>foo</p>\n",
.markdown = "foo \n",
},
TestCase{
.example = 618,
.sec = "Hard line breaks",
.html = "<h3>foo\\</h3>\n",
.markdown = "### foo\\\n",
},
TestCase{
.example = 619,
.sec = "Hard line breaks",
.html = "<h3>foo</h3>\n",
.markdown = "### foo \n",
},
TestCase{
.example = 620,
.sec = "Soft line breaks",
.html = "<p>foo\nbaz</p>\n",
.markdown = "foo\nbaz\n",
},
TestCase{
.example = 621,
.sec = "Soft line breaks",
.html = "<p>foo\nbaz</p>\n",
.markdown = "foo \n baz\n",
},
TestCase{
.example = 622,
.sec = "Textual content",
.html = "<p>hello $.;'there</p>\n",
.markdown = "hello $.;'there\n",
},
TestCase{
.example = 623,
.sec = "Textual content",
.html = "<p>Foo χρῆν</p>\n",
.markdown = "Foo χρῆν\n",
},
TestCase{
.example = 624,
.sec = "Textual content",
.html = "<p>Multiple spaces</p>\n",
.markdown = "Multiple spaces\n",
},
}; | src/markdown/test_suite.zig |
const Self = @This();
const std = @import("std");
const wlr = @import("wlroots");
const wayland = @import("wayland");
const wl = wayland.server.wl;
const zriver = wayland.server.zriver;
const server = &@import("main.zig").server;
const util = @import("util.zig");
const Output = @import("Output.zig");
const OutputStatus = @import("OutputStatus.zig");
const Seat = @import("Seat.zig");
const SeatStatus = @import("SeatStatus.zig");
const Server = @import("Server.zig");
const log = std.log.scoped(.river_status);
global: *wl.Global,
server_destroy: wl.Listener(*wl.Server) = wl.Listener(*wl.Server).init(handleServerDestroy),
pub fn init(self: *Self) !void {
self.* = .{
.global = try wl.Global.create(server.wl_server, zriver.StatusManagerV1, 2, *Self, self, bind),
};
server.wl_server.addDestroyListener(&self.server_destroy);
}
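/// Destroy the global when the server shuts down so clients can no longer bind it.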
fn handleServerDestroy(listener: *wl.Listener(*wl.Server), wl_server: *wl.Server) void {
const self = @fieldParentPtr(Self, "server_destroy", listener);
self.global.destroy();
}
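/// Called when a client binds the status manager global; creates the resource and installs the request handler.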
fn bind(client: *wl.Client, self: *Self, version: u32, id: u32) callconv(.C) void {
const status_manager = zriver.StatusManagerV1.create(client, version, id) catch {
client.postNoMemory();
log.crit("out of memory", .{});
return;
};
status_manager.setHandler(*Self, handleRequest, null, self);
}
fn handleRequest(
status_manager: *zriver.StatusManagerV1,
request: zriver.StatusManagerV1.Request,
self: *Self,
) void {
switch (request) {
.destroy => status_manager.destroy(),
.get_river_output_status => |req| {
// ignore if the output is inert
const wlr_output = wlr.Output.fromWlOutput(req.output) orelse return;
const output = @intToPtr(*Output, wlr_output.data);
const node = util.gpa.create(std.SinglyLinkedList(OutputStatus).Node) catch {
status_manager.getClient().postNoMemory();
log.crit("out of memory", .{});
return;
};
const output_status = zriver.OutputStatusV1.create(
status_manager.getClient(),
status_manager.getVersion(),
req.id,
) catch {
status_manager.getClient().postNoMemory();
util.gpa.destroy(node);
log.crit("out of memory", .{});
return;
};
node.data.init(output, output_status);
output.status_trackers.prepend(node);
},
.get_river_seat_status => |req| {
// ignore if the seat is inert
const wlr_seat = wlr.Seat.Client.fromWlSeat(req.seat) orelse return;
const seat = @intToPtr(*Seat, wlr_seat.seat.data);
const node = util.gpa.create(std.SinglyLinkedList(SeatStatus).Node) catch {
status_manager.getClient().postNoMemory();
log.crit("out of memory", .{});
return;
};
const seat_status = zriver.SeatStatusV1.create(
status_manager.getClient(),
status_manager.getVersion(),
req.id,
) catch {
status_manager.getClient().postNoMemory();
util.gpa.destroy(node);
log.crit("out of memory", .{});
return;
};
node.data.init(seat, seat_status);
seat.status_trackers.prepend(node);
},
}
} | source/river-0.1.0/river/StatusManager.zig |
const std = @import("std");
const builtin = std.builtin;
const log = std.log.scoped(.archive);
const macho = std.macho;
const mem = std.mem;
const native_endian = builtin.target.cpu.arch.endian();
const Arch = std.Target.Cpu.Arch;
pub fn decodeArch(cputype: macho.cpu_type_t, comptime logError: bool) !std.Target.Cpu.Arch {
const arch: Arch = switch (cputype) {
macho.CPU_TYPE_ARM64 => .aarch64,
macho.CPU_TYPE_X86_64 => .x86_64,
else => {
if (logError) {
log.err("unsupported cpu architecture 0x{x}", .{cputype});
}
return error.UnsupportedCpuArchitecture;
},
};
return arch;
}
fn readFatStruct(reader: anytype, comptime T: type) !T {
// Fat structures (fat_header & fat_arch) are always written and read to/from
// disk in big endian order.
var res = try reader.readStruct(T);
if (native_endian != builtin.Endian.Big) {
mem.bswapAllFields(T, &res);
}
return res;
}
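/// Returns the offset of the slice matching `arch` inside a fat (universal) library, or 0 if the file is not a fat library.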
pub fn getLibraryOffset(reader: anytype, arch: Arch) !u64 {
const fat_header = try readFatStruct(reader, macho.fat_header);
if (fat_header.magic != macho.FAT_MAGIC) return 0;
var fat_arch_index: u32 = 0;
while (fat_arch_index < fat_header.nfat_arch) : (fat_arch_index += 1) {
const fat_arch = try readFatStruct(reader, macho.fat_arch);
// If we come across an architecture that we do not know how to handle, that's
// fine because we can keep looking for one that might match.
const lib_arch = decodeArch(fat_arch.cputype, false) catch |err| switch (err) {
error.UnsupportedCpuArchitecture => continue,
else => |e| return e,
};
if (lib_arch == arch) {
// We have found a matching architecture!
return fat_arch.offset;
}
} else {
log.err("Could not find matching cpu architecture in fat library: expected {s}", .{arch});
return error.MismatchedCpuArchitecture;
}
} | src/link/MachO/fat.zig |
const std = @import("std");
const util = @import("util.zig");
const data = @embedFile("../data/day02.txt");
const MoveCommand = union(enum) {
up_down: i32,
forward: i32,
};
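// Part 1: "forward" adds to the horizontal position; "up"/"down" change the depth directly.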
fn followCommandsPt1(commands: []const MoveCommand) util.Point(i32) {
var point = util.Point(i32){};
for (commands) |command| {
switch (command) {
.forward => |val| point.x += val,
.up_down => |val| point.y += val,
}
}
return point;
}
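// Part 2: "up"/"down" adjust the aim; "forward" advances horizontally and changes depth by aim * value.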
fn followCommandsPt2(commands: []const MoveCommand) util.Point(i32) {
var aim: i32 = 0;
var position = util.Point(i32){};
for (commands) |command| {
switch (command) {
.forward => |val| {
position.x += val;
position.y += (aim * val);
},
.up_down => |val| aim += val,
}
}
return position;
}
pub fn main() !void {
defer {
const leaks = util.gpa_impl.deinit();
std.debug.assert(!leaks);
}
var commands = util.List(MoveCommand).init(util.gpa);
defer commands.deinit();
var it = util.tokenize(u8, data, "\n");
while (it.next()) |line| {
var line_it = util.tokenize(u8, line, " ");
const dir = line_it.next() orelse return error.InvalidInput;
const val = util.parseInt(i32, line_it.next() orelse return error.InvalidInput, 10) catch {
return error.InvalidInput;
};
if (std.mem.eql(u8, dir, "forward")) {
try commands.append(.{ .forward = val });
} else if (std.mem.eql(u8, dir, "down")) {
try commands.append(.{ .up_down = val });
} else if (std.mem.eql(u8, dir, "up")) {
try commands.append(.{ .up_down = -val });
} else return error.InvalidInput;
}
// Part 1
const end_pos_pt1 = followCommandsPt1(commands.items);
util.print("Part 1: {d}, mult: {d}\n", .{ end_pos_pt1, end_pos_pt1.x * end_pos_pt1.y });
// Part 2
const end_pos_pt2 = followCommandsPt2(commands.items);
util.print("Part 2: {d}, mult: {d}\n", .{ end_pos_pt2, end_pos_pt2.x * end_pos_pt2.y });
} | src/day02.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const testing = std.testing;
const expect = testing.expect;
/// An interface for std.process.args which works like C's argc/argv. It builds
/// a slice of all the arguments up front so that they can be accessed directly.
pub const Args = struct {
argv: [][]u8,
argc: usize,
allocator: Allocator,
pub fn init(allocator: Allocator) !Args {
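        // First pass: count the arguments so argv can be allocated exactly once.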
var it = std.process.args();
var n: usize = 0;
while (it.skip()) {
n += 1;
}
var args = try allocator.alloc([]u8, n);
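        // Second pass: copy each argument into the allocated slice.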
it = std.process.args();
var i: usize = 0;
while (it.next(allocator)) |arg| {
args[i] = try arg;
i += 1;
}
return Args{
.argv = args,
.argc = n,
.allocator = allocator,
};
}
pub fn deinit(self: *Args) void {
for (self.argv) |arg| {
self.allocator.free(arg);
}
self.allocator.free(self.argv);
}
pub fn has(self: Args, arg: []const u8) bool {
for (self.argv) |current| {
if (std.mem.eql(u8, current, arg)) {
return true;
}
}
return false;
}
pub fn countNonFlagArgs(self: Args) usize {
var c: usize = 0;
for (self.argv[1..]) |arg| {
if (arg[0] != '-') {
c += 1;
}
}
return c;
}
pub const ArgsIteratorMode = enum {
Flags,
NonFlags,
All,
};
pub const ArgsIterator = struct {
args: *const Args,
cur: usize = 1,
mode: ArgsIteratorMode = .All,
pub fn init(args: *const Args, mode: ArgsIteratorMode) ArgsIterator {
return .{
.cur = 1,
.args = args,
.mode = mode,
};
}
pub fn next(self: *ArgsIterator) ?[]const u8 {
while (true) {
if (self.cur >= self.args.argc) {
return null;
}
var ok: bool = switch (self.mode) {
.Flags => self.args.argv[self.cur][0] == '-',
.NonFlags => self.args.argv[self.cur][0] != '-',
.All => true,
};
if (ok) {
defer self.cur += 1;
return self.args.argv[self.cur];
}
self.cur += 1;
}
}
};
pub fn nonFlagArgsIterator(self: Args) ArgsIterator {
return ArgsIterator.init(&self, .NonFlags);
}
pub fn flagArgsIterator(self: Args) ArgsIterator {
return ArgsIterator.init(&self, .Flags);
}
pub fn argsIterator(self: Args) ArgsIterator {
return ArgsIterator.init(&self, .All);
}
};
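/// Appends a copy of `arg`, reallocating `self.argv` to hold one more entry.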
pub fn addArg(self: *Args, arg: []const u8) !void {
var argCopy = try self.allocator.alloc(u8, arg.len);
@memcpy(argCopy.ptr, arg.ptr, arg.len);
var newArgv = try self.allocator.alloc([]u8, self.argc + 1);
@memcpy(std.mem.sliceAsBytes(newArgv).ptr, std.mem.sliceAsBytes(self.argv).ptr, self.argc * @sizeOf([]u8));
self.allocator.free(self.argv);
self.argv = newArgv;
self.argv[self.argc] = argCopy;
self.argc += 1;
}
test "Args" {
var args = try Args.init(std.testing.allocator);
defer args.deinit();
try expect(args.argc == 2);
try addArg(&args, "test");
try expect(args.argc == 3);
var end: usize = args.argv[1].len;
var start = end - 3;
try expect(std.mem.eql(u8, args.argv[1][start..end], "zig"));
try expect(std.mem.eql(u8, args.argv[2], "test"));
try expect(args.has("test"));
} | src/args.zig |
usingnamespace @import("root").preamble;
/// Semaphore waiting queue node
const WaitingNode = struct {
/// Number of resources thread is waiting for
count: usize = undefined,
/// Task waiting to be waken up
task: *os.thread.Task = undefined,
/// Queue hook
queue_hook: lib.containers.queue.Node = undefined,
};
/// A semaphore is a lock that should be used for synchronizing operations that may take too long and/or
/// can't be run in an interrupt-disabled context (e.g. you need to use it if you allocate memory
/// in the locked section). Unlike a Mutex, it can also be used to grant access to more than one resource.
pub const Semaphore = struct {
/// Atomic queue of waiting tasks
queue: lib.containers.queue.Queue(WaitingNode, "queue_hook") = .{},
/// Spinlock used to prevent more than one thread from accessing mutex data
spinlock: os.thread.Spinlock = .{},
/// Number of resources available
available: usize,
/// Create semaphore with N resources available
pub fn init(count: usize) @This() {
return .{ .available = count };
}
/// Try to acquire `count` resources. Don't call from interrupt context!
pub fn try_acquire(self: *@This(), count: usize) !void {
const task = os.platform.get_current_task();
const lock_state = self.spinlock.lock();
if (self.available >= count) {
self.available -= count;
self.spinlock.unlock(lock_state);
return;
}
var waiting_token: WaitingNode = .{};
waiting_token.count = count;
waiting_token.task = task;
self.queue.enqueue(&waiting_token);
os.thread.scheduler.waitReleaseSpinlock(&self.spinlock);
os.platform.set_interrupts(lock_state);
}
/// Release `count` resources
pub fn release(self: *@This(), count: usize) void {
const lock_state = self.spinlock.lock();
self.available += count;
if (self.queue.front()) |next| {
const resources_needed = next.count;
if (self.available >= resources_needed) {
self.available -= resources_needed;
_ = self.queue.dequeue();
os.thread.scheduler.wake(next.task);
}
}
self.spinlock.unlock(lock_state);
}
    /// Acquire `count` resources or panic. Don't call from interrupt context!
    pub fn acquire(self: *@This(), count: usize) void {
        self.try_acquire(count) catch unreachable;
    }
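
    // A minimal usage sketch (illustrative, not part of this file): guard a slow,
    // possibly allocating section with a single unit of the semaphore.
    //
    //     var sem = Semaphore.init(1);
    //     sem.acquire(1);
    //     defer sem.release(1);
    //     // ... allocate memory / perform slow work here ...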
}; | subprojects/flork/src/thread/semaphore.zig |
const std = @import("std");
const util = @import("util.zig");
const Allocator = std.mem.Allocator;
const testing = std.testing;
const Error = util.Error;
/// Xor8 is the recommended default, with no more than a 0.3% false-positive probability.
///
/// See `Xor` for more details.
pub const Xor8 = Xor(u8);
/// Xor16 provides a xor filter with 16-bit fingerprints.
///
/// See `Xor` for more details.
pub const Xor16 = Xor(u16);
/// Xor returns a xor filter with the specified base type, usually u8 or u16, for which the
/// helpers Xor8 and Xor16 may be used.
///
/// We assume that you have a large set of 64-bit integers and you want a data structure to do
/// membership tests using no more than ~8 or ~16 bits per key. If your initial set is made of
/// strings or other types, you first need to hash them to a 64-bit integer.
///
/// Xor8 is the recommended default, with no more than a 0.3% false-positive probability.
pub fn Xor(comptime T: type) type {
return struct {
allocator: Allocator,
seed: u64,
blockLength: u64,
fingerprints: []T, // has room for 3*blockLength values
/// probability of success should always be > 0.5 so 100 iterations is highly unlikely
maxIterations: usize = 100,
const Self = @This();
/// initializes a Xor filter with enough capacity for a set containing up to `size` elements.
///
/// `deinit()` must be called by the caller to free the memory.
pub fn init(allocator: Allocator, size: usize) !*Self {
const self = try allocator.create(Self);
var capacity = @floatToInt(usize, 32 + 1.23 * @intToFloat(f64, size));
capacity = capacity / 3 * 3;
self.* = Self{
.allocator = allocator,
.seed = 0,
.fingerprints = try allocator.alloc(T, capacity),
.blockLength = capacity / 3,
};
return self;
}
        pub inline fn deinit(self: *Self) void {
            // Free the fingerprint array allocated in `init` before destroying the struct itself.
            self.allocator.free(self.fingerprints);
            self.allocator.destroy(self);
        }
        /// reports whether the specified key is in the set, subject to the filter's false-positive rate.
pub inline fn contain(self: *Self, key: u64) bool {
var hash = util.mixSplit(key, self.seed);
var f = @truncate(T, util.fingerprint(hash));
var r0 = @truncate(u32, hash);
var r1 = @truncate(u32, util.rotl64(hash, 21));
var r2 = @truncate(u32, util.rotl64(hash, 42));
var bl = @truncate(u32, self.blockLength);
var h0: u32 = util.reduce(r0, bl);
var h1: u32 = util.reduce(r1, bl) + bl;
var h2: u32 = util.reduce(r2, bl) + 2 * bl;
return f == (self.fingerprints[h0] ^ self.fingerprints[h1] ^ self.fingerprints[h2]);
}
/// reports the size in bytes of the filter.
pub inline fn sizeInBytes(self: *Self) usize {
return 3 * self.blockLength * @sizeOf(T) + @sizeOf(Self);
}
/// populates the filter with the given keys.
///
/// The caller is responsible for ensuring that there are no duplicated keys.
///
/// The inner loop will run up to maxIterations (default 100) and should never fail,
/// except if there are duplicated keys.
///
/// The provided allocator will be used for creating temporary buffers that do not outlive the
/// function call.
pub fn populate(self: *Self, allocator: Allocator, keys: []u64) Error!void {
const iter = try util.sliceIterator(u64).init(allocator, keys);
defer iter.deinit();
return self.populateIter(allocator, iter);
}
/// Identical to populate, except it takes an iterator of keys so you need not store them
/// in-memory.
///
/// `keys.next()` must return `?u64`, the next key or none if the end of the list has been
/// reached. The iterator must reset after hitting the end of the list, such that the `next()`
/// call leads to the first element again.
///
/// `keys.len()` must return the `usize` length.
pub fn populateIter(self: *Self, allocator: Allocator, keys: anytype) Error!void {
var rng_counter: u64 = 1;
self.seed = util.rngSplitMix64(&rng_counter);
var sets = try allocator.alloc(Set, self.blockLength * 3);
defer allocator.free(sets);
var Q = try allocator.alloc(Keyindex, sets.len);
defer allocator.free(Q);
var stack = try allocator.alloc(Keyindex, keys.len());
defer allocator.free(stack);
var sets0 = sets;
var sets1 = sets[self.blockLength..];
var sets2 = sets[2 * self.blockLength ..];
var Q0 = Q;
var Q1 = Q[self.blockLength..];
var Q2 = Q[2 * self.blockLength ..];
var loop: usize = 0;
while (true) : (loop += 1) {
if (loop + 1 > self.maxIterations) {
return Error.KeysLikelyNotUnique; // too many iterations, keys are not unique.
}
for (sets[0..sets.len]) |*b| b.* = std.mem.zeroes(Set);
while (keys.next()) |key| {
var hs = self.getH0H1H2(key);
sets0[hs.h0].xormask ^= hs.h;
sets0[hs.h0].count += 1;
sets1[hs.h1].xormask ^= hs.h;
sets1[hs.h1].count += 1;
sets2[hs.h2].xormask ^= hs.h;
sets2[hs.h2].count += 1;
}
// TODO(upstream): the flush should be sync with the detection that follows scan values
// with a count of one.
var Q0size: usize = 0;
var Q1size: usize = 0;
var Q2size: usize = 0;
{
var i: usize = 0;
while (i < self.blockLength) : (i += 1) {
if (sets0[i].count == 1) {
Q0[Q0size].index = @intCast(u32, i);
Q0[Q0size].hash = sets0[i].xormask;
Q0size += 1;
}
}
}
{
var i: usize = 0;
while (i < self.blockLength) : (i += 1) {
if (sets1[i].count == 1) {
Q1[Q1size].index = @intCast(u32, i);
Q1[Q1size].hash = sets1[i].xormask;
Q1size += 1;
}
}
}
{
var i: usize = 0;
while (i < self.blockLength) : (i += 1) {
if (sets2[i].count == 1) {
Q2[Q2size].index = @intCast(u32, i);
Q2[Q2size].hash = sets2[i].xormask;
Q2size += 1;
}
}
}
var stack_size: usize = 0;
while (Q0size + Q1size + Q2size > 0) {
while (Q0size > 0) {
Q0size -%= 1;
var keyindex = Q0[Q0size];
var index = keyindex.index;
if (sets0[index].count == 0) {
continue; // not actually possible after the initial scan.
}
var hash = keyindex.hash;
var h1 = self.getH1(hash);
var h2 = self.getH2(hash);
stack[stack_size] = keyindex;
stack_size += 1;
sets1[h1].xormask ^= hash;
sets1[h1].count -%= 1;
if (sets1[h1].count == 1) {
Q1[Q1size].index = h1;
Q1[Q1size].hash = sets1[h1].xormask;
Q1size += 1;
}
sets2[h2].xormask ^= hash;
sets2[h2].count -%= 1;
if (sets2[h2].count == 1) {
Q2[Q2size].index = h2;
Q2[Q2size].hash = sets2[h2].xormask;
Q2size += 1;
}
}
while (Q1size > 0) {
Q1size -%= 1;
var keyindex = Q1[Q1size];
var index = keyindex.index;
if (sets1[index].count == 0) {
continue; // not actually possible after the initial scan.
}
var hash = keyindex.hash;
var h0 = self.getH0(hash);
var h2 = self.getH2(hash);
keyindex.index += @truncate(u32, self.blockLength);
stack[stack_size] = keyindex;
stack_size += 1;
sets0[h0].xormask ^= hash;
sets0[h0].count -%= 1;
if (sets0[h0].count == 1) {
Q0[Q0size].index = h0;
Q0[Q0size].hash = sets0[h0].xormask;
Q0size += 1;
}
sets2[h2].xormask ^= hash;
sets2[h2].count -%= 1;
if (sets2[h2].count == 1) {
Q2[Q2size].index = h2;
Q2[Q2size].hash = sets2[h2].xormask;
Q2size += 1;
}
}
while (Q2size > 0) {
Q2size -%= 1;
var keyindex = Q2[Q2size];
var index = keyindex.index;
if (sets2[index].count == 0) {
continue; // not actually possible after the initial scan.
}
var hash = keyindex.hash;
var h0 = self.getH0(hash);
var h1 = self.getH1(hash);
keyindex.index += @truncate(u32, 2 * @intCast(u64, self.blockLength));
stack[stack_size] = keyindex;
stack_size += 1;
sets0[h0].xormask ^= hash;
sets0[h0].count -%= 1;
if (sets0[h0].count == 1) {
Q0[Q0size].index = h0;
Q0[Q0size].hash = sets0[h0].xormask;
Q0size += 1;
}
sets1[h1].xormask ^= hash;
sets1[h1].count -%= 1;
if (sets1[h1].count == 1) {
Q1[Q1size].index = h1;
Q1[Q1size].hash = sets1[h1].xormask;
Q1size += 1;
}
}
}
if (stack_size == keys.len()) {
// success
break;
}
self.seed = util.rngSplitMix64(&rng_counter);
}
var fingerprints0: []T = self.fingerprints;
var fingerprints1: []T = self.fingerprints[self.blockLength..];
var fingerprints2: []T = self.fingerprints[2 * self.blockLength ..];
var stack_size = keys.len();
while (stack_size > 0) {
stack_size -= 1;
var ki = stack[stack_size];
var val: u64 = util.fingerprint(ki.hash);
if (ki.index < @truncate(u32, self.blockLength)) {
val ^= fingerprints1[self.getH1(ki.hash)] ^ fingerprints2[self.getH2(ki.hash)];
} else if (ki.index < 2 * @truncate(u32, self.blockLength)) {
val ^= fingerprints0[self.getH0(ki.hash)] ^ fingerprints2[self.getH2(ki.hash)];
} else {
val ^= fingerprints0[self.getH0(ki.hash)] ^ fingerprints1[self.getH1(ki.hash)];
}
self.fingerprints[ki.index] = @truncate(T, val);
}
return;
}
inline fn getH0H1H2(self: *Self, k: u64) Hashes {
var hash = util.mixSplit(k, self.seed);
var r0 = @truncate(u32, hash);
var r1 = @truncate(u32, util.rotl64(hash, 21));
var r2 = @truncate(u32, util.rotl64(hash, 42));
return Hashes{
.h = hash,
.h0 = util.reduce(r0, @truncate(u32, self.blockLength)),
.h1 = util.reduce(r1, @truncate(u32, self.blockLength)),
.h2 = util.reduce(r2, @truncate(u32, self.blockLength)),
};
}
inline fn getH0(self: *Self, hash: u64) u32 {
var r0 = @truncate(u32, hash);
return util.reduce(r0, @truncate(u32, self.blockLength));
}
inline fn getH1(self: *Self, hash: u64) u32 {
var r1 = @truncate(u32, util.rotl64(hash, 21));
return util.reduce(r1, @truncate(u32, self.blockLength));
}
inline fn getH2(self: *Self, hash: u64) u32 {
var r2 = @truncate(u32, util.rotl64(hash, 42));
return util.reduce(r2, @truncate(u32, self.blockLength));
}
};
}
const Set = struct {
xormask: u64,
count: u32,
};
const Hashes = struct {
h: u64,
h0: u32,
h1: u32,
h2: u32,
};
const H0h1h2 = struct {
h0: u32,
h1: u32,
h2: u32,
};
const Keyindex = struct {
hash: u64,
index: u32,
};
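// A minimal sketch (not part of the original test suite) of a key iterator that
// satisfies the contract documented on `populateIter`: `next()` yields each key as
// `?u64` and resets to the first key after returning null, and `len()` reports the
// key count. The type name and key values below are illustrative assumptions.
const ExampleKeyIterator = struct {
    keys: []const u64,
    i: usize = 0,

    fn next(self: *ExampleKeyIterator) ?u64 {
        if (self.i >= self.keys.len) {
            self.i = 0; // reset so a subsequent pass starts over, as populateIter expects
            return null;
        }
        defer self.i += 1;
        return self.keys[self.i];
    }

    fn len(self: *ExampleKeyIterator) usize {
        return self.keys.len;
    }
};

test "xor8 populateIter sketch" {
    const allocator = std.heap.page_allocator;
    const filter = try Xor8.init(allocator, 4);
    defer filter.deinit();
    var iter = ExampleKeyIterator{ .keys = &[_]u64{ 11, 22, 33, 44 } };
    try filter.populateIter(allocator, &iter);
    try testing.expect(filter.contain(11));
    try testing.expect(filter.contain(44));
}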
fn xorTest(T: anytype, size: usize, size_in_bytes: usize) !void {
const allocator = std.heap.page_allocator;
const filter = try Xor(T).init(allocator, size);
comptime filter.maxIterations = 100; // proof we can modify maxIterations at comptime.
defer filter.deinit();
var keys = try allocator.alloc(u64, size);
defer allocator.free(keys);
for (keys) |_, i| {
keys[i] = i;
}
try filter.populate(allocator, keys[0..]);
try testing.expect(filter.contain(1) == true);
try testing.expect(filter.contain(5) == true);
try testing.expect(filter.contain(9) == true);
try testing.expect(filter.contain(1234) == true);
try testing.expectEqual(@as(usize, size_in_bytes), filter.sizeInBytes());
for (keys) |key| {
try testing.expect(filter.contain(key) == true);
}
var random_matches: u64 = 0;
const trials = 10000000;
var i: u64 = 0;
var rng = std.rand.DefaultPrng.init(0);
const random = rng.random();
while (i < trials) : (i += 1) {
var random_key: u64 = random.uintAtMost(u64, std.math.maxInt(u64));
if (filter.contain(random_key)) {
if (random_key >= keys.len) {
random_matches += 1;
}
}
}
const fpp = @intToFloat(f64, random_matches) * 1.0 / trials;
std.debug.print("fpp {d:3.10} (estimated)\n", .{fpp});
std.debug.print("\t(keys={}, random_matches={}, trials={})\n", .{ size, random_matches, trials });
std.debug.print("\tbits per entry {d:3.1}\n", .{@intToFloat(f64, filter.sizeInBytes()) * 8.0 / @intToFloat(f64, size)});
}
test "xor8" {
try xorTest(u8, 10000, 12386);
}
test "xor16" {
try xorTest(u16, 10000, 24716);
}
test "xor20" {
try xorTest(u20, 10000, 49376);
}
test "xor32" {
// NOTE: We only use 1m keys here to keep the test running fast. With 100 million keys, the
// test can take a minute or two on a 2020 Macbook and requires ~6.3 GiB of memory. Still,
// estimated fpp is 0 - I leave it to the reader to estimate the fpp of xor32/xor64.
//
// If you have a really beefy machine, it would be cool to try this test with a huge amount of
// keys and higher `trials` in `xorTest`.
try xorTest(u32, 1000000, 4920176);
}
test "xor64" {
// NOTE: We only use 1m keys here to keep the test running fast. With 100 million keys, the
// test can take a minute or two on a 2020 Macbook and requires ~6.3 GiB of memory. Still,
// estimated fpp is 0 - I leave it to the reader to estimate the fpp of xor32/xor64.
//
// If you have a really beefy machine, it would be cool to try this test with a huge amount of
// keys and higher `trials` in `xorTest`.
try xorTest(u64, 1000000, 9840296);
} | src/xorfilter.zig |
//--------------------------------------------------------------------------------
// Section: Types (14)
//--------------------------------------------------------------------------------
const CLSID_FhConfigMgr_Value = @import("../zig.zig").Guid.initString("ed43bb3c-09e9-498a-9df6-2177244c6db4");
pub const CLSID_FhConfigMgr = &CLSID_FhConfigMgr_Value;
const CLSID_FhReassociation_Value = @import("../zig.zig").Guid.initString("4d728e35-16fa-4320-9e8b-bfd7100a8846");
pub const CLSID_FhReassociation = &CLSID_FhReassociation_Value;
pub const FH_TARGET_PROPERTY_TYPE = enum(i32) {
FH_TARGET_NAME = 0,
FH_TARGET_URL = 1,
FH_TARGET_DRIVE_TYPE = 2,
MAX_TARGET_PROPERTY = 3,
};
pub const FH_TARGET_NAME = FH_TARGET_PROPERTY_TYPE.FH_TARGET_NAME;
pub const FH_TARGET_URL = FH_TARGET_PROPERTY_TYPE.FH_TARGET_URL;
pub const FH_TARGET_DRIVE_TYPE = FH_TARGET_PROPERTY_TYPE.FH_TARGET_DRIVE_TYPE;
pub const MAX_TARGET_PROPERTY = FH_TARGET_PROPERTY_TYPE.MAX_TARGET_PROPERTY;
pub const FH_TARGET_DRIVE_TYPES = enum(i32) {
UNKNOWN = 0,
REMOVABLE = 2,
FIXED = 3,
REMOTE = 4,
};
pub const FH_DRIVE_UNKNOWN = FH_TARGET_DRIVE_TYPES.UNKNOWN;
pub const FH_DRIVE_REMOVABLE = FH_TARGET_DRIVE_TYPES.REMOVABLE;
pub const FH_DRIVE_FIXED = FH_TARGET_DRIVE_TYPES.FIXED;
pub const FH_DRIVE_REMOTE = FH_TARGET_DRIVE_TYPES.REMOTE;
// TODO: this type is limited to platform 'windows8.0'
const IID_IFhTarget_Value = @import("../zig.zig").Guid.initString("d87965fd-2bad-4657-bd3b-9567eb300ced");
pub const IID_IFhTarget = &IID_IFhTarget_Value;
pub const IFhTarget = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetStringProperty: fn(
self: *const IFhTarget,
PropertyType: FH_TARGET_PROPERTY_TYPE,
PropertyValue: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetNumericalProperty: fn(
self: *const IFhTarget,
PropertyType: FH_TARGET_PROPERTY_TYPE,
PropertyValue: ?*u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhTarget_GetStringProperty(self: *const T, PropertyType: FH_TARGET_PROPERTY_TYPE, PropertyValue: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhTarget.VTable, self.vtable).GetStringProperty(@ptrCast(*const IFhTarget, self), PropertyType, PropertyValue);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhTarget_GetNumericalProperty(self: *const T, PropertyType: FH_TARGET_PROPERTY_TYPE, PropertyValue: ?*u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhTarget.VTable, self.vtable).GetNumericalProperty(@ptrCast(*const IFhTarget, self), PropertyType, PropertyValue);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows8.0'
const IID_IFhScopeIterator_Value = @import("../zig.zig").Guid.initString("3197abce-532a-44c6-8615-f3666566a720");
pub const IID_IFhScopeIterator = &IID_IFhScopeIterator_Value;
pub const IFhScopeIterator = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
MoveToNextItem: fn(
self: *const IFhScopeIterator,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetItem: fn(
self: *const IFhScopeIterator,
Item: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhScopeIterator_MoveToNextItem(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhScopeIterator.VTable, self.vtable).MoveToNextItem(@ptrCast(*const IFhScopeIterator, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhScopeIterator_GetItem(self: *const T, Item: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhScopeIterator.VTable, self.vtable).GetItem(@ptrCast(*const IFhScopeIterator, self), Item);
}
};}
pub usingnamespace MethodMixin(@This());
};
pub const FH_PROTECTED_ITEM_CATEGORY = enum(i32) {
FH_FOLDER = 0,
FH_LIBRARY = 1,
MAX_PROTECTED_ITEM_CATEGORY = 2,
};
pub const FH_FOLDER = FH_PROTECTED_ITEM_CATEGORY.FH_FOLDER;
pub const FH_LIBRARY = FH_PROTECTED_ITEM_CATEGORY.FH_LIBRARY;
pub const MAX_PROTECTED_ITEM_CATEGORY = FH_PROTECTED_ITEM_CATEGORY.MAX_PROTECTED_ITEM_CATEGORY;
pub const FH_LOCAL_POLICY_TYPE = enum(i32) {
FH_FREQUENCY = 0,
FH_RETENTION_TYPE = 1,
FH_RETENTION_AGE = 2,
MAX_LOCAL_POLICY = 3,
};
pub const FH_FREQUENCY = FH_LOCAL_POLICY_TYPE.FH_FREQUENCY;
pub const FH_RETENTION_TYPE = FH_LOCAL_POLICY_TYPE.FH_RETENTION_TYPE;
pub const FH_RETENTION_AGE = FH_LOCAL_POLICY_TYPE.FH_RETENTION_AGE;
pub const MAX_LOCAL_POLICY = FH_LOCAL_POLICY_TYPE.MAX_LOCAL_POLICY;
pub const FH_RETENTION_TYPES = enum(i32) {
FH_RETENTION_DISABLED = 0,
FH_RETENTION_UNLIMITED = 1,
FH_RETENTION_AGE_BASED = 2,
MAX_RETENTION_TYPE = 3,
};
pub const FH_RETENTION_DISABLED = FH_RETENTION_TYPES.FH_RETENTION_DISABLED;
pub const FH_RETENTION_UNLIMITED = FH_RETENTION_TYPES.FH_RETENTION_UNLIMITED;
pub const FH_RETENTION_AGE_BASED = FH_RETENTION_TYPES.FH_RETENTION_AGE_BASED;
pub const MAX_RETENTION_TYPE = FH_RETENTION_TYPES.MAX_RETENTION_TYPE;
pub const FH_BACKUP_STATUS = enum(i32) {
FH_STATUS_DISABLED = 0,
FH_STATUS_DISABLED_BY_GP = 1,
FH_STATUS_ENABLED = 2,
FH_STATUS_REHYDRATING = 3,
MAX_BACKUP_STATUS = 4,
};
pub const FH_STATUS_DISABLED = FH_BACKUP_STATUS.FH_STATUS_DISABLED;
pub const FH_STATUS_DISABLED_BY_GP = FH_BACKUP_STATUS.FH_STATUS_DISABLED_BY_GP;
pub const FH_STATUS_ENABLED = FH_BACKUP_STATUS.FH_STATUS_ENABLED;
pub const FH_STATUS_REHYDRATING = FH_BACKUP_STATUS.FH_STATUS_REHYDRATING;
pub const MAX_BACKUP_STATUS = FH_BACKUP_STATUS.MAX_BACKUP_STATUS;
pub const FH_DEVICE_VALIDATION_RESULT = enum(i32) {
FH_ACCESS_DENIED = 0,
FH_INVALID_DRIVE_TYPE = 1,
FH_READ_ONLY_PERMISSION = 2,
FH_CURRENT_DEFAULT = 3,
FH_NAMESPACE_EXISTS = 4,
FH_TARGET_PART_OF_LIBRARY = 5,
FH_VALID_TARGET = 6,
MAX_VALIDATION_RESULT = 7,
};
pub const FH_ACCESS_DENIED = FH_DEVICE_VALIDATION_RESULT.FH_ACCESS_DENIED;
pub const FH_INVALID_DRIVE_TYPE = FH_DEVICE_VALIDATION_RESULT.FH_INVALID_DRIVE_TYPE;
pub const FH_READ_ONLY_PERMISSION = FH_DEVICE_VALIDATION_RESULT.FH_READ_ONLY_PERMISSION;
pub const FH_CURRENT_DEFAULT = FH_DEVICE_VALIDATION_RESULT.FH_CURRENT_DEFAULT;
pub const FH_NAMESPACE_EXISTS = FH_DEVICE_VALIDATION_RESULT.FH_NAMESPACE_EXISTS;
pub const FH_TARGET_PART_OF_LIBRARY = FH_DEVICE_VALIDATION_RESULT.FH_TARGET_PART_OF_LIBRARY;
pub const FH_VALID_TARGET = FH_DEVICE_VALIDATION_RESULT.FH_VALID_TARGET;
pub const MAX_VALIDATION_RESULT = FH_DEVICE_VALIDATION_RESULT.MAX_VALIDATION_RESULT;
// TODO: this type is limited to platform 'windows8.0'
const IID_IFhConfigMgr_Value = @import("../zig.zig").Guid.initString("6a5fea5b-bf8f-4ee5-b8c3-44d8a0d7331c");
pub const IID_IFhConfigMgr = &IID_IFhConfigMgr_Value;
pub const IFhConfigMgr = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
LoadConfiguration: fn(
self: *const IFhConfigMgr,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
CreateDefaultConfiguration: fn(
self: *const IFhConfigMgr,
OverwriteIfExists: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SaveConfiguration: fn(
self: *const IFhConfigMgr,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
AddRemoveExcludeRule: fn(
self: *const IFhConfigMgr,
Add: BOOL,
Category: FH_PROTECTED_ITEM_CATEGORY,
Item: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetIncludeExcludeRules: fn(
self: *const IFhConfigMgr,
Include: BOOL,
Category: FH_PROTECTED_ITEM_CATEGORY,
Iterator: ?*?*IFhScopeIterator,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetLocalPolicy: fn(
self: *const IFhConfigMgr,
LocalPolicyType: FH_LOCAL_POLICY_TYPE,
PolicyValue: ?*u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetLocalPolicy: fn(
self: *const IFhConfigMgr,
LocalPolicyType: FH_LOCAL_POLICY_TYPE,
PolicyValue: u64,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetBackupStatus: fn(
self: *const IFhConfigMgr,
BackupStatus: ?*FH_BACKUP_STATUS,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SetBackupStatus: fn(
self: *const IFhConfigMgr,
BackupStatus: FH_BACKUP_STATUS,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetDefaultTarget: fn(
self: *const IFhConfigMgr,
DefaultTarget: ?*?*IFhTarget,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ValidateTarget: fn(
self: *const IFhConfigMgr,
TargetUrl: ?BSTR,
ValidationResult: ?*FH_DEVICE_VALIDATION_RESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ProvisionAndSetNewTarget: fn(
self: *const IFhConfigMgr,
TargetUrl: ?BSTR,
TargetName: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ChangeDefaultTargetRecommendation: fn(
self: *const IFhConfigMgr,
Recommend: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
QueryProtectionStatus: fn(
self: *const IFhConfigMgr,
ProtectionState: ?*u32,
ProtectedUntilTime: ?*?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_LoadConfiguration(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).LoadConfiguration(@ptrCast(*const IFhConfigMgr, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_CreateDefaultConfiguration(self: *const T, OverwriteIfExists: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).CreateDefaultConfiguration(@ptrCast(*const IFhConfigMgr, self), OverwriteIfExists);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_SaveConfiguration(self: *const T) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).SaveConfiguration(@ptrCast(*const IFhConfigMgr, self));
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_AddRemoveExcludeRule(self: *const T, Add: BOOL, Category: FH_PROTECTED_ITEM_CATEGORY, Item: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).AddRemoveExcludeRule(@ptrCast(*const IFhConfigMgr, self), Add, Category, Item);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_GetIncludeExcludeRules(self: *const T, Include: BOOL, Category: FH_PROTECTED_ITEM_CATEGORY, Iterator: ?*?*IFhScopeIterator) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).GetIncludeExcludeRules(@ptrCast(*const IFhConfigMgr, self), Include, Category, Iterator);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_GetLocalPolicy(self: *const T, LocalPolicyType: FH_LOCAL_POLICY_TYPE, PolicyValue: ?*u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).GetLocalPolicy(@ptrCast(*const IFhConfigMgr, self), LocalPolicyType, PolicyValue);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_SetLocalPolicy(self: *const T, LocalPolicyType: FH_LOCAL_POLICY_TYPE, PolicyValue: u64) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).SetLocalPolicy(@ptrCast(*const IFhConfigMgr, self), LocalPolicyType, PolicyValue);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_GetBackupStatus(self: *const T, BackupStatus: ?*FH_BACKUP_STATUS) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).GetBackupStatus(@ptrCast(*const IFhConfigMgr, self), BackupStatus);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_SetBackupStatus(self: *const T, BackupStatus: FH_BACKUP_STATUS) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).SetBackupStatus(@ptrCast(*const IFhConfigMgr, self), BackupStatus);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_GetDefaultTarget(self: *const T, DefaultTarget: ?*?*IFhTarget) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).GetDefaultTarget(@ptrCast(*const IFhConfigMgr, self), DefaultTarget);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_ValidateTarget(self: *const T, TargetUrl: ?BSTR, ValidationResult: ?*FH_DEVICE_VALIDATION_RESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).ValidateTarget(@ptrCast(*const IFhConfigMgr, self), TargetUrl, ValidationResult);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_ProvisionAndSetNewTarget(self: *const T, TargetUrl: ?BSTR, TargetName: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).ProvisionAndSetNewTarget(@ptrCast(*const IFhConfigMgr, self), TargetUrl, TargetName);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_ChangeDefaultTargetRecommendation(self: *const T, Recommend: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).ChangeDefaultTargetRecommendation(@ptrCast(*const IFhConfigMgr, self), Recommend);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhConfigMgr_QueryProtectionStatus(self: *const T, ProtectionState: ?*u32, ProtectedUntilTime: ?*?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhConfigMgr.VTable, self.vtable).QueryProtectionStatus(@ptrCast(*const IFhConfigMgr, self), ProtectionState, ProtectedUntilTime);
}
};}
pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows8.0'
const IID_IFhReassociation_Value = @import("../zig.zig").Guid.initString("6544a28a-f68d-47ac-91ef-16b2b36aa3ee");
pub const IID_IFhReassociation = &IID_IFhReassociation_Value;
pub const IFhReassociation = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
ValidateTarget: fn(
self: *const IFhReassociation,
TargetUrl: ?BSTR,
ValidationResult: ?*FH_DEVICE_VALIDATION_RESULT,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
ScanTargetForConfigurations: fn(
self: *const IFhReassociation,
TargetUrl: ?BSTR,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
GetConfigurationDetails: fn(
self: *const IFhReassociation,
Index: u32,
UserName: ?*?BSTR,
PcName: ?*?BSTR,
BackupTime: ?*FILETIME,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
SelectConfiguration: fn(
self: *const IFhReassociation,
Index: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
PerformReassociation: fn(
self: *const IFhReassociation,
OverwriteIfExists: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhReassociation_ValidateTarget(self: *const T, TargetUrl: ?BSTR, ValidationResult: ?*FH_DEVICE_VALIDATION_RESULT) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhReassociation.VTable, self.vtable).ValidateTarget(@ptrCast(*const IFhReassociation, self), TargetUrl, ValidationResult);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhReassociation_ScanTargetForConfigurations(self: *const T, TargetUrl: ?BSTR) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhReassociation.VTable, self.vtable).ScanTargetForConfigurations(@ptrCast(*const IFhReassociation, self), TargetUrl);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhReassociation_GetConfigurationDetails(self: *const T, Index: u32, UserName: ?*?BSTR, PcName: ?*?BSTR, BackupTime: ?*FILETIME) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhReassociation.VTable, self.vtable).GetConfigurationDetails(@ptrCast(*const IFhReassociation, self), Index, UserName, PcName, BackupTime);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhReassociation_SelectConfiguration(self: *const T, Index: u32) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhReassociation.VTable, self.vtable).SelectConfiguration(@ptrCast(*const IFhReassociation, self), Index);
}
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IFhReassociation_PerformReassociation(self: *const T, OverwriteIfExists: BOOL) callconv(.Inline) HRESULT {
return @ptrCast(*const IFhReassociation.VTable, self.vtable).PerformReassociation(@ptrCast(*const IFhReassociation, self), OverwriteIfExists);
}
};}
pub usingnamespace MethodMixin(@This());
};
pub const FhBackupStopReason = enum(i32) {
InvalidStopReason = 0,
LimitUserBusyMachineOnAC = 1,
LimitUserIdleMachineOnDC = 2,
LimitUserBusyMachineOnDC = 3,
Cancelled = 4,
};
pub const BackupInvalidStopReason = FhBackupStopReason.InvalidStopReason;
pub const BackupLimitUserBusyMachineOnAC = FhBackupStopReason.LimitUserBusyMachineOnAC;
pub const BackupLimitUserIdleMachineOnDC = FhBackupStopReason.LimitUserIdleMachineOnDC;
pub const BackupLimitUserBusyMachineOnDC = FhBackupStopReason.LimitUserBusyMachineOnDC;
pub const BackupCancelled = FhBackupStopReason.Cancelled;
//--------------------------------------------------------------------------------
// Section: Functions (7)
//--------------------------------------------------------------------------------
// TODO: this type is limited to platform 'windows8.0'
pub extern "fhsvcctl" fn FhServiceOpenPipe(
StartServiceIfStopped: BOOL,
Pipe: ?*FH_SERVICE_PIPE_HANDLE,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.0'
pub extern "fhsvcctl" fn FhServiceClosePipe(
Pipe: FH_SERVICE_PIPE_HANDLE,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.0'
pub extern "fhsvcctl" fn FhServiceStartBackup(
Pipe: FH_SERVICE_PIPE_HANDLE,
LowPriorityIo: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.0'
pub extern "fhsvcctl" fn FhServiceStopBackup(
Pipe: FH_SERVICE_PIPE_HANDLE,
StopTracking: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.0'
pub extern "fhsvcctl" fn FhServiceReloadConfiguration(
Pipe: FH_SERVICE_PIPE_HANDLE,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.0'
pub extern "fhsvcctl" fn FhServiceBlockBackup(
Pipe: FH_SERVICE_PIPE_HANDLE,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.0'
pub extern "fhsvcctl" fn FhServiceUnblockBackup(
Pipe: FH_SERVICE_PIPE_HANDLE,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
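// A minimal usage sketch (illustrative only; error handling and HRESULT checks are
// simplified, and the argument values are assumptions): open the File History service
// pipe, kick off a low-priority backup, and close the pipe.
//
//     var pipe: FH_SERVICE_PIPE_HANDLE = undefined;
//     if (FhServiceOpenPipe(1, &pipe) >= 0) {
//         _ = FhServiceStartBackup(pipe, 1);
//         _ = FhServiceClosePipe(pipe);
//     }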
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
.ansi => struct {
},
.wide => struct {
},
.unspecified => if (@import("builtin").is_test) struct {
} else struct {
},
};
//--------------------------------------------------------------------------------
// Section: Imports (6)
//--------------------------------------------------------------------------------
const BOOL = @import("../foundation.zig").BOOL;
const BSTR = @import("../foundation.zig").BSTR;
const FH_SERVICE_PIPE_HANDLE = @import("../system/windows_programming.zig").FH_SERVICE_PIPE_HANDLE;
const FILETIME = @import("../foundation.zig").FILETIME;
const HRESULT = @import("../foundation.zig").HRESULT;
const IUnknown = @import("../system/com.zig").IUnknown;
test {
@setEvalBranchQuota(
@import("std").meta.declarations(@This()).len * 3
);
// reference all the pub declarations
if (!@import("builtin").is_test) return;
inline for (@import("std").meta.declarations(@This())) |decl| {
if (decl.is_pub) {
_ = decl;
}
}
} | deps/zigwin32/win32/storage/file_history.zig |
const gllparser = @import("../gllparser/gllparser.zig");
const Error = gllparser.Error;
const Parser = gllparser.Parser;
const ParserContext = gllparser.Context;
const Result = gllparser.Result;
const NodeName = gllparser.NodeName;
const ResultStream = gllparser.ResultStream;
const PosKey = gllparser.PosKey;
const ParserPath = gllparser.ParserPath;
const Literal = @import("../parser/literal.zig").Literal;
const LiteralValue = @import("../parser/literal.zig").Value;
const std = @import("std");
const testing = std.testing;
const mem = std.mem;
pub fn Context(comptime Payload: type, comptime V: type) type {
return struct {
/// The parser which should be repeatedly parsed.
parser: *Parser(Payload, V),
/// The minimum number of times the parser must successfully match.
min: usize,
/// The maximum number of times the parser can match, or -1 for unlimited.
max: isize,
};
}
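// Illustrative Context values (`p` here stands for any previously constructed parser):
// a "zero or more" repetition is `.{ .parser = p, .min = 0, .max = -1 }`, while
// "between two and five times" is `.{ .parser = p, .min = 2, .max = 5 }`.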
/// Represents a single value in the stream of repeated values.
///
/// In the case of a non-ambiguous grammar, a `RepeatedAmbiguous` combinator will yield:
///
/// ```
/// Value{
/// node: value1,
/// next: ResultStream(Value{
/// node: value2,
/// next: ...,
/// })
/// }
/// ```
///
/// In the case of an ambiguous grammar, it would yield streams with potentially multiple values
/// (each representing one possible parse path / interpretation of the grammar):
///
/// ```
/// Value{
/// node: value1,
/// next: ResultStream(
/// Value{
/// node: value2variant1,
/// next: ...,
/// },
/// Value{
/// node: value2variant2,
/// next: ...,
/// },
/// )
/// }
/// ```
///
pub fn Value(comptime V: type) type {
return struct {
node: Result(V),
next: *ResultStream(Result(@This())),
pub fn deinit(self: *const @This(), allocator: mem.Allocator) void {
self.next.deinit();
self.node.deinit(allocator);
allocator.destroy(self.next);
}
pub fn flatten(self: *const @This(), allocator: mem.Allocator, subscriber: PosKey, path: ParserPath) Error!ResultStream(Result(V)) {
var dst = try ResultStream(Result(V)).init(allocator, subscriber);
try self.flatten_into(&dst, allocator, subscriber, path);
dst.close(); // TODO(slimsag): why does deferring this not work?
return dst;
}
pub fn flatten_into(self: *const @This(), dst: *ResultStream(Result(V)), allocator: mem.Allocator, subscriber: PosKey, path: ParserPath) Error!void {
try dst.add(self.node.toUnowned());
var sub = self.next.subscribe(subscriber, path, Result(Value(V)).initError(0, "matches only the empty language"));
nosuspend {
while (sub.next()) |next_path| {
switch (next_path.result) {
.err => try dst.add(Result(V).initError(next_path.offset, next_path.result.err)),
else => try next_path.result.value.flatten_into(dst, allocator, subscriber, path),
}
}
}
}
};
}
/// Matches the `input` repeatedly, between `[min, max]` times (inclusive).
///
/// The `input` parser must remain alive for as long as the `RepeatedAmbiguous` parser will be used.
pub fn RepeatedAmbiguous(comptime Payload: type, comptime V: type) type {
return struct {
parser: Parser(Payload, Value(V)) = Parser(Payload, Value(V)).init(parse, nodeName, deinit, countReferencesTo),
input: Context(Payload, V),
const Self = @This();
pub fn init(allocator: mem.Allocator, input: Context(Payload, V)) !*Parser(Payload, Value(V)) {
const self = Self{ .input = input };
return try self.parser.heapAlloc(allocator, self);
}
pub fn initStack(input: Context(Payload, V)) Self {
return Self{ .input = input };
}
pub fn deinit(parser: *Parser(Payload, Value(V)), allocator: mem.Allocator, freed: ?*std.AutoHashMap(usize, void)) void {
const self = @fieldParentPtr(Self, "parser", parser);
self.input.parser.deinit(allocator, freed);
}
pub fn countReferencesTo(parser: *const Parser(Payload, Value(V)), other: usize, freed: *std.AutoHashMap(usize, void)) usize {
const self = @fieldParentPtr(Self, "parser", parser);
if (@ptrToInt(parser) == other) return 1;
return self.input.parser.countReferencesTo(other, freed);
}
pub fn nodeName(parser: *const Parser(Payload, Value(V)), node_name_cache: *std.AutoHashMap(usize, NodeName)) Error!u64 {
const self = @fieldParentPtr(Self, "parser", parser);
var v = std.hash_map.hashString("RepeatedAmbiguous");
v +%= try self.input.parser.nodeName(node_name_cache);
v +%= std.hash_map.getAutoHashFn(usize, void)({}, self.input.min);
v +%= std.hash_map.getAutoHashFn(isize, void)({}, self.input.max);
return v;
}
pub fn parse(parser: *const Parser(Payload, Value(V)), in_ctx: *const ParserContext(Payload, Value(V))) callconv(.Async) Error!void {
const self = @fieldParentPtr(Self, "parser", parser);
var ctx = in_ctx.with(self.input);
defer ctx.results.close();
// Invoke the child parser repeatedly to produce each of our results. Each time we ask
// the child parser to parse, it can produce a set of results (its result stream) which
// are varying parse paths / interpretations. Our set of results (our result stream)
// will contain many more possible paths, for example consider a parser:
//
// S -> [A, B]
//
// Matched once, it can produce one or two separate parse paths / interpretations (A, B,
// or A and B), and we may commit to producing certain ones. But match twice, and it
// could produce:
//
// S -> [AB, BA, AA, BB]
//
// There is an exponential number of repetitive parse paths to follow. Thus, we simply
// follow each path in order, trying one at a time until we commit or reject the
// unwanted paths. We also have two options in how we follow the paths - depth-first in
// order:
//
// AA, AB, BA, BB
//
// Or breadth-first in order:
//
// AA, BA, AB, BB
//
// Depth-first vs. breadth-first could impact the performance of some grammars by
// making it harder to bail out of a given parse path quicker. Similarly, iteration
// order could be more expensive depending on the order of operations, this will be
// slower:
//
// Iteration 0: Try A
// Iteration 0: Try B -> Commit to B
// Iteration 1: Try A
// Iteration 1: Try B -> Commit to B
// Iteration 2: Try A
// Iteration 2: Try B -> Commit to B
//
// Than this:
//
// Iteration 0: Try B -> Commit to B
// Iteration 1: Try B -> Commit to B
// Iteration 2: Try B -> Commit to B
//
// However, the most optimal order is not known ahead of time. Likely the best approach
// would be to assume the next path will be the same as the past path, but in practice
// this would involve more book-keeping and still be a guess. Instead, we just focus on
// exploring all potential paths as quickly as possible (and future efforts will be
// better spent on parallelization of exploring these paths.)
// Return early if we're not trying to parse anything (stream close signals to the
// consumer there were no matches).
if (ctx.input.max == 0) {
return;
}
// First we need to actually invoke the child parser. This will give us [A, B, C] and
// we then invoke RepeatedAmbiguous(child) on the proceeding states to get the associated stream:
//
// stream(
// (A, stream(
// (A, stream(...),
// (B, stream(...),
// (C, stream(...),
// ),
// (B, stream(
// (A, stream(...),
// (B, stream(...),
// (C, stream(...),
// ),
// (C, stream(
// (A, stream(...),
// (B, stream(...),
// (C, stream(...),
// ),
// )
//
const child_node_name = try self.input.parser.nodeName(&in_ctx.memoizer.node_name_cache);
var child_ctx = try in_ctx.initChild(V, child_node_name, ctx.offset);
defer child_ctx.deinitChild();
if (!child_ctx.existing_results) try self.input.parser.parse(&child_ctx);
// For every top-level value (A, B, C in our example above.)
var num_values: usize = 0;
var sub = child_ctx.subscribe();
var offset: usize = ctx.offset;
while (sub.next()) |top_level| {
if (num_values >= ctx.input.max and ctx.input.max != -1) break;
num_values += 1;
switch (top_level.result) {
.err => {
// Going down the path of this top-level value terminated with an error.
if (num_values < 1 or num_values < ctx.input.min) {
try ctx.results.add(Result(Value(V)).initError(top_level.offset, top_level.result.err));
}
continue;
},
else => {
// We got a non-error top-level value (e.g. A, B, C).
// TODO(slimsag): if no consumption, could get stuck forever!
offset = top_level.offset;
// Now get the stream that continues down this path (i.e. the stream
// associated with A, B, C.)
var path_results = try ctx.allocator.create(ResultStream(Result(Value(V))));
path_results.* = try ResultStream(Result(Value(V))).init(ctx.allocator, ctx.key);
var path = RepeatedAmbiguous(Payload, V).initStack(.{
.parser = self.input.parser,
.min = self.input.min,
.max = if (self.input.max == -1) -1 else self.input.max - 1,
});
const path_node_name = try path.parser.nodeName(&in_ctx.memoizer.node_name_cache);
var path_ctx = try in_ctx.initChild(Value(V), path_node_name, top_level.offset);
defer path_ctx.deinitChild();
if (!path_ctx.existing_results) try path.parser.parse(&path_ctx);
var path_results_sub = path_ctx.subscribe();
while (path_results_sub.next()) |next| {
try path_results.add(next.toUnowned());
}
path_results.close();
// Emit our top-level value tuple (e.g. (A, stream(...))
try ctx.results.add(Result(Value(V)).init(top_level.offset, .{
.node = top_level.toUnowned(),
.next = path_results,
}));
},
}
}
if (num_values < ctx.input.min) {
// TODO(slimsag): include number of expected/found matches
try ctx.results.add(Result(Value(V)).initError(offset, "expected more"));
return;
}
return;
}
};
}
test "repeated" {
nosuspend {
const allocator = testing.allocator;
const Payload = void;
const ctx = try ParserContext(Payload, Value(LiteralValue)).init(allocator, "abcabcabc123abc", {});
defer ctx.deinit();
var abcInfinity = try RepeatedAmbiguous(Payload, LiteralValue).init(allocator, .{
.parser = (try Literal(Payload).init(allocator, "abc")).ref(),
.min = 0,
.max = -1,
});
defer abcInfinity.deinit(allocator, null);
try abcInfinity.parse(&ctx);
var sub = ctx.subscribe();
var list = sub.next();
try testing.expect(sub.next() == null); // stream closed
// first element
try testing.expectEqual(@as(usize, 3), list.?.offset);
try testing.expectEqual(@as(usize, 3), list.?.result.value.node.offset);
// flatten the nested multi-dimensional array, since our grammar above is not ambiguous
// this is fine to do and makes testing far easier.
var flattened = try list.?.result.value.flatten(allocator, ctx.key, ctx.path);
defer flattened.deinit();
var flat = flattened.subscribe(ctx.key, ctx.path, Result(LiteralValue).initError(ctx.offset, "matches only the empty language"));
try testing.expectEqual(@as(usize, 3), flat.next().?.offset);
try testing.expectEqual(@as(usize, 6), flat.next().?.offset);
try testing.expectEqual(@as(usize, 9), flat.next().?.offset);
try testing.expect(flat.next() == null); // stream closed
}
} | src/combn/combinator/repeated_ambiguous.zig |
const std = @import("std");
const math = std.math;
const assert = std.debug.assert;
const glfw = @import("glfw");
const gpu = @import("gpu");
const zgpu = @import("zgpu");
const c = zgpu.cimgui;
const zm = @import("zmath");
const zmesh = @import("zmesh");
const wgsl = @import("physically_based_rendering_wgsl.zig");
const content_dir = @import("build_options").content_dir;
const window_title = "zig-gamedev: physically based rendering (wgpu)";
const Vertex = extern struct {
position: [3]f32,
normal: [3]f32,
texcoord: [2]f32,
tangent: [4]f32,
};
const Mesh = struct {
index_offset: u32,
vertex_offset: i32,
num_indices: u32,
num_vertices: u32,
};
const num_mesh_textures = 4;
const cube_mesh = 0;
const helmet_mesh = 1;
const enable_async_shader_compilation = true;
const env_cube_tex_resolution = 1024;
const irradiance_cube_tex_resolution = 128;
const filtered_env_tex_resolution = 512;
const filtered_env_tex_mip_levels = 6;
const brdf_integration_tex_resolution = 512;
const MeshUniforms = extern struct {
object_to_world: zm.Mat,
world_to_clip: zm.Mat,
camera_position: [3]f32,
draw_mode: i32,
};
const DemoState = struct {
gctx: *zgpu.GraphicsContext,
allocator: std.mem.Allocator,
precompute_env_tex_pipe: zgpu.RenderPipelineHandle = .{},
precompute_irradiance_tex_pipe: zgpu.RenderPipelineHandle = .{},
precompute_filtered_env_tex_pipe: zgpu.RenderPipelineHandle = .{},
precompute_brdf_integration_tex_pipe: zgpu.ComputePipelineHandle = .{},
mesh_pipe: zgpu.RenderPipelineHandle = .{},
sample_env_tex_pipe: zgpu.RenderPipelineHandle = .{},
uniform_tex2d_sam_bgl: zgpu.BindGroupLayoutHandle,
uniform_texcube_sam_bgl: zgpu.BindGroupLayoutHandle,
texstorage2d_bgl: zgpu.BindGroupLayoutHandle,
vertex_buf: zgpu.BufferHandle,
index_buf: zgpu.BufferHandle,
depth_tex: zgpu.TextureHandle,
depth_texv: zgpu.TextureViewHandle,
mesh_tex: [num_mesh_textures]zgpu.TextureHandle,
mesh_texv: [num_mesh_textures]zgpu.TextureViewHandle,
env_cube_tex: zgpu.TextureHandle,
env_cube_texv: zgpu.TextureViewHandle,
irradiance_cube_tex: zgpu.TextureHandle,
irradiance_cube_texv: zgpu.TextureViewHandle,
filtered_env_cube_tex: zgpu.TextureHandle,
filtered_env_cube_texv: zgpu.TextureViewHandle,
brdf_integration_tex: zgpu.TextureHandle,
brdf_integration_texv: zgpu.TextureViewHandle,
mesh_bg: zgpu.BindGroupHandle,
env_bg: zgpu.BindGroupHandle,
meshes: std.ArrayList(Mesh),
draw_mode: i32 = 0,
current_hdri_index: i32 = 1,
is_lighting_precomputed: bool = false,
mesh_yaw: f32 = 0.0,
camera: struct {
position: [3]f32 = .{ 3.0, 0.0, 3.0 },
forward: [3]f32 = .{ 0.0, 0.0, 0.0 },
pitch: f32 = 0.0,
yaw: f32 = math.pi + 0.25 * math.pi,
} = .{},
mouse: struct {
cursor: glfw.Window.CursorPos = .{ .xpos = 0.0, .ypos = 0.0 },
} = .{},
};
fn loadAllMeshes(
arena: std.mem.Allocator,
out_meshes: *std.ArrayList(Mesh),
out_vertices: *std.ArrayList(Vertex),
out_indices: *std.ArrayList(u32),
) !void {
var indices = std.ArrayList(u32).init(arena);
var positions = std.ArrayList([3]f32).init(arena);
var normals = std.ArrayList([3]f32).init(arena);
var texcoords = std.ArrayList([2]f32).init(arena);
var tangents = std.ArrayList([4]f32).init(arena);
{
const pre_indices_len = indices.items.len;
const pre_positions_len = positions.items.len;
const data = try zmesh.io.parseAndLoadFile(content_dir ++ "cube.gltf");
defer zmesh.io.cgltf.free(data);
try zmesh.io.appendMeshPrimitive(data, 0, 0, &indices, &positions, &normals, &texcoords, &tangents);
try out_meshes.append(.{
.index_offset = @intCast(u32, pre_indices_len),
.vertex_offset = @intCast(i32, pre_positions_len),
.num_indices = @intCast(u32, indices.items.len - pre_indices_len),
.num_vertices = @intCast(u32, positions.items.len - pre_positions_len),
});
}
{
const pre_indices_len = indices.items.len;
const pre_positions_len = positions.items.len;
const data = try zmesh.io.parseAndLoadFile(content_dir ++ "SciFiHelmet/SciFiHelmet.gltf");
defer zmesh.io.cgltf.free(data);
try zmesh.io.appendMeshPrimitive(data, 0, 0, &indices, &positions, &normals, &texcoords, &tangents);
try out_meshes.append(.{
.index_offset = @intCast(u32, pre_indices_len),
.vertex_offset = @intCast(i32, pre_positions_len),
.num_indices = @intCast(u32, indices.items.len - pre_indices_len),
.num_vertices = @intCast(u32, positions.items.len - pre_positions_len),
});
}
try out_indices.ensureTotalCapacity(indices.items.len);
for (indices.items) |mesh_index| {
out_indices.appendAssumeCapacity(mesh_index);
}
try out_vertices.ensureTotalCapacity(positions.items.len);
for (positions.items) |_, index| {
out_vertices.appendAssumeCapacity(.{
.position = positions.items[index],
.normal = normals.items[index],
.texcoord = texcoords.items[index],
.tangent = tangents.items[index],
});
}
}
fn init(allocator: std.mem.Allocator, window: glfw.Window) !*DemoState {
const gctx = try zgpu.GraphicsContext.init(allocator, window);
var arena_state = std.heap.ArenaAllocator.init(allocator);
defer arena_state.deinit();
const arena = arena_state.allocator();
//
// Create bind group layouts.
//
const mesh_bgl = gctx.createBindGroupLayout(&.{
zgpu.bglBuffer(0, .{ .vertex = true, .fragment = true }, .uniform, true, 0),
zgpu.bglTexture(1, .{ .fragment = true }, .float, .dimension_2d, false),
zgpu.bglTexture(2, .{ .fragment = true }, .float, .dimension_2d, false),
zgpu.bglTexture(3, .{ .fragment = true }, .float, .dimension_2d, false),
zgpu.bglTexture(4, .{ .fragment = true }, .float, .dimension_2d, false),
zgpu.bglTexture(5, .{ .fragment = true }, .float, .dimension_cube, false),
zgpu.bglTexture(6, .{ .fragment = true }, .float, .dimension_cube, false),
zgpu.bglTexture(7, .{ .fragment = true }, .float, .dimension_2d, false),
zgpu.bglSampler(8, .{ .fragment = true }, .filtering),
});
defer gctx.releaseResource(mesh_bgl);
const uniform_tex2d_sam_bgl = gctx.createBindGroupLayout(&.{
zgpu.bglBuffer(0, .{ .vertex = true }, .uniform, true, 0),
zgpu.bglTexture(1, .{ .fragment = true }, .float, .dimension_2d, false),
zgpu.bglSampler(2, .{ .fragment = true }, .filtering),
});
const uniform_texcube_sam_bgl = gctx.createBindGroupLayout(&.{
zgpu.bglBuffer(0, .{ .vertex = true, .fragment = true }, .uniform, true, 0),
zgpu.bglTexture(1, .{ .fragment = true }, .float, .dimension_cube, false),
zgpu.bglSampler(2, .{ .fragment = true }, .filtering),
});
const texstorage2d_bgl = gctx.createBindGroupLayout(&.{
zgpu.bglStorageTexture(0, .{ .compute = true }, .write_only, .rgba16_float, .dimension_2d),
});
//
// Create meshes.
//
zmesh.init(arena);
defer zmesh.deinit();
var meshes = std.ArrayList(Mesh).init(allocator);
var vertices = std.ArrayList(Vertex).init(arena);
var indices = std.ArrayList(u32).init(arena);
try loadAllMeshes(arena, &meshes, &vertices, &indices);
const total_num_vertices = @intCast(u32, vertices.items.len);
const total_num_indices = @intCast(u32, indices.items.len);
// Create a vertex buffer.
const vertex_buf = gctx.createBuffer(.{
.usage = .{ .copy_dst = true, .vertex = true },
.size = total_num_vertices * @sizeOf(Vertex),
});
gctx.queue.writeBuffer(gctx.lookupResource(vertex_buf).?, 0, Vertex, vertices.items);
// Create an index buffer.
const index_buf = gctx.createBuffer(.{
.usage = .{ .copy_dst = true, .index = true },
.size = total_num_indices * @sizeOf(u32),
});
gctx.queue.writeBuffer(gctx.lookupResource(index_buf).?, 0, u32, indices.items);
//
// Create textures.
//
const depth = createDepthTexture(gctx);
// Create mesh textures.
const mesh_texture_paths = &[num_mesh_textures][:0]const u8{
content_dir ++ "SciFiHelmet/SciFiHelmet_AmbientOcclusion.png",
content_dir ++ "SciFiHelmet/SciFiHelmet_BaseColor.png",
content_dir ++ "SciFiHelmet/SciFiHelmet_MetallicRoughness.png",
content_dir ++ "SciFiHelmet/SciFiHelmet_Normal.png",
};
var mesh_tex: [num_mesh_textures]zgpu.TextureHandle = undefined;
var mesh_texv: [num_mesh_textures]zgpu.TextureViewHandle = undefined;
for (mesh_texture_paths) |path, tex_index| {
var image = try zgpu.stbi.Image(u8).init(path, 4);
defer image.deinit();
mesh_tex[tex_index] = gctx.createTexture(.{
.usage = .{ .texture_binding = true, .copy_dst = true },
.size = .{
.width = image.width,
.height = image.height,
.depth_or_array_layers = 1,
},
.format = .rgba8_unorm,
.mip_level_count = math.log2_int(u32, math.max(image.width, image.height)) + 1,
});
mesh_texv[tex_index] = gctx.createTextureView(mesh_tex[tex_index], .{});
gctx.queue.writeTexture(
&.{ .texture = gctx.lookupResource(mesh_tex[tex_index]).? },
&.{
.bytes_per_row = image.bytes_per_row,
.rows_per_image = image.height,
},
&.{ .width = image.width, .height = image.height },
u8,
image.data,
);
}
// Create an empty env. cube texture (we will render to it).
const env_cube_tex = gctx.createTexture(.{
.usage = .{ .texture_binding = true, .render_attachment = true, .copy_dst = true },
.size = .{
.width = env_cube_tex_resolution,
.height = env_cube_tex_resolution,
.depth_or_array_layers = 6,
},
.format = .rgba16_float,
.mip_level_count = math.log2_int(u32, env_cube_tex_resolution) + 1,
});
const env_cube_texv = gctx.createTextureView(env_cube_tex, .{
.dimension = .dimension_cube,
});
// Create an empty irradiance cube texture (we will render to it).
const irradiance_cube_tex = gctx.createTexture(.{
.usage = .{ .texture_binding = true, .render_attachment = true, .copy_dst = true },
.size = .{
.width = irradiance_cube_tex_resolution,
.height = irradiance_cube_tex_resolution,
.depth_or_array_layers = 6,
},
.format = .rgba16_float,
.mip_level_count = math.log2_int(u32, irradiance_cube_tex_resolution) + 1,
});
const irradiance_cube_texv = gctx.createTextureView(irradiance_cube_tex, .{
.dimension = .dimension_cube,
});
// Create an empty filtered env. cube texture (we will render to it).
const filtered_env_cube_tex = gctx.createTexture(.{
.usage = .{ .texture_binding = true, .render_attachment = true },
.size = .{
.width = filtered_env_tex_resolution,
.height = filtered_env_tex_resolution,
.depth_or_array_layers = 6,
},
.format = .rgba16_float,
.mip_level_count = filtered_env_tex_mip_levels,
});
const filtered_env_cube_texv = gctx.createTextureView(filtered_env_cube_tex, .{
.dimension = .dimension_cube,
});
// Create an empty BRDF integration texture (we will generate its content in a compute shader).
const brdf_integration_tex = gctx.createTexture(.{
.usage = .{ .texture_binding = true, .storage_binding = true },
.size = .{
.width = brdf_integration_tex_resolution,
.height = brdf_integration_tex_resolution,
},
.format = .rgba16_float,
.mip_level_count = 1,
});
const brdf_integration_texv = gctx.createTextureView(brdf_integration_tex, .{});
//
// Create samplers.
//
const aniso_sam = gctx.createSampler(.{
.mag_filter = .linear,
.min_filter = .linear,
.mipmap_filter = .linear,
.max_anisotropy = 16,
});
const trilinear_sam = gctx.createSampler(.{
.mag_filter = .linear,
.min_filter = .linear,
.mipmap_filter = .linear,
});
//
    // Generate mipmaps on the GPU.
//
{
const commands = commands: {
const encoder = gctx.device.createCommandEncoder(null);
defer encoder.release();
for (mesh_tex) |texture| {
gctx.generateMipmaps(arena, encoder, texture);
}
break :commands encoder.finish(null);
};
defer commands.release();
gctx.submit(&.{commands});
}
//
// Create bind groups.
//
const mesh_bg = gctx.createBindGroup(mesh_bgl, &[_]zgpu.BindGroupEntryInfo{
.{ .binding = 0, .buffer_handle = gctx.uniforms.buffer, .offset = 0, .size = @sizeOf(MeshUniforms) },
.{ .binding = 1, .texture_view_handle = mesh_texv[0] },
.{ .binding = 2, .texture_view_handle = mesh_texv[1] },
.{ .binding = 3, .texture_view_handle = mesh_texv[2] },
.{ .binding = 4, .texture_view_handle = mesh_texv[3] },
.{ .binding = 5, .texture_view_handle = irradiance_cube_texv },
.{ .binding = 6, .texture_view_handle = filtered_env_cube_texv },
.{ .binding = 7, .texture_view_handle = brdf_integration_texv },
.{ .binding = 8, .sampler_handle = aniso_sam },
});
const env_bg = gctx.createBindGroup(uniform_texcube_sam_bgl, &[_]zgpu.BindGroupEntryInfo{
.{ .binding = 0, .buffer_handle = gctx.uniforms.buffer, .offset = 0, .size = @sizeOf(zm.Mat) },
.{ .binding = 1, .texture_view_handle = env_cube_texv },
.{ .binding = 2, .sampler_handle = trilinear_sam },
});
const demo = try allocator.create(DemoState);
demo.* = .{
.gctx = gctx,
.allocator = allocator,
.uniform_tex2d_sam_bgl = uniform_tex2d_sam_bgl,
.uniform_texcube_sam_bgl = uniform_texcube_sam_bgl,
.texstorage2d_bgl = texstorage2d_bgl,
.vertex_buf = vertex_buf,
.index_buf = index_buf,
.depth_tex = depth.tex,
.depth_texv = depth.texv,
.mesh_tex = mesh_tex,
.mesh_texv = mesh_texv,
.env_cube_tex = env_cube_tex,
.env_cube_texv = env_cube_texv,
.irradiance_cube_tex = irradiance_cube_tex,
.irradiance_cube_texv = irradiance_cube_texv,
.filtered_env_cube_tex = filtered_env_cube_tex,
.filtered_env_cube_texv = filtered_env_cube_texv,
.brdf_integration_tex = brdf_integration_tex,
.brdf_integration_texv = brdf_integration_texv,
.mesh_bg = mesh_bg,
.env_bg = env_bg,
.meshes = meshes,
};
//
// Create pipelines.
//
createRenderPipe(
allocator,
gctx,
&.{mesh_bgl},
wgsl.mesh_vs,
wgsl.mesh_fs,
zgpu.GraphicsContext.swapchain_format,
false,
gpu.DepthStencilState{
.format = .depth32_float,
.depth_write_enabled = true,
.depth_compare = .less,
},
&demo.mesh_pipe,
);
createRenderPipe(
allocator,
gctx,
&.{uniform_texcube_sam_bgl},
wgsl.sample_env_tex_vs,
wgsl.sample_env_tex_fs,
zgpu.GraphicsContext.swapchain_format,
true,
gpu.DepthStencilState{
.format = .depth32_float,
.depth_write_enabled = false,
.depth_compare = .less_equal,
},
&demo.sample_env_tex_pipe,
);
createRenderPipe(
allocator,
gctx,
&.{uniform_tex2d_sam_bgl},
wgsl.precompute_env_tex_vs,
wgsl.precompute_env_tex_fs,
.rgba16_float,
true,
null,
&demo.precompute_env_tex_pipe,
);
createRenderPipe(
allocator,
gctx,
&.{uniform_texcube_sam_bgl},
wgsl.precompute_irradiance_tex_vs,
wgsl.precompute_irradiance_tex_fs,
.rgba16_float,
true,
null,
&demo.precompute_irradiance_tex_pipe,
);
createRenderPipe(
allocator,
gctx,
&.{uniform_texcube_sam_bgl},
wgsl.precompute_filtered_env_tex_vs,
wgsl.precompute_filtered_env_tex_fs,
.rgba16_float,
true,
null,
&demo.precompute_filtered_env_tex_pipe,
);
{
const pl = gctx.createPipelineLayout(&.{texstorage2d_bgl});
defer gctx.releaseResource(pl);
const cs_mod = gctx.device.createShaderModule(&gpu.ShaderModule.Descriptor{
.code = .{ .wgsl = wgsl.precompute_brdf_integration_tex_cs },
});
defer cs_mod.release();
const pipe_desc = gpu.ComputePipeline.Descriptor{
.compute = .{
.module = cs_mod,
.entry_point = "main",
},
};
if (enable_async_shader_compilation) {
gctx.createComputePipelineAsync(allocator, pl, pipe_desc, &demo.precompute_brdf_integration_tex_pipe);
} else {
demo.precompute_brdf_integration_tex_pipe = gctx.createComputePipeline(pl, pipe_desc);
}
}
return demo;
}
fn deinit(allocator: std.mem.Allocator, demo: *DemoState) void {
demo.meshes.deinit();
demo.gctx.deinit(allocator);
allocator.destroy(demo);
}
fn update(demo: *DemoState) void {
zgpu.gui.newFrame(demo.gctx.swapchain_descriptor.width, demo.gctx.swapchain_descriptor.height);
if (c.igBegin("Demo Settings", null, c.ImGuiWindowFlags_NoMove | c.ImGuiWindowFlags_NoResize)) {
c.igBulletText(
"Average : %.3f ms/frame (%.1f fps)",
demo.gctx.stats.average_cpu_time,
demo.gctx.stats.fps,
);
c.igBulletText("Left Mouse Button + drag : rotate helmet");
c.igBulletText("Right Mouse Button + drag : rotate camera");
c.igBulletText("W, A, S, D : move camera");
c.igSpacing();
c.igSpacing();
c.igBulletText("Current HDRI : ");
c.igSameLine(0.0, 0.0);
if (c.igCombo_Str(
"##",
&demo.current_hdri_index,
"Newport Loft\x00Drackenstein Quarry\x00Freight Station\x00\x00",
-1,
)) {
demo.is_lighting_precomputed = false;
}
c.igSpacing();
c.igSpacing();
_ = c.igRadioButton_IntPtr("Draw PBR effect", &demo.draw_mode, 0);
_ = c.igRadioButton_IntPtr("Draw Ambient Occlusion texture", &demo.draw_mode, 1);
_ = c.igRadioButton_IntPtr("Draw Base Color texture", &demo.draw_mode, 2);
_ = c.igRadioButton_IntPtr("Draw Metallic texture", &demo.draw_mode, 3);
_ = c.igRadioButton_IntPtr("Draw Roughness texture", &demo.draw_mode, 4);
_ = c.igRadioButton_IntPtr("Draw Normal texture", &demo.draw_mode, 5);
}
c.igEnd();
const window = demo.gctx.window;
// Handle camera rotation with mouse.
{
const cursor = window.getCursorPos() catch unreachable;
const delta_x = @floatCast(f32, cursor.xpos - demo.mouse.cursor.xpos);
const delta_y = @floatCast(f32, cursor.ypos - demo.mouse.cursor.ypos);
demo.mouse.cursor.xpos = cursor.xpos;
demo.mouse.cursor.ypos = cursor.ypos;
if (window.getMouseButton(.left) == .press) {
demo.mesh_yaw += 0.0025 * delta_x;
demo.mesh_yaw = zm.modAngle(demo.mesh_yaw);
} else if (window.getMouseButton(.right) == .press) {
demo.camera.pitch += 0.0025 * delta_y;
demo.camera.yaw += 0.0025 * delta_x;
demo.camera.pitch = math.min(demo.camera.pitch, 0.48 * math.pi);
demo.camera.pitch = math.max(demo.camera.pitch, -0.48 * math.pi);
demo.camera.yaw = zm.modAngle(demo.camera.yaw);
}
}
// Handle camera movement with 'WASD' keys.
{
const speed = zm.f32x4s(2.0);
const delta_time = zm.f32x4s(demo.gctx.stats.delta_time);
const transform = zm.mul(zm.rotationX(demo.camera.pitch), zm.rotationY(demo.camera.yaw));
var forward = zm.normalize3(zm.mul(zm.f32x4(0.0, 0.0, 1.0, 0.0), transform));
zm.storeArr3(&demo.camera.forward, forward);
const right = speed * delta_time * zm.normalize3(zm.cross3(zm.f32x4(0.0, 1.0, 0.0, 0.0), forward));
forward = speed * delta_time * forward;
var cam_pos = zm.loadArr3(demo.camera.position);
if (window.getKey(.w) == .press) {
cam_pos += forward;
} else if (window.getKey(.s) == .press) {
cam_pos -= forward;
}
if (window.getKey(.d) == .press) {
cam_pos += right;
} else if (window.getKey(.a) == .press) {
cam_pos -= right;
}
zm.storeArr3(&demo.camera.position, cam_pos);
}
}
fn draw(demo: *DemoState) void {
const gctx = demo.gctx;
const fb_width = gctx.swapchain_descriptor.width;
const fb_height = gctx.swapchain_descriptor.height;
const cam_world_to_view = zm.lookToLh(
zm.loadArr3(demo.camera.position),
zm.loadArr3(demo.camera.forward),
zm.f32x4(0.0, 1.0, 0.0, 0.0),
);
const cam_view_to_clip = zm.perspectiveFovLh(
0.25 * math.pi,
@intToFloat(f32, fb_width) / @intToFloat(f32, fb_height),
0.01,
200.0,
);
const cam_world_to_clip = zm.mul(cam_world_to_view, cam_view_to_clip);
const back_buffer_view = gctx.swapchain.getCurrentTextureView();
defer back_buffer_view.release();
const commands = commands: {
const encoder = gctx.device.createCommandEncoder(null);
defer encoder.release();
if (!demo.is_lighting_precomputed) {
precomputeImageLighting(demo, encoder);
}
// Draw SciFiHelmet.
pass: {
const vb_info = gctx.lookupResourceInfo(demo.vertex_buf) orelse break :pass;
const ib_info = gctx.lookupResourceInfo(demo.index_buf) orelse break :pass;
const mesh_pipe = gctx.lookupResource(demo.mesh_pipe) orelse break :pass;
const mesh_bg = gctx.lookupResource(demo.mesh_bg) orelse break :pass;
const depth_texv = gctx.lookupResource(demo.depth_texv) orelse break :pass;
const color_attachment = gpu.RenderPassColorAttachment{
.view = back_buffer_view,
.load_op = .clear,
.store_op = .store,
};
const depth_attachment = gpu.RenderPassDepthStencilAttachment{
.view = depth_texv,
.depth_load_op = .clear,
.depth_store_op = .store,
.depth_clear_value = 1.0,
};
const render_pass_info = gpu.RenderPassEncoder.Descriptor{
.color_attachments = &.{color_attachment},
.depth_stencil_attachment = &depth_attachment,
};
const pass = encoder.beginRenderPass(&render_pass_info);
defer {
pass.end();
pass.release();
}
pass.setVertexBuffer(0, vb_info.gpuobj.?, 0, vb_info.size);
pass.setIndexBuffer(ib_info.gpuobj.?, .uint32, 0, ib_info.size);
pass.setPipeline(mesh_pipe);
const object_to_world = zm.rotationY(demo.mesh_yaw);
const mem = gctx.uniformsAllocate(MeshUniforms, 1);
mem.slice[0] = .{
.object_to_world = zm.transpose(object_to_world),
.world_to_clip = zm.transpose(cam_world_to_clip),
.camera_position = demo.camera.position,
.draw_mode = demo.draw_mode,
};
pass.setBindGroup(0, mesh_bg, &.{mem.offset});
pass.drawIndexed(
demo.meshes.items[helmet_mesh].num_indices,
1,
demo.meshes.items[helmet_mesh].index_offset,
demo.meshes.items[helmet_mesh].vertex_offset,
0,
);
}
// Draw env. cube texture.
pass: {
const vb_info = gctx.lookupResourceInfo(demo.vertex_buf) orelse break :pass;
const ib_info = gctx.lookupResourceInfo(demo.index_buf) orelse break :pass;
const env_pipe = gctx.lookupResource(demo.sample_env_tex_pipe) orelse break :pass;
const env_bg = gctx.lookupResource(demo.env_bg) orelse break :pass;
const depth_texv = gctx.lookupResource(demo.depth_texv) orelse break :pass;
const color_attachment = gpu.RenderPassColorAttachment{
.view = back_buffer_view,
.load_op = .load,
.store_op = .store,
};
const depth_attachment = gpu.RenderPassDepthStencilAttachment{
.view = depth_texv,
.depth_load_op = .load,
.depth_store_op = .store,
.depth_clear_value = 1.0,
};
const render_pass_info = gpu.RenderPassEncoder.Descriptor{
.color_attachments = &.{color_attachment},
.depth_stencil_attachment = &depth_attachment,
};
const pass = encoder.beginRenderPass(&render_pass_info);
defer {
pass.end();
pass.release();
}
pass.setVertexBuffer(0, vb_info.gpuobj.?, 0, vb_info.size);
pass.setIndexBuffer(ib_info.gpuobj.?, .uint32, 0, ib_info.size);
pass.setPipeline(env_pipe);
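            // Zero out the translation so the environment cube stays centered on the camera.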
var world_to_view_origin = cam_world_to_view;
world_to_view_origin[3] = zm.f32x4(0.0, 0.0, 0.0, 1.0);
const mem = gctx.uniformsAllocate(zm.Mat, 1);
mem.slice[0] = zm.transpose(zm.mul(world_to_view_origin, cam_view_to_clip));
pass.setBindGroup(0, env_bg, &.{mem.offset});
pass.drawIndexed(
demo.meshes.items[cube_mesh].num_indices,
1,
demo.meshes.items[cube_mesh].index_offset,
demo.meshes.items[cube_mesh].vertex_offset,
0,
);
}
// Gui pass.
{
const color_attachment = gpu.RenderPassColorAttachment{
.view = back_buffer_view,
.load_op = .load,
.store_op = .store,
};
const render_pass_info = gpu.RenderPassEncoder.Descriptor{
.color_attachments = &.{color_attachment},
};
const pass = encoder.beginRenderPass(&render_pass_info);
defer {
pass.end();
pass.release();
}
zgpu.gui.draw(pass);
}
break :commands encoder.finish(null);
};
defer commands.release();
gctx.submit(&.{commands});
if (gctx.present() == .swap_chain_resized) {
// Release old depth texture.
gctx.releaseResource(demo.depth_texv);
gctx.destroyResource(demo.depth_tex);
// Create a new depth texture to match the new window size.
const depth = createDepthTexture(gctx);
demo.depth_tex = depth.tex;
demo.depth_texv = depth.texv;
}
}
fn createDepthTexture(gctx: *zgpu.GraphicsContext) struct {
tex: zgpu.TextureHandle,
texv: zgpu.TextureViewHandle,
} {
const tex = gctx.createTexture(.{
.usage = .{ .render_attachment = true },
.dimension = .dimension_2d,
.size = .{
.width = gctx.swapchain_descriptor.width,
.height = gctx.swapchain_descriptor.height,
.depth_or_array_layers = 1,
},
.format = .depth32_float,
.mip_level_count = 1,
.sample_count = 1,
});
const texv = gctx.createTextureView(tex, .{});
return .{ .tex = tex, .texv = texv };
}
fn precomputeImageLighting(
demo: *DemoState,
encoder: gpu.CommandEncoder,
) void {
const gctx = demo.gctx;
_ = gctx.lookupResource(demo.precompute_env_tex_pipe) orelse return;
_ = gctx.lookupResource(demo.precompute_irradiance_tex_pipe) orelse return;
_ = gctx.lookupResource(demo.precompute_filtered_env_tex_pipe) orelse return;
_ = gctx.lookupResource(demo.precompute_brdf_integration_tex_pipe) orelse return;
// Create HDR source texture (this is an equirect texture, we will generate cubemap from it).
const hdr_source_tex = hdr_source_tex: {
const hdri_paths = [_][:0]const u8{
content_dir ++ "Newport_Loft.hdr",
content_dir ++ "drackenstein_quarry_4k.hdr",
content_dir ++ "freight_station_4k.hdr",
};
zgpu.stbi.setFlipVerticallyOnLoad(true);
var image = zgpu.stbi.Image(f16).init(
hdri_paths[@intCast(usize, demo.current_hdri_index)],
4,
) catch unreachable;
defer {
image.deinit();
zgpu.stbi.setFlipVerticallyOnLoad(false);
}
const hdr_source_tex = gctx.createTexture(.{
.usage = .{ .texture_binding = true, .copy_dst = true },
.size = .{
.width = image.width,
.height = image.height,
.depth_or_array_layers = 1,
},
.format = .rgba16_float,
.mip_level_count = 1,
});
gctx.queue.writeTexture(
&.{ .texture = gctx.lookupResource(hdr_source_tex).? },
&.{
.bytes_per_row = image.bytes_per_row,
.rows_per_image = image.height,
},
&.{ .width = image.width, .height = image.height },
f16,
image.data,
);
break :hdr_source_tex hdr_source_tex;
};
defer gctx.releaseResource(hdr_source_tex);
const hdr_source_texv = gctx.createTextureView(hdr_source_tex, .{});
defer gctx.releaseResource(hdr_source_texv);
var arena_state = std.heap.ArenaAllocator.init(demo.allocator);
defer arena_state.deinit();
const arena = arena_state.allocator();
//
    // Step 1. Render the equirectangular HDR texture onto the env. cubemap and generate its mipmaps.
//
drawToCubeTexture(
gctx,
encoder,
demo.uniform_tex2d_sam_bgl,
demo.precompute_env_tex_pipe,
hdr_source_texv, // Source texture view.
demo.env_cube_tex, // Dest. texture.
0, // Dest. mipmap level to render to.
demo.vertex_buf,
demo.index_buf,
);
gctx.generateMipmaps(arena, encoder, demo.env_cube_tex);
//
    // Step 2. Convolve the env. cubemap into the diffuse irradiance cubemap.
//
drawToCubeTexture(
gctx,
encoder,
demo.uniform_texcube_sam_bgl,
demo.precompute_irradiance_tex_pipe,
demo.env_cube_texv, // Source texture view.
demo.irradiance_cube_tex, // Dest. texture.
0, // Dest. mipmap level to render to.
demo.vertex_buf,
demo.index_buf,
);
gctx.generateMipmaps(arena, encoder, demo.irradiance_cube_tex);
//
    // Step 3. Pre-filter the env. cubemap into every mip level of the filtered env. cubemap (one roughness per mip).
//
{
var mip_level: u32 = 0;
while (mip_level < filtered_env_tex_mip_levels) : (mip_level += 1) {
drawToCubeTexture(
gctx,
encoder,
demo.uniform_texcube_sam_bgl,
demo.precompute_filtered_env_tex_pipe,
demo.env_cube_texv, // Source texture view.
demo.filtered_env_cube_tex, // Dest. texture.
mip_level, // Dest. mipmap level to render to.
demo.vertex_buf,
demo.index_buf,
);
}
}
//
    // Step 4. Generate the BRDF integration LUT with a compute shader.
//
{
const bg = gctx.createBindGroup(demo.texstorage2d_bgl, &[_]zgpu.BindGroupEntryInfo{
.{ .binding = 0, .texture_view_handle = demo.brdf_integration_texv },
});
defer gctx.releaseResource(bg);
const pass = encoder.beginComputePass(null);
defer {
pass.end();
pass.release();
}
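        // Assumes the BRDF integration compute shader uses an 8x8 workgroup size.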
const num_groups = @divExact(brdf_integration_tex_resolution, 8);
pass.setPipeline(gctx.lookupResource(demo.precompute_brdf_integration_tex_pipe).?);
pass.setBindGroup(0, gctx.lookupResource(bg).?, null);
pass.dispatch(num_groups, num_groups, 1);
}
demo.is_lighting_precomputed = true;
}
fn drawToCubeTexture(
gctx: *zgpu.GraphicsContext,
encoder: gpu.CommandEncoder,
pipe_bgl: zgpu.BindGroupLayoutHandle,
pipe: zgpu.RenderPipelineHandle,
source_texv: zgpu.TextureViewHandle,
dest_tex: zgpu.TextureHandle,
dest_mip_level: u32,
vertex_buf: zgpu.BufferHandle,
index_buf: zgpu.BufferHandle,
) void {
const dest_tex_info = gctx.lookupResourceInfo(dest_tex) orelse return;
const vb_info = gctx.lookupResourceInfo(vertex_buf) orelse return;
const ib_info = gctx.lookupResourceInfo(index_buf) orelse return;
const pipeline = gctx.lookupResource(pipe) orelse return;
assert(dest_mip_level < dest_tex_info.mip_level_count);
const dest_tex_width = dest_tex_info.size.width >> @intCast(u5, dest_mip_level);
const dest_tex_height = dest_tex_info.size.height >> @intCast(u5, dest_mip_level);
assert(dest_tex_width == dest_tex_height);
const sam = gctx.createSampler(.{
.mag_filter = .linear,
.min_filter = .linear,
.mipmap_filter = .linear,
});
defer gctx.releaseResource(sam);
const Uniforms = extern struct {
object_to_clip: zm.Mat,
roughness: f32,
};
const bg = gctx.createBindGroup(pipe_bgl, &[_]zgpu.BindGroupEntryInfo{
.{ .binding = 0, .buffer_handle = gctx.uniforms.buffer, .offset = 0, .size = @sizeOf(Uniforms) },
.{ .binding = 1, .texture_view_handle = source_texv },
.{ .binding = 2, .sampler_handle = sam },
});
defer gctx.releaseResource(bg);
const zero = zm.f32x4(0.0, 0.0, 0.0, 0.0);
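    // Look-at matrices for the six cubemap faces: +X, -X, +Y, -Y, +Z, -Z.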
const object_to_view = [_]zm.Mat{
zm.lookToLh(zero, zm.f32x4(1.0, 0.0, 0.0, 0.0), zm.f32x4(0.0, 1.0, 0.0, 0.0)),
zm.lookToLh(zero, zm.f32x4(-1.0, 0.0, 0.0, 0.0), zm.f32x4(0.0, 1.0, 0.0, 0.0)),
zm.lookToLh(zero, zm.f32x4(0.0, 1.0, 0.0, 0.0), zm.f32x4(0.0, 0.0, -1.0, 0.0)),
zm.lookToLh(zero, zm.f32x4(0.0, -1.0, 0.0, 0.0), zm.f32x4(0.0, 0.0, 1.0, 0.0)),
zm.lookToLh(zero, zm.f32x4(0.0, 0.0, 1.0, 0.0), zm.f32x4(0.0, 1.0, 0.0, 0.0)),
zm.lookToLh(zero, zm.f32x4(0.0, 0.0, -1.0, 0.0), zm.f32x4(0.0, 1.0, 0.0, 0.0)),
};
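    // 90 degree FOV with a 1:1 aspect ratio covers exactly one cube face.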
const view_to_clip = zm.perspectiveFovLh(math.pi * 0.5, 1.0, 0.1, 10.0);
var cube_face_idx: u32 = 0;
while (cube_face_idx < 6) : (cube_face_idx += 1) {
const face_texv = gctx.createTextureView(dest_tex, .{
.dimension = .dimension_2d,
.base_mip_level = dest_mip_level,
.mip_level_count = 1,
.base_array_layer = cube_face_idx,
.array_layer_count = 1,
});
defer gctx.releaseResource(face_texv);
const color_attachment = gpu.RenderPassColorAttachment{
.view = gctx.lookupResource(face_texv).?,
.load_op = .clear,
.store_op = .store,
};
const render_pass_info = gpu.RenderPassEncoder.Descriptor{
.color_attachments = &.{color_attachment},
};
const pass = encoder.beginRenderPass(&render_pass_info);
defer {
pass.end();
pass.release();
}
pass.setVertexBuffer(0, vb_info.gpuobj.?, 0, vb_info.size);
pass.setIndexBuffer(ib_info.gpuobj.?, .uint32, 0, ib_info.size);
pass.setPipeline(pipeline);
const mem = gctx.uniformsAllocate(Uniforms, 1);
mem.slice[0] = .{
.object_to_clip = zm.transpose(zm.mul(object_to_view[cube_face_idx], view_to_clip)),
.roughness = @intToFloat(f32, dest_mip_level + 1) / @intToFloat(f32, filtered_env_tex_mip_levels),
};
pass.setBindGroup(0, gctx.lookupResource(bg).?, &.{mem.offset});
// NOTE: We assume that the first mesh in vertex/index buffer is a 'cube'.
pass.drawIndexed(36, 1, 0, 0, 0);
}
}
fn createRenderPipe(
allocator: std.mem.Allocator,
gctx: *zgpu.GraphicsContext,
bgls: []const zgpu.BindGroupLayoutHandle,
wgsl_vs: [:0]const u8,
wgsl_fs: [:0]const u8,
format: gpu.Texture.Format,
only_position_attrib: bool,
depth_state: ?gpu.DepthStencilState,
out_pipe: *zgpu.RenderPipelineHandle,
) void {
const pl = gctx.createPipelineLayout(bgls);
defer gctx.releaseResource(pl);
const vs_desc = gpu.ShaderModule.Descriptor{ .code = .{ .wgsl = wgsl_vs.ptr } };
const vs_mod = gctx.device.createShaderModule(&vs_desc);
defer vs_mod.release();
const fs_desc = gpu.ShaderModule.Descriptor{ .code = .{ .wgsl = wgsl_fs.ptr } };
const fs_mod = gctx.device.createShaderModule(&fs_desc);
defer fs_mod.release();
const color_target = gpu.ColorTargetState{
.format = format,
};
const vertex_attributes = [_]gpu.VertexAttribute{
.{ .format = .float32x3, .offset = 0, .shader_location = 0 },
.{ .format = .float32x3, .offset = @offsetOf(Vertex, "normal"), .shader_location = 1 },
.{ .format = .float32x2, .offset = @offsetOf(Vertex, "texcoord"), .shader_location = 2 },
.{ .format = .float32x4, .offset = @offsetOf(Vertex, "tangent"), .shader_location = 3 },
};
const vertex_buffer_layout = gpu.VertexBufferLayout{
.array_stride = @sizeOf(Vertex),
.attribute_count = if (only_position_attrib) 1 else vertex_attributes.len,
.attributes = &vertex_attributes,
};
// Create a render pipeline.
const pipe_desc = gpu.RenderPipeline.Descriptor{
.vertex = gpu.VertexState{
.module = vs_mod,
.entry_point = "main",
.buffers = &.{vertex_buffer_layout},
},
.fragment = &gpu.FragmentState{
.module = fs_mod,
.entry_point = "main",
.targets = &.{color_target},
},
.depth_stencil = if (depth_state) |ds| &ds else null,
};
if (enable_async_shader_compilation) {
gctx.createRenderPipelineAsync(allocator, pl, pipe_desc, out_pipe);
} else {
out_pipe.* = gctx.createRenderPipeline(pl, pipe_desc);
}
}
pub fn main() !void {
try glfw.init(.{});
defer glfw.terminate();
zgpu.checkSystem(content_dir) catch {
// In case of error zgpu.checkSystem() will print error message.
return;
};
const window = try glfw.Window.create(1400, 1000, window_title, null, null, .{
.client_api = .no_api,
.cocoa_retina_framebuffer = true,
});
defer window.destroy();
try window.setSizeLimits(.{ .width = 400, .height = 400 }, .{ .width = null, .height = null });
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
const demo = try init(allocator, window);
defer deinit(allocator, demo);
zgpu.gui.init(window, demo.gctx.device, content_dir, "Roboto-Medium.ttf", 25.0);
defer zgpu.gui.deinit();
while (!window.shouldClose()) {
try glfw.pollEvents();
update(demo);
draw(demo);
}
} | samples/physically_based_rendering_wgpu/src/physically_based_rendering_wgpu.zig |
const std = @import("std");
const assert = std.debug.assert;
const LLVMBool = bool;
pub const LLVMAttributeIndex = c_uint;
pub const ValueRef = opaque {
pub const addAttributeAtIndex = LLVMAddAttributeAtIndex;
extern fn LLVMAddAttributeAtIndex(*const ValueRef, Idx: LLVMAttributeIndex, A: *const AttributeRef) void;
pub const appendBasicBlock = LLVMAppendBasicBlock;
extern fn LLVMAppendBasicBlock(Fn: *const ValueRef, Name: [*:0]const u8) *const BasicBlockRef;
pub const getFirstBasicBlock = LLVMGetFirstBasicBlock;
extern fn LLVMGetFirstBasicBlock(Fn: *const ValueRef) ?*const BasicBlockRef;
// Helper functions
    // TODO: Do we want to put these functions here? It allows for convenient function calls
// on ValueRef: llvm_fn.addFnAttr("noreturn")
fn addAttr(val: *const ValueRef, index: LLVMAttributeIndex, name: []const u8) void {
const kind_id = getEnumAttributeKindForName(name.ptr, name.len);
assert(kind_id != 0);
const llvm_attr = ContextRef.getGlobal().createEnumAttribute(kind_id, 0);
val.addAttributeAtIndex(index, llvm_attr);
}
pub fn addFnAttr(val: *const ValueRef, attr_name: []const u8) void {
// TODO: improve this API, `addAttr(-1, attr_name)`
val.addAttr(std.math.maxInt(LLVMAttributeIndex), attr_name);
}
};
pub const TypeRef = opaque {
pub const functionType = LLVMFunctionType;
extern fn LLVMFunctionType(ReturnType: *const TypeRef, ParamTypes: ?[*]*const TypeRef, ParamCount: c_uint, IsVarArg: LLVMBool) *const TypeRef;
pub const constNull = LLVMConstNull;
extern fn LLVMConstNull(Ty: *const TypeRef) *const ValueRef;
pub const constAllOnes = LLVMConstAllOnes;
extern fn LLVMConstAllOnes(Ty: *const TypeRef) *const ValueRef;
pub const getUndef = LLVMGetUndef;
extern fn LLVMGetUndef(Ty: *const TypeRef) *const ValueRef;
};
pub const ModuleRef = opaque {
pub const createWithName = LLVMModuleCreateWithName;
extern fn LLVMModuleCreateWithName(ModuleID: [*:0]const u8) *const ModuleRef;
pub const disposeModule = LLVMDisposeModule;
extern fn LLVMDisposeModule(*const ModuleRef) void;
pub const verifyModule = LLVMVerifyModule;
extern fn LLVMVerifyModule(*const ModuleRef, Action: VerifierFailureAction, OutMessage: *[*:0]const u8) LLVMBool;
pub const addFunction = LLVMAddFunction;
extern fn LLVMAddFunction(*const ModuleRef, Name: [*:0]const u8, FunctionTy: *const TypeRef) *const ValueRef;
pub const getNamedFunction = LLVMGetNamedFunction;
extern fn LLVMGetNamedFunction(*const ModuleRef, Name: [*:0]const u8) ?*const ValueRef;
pub const printToString = LLVMPrintModuleToString;
extern fn LLVMPrintModuleToString(*const ModuleRef) [*:0]const u8;
};
pub const disposeMessage = LLVMDisposeMessage;
extern fn LLVMDisposeMessage(Message: [*:0]const u8) void;
pub const VerifierFailureAction = extern enum {
AbortProcess,
PrintMessage,
ReturnStatus,
};
pub const voidType = LLVMVoidType;
extern fn LLVMVoidType() *const TypeRef;
pub const getEnumAttributeKindForName = LLVMGetEnumAttributeKindForName;
extern fn LLVMGetEnumAttributeKindForName(Name: [*]const u8, SLen: usize) c_uint;
pub const AttributeRef = opaque {};
pub const ContextRef = opaque {
pub const createEnumAttribute = LLVMCreateEnumAttribute;
extern fn LLVMCreateEnumAttribute(*const ContextRef, KindID: c_uint, Val: u64) *const AttributeRef;
pub const getGlobal = LLVMGetGlobalContext;
extern fn LLVMGetGlobalContext() *const ContextRef;
};
pub const intType = LLVMIntType;
extern fn LLVMIntType(NumBits: c_uint) *const TypeRef;
pub const BuilderRef = opaque {
pub const createBuilder = LLVMCreateBuilder;
extern fn LLVMCreateBuilder() *const BuilderRef;
pub const disposeBuilder = LLVMDisposeBuilder;
extern fn LLVMDisposeBuilder(Builder: *const BuilderRef) void;
pub const positionBuilderAtEnd = LLVMPositionBuilderAtEnd;
extern fn LLVMPositionBuilderAtEnd(Builder: *const BuilderRef, Block: *const BasicBlockRef) void;
pub const getInsertBlock = LLVMGetInsertBlock;
extern fn LLVMGetInsertBlock(Builder: *const BuilderRef) *const BasicBlockRef;
pub const buildCall = LLVMBuildCall;
extern fn LLVMBuildCall(*const BuilderRef, Fn: *const ValueRef, Args: ?[*]*const ValueRef, NumArgs: c_uint, Name: [*:0]const u8) *const ValueRef;
pub const buildCall2 = LLVMBuildCall2;
extern fn LLVMBuildCall2(*const BuilderRef, *const TypeRef, Fn: *const ValueRef, Args: [*]*const ValueRef, NumArgs: c_uint, Name: [*:0]const u8) *const ValueRef;
pub const buildRetVoid = LLVMBuildRetVoid;
extern fn LLVMBuildRetVoid(*const BuilderRef) *const ValueRef;
pub const buildUnreachable = LLVMBuildUnreachable;
extern fn LLVMBuildUnreachable(*const BuilderRef) *const ValueRef;
pub const buildAlloca = LLVMBuildAlloca;
extern fn LLVMBuildAlloca(*const BuilderRef, Ty: *const TypeRef, Name: [*:0]const u8) *const ValueRef;
};
pub const BasicBlockRef = opaque {
pub const deleteBasicBlock = LLVMDeleteBasicBlock;
extern fn LLVMDeleteBasicBlock(BB: *const BasicBlockRef) void;
};
pub const TargetMachineRef = opaque {
pub const createTargetMachine = LLVMCreateTargetMachine;
extern fn LLVMCreateTargetMachine(
T: *const TargetRef,
Triple: [*:0]const u8,
CPU: [*:0]const u8,
Features: [*:0]const u8,
Level: CodeGenOptLevel,
Reloc: RelocMode,
CodeModel: CodeMode,
) *const TargetMachineRef;
pub const disposeTargetMachine = LLVMDisposeTargetMachine;
extern fn LLVMDisposeTargetMachine(T: *const TargetMachineRef) void;
pub const emitToFile = LLVMTargetMachineEmitToFile;
extern fn LLVMTargetMachineEmitToFile(*const TargetMachineRef, M: *const ModuleRef, Filename: [*:0]const u8, codegen: CodeGenFileType, ErrorMessage: *[*:0]const u8) LLVMBool;
};
pub const CodeMode = extern enum {
Default,
JITDefault,
Tiny,
Small,
Kernel,
Medium,
Large,
};
pub const CodeGenOptLevel = extern enum {
None,
Less,
Default,
Aggressive,
};
pub const RelocMode = extern enum {
Default,
Static,
PIC,
DynamicNoPic,
ROPI,
RWPI,
ROPI_RWPI,
};
pub const CodeGenFileType = extern enum {
AssemblyFile,
ObjectFile,
};
pub const TargetRef = opaque {
pub const getTargetFromTriple = LLVMGetTargetFromTriple;
extern fn LLVMGetTargetFromTriple(Triple: [*:0]const u8, T: **const TargetRef, ErrorMessage: *[*:0]const u8) LLVMBool;
};
extern fn LLVMInitializeAArch64TargetInfo() void;
extern fn LLVMInitializeAMDGPUTargetInfo() void;
extern fn LLVMInitializeARMTargetInfo() void;
extern fn LLVMInitializeAVRTargetInfo() void;
extern fn LLVMInitializeBPFTargetInfo() void;
extern fn LLVMInitializeHexagonTargetInfo() void;
extern fn LLVMInitializeLanaiTargetInfo() void;
extern fn LLVMInitializeMipsTargetInfo() void;
extern fn LLVMInitializeMSP430TargetInfo() void;
extern fn LLVMInitializeNVPTXTargetInfo() void;
extern fn LLVMInitializePowerPCTargetInfo() void;
extern fn LLVMInitializeRISCVTargetInfo() void;
extern fn LLVMInitializeSparcTargetInfo() void;
extern fn LLVMInitializeSystemZTargetInfo() void;
extern fn LLVMInitializeWebAssemblyTargetInfo() void;
extern fn LLVMInitializeX86TargetInfo() void;
extern fn LLVMInitializeXCoreTargetInfo() void;
extern fn LLVMInitializeAArch64Target() void;
extern fn LLVMInitializeAMDGPUTarget() void;
extern fn LLVMInitializeARMTarget() void;
extern fn LLVMInitializeAVRTarget() void;
extern fn LLVMInitializeBPFTarget() void;
extern fn LLVMInitializeHexagonTarget() void;
extern fn LLVMInitializeLanaiTarget() void;
extern fn LLVMInitializeMipsTarget() void;
extern fn LLVMInitializeMSP430Target() void;
extern fn LLVMInitializeNVPTXTarget() void;
extern fn LLVMInitializePowerPCTarget() void;
extern fn LLVMInitializeRISCVTarget() void;
extern fn LLVMInitializeSparcTarget() void;
extern fn LLVMInitializeSystemZTarget() void;
extern fn LLVMInitializeWebAssemblyTarget() void;
extern fn LLVMInitializeX86Target() void;
extern fn LLVMInitializeXCoreTarget() void;
extern fn LLVMInitializeAArch64TargetMC() void;
extern fn LLVMInitializeAMDGPUTargetMC() void;
extern fn LLVMInitializeARMTargetMC() void;
extern fn LLVMInitializeAVRTargetMC() void;
extern fn LLVMInitializeBPFTargetMC() void;
extern fn LLVMInitializeHexagonTargetMC() void;
extern fn LLVMInitializeLanaiTargetMC() void;
extern fn LLVMInitializeMipsTargetMC() void;
extern fn LLVMInitializeMSP430TargetMC() void;
extern fn LLVMInitializeNVPTXTargetMC() void;
extern fn LLVMInitializePowerPCTargetMC() void;
extern fn LLVMInitializeRISCVTargetMC() void;
extern fn LLVMInitializeSparcTargetMC() void;
extern fn LLVMInitializeSystemZTargetMC() void;
extern fn LLVMInitializeWebAssemblyTargetMC() void;
extern fn LLVMInitializeX86TargetMC() void;
extern fn LLVMInitializeXCoreTargetMC() void;
extern fn LLVMInitializeAArch64AsmPrinter() void;
extern fn LLVMInitializeAMDGPUAsmPrinter() void;
extern fn LLVMInitializeARMAsmPrinter() void;
extern fn LLVMInitializeAVRAsmPrinter() void;
extern fn LLVMInitializeBPFAsmPrinter() void;
extern fn LLVMInitializeHexagonAsmPrinter() void;
extern fn LLVMInitializeLanaiAsmPrinter() void;
extern fn LLVMInitializeMipsAsmPrinter() void;
extern fn LLVMInitializeMSP430AsmPrinter() void;
extern fn LLVMInitializeNVPTXAsmPrinter() void;
extern fn LLVMInitializePowerPCAsmPrinter() void;
extern fn LLVMInitializeRISCVAsmPrinter() void;
extern fn LLVMInitializeSparcAsmPrinter() void;
extern fn LLVMInitializeSystemZAsmPrinter() void;
extern fn LLVMInitializeWebAssemblyAsmPrinter() void;
extern fn LLVMInitializeX86AsmPrinter() void;
extern fn LLVMInitializeXCoreAsmPrinter() void;
extern fn LLVMInitializeAArch64AsmParser() void;
extern fn LLVMInitializeAMDGPUAsmParser() void;
extern fn LLVMInitializeARMAsmParser() void;
extern fn LLVMInitializeAVRAsmParser() void;
extern fn LLVMInitializeBPFAsmParser() void;
extern fn LLVMInitializeHexagonAsmParser() void;
extern fn LLVMInitializeLanaiAsmParser() void;
extern fn LLVMInitializeMipsAsmParser() void;
extern fn LLVMInitializeMSP430AsmParser() void;
extern fn LLVMInitializePowerPCAsmParser() void;
extern fn LLVMInitializeRISCVAsmParser() void;
extern fn LLVMInitializeSparcAsmParser() void;
extern fn LLVMInitializeSystemZAsmParser() void;
extern fn LLVMInitializeWebAssemblyAsmParser() void;
extern fn LLVMInitializeX86AsmParser() void;
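// The LLVMInitializeAll* helpers are static inline functions in the LLVM-C headers (not exported
// symbols), so they are re-implemented here by calling each per-target initializer directly.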
pub const initializeAllTargetInfos = LLVMInitializeAllTargetInfos;
fn LLVMInitializeAllTargetInfos() callconv(.C) void {
LLVMInitializeAArch64TargetInfo();
LLVMInitializeAMDGPUTargetInfo();
LLVMInitializeARMTargetInfo();
LLVMInitializeAVRTargetInfo();
LLVMInitializeBPFTargetInfo();
LLVMInitializeHexagonTargetInfo();
LLVMInitializeLanaiTargetInfo();
LLVMInitializeMipsTargetInfo();
LLVMInitializeMSP430TargetInfo();
LLVMInitializeNVPTXTargetInfo();
LLVMInitializePowerPCTargetInfo();
LLVMInitializeRISCVTargetInfo();
LLVMInitializeSparcTargetInfo();
LLVMInitializeSystemZTargetInfo();
LLVMInitializeWebAssemblyTargetInfo();
LLVMInitializeX86TargetInfo();
LLVMInitializeXCoreTargetInfo();
}
pub const initializeAllTargets = LLVMInitializeAllTargets;
fn LLVMInitializeAllTargets() callconv(.C) void {
LLVMInitializeAArch64Target();
LLVMInitializeAMDGPUTarget();
LLVMInitializeARMTarget();
LLVMInitializeAVRTarget();
LLVMInitializeBPFTarget();
LLVMInitializeHexagonTarget();
LLVMInitializeLanaiTarget();
LLVMInitializeMipsTarget();
LLVMInitializeMSP430Target();
LLVMInitializeNVPTXTarget();
LLVMInitializePowerPCTarget();
LLVMInitializeRISCVTarget();
LLVMInitializeSparcTarget();
LLVMInitializeSystemZTarget();
LLVMInitializeWebAssemblyTarget();
LLVMInitializeX86Target();
LLVMInitializeXCoreTarget();
}
pub const initializeAllTargetMCs = LLVMInitializeAllTargetMCs;
fn LLVMInitializeAllTargetMCs() callconv(.C) void {
LLVMInitializeAArch64TargetMC();
LLVMInitializeAMDGPUTargetMC();
LLVMInitializeARMTargetMC();
LLVMInitializeAVRTargetMC();
LLVMInitializeBPFTargetMC();
LLVMInitializeHexagonTargetMC();
LLVMInitializeLanaiTargetMC();
LLVMInitializeMipsTargetMC();
LLVMInitializeMSP430TargetMC();
LLVMInitializeNVPTXTargetMC();
LLVMInitializePowerPCTargetMC();
LLVMInitializeRISCVTargetMC();
LLVMInitializeSparcTargetMC();
LLVMInitializeSystemZTargetMC();
LLVMInitializeWebAssemblyTargetMC();
LLVMInitializeX86TargetMC();
LLVMInitializeXCoreTargetMC();
}
pub const initializeAllAsmPrinters = LLVMInitializeAllAsmPrinters;
fn LLVMInitializeAllAsmPrinters() callconv(.C) void {
LLVMInitializeAArch64AsmPrinter();
LLVMInitializeAMDGPUAsmPrinter();
LLVMInitializeARMAsmPrinter();
LLVMInitializeAVRAsmPrinter();
LLVMInitializeBPFAsmPrinter();
LLVMInitializeHexagonAsmPrinter();
LLVMInitializeLanaiAsmPrinter();
LLVMInitializeMipsAsmPrinter();
LLVMInitializeMSP430AsmPrinter();
LLVMInitializeNVPTXAsmPrinter();
LLVMInitializePowerPCAsmPrinter();
LLVMInitializeRISCVAsmPrinter();
LLVMInitializeSparcAsmPrinter();
LLVMInitializeSystemZAsmPrinter();
LLVMInitializeWebAssemblyAsmPrinter();
LLVMInitializeX86AsmPrinter();
LLVMInitializeXCoreAsmPrinter();
}
pub const initializeAllAsmParsers = LLVMInitializeAllAsmParsers;
fn LLVMInitializeAllAsmParsers() callconv(.C) void {
LLVMInitializeAArch64AsmParser();
LLVMInitializeAMDGPUAsmParser();
LLVMInitializeARMAsmParser();
LLVMInitializeAVRAsmParser();
LLVMInitializeBPFAsmParser();
LLVMInitializeHexagonAsmParser();
LLVMInitializeLanaiAsmParser();
LLVMInitializeMipsAsmParser();
LLVMInitializeMSP430AsmParser();
LLVMInitializePowerPCAsmParser();
LLVMInitializeRISCVAsmParser();
LLVMInitializeSparcAsmParser();
LLVMInitializeSystemZAsmParser();
LLVMInitializeWebAssemblyAsmParser();
LLVMInitializeX86AsmParser();
}
extern fn ZigLLDLinkCOFF(argc: c_int, argv: [*:null]const ?[*:0]const u8, can_exit_early: bool) c_int;
extern fn ZigLLDLinkELF(argc: c_int, argv: [*:null]const ?[*:0]const u8, can_exit_early: bool) c_int;
extern fn ZigLLDLinkMachO(argc: c_int, argv: [*:null]const ?[*:0]const u8, can_exit_early: bool) c_int;
extern fn ZigLLDLinkWasm(argc: c_int, argv: [*:null]const ?[*:0]const u8, can_exit_early: bool) c_int;
pub const LinkCOFF = ZigLLDLinkCOFF;
pub const LinkELF = ZigLLDLinkELF;
pub const LinkMachO = ZigLLDLinkMachO;
pub const LinkWasm = ZigLLDLinkWasm;
pub const ObjectFormatType = extern enum(c_int) {
Unknown,
COFF,
ELF,
MachO,
Wasm,
XCOFF,
};
pub const GetHostCPUName = LLVMGetHostCPUName;
extern fn LLVMGetHostCPUName() ?[*:0]u8;
pub const GetNativeFeatures = ZigLLVMGetNativeFeatures;
extern fn ZigLLVMGetNativeFeatures() ?[*:0]u8;
pub const WriteArchive = ZigLLVMWriteArchive;
extern fn ZigLLVMWriteArchive(
archive_name: [*:0]const u8,
file_names_ptr: [*]const [*:0]const u8,
file_names_len: usize,
os_type: OSType,
) bool;
pub const OSType = extern enum(c_int) {
UnknownOS = 0,
Ananas = 1,
CloudABI = 2,
Darwin = 3,
DragonFly = 4,
FreeBSD = 5,
Fuchsia = 6,
IOS = 7,
KFreeBSD = 8,
Linux = 9,
Lv2 = 10,
MacOSX = 11,
NetBSD = 12,
OpenBSD = 13,
Solaris = 14,
Win32 = 15,
Haiku = 16,
Minix = 17,
RTEMS = 18,
NaCl = 19,
CNK = 20,
AIX = 21,
CUDA = 22,
NVCL = 23,
AMDHSA = 24,
PS4 = 25,
ELFIAMCU = 26,
TvOS = 27,
WatchOS = 28,
Mesa3D = 29,
Contiki = 30,
AMDPAL = 31,
HermitCore = 32,
Hurd = 33,
WASI = 34,
Emscripten = 35,
};
pub const ArchType = extern enum(c_int) {
UnknownArch = 0,
arm = 1,
armeb = 2,
aarch64 = 3,
aarch64_be = 4,
aarch64_32 = 5,
arc = 6,
avr = 7,
bpfel = 8,
bpfeb = 9,
hexagon = 10,
mips = 11,
mipsel = 12,
mips64 = 13,
mips64el = 14,
msp430 = 15,
ppc = 16,
ppc64 = 17,
ppc64le = 18,
r600 = 19,
amdgcn = 20,
riscv32 = 21,
riscv64 = 22,
sparc = 23,
sparcv9 = 24,
sparcel = 25,
systemz = 26,
tce = 27,
tcele = 28,
thumb = 29,
thumbeb = 30,
x86 = 31,
x86_64 = 32,
xcore = 33,
nvptx = 34,
nvptx64 = 35,
le32 = 36,
le64 = 37,
amdil = 38,
amdil64 = 39,
hsail = 40,
hsail64 = 41,
spir = 42,
spir64 = 43,
kalimba = 44,
shave = 45,
lanai = 46,
wasm32 = 47,
wasm64 = 48,
renderscript32 = 49,
renderscript64 = 50,
ve = 51,
};
pub const ParseCommandLineOptions = ZigLLVMParseCommandLineOptions;
extern fn ZigLLVMParseCommandLineOptions(argc: usize, argv: [*]const [*:0]const u8) void;
pub const WriteImportLibrary = ZigLLVMWriteImportLibrary;
extern fn ZigLLVMWriteImportLibrary(
def_path: [*:0]const u8,
arch: ArchType,
output_lib_path: [*c]const u8,
kill_at: bool,
) bool; | src/llvm_bindings.zig |
const std = @import("std");
const root = @import("root");
/// Queues a build job for the C code of Wasm3.
/// This builds a static library that depends on libc, so make sure to link that into your exe!
pub fn compile(b: *std.build.Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget, wasm3_src_root: []const u8) *std.build.LibExeObjStep {
const lib = b.addStaticLibrary("wasm3", null);
lib.setBuildMode(mode);
lib.setTarget(target);
lib.linkLibC();
lib.disable_sanitize_c = true;
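    // d_m3HasWASI enables Wasm3's built-in WASI support.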
lib.defineCMacro("d_m3HasWASI");
const src_dir = std.fs.path.join(b.allocator, &[_][]const u8{wasm3_src_root, "source"}) catch unreachable;
var src_dir_handle = std.fs.cwd().openDir(src_dir, .{.iterate = true}) catch unreachable;
defer src_dir_handle.close();
lib.c_std = .C99;
const cflags = [_][]const u8 {
"-Wall", "-Wextra", "-Wparentheses", "-Wundef", "-Wpointer-arith", "-Wstrict-aliasing=2",
"-Werror=implicit-function-declaration",
"-Wno-unused-function", "-Wno-unused-variable", "-Wno-unused-parameter", "-Wno-missing-field-initializers"
};
    var core_src_file: ?[]const u8 = null;
var iter = src_dir_handle.iterate();
while(iter.next() catch unreachable) |ent| {
if(ent.kind == .File) {
if(std.ascii.endsWithIgnoreCase(ent.name, ".c")) {
const path = std.fs.path.join(b.allocator, &[_][]const u8{src_dir, ent.name}) catch unreachable;
if(std.ascii.eqlIgnoreCase(ent.name, "m3_core.c")) {
core_src_file = path;
continue;
}
lib.addCSourceFile(path, &cflags);
}
}
}
std.debug.assert(core_src_file != null);
{ // Patch source files.
// wasm3 has a built-in limit for what it thinks should be the maximum sane length for a utf-8 string
// It's 2000 characters, which seems reasonable enough.
//
// Here's the thing - C++ is not reasonable.
// libc++'s rtti symbols exceed four-freakin'-thousand characters sometimes.
// In order to support compiled C++ programs, we patch this value.
//
// It's kind of ugly, but it works!
var build_root_handle = std.fs.cwd().openDir(wasm3_src_root, .{}) catch unreachable;
defer build_root_handle.close();
std.fs.cwd().copyFile(core_src_file.?, build_root_handle, "m3_core.c", .{}) catch unreachable;
lib.addCSourceFile(std.fs.path.join(b.allocator, &[_][]const u8{wasm3_src_root, "m3_core.c"}) catch unreachable, &cflags);
build_root_handle.writeFile("m3_core.h", "#include <m3_core.h>\n" ++
"#undef d_m3MaxSaneUtf8Length\n" ++
"#define d_m3MaxSaneUtf8Length 10000\n") catch unreachable;
}
lib.addIncludeDir(src_dir);
lib.addCSourceFile(std.fs.path.join(b.allocator, &[_][]const u8{
std.fs.path.dirname(@src().file).?,
"src", "wasm3_extra.c"
}) catch unreachable, &cflags);
return lib;
}
/// Compiles Wasm3 and links it into the provided exe.
/// If you use this API, you do not need to also use the compile() function.
pub fn addTo(exe: *std.build.LibExeObjStep, wasm3_src_root: []const u8) void {
var lib = compile(exe.builder, exe.build_mode, exe.target, wasm3_src_root);
exe.linkLibC();
exe.linkLibrary(lib);
} | submod_build_plugin.zig |
const std = @import("std");
pub const Token = struct {
tag: Tag,
loc: Loc,
pub const Loc = struct {
start: usize,
end: usize,
};
pub const keywords = std.ComptimeStringMap(Tag, .{
.{ "and", .insn_and },
.{ "add", .insn_add },
.{ "lda", .insn_lda },
.{ "sta", .insn_sta },
.{ "bun", .insn_bun },
.{ "bsa", .insn_bsa },
.{ "isz", .insn_isz },
.{ "cla", .insn_cla },
.{ "cle", .insn_cle },
.{ "cma", .insn_cma },
.{ "cme", .insn_cme },
.{ "cir", .insn_cir },
.{ "cil", .insn_cil },
.{ "inc", .insn_inc },
.{ "spa", .insn_spa },
.{ "sna", .insn_sna },
.{ "sza", .insn_sza },
.{ "sze", .insn_sze },
.{ "hlt", .insn_hlt },
.{ "inp", .insn_inp },
.{ "out", .insn_out },
.{ "ski", .insn_ski },
.{ "sko", .insn_sko },
.{ "ion", .insn_ion },
.{ "iof", .insn_iof },
.{ "org", .keyword_org },
.{ "dat", .keyword_dat },
});
pub fn getKeyword(bytes: []const u8) ?Tag {
return keywords.get(bytes);
}
pub const Tag = enum {
invalid,
identifier,
label,
l_bracket,
r_bracket,
number,
eof,
insn_and,
insn_add,
insn_lda,
insn_sta,
insn_bun,
insn_bsa,
insn_isz,
insn_cla,
insn_cle,
insn_cma,
insn_cme,
insn_cir,
insn_cil,
insn_inc,
insn_spa,
insn_sna,
insn_sza,
insn_sze,
insn_hlt,
insn_inp,
insn_out,
insn_ski,
insn_sko,
insn_ion,
insn_iof,
keyword_dat,
keyword_org,
pub fn isDirective(self: *@This()) bool {
return switch (self.*) {
.invalid, .identifier, .label, .l_bracket, .r_bracket, .number, .eof => false,
else => true,
};
}
};
pub fn isDirective(self: *@This()) bool {
return self.tag.isDirective();
}
};
pub const Lexer = struct {
buffer: [:0]const u8,
index: usize,
const Self = @This();
pub fn dump(self: *Self, token: *const Token) void {
std.log.info("{s} \"{s}\"", .{ @tagName(token.tag), self.buffer[token.loc.start..token.loc.end] });
}
pub fn init(buffer: [:0]const u8) Self {
return Lexer{
.buffer = buffer,
.index = 0,
};
}
pub fn getSlice(self: *const Self, token: *const Token) []const u8 {
return self.buffer[token.loc.start..token.loc.end];
}
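    // Number literals: decimal, octal (leading 0), binary (0b...) and hex (0x...), optionally negative.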
const State = enum {
start,
identifier,
num_maybe_neg,
num_zero,
num_maybe_bin,
num_bin,
num_maybe_hex,
num_hex,
num_oct,
num_dec,
comment,
};
pub fn next(self: *Self) Token {
        // start at the beginning
var state: State = .start;
// default result
var result = Token{
.tag = .eof,
.loc = .{
.start = self.index,
.end = undefined,
},
};
// keep looping until the state machine decides we're done and breaks
while (true) : (self.index += 1) {
// the character we're looking at
const c = self.buffer[self.index];
// switch on the current state
switch (state) {
// and then switch on the char for each state
.start => switch (c) {
0 => break, // end of file
' ', '\n', '\t', '\r' => {
result.loc.start += 1; // move past whitespace
},
'-' => {
state = .num_maybe_neg;
},
'0' => {
state = .num_zero;
},
'1'...'9' => {
state = .num_dec;
},
'a'...'z', 'A'...'Z' => {
// no idea if this is an identifier, an instruction,
// a keyword, or a label at this point.
state = .identifier;
},
'[' => {
result.tag = .l_bracket;
                        self.index += 1; // include the bracket in the token: breaking skips the loop's index increment
break;
},
']' => {
result.tag = .r_bracket;
self.index += 1;
break;
},
';' => {
state = .comment;
},
else => {
result.tag = .invalid;
result.loc.end = self.index;
self.index += 1;
return result;
},
},
.num_maybe_neg => switch (c) {
'0' => {
state = .num_zero;
},
'1'...'9' => {
state = .num_dec;
},
else => {
result.tag = .invalid;
result.loc.end = self.index;
return result;
},
},
.num_zero => switch (c) {
'0'...'7' => {
state = .num_oct;
},
'b' => {
state = .num_maybe_bin;
},
'x' => {
state = .num_maybe_hex;
},
'8'...'9', 'a', 'c'...'w', 'y'...'z', 'A'...'Z', '_' => {
result.tag = .invalid;
result.loc.end = self.index;
return result;
},
else => {
result.tag = .number;
break;
},
},
.num_oct => switch (c) {
'0'...'7' => {}, // continue
'8'...'9', 'a'...'z', 'A'...'Z', '_' => {
result.tag = .invalid;
result.loc.end = self.index;
return result;
},
else => {
result.tag = .number;
break;
},
},
.num_maybe_bin => switch (c) {
'0', '1' => {
state = .num_bin;
},
else => {
result.tag = .invalid;
result.loc.end = self.index;
return result;
},
},
.num_bin => switch (c) {
'0', '1' => {}, // continue
'2'...'9', 'a'...'z', 'A'...'Z', '_' => {
result.tag = .invalid;
result.loc.end = self.index;
return result;
},
else => {
result.tag = .number;
break;
},
},
.num_maybe_hex => switch (c) {
'0'...'9', 'a'...'f', 'A'...'F' => {
state = .num_hex;
},
else => {
result.tag = .invalid;
result.loc.end = self.index;
return result;
},
},
.num_hex => switch (c) {
'0'...'9', 'a'...'f', 'A'...'F' => {}, // continue
'g'...'z', 'G'...'Z', '_' => {
result.tag = .invalid;
result.loc.end = self.index;
return result;
},
else => {
result.tag = .number;
break;
},
},
.num_dec => switch (c) {
'0'...'9' => {}, // continue
'a'...'z', 'A'...'Z', '_' => {
result.tag = .invalid;
result.loc.end = self.index;
return result;
},
else => {
result.tag = .number;
break;
},
},
.identifier => switch (c) {
'a'...'z', 'A'...'Z', '0'...'9', '_' => {},
':' => {
result.tag = .label;
self.index += 1;
break;
},
else => {
if (Token.getKeyword(self.buffer[result.loc.start..self.index])) |ident| {
result.tag = ident;
} else {
result.tag = .identifier;
}
break;
},
},
.comment => switch (c) {
'\n' => {
result.loc.start = self.index + 1;
state = .start;
},
else => {},
},
}
}
if (result.tag == .eof) {
result.loc.start = self.index;
}
result.loc.end = self.index;
return result;
}
};
test "lexer - label" {
try testLex("label1: ident", &.{ .label, .identifier });
}
test "lexer - numbers" {
try testLex("-i", &.{ .invalid, .identifier });
try testLex("0xx", &.{ .invalid, .identifier });
try testLex("0b12", &.{ .invalid, .number });
try testLex("08", &.{ .invalid, .number });
try testLex("-", &.{.invalid});
try testLex("0x", &.{.invalid});
try testLex("0b", &.{.invalid});
try testLex("0", &.{.number});
}
test "lexer - keywords" {
try testLex("dat guy", &.{ .keyword_dat, .identifier });
}
// taken verbatim from the Zig standard library source
fn testLex(source: [:0]const u8, expected_tokens: []const Token.Tag) !void {
var lexer = Lexer.init(source);
for (expected_tokens) |expected_token_id| {
const token = lexer.next();
if (token.tag != expected_token_id) {
std.debug.panic("expected {s}, found {s}\n", .{
@tagName(expected_token_id), @tagName(token.tag),
});
}
}
const last_token = lexer.next();
try std.testing.expectEqual(Token.Tag.eof, last_token.tag);
try std.testing.expectEqual(source.len, last_token.loc.start);
} | src/lexer.zig |
const std = @import("std");
const ast = @import("ast.zig");
const DFS = @import("utils.zig").DepthFirstIterator;
const log = std.log;
pub const Language = enum {
Unknown = 0,
Python,
R,
Julia,
C,
Cpp,
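    /// Case-insensitively maps a code block's language name (e.g. "py", "Python", "c++") to a Language.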
pub inline fn match(language_name: []const u8) Language {
var result: Language = undefined;
if (language_name.len > 0) {
var name_buf: [25]u8 = undefined;
std.mem.copy(u8, name_buf[0..], language_name);
const name = name_buf[0..language_name.len];
// convert name to all lowercase
for (name) |*byte| {
byte.* = std.ascii.toLower(byte.*);
}
if (name.len == 1) {
if (name[0] == 'r') {
result = .R;
} else if (name[0] == 'c') {
result = .C;
}
} else if (std.mem.eql(u8, name, "python") or std.mem.eql(u8, name, "py")) {
result = .Python;
} else if (std.mem.eql(u8, name, "jl") or std.mem.eql(u8, name, "julia")) {
result = .Julia;
} else if (std.mem.eql(u8, name, "c++") or std.mem.eql(u8, name, "cpp")) {
// TODO cpp compiler version
result = .Cpp;
} else {
result = .Unknown;
}
} else {
result = .Unknown;
}
return result;
}
};
const python_helper = @embedFile("./lang_helpers/python.py");
const r_helper = @embedFile("./lang_helpers/R.r");
pub const CodeRunner = struct {
root_node: *ast.Node,
code_datas: std.ArrayList(*ast.Node.CodeData),
lang: Language,
merged_code: std.ArrayList(u8),
runner: *std.ChildProcess,
out_buf: std.heap.ArenaAllocator,
// TODO @CleanUp is a type even needed for this? we could just use a function that returns
// a struct with a deinit method?
pub fn init(allocator: *std.mem.Allocator, language: Language, root_node: *ast.Node) !CodeRunner {
var code_runner = CodeRunner{
.root_node = root_node,
.code_datas = std.ArrayList(*ast.Node.CodeData).init(allocator),
.lang = language,
.merged_code = std.ArrayList(u8).init(allocator),
.runner = undefined,
.out_buf = std.heap.ArenaAllocator.init(allocator),
};
return code_runner;
}
pub fn deinit(self: *CodeRunner) void {
self.code_datas.deinit();
self.merged_code.deinit();
self.out_buf.deinit();
}
fn gather_code_blocks(self: *CodeRunner, root_node: *ast.Node) !void {
var dfs = DFS(ast.Node, true).init(root_node);
switch(self.lang) {
.Python => try self.merged_code.appendSlice(python_helper),
.R => try self.merged_code.appendSlice(r_helper),
else => {},
}
// TODO make self.lang comptime so these switches won't be at runtime
// or as comptime proc argument
const lang = self.lang;
while (dfs.next()) |node_info| {
if (!node_info.is_end)
continue;
switch (node_info.data.data) {
.FencedCode, .CodeSpan => |*data| {
if (data.language == lang and data.run) {
try self.code_datas.append(data);
try self.merged_code.appendSlice(data.code);
switch (self.lang) {
.Python => {
// runtime exceptions are written to stderr using our system
                                // by registering our own sys.excepthook
try self.merged_code.appendSlice(
\\
\\sys.stdout.real_flush()
\\sys.stderr.real_flush()
\\
);
},
.R => {
// using sink(connection) to divert stdout or stderr output
// to the passed connection
//
// sink() or sink(file = NULL) ends the last diversion
// (there is a diversion stack) of the specified type
// but calling sink twice for our stdout+err diversions warns
// about there not being a sink to remove
// can't reset the stdout_buf vector (functional language YAY)
// and re-assigning errors since there's a binding to out_tcon
// -> close and then re-open connection
// the connection needs to be closed before sending the buf contents
// to stdout otherwise some content might not be written to the
// buf yet, since it only flushes once a \n is reached
try self.merged_code.appendSlice(
\\
\\sink()
\\sink(type="message")
\\close(out_tcon)
\\close(err_tcon)
\\write_to_con_with_length(stdout_buf, stdout())
\\write_to_con_with_length(stderr_buf, stderr())
\\stdout_buf <- vector("character")
\\stderr_buf <- vector("character")
\\out_tcon <- textConnection('stdout_buf', 'wr', local = TRUE)
\\err_tcon <- textConnection('stderr_buf', 'wr', local = TRUE)
\\sink(out_tcon)
\\sink(err_tcon, type="message")
\\
);
},
else => {},
}
}
},
else => {},
}
}
switch(self.lang) {
.Python => try self.merged_code.appendSlice(
\\
\\sys.exit(0)
),
.R => {
// close textconnections
try self.merged_code.appendSlice(
\\
\\sink()
\\sink(type="message")
\\close(err_tcon)
\\close(out_tcon)
);
},
else => {},
}
log.debug(
"Lang: {s} Code generated: \n{s}\n---\n", .{ @tagName(self.lang), self.merged_code.items });
}
pub fn run(self: *CodeRunner) !void {
const allocator = &self.out_buf.allocator;
const cmd = switch (self.lang) {
.Python => &[_][]const u8{"python"},
.R => &[_][]const u8{"R", "--save", "--quiet", "--no-echo"},
else => return,
};
self.runner = try std.ChildProcess.init(cmd, allocator);
self.runner.stdin_behavior = .Pipe;
self.runner.stdout_behavior = .Pipe;
self.runner.stderr_behavior = .Pipe;
        // order matters: the behaviors above must be set before spawn(), which initializes the stdin/stdout/stderr pipes
try self.runner.spawn();
        // NOTE: afaict there are no better ways to determine if an executable is on the PATH
        // than to try to run it, since on Windows checking all dirs on PATH is not sufficient
        // since there might be registry entries that influence it etc.
        // (which std.ChildProcess.spawn isn't even doing though, but this is still probably the
        // most sane way)
// That's why we delay the work of merging to code etc. till after we spawned the
// process
try self.gather_code_blocks(self.root_node);
// write program code to stdin
try self.runner.stdin.?.writer().writeAll(self.merged_code.items);
self.runner.stdin.?.close();
// has to be set to null otherwise the ChildProcess tries to close it again
// and hits unreachable code
self.runner.stdin = null;
log.debug("Done writing to stdin!\n", .{});
// might deadlock due to https://github.com/ziglang/zig/issues/6343
// weirdly only WindowsTerminal seems to have a problem with it and stops
// responding, cmd.exe works fine as does running it in a debugger
const stdout = try self.runner.stdout.?.reader().readAllAlloc(allocator, 10 * 1024 * 1024);
errdefer allocator.free(stdout);
log.debug("Done reading from stdout!\n", .{});
const stderr = try self.runner.stderr.?.reader().readAllAlloc(allocator, 10 * 1024 * 1024);
errdefer allocator.free(stderr);
log.debug("Done reading from stderr!\n", .{});
// NOTE: on POSIX the availability of the executable is apparently not checked
// when spawning the process, it's delayed till we .wait() on it
const term = try self.runner.wait();
log.debug("Done waiting on code execution child: {s} result {any}!\n",
.{ @tagName(self.lang), term });
const success = switch (term) {
.Exited => |code| if (code == 0) true else false,
.Stopped, .Signal, .Unknown => false,
};
if (success) {
switch (self.lang) {
.R, .Python => {
self.assign_output_to_nodes_text(stdout, false);
self.assign_output_to_nodes_text(stderr, true);
},
else => {
self.assign_output_to_nodes_bin(stdout, false);
self.assign_output_to_nodes_bin(stderr, true);
},
}
} else {
std.debug.print("ERR: {s}\n", .{ stderr });
// TODO should other stdout/err chunks also be printed for printf debugging?
// execution failed, output the process' stderr
var err_chunk: []const u8 = "No error message captured!";
switch (self.lang) {
.R, .Python => {
var iter = ChunkTextIterator.init(stderr, 0);
// assume error msg is contained in last chunk of stderr
while (iter.next()) |chunk| { err_chunk = chunk; }
},
else => {},
}
log.err("Code execution failed for language {s}:\n{s}",
.{ @tagName(self.lang), err_chunk });
}
}
fn assign_output_to_nodes_bin(self: *CodeRunner, bytes: []const u8, comptime is_err: bool) void {
var iter = ChunkBinIterator.init(bytes, 0);
var code_chunk: u16 = 0;
while (iter.next()) |chunk| {
log.debug("\nCHUNK OUT:\n'''{s}'''\n----------\n", .{ chunk });
if (!is_err) {
if (chunk.len > 0)
self.code_datas.items[code_chunk].stdout = chunk;
} else {
if (chunk.len > 0)
self.code_datas.items[code_chunk].stderr = chunk;
}
code_chunk += 1;
}
}
fn assign_output_to_nodes_text(self: *CodeRunner, bytes: []const u8, comptime is_err: bool) void {
var iter = ChunkTextIterator.init(bytes, 0);
var code_chunk: u16 = 0;
while (iter.next()) |chunk| {
log.debug("\nCHUNK OUT:\n'''{s}'''\n----------\n", .{ chunk });
if (!is_err) {
if (chunk.len > 0)
self.code_datas.items[code_chunk].stdout = chunk;
} else {
if (chunk.len > 0)
self.code_datas.items[code_chunk].stderr = chunk;
}
code_chunk += 1;
}
}
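    // Captured stdout/stderr is a stream of length-prefixed chunks, one per executed code block:
    // the text protocol prefixes each chunk with its byte length in ASCII digits followed by ';',
    // the binary protocol with a native-endian u32.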
pub const ChunkTextIterator = struct {
bytes: []const u8,
offset: u32,
pub fn init(bytes: []const u8, offset: u32) @This() {
return .{
.bytes = bytes,
.offset = offset,
};
}
// @Compiler
// return [] for chunk len 0 and null on iteration end
// or null for len 0 and return error for iteration end?
// returning error is extremely weird to use since
// you can't just use the |payload| capture syntax
pub fn next(self: *@This()) ?[]const u8 {
var i: u32 = self.offset;
if (i >= self.bytes.len) return null;
// chunk out length as text followed by ';'
const text_len_start = i;
while (self.bytes[i] != ';') : ( i += 1 ) {}
const text_len_end = i;
i += 1; // skip ;
const chunk_out_len = std.fmt.parseUnsigned(
u32, self.bytes[text_len_start..text_len_end], 10) catch unreachable;
const chunk_out = self.bytes[i..i + chunk_out_len];
i += chunk_out_len;
self.offset = i;
return chunk_out;
}
};
pub const ChunkBinIterator = struct {
bytes: []const u8,
offset: u32,
pub fn init(bytes: []const u8, offset: u32) @This() {
return .{
.bytes = bytes,
.offset = offset,
};
}
pub fn next(self: *@This()) ?[]const u8 {
var i: u32 = self.offset;
if (i >= self.bytes.len) return null;
// first 4 bytes that contain chunk out length
// const chunk_out_len = std.mem.readIntNative(u32, @ptrCast(*const [4]u8, &stdout[i]));
// const chunk_out_len = std.mem.bytesToValue(u32, @ptrCast(*const [4]u8, &stdout[i]));
            // bytes slice has alignment of 1, casting to *u32 means changing alignment to 4
// but only higher aligments coerce to lower ones so we have to use an alignCast
// (which has a safety check in debug builds)
// below errors out with "incorrect alignment" (unsure) since the []u8 is 1-aligned
// and u32 is 4-aligned
// const chunk_out_len = @ptrCast(*const u32, @alignCast(@alignOf(u32), &stdout[i])).*;
// just specify that the *u32 is 1-aligned
const chunk_out_len = @ptrCast(*align(1)const u32, &self.bytes[i]).*;
const chunk_out = self.bytes[i+4..i+4+chunk_out_len];
i += 4 + chunk_out_len;
self.offset = i;
return chunk_out;
}
};
}; | src/code_chunks.zig |
const sf = @import("../sfml.zig");
pub const SoundBuffer = struct {
const Self = @This();
// Constructor/destructor
/// Loads music from a file
pub fn initFromFile(path: [:0]const u8) !Self {
var sound = sf.c.sfSoundBuffer_createFromFile(path);
if (sound == null)
return sf.Error.resourceLoadingError;
return Self{ .ptr = sound.? };
}
/// Creates a sound buffer from sample data
pub fn initFromSamples(samples: []const i16, channel_count: usize, sample_rate: usize) !Self {
var sound = sf.c.sfSoundBuffer_createFromSamples(@ptrCast([*c]const c_short, samples.ptr), samples.len, @intCast(c_uint, channel_count), @intCast(c_uint, sample_rate));
if (sound == null)
return sf.Error.resourceLoadingError;
return Self{ .ptr = sound.? };
}
pub const initFromMemory = @compileError("Function is not implemented yet.");
pub const initFromStream = @compileError("Function is not implemented yet.");
/// Destroys this music object
pub fn deinit(self: Self) void {
sf.c.sfSoundBuffer_destroy(self.ptr);
}
// Getters / Setters
/// Gets the duration of the sound
pub fn getDuration(self: Self) sf.Time {
return sf.Time.fromCSFML(sf.c.sfSoundBuffer_getDuration(self.ptr));
}
/// Gets the sample count of this sound
pub fn getSampleCount(self: Self) usize {
return @intCast(usize, sf.c.sfSoundBuffer_getSampleCount(self.ptr));
}
/// Gets the sample rate of this sound (n° of samples per second, often 44100)
pub fn getSampleRate(self: Self) usize {
return @intCast(usize, sf.c.sfSoundBuffer_getSampleRate(self.ptr));
}
/// Gets the channel count (2 is stereo for instance)
pub fn getChannelCount(self: Self) usize {
return @intCast(usize, sf.c.sfSoundBuffer_getChannelCount(self.ptr));
}
// Misc
/// Save the sound buffer to an audio file
pub fn saveToFile(self: Self, path: [:0]const u8) !void {
if (sf.c.sfSoundBuffer_saveToFile(self.ptr, path) != 1)
return sf.Error.savingInFileFailed;
}
    /// Pointer to the csfml sound buffer
ptr: *sf.c.sfSoundBuffer,
};
test "sound buffer: sane getter and setters" {
const std = @import("std");
const tst = std.testing;
const allocator = std.heap.page_allocator;
    var samples = try allocator.alloc(i16, 44100 * 3);
    defer allocator.free(samples);
    std.mem.set(i16, samples, 0); // zero the buffer so CSFML never reads uninitialized sample memory
var buffer = try sf.SoundBuffer.initFromSamples(samples, 1, 44100);
defer buffer.deinit();
tst.expectWithinMargin(@as(f32, 3), buffer.getDuration().asSeconds(), 0.001);
tst.expectEqual(@as(usize, 44100 * 3), buffer.getSampleCount());
tst.expectEqual(@as(usize, 44100), buffer.getSampleRate());
tst.expectEqual(@as(usize, 1), buffer.getChannelCount());
} | src/sfml/audio/sound_buffer.zig |
const std = @import("std");
const pkgs = struct {
const tvg = std.build.Pkg{
.name = "tvg",
.path = "src/lib/tvg.zig",
};
const args = std.build.Pkg{
.name = "args",
.path = "lib/zig-args/args.zig",
};
};
pub fn build(b: *std.build.Builder) !void {
const target = b.standardTargetOptions(.{});
const mode = b.standardReleaseOptions();
const enable_dotnet = b.option(bool, "enable-dotnet", "Enables building the .NET based tools.") orelse false;
if (enable_dotnet) {
const svg2cs = b.addSystemCommand(&[_][]const u8{
"mcs",
"/out:zig-cache/bin/svg2tvg.exe",
"src/tools/svg2tvg.cs",
});
b.getInstallStep().dependOn(&svg2cs.step);
}
const render = b.addExecutable("tvg-render", "src/tools/render.zig");
render.setBuildMode(mode);
render.setTarget(target);
render.addPackage(pkgs.tvg);
render.addPackage(pkgs.args);
render.install();
const text = b.addExecutable("tvg-text", "src/tools/text.zig");
text.setBuildMode(mode);
text.setTarget(target);
text.addPackage(pkgs.tvg);
text.addPackage(pkgs.args);
text.install();
const ground_truth_generator = b.addExecutable("ground-truth-generator", "src/data/ground-truth.zig");
ground_truth_generator.setBuildMode(mode);
ground_truth_generator.setTarget(target);
ground_truth_generator.addPackage(pkgs.tvg);
ground_truth_generator.install();
const generate_ground_truth = ground_truth_generator.run();
const gen_gt_step = b.step("generate", "Regenerates the ground truth data.");
gen_gt_step.dependOn(&generate_ground_truth.step);
const files = [_][]const u8{
"app_menu.tvg", "shield.tvg", "workspace.tvg", "workspace_add.tvg", "feature-showcase.tvg",
};
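    // For each example: render the .tvg to a .ppm, dump a textual .tvgt via tvg-text,
    // then turn the .ppm into a .png with an ImageMagick-style `convert` call, all
    // hooked into the "generate" step defined above.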
inline for (files) |file| {
const tvg_conversion = render.run();
tvg_conversion.addArg(file);
tvg_conversion.addArg("--output");
tvg_conversion.addArg(file[0 .. file.len - 3] ++ "ppm");
tvg_conversion.cwd = "examples";
const tvgt_conversion = text.run();
tvgt_conversion.addArg(file);
tvgt_conversion.addArg("--output");
tvgt_conversion.addArg(file[0 .. file.len - 3] ++ "tvgt");
tvgt_conversion.cwd = "examples";
const png_conversion = b.addSystemCommand(&[_][]const u8{
"convert",
"-strip",
file[0 .. file.len - 3] ++ "ppm",
file[0 .. file.len - 3] ++ "png",
});
png_conversion.cwd = "examples";
png_conversion.step.dependOn(&tvg_conversion.step);
gen_gt_step.dependOn(&tvgt_conversion.step);
gen_gt_step.dependOn(&png_conversion.step);
}
const tvg_tests = b.addTest("src/lib/tvg.zig");
tvg_tests.addPackage(std.build.Pkg{
.name = "ground-truth",
.path = "src/data/ground-truth.zig",
.dependencies = &[_]std.build.Pkg{
pkgs.tvg,
},
});
const test_step = b.step("test", "Runs all tests");
test_step.dependOn(&tvg_tests.step);
} | build.zig |
const std = @import("std");
const tracy = @import("tracy");
const Tree = @import("fetch-rewards-be-coding-exercise").transaction_tree.Tree;
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = &gpa.allocator;
const t = tracy.trace(@src());
defer t.end();
var tree = Tree.init(allocator);
defer tree.deinit();
var default_prng = std.rand.DefaultPrng.init(13500266781291126803);
const random = &default_prng.random;
var transactions = std.ArrayList(i128).init(allocator);
defer transactions.deinit();
var running_balance = std.ArrayList(i128).init(allocator);
defer running_balance.deinit();
// Generate random transactions
{
const t1 = tracy.trace(@src());
defer t1.end();
var balance: i128 = 0;
var i: usize = 0;
while (i < 1_000_000) : (i += 1) {
const amount = random.int(i64);
try transactions.append(amount);
try running_balance.append(balance);
balance += amount;
}
try running_balance.append(balance);
}
// Put transactions into tree in random order
{
const t1 = tracy.trace(@src());
defer t1.end();
var shuffled_transaction_indices = try std.ArrayList(usize).initCapacity(allocator, transactions.items.len);
defer shuffled_transaction_indices.deinit();
for (transactions.items) |_, i| {
shuffled_transaction_indices.appendAssumeCapacity(i);
}
random.shuffle(usize, shuffled_transaction_indices.items);
for (shuffled_transaction_indices.items) |txIdx| {
const amount = transactions.items[txIdx];
try tree.putNoClobber(@intCast(i64, txIdx), amount);
}
}
// Ensure that the tree matches the running balance at each index
{
const t1 = tracy.trace(@src());
defer t1.end();
for (running_balance.items) |balance, txIdx| {
try std.testing.expectEqual(balance, tree.getBalance(@intCast(i64, txIdx)));
}
}
} | benchmark/insert_random_check_balance.zig |
export fn mission0_main() noreturn {
Bss.prepare();
Exceptions.prepare();
Mission.prepare();
Uart.prepare();
Timer0.prepare();
Timer1.prepare();
Timer2.prepare();
LedMatrix.prepare();
CycleActivity.prepare();
KeyboardActivity.prepare();
StatusActivity.prepare();
Mission.register(&mission1_vector_table, "turn on all leds without libraries", "mission1_turn_on_all_leds_without_libraries.zig");
Mission.register(&mission2_vector_table, "model railroad motor pwm controlled by buttons", "mission2_model_railroad_pwm.zig");
Mission.register(&mission3_vector_table, "sensors - temperature, orientation", "mission3_sensors.zig");
log("available missions:", .{});
for (Mission.missions) |*m, i| {
log("{}. {}", .{ i + 1, m.title });
}
while (true) {
CycleActivity.update();
KeyboardActivity.update();
StatusActivity.update();
}
}
const CycleActivity = struct {
var cycle_counter: u32 = undefined;
var cycle_time: u32 = undefined;
var last_cycle_start: ?u32 = undefined;
var last_second_ticks: u32 = undefined;
var max_cycle_time: u32 = undefined;
var up_time_seconds: u32 = undefined;
fn prepare() void {
cycle_counter = 0;
cycle_time = 0;
last_cycle_start = null;
last_second_ticks = 0;
max_cycle_time = 0;
up_time_seconds = 0;
}
fn update() void {
LedMatrix.update();
cycle_counter += 1;
const new_cycle_start = Timer0.capture();
if (new_cycle_start -% last_second_ticks >= 1000 * 1000) {
up_time_seconds += 1;
last_second_ticks = new_cycle_start;
}
if (last_cycle_start) |start| {
cycle_time = new_cycle_start -% start;
max_cycle_time = math.max(cycle_time, max_cycle_time);
}
last_cycle_start = new_cycle_start;
}
};
const KeyboardActivity = struct {
var column: u32 = undefined;
fn prepare() void {
column = 1;
}
fn update() void {
if (!Uart.isReadByteReady()) {
return;
}
const byte = Uart.readByte();
switch (byte) {
3 => {
SystemControlBlock.requestSystemReset();
},
12 => {
StatusActivity.redraw();
},
27 => {
Uart.writeByteBlocking('$');
column += 1;
},
            '1', '2', '3', '4', '5', '6', '7', '8', '9' => {
                const index = byte - '1';
                if (index < Mission.missions.len) {
                    Mission.missions[index].activate();
                }
            },
'\r' => {
Uart.writeText("\n");
column = 1;
},
else => {
Uart.writeByteBlocking(byte);
column += 1;
},
}
}
};
const StatusActivity = struct {
var prev_now: u32 = undefined;
fn prepare() void {
prev_now = CycleActivity.up_time_seconds;
redraw();
}
fn redraw() void {
Terminal.clearScreen();
Terminal.setScrollingRegion(5, 99);
Terminal.move(5 - 1, 1);
log("keyboard input will be echoed below:", .{});
}
fn update() void {
Uart.loadTxd();
const now = CycleActivity.up_time_seconds;
if (now >= prev_now + 1) {
Terminal.hideCursor();
Terminal.move(1, 1);
Terminal.line("reset {x} up {:3}s cycle {}us max {}us", .{ Power.registers.reset_reason, CycleActivity.up_time_seconds, CycleActivity.cycle_time, CycleActivity.max_cycle_time });
Terminal.line("gpio.in {x:8}", .{Gpio.registers.in & ~@as(u32, 0x0300fff0)});
Terminal.line("", .{});
Terminal.showCursor();
restoreInputLine();
prev_now = now;
}
}
};
fn restoreInputLine() void {
Terminal.move(99, KeyboardActivity.column);
}
const Mission = struct {
title: []const u8,
panic: fn ([]const u8, ?*builtin.StackTrace) noreturn,
vector_table_address: *allowzero u32,
var missions: []Mission = undefined;
var missions_buf: [5]Mission = undefined;
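    // Loads the selected mission's initial stack pointer (vector table offset 0) and
    // reset handler (offset 4), then jumps to it; control never returns to the selector.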
fn activate(self: *Mission) void {
const reset_sp = @intToPtr(*allowzero u32, @ptrToInt(self.vector_table_address) + 0).*;
const reset_pc = @intToPtr(*allowzero u32, @ptrToInt(self.vector_table_address) + 4).*;
asm volatile (
\\ mov sp,%[reset_sp]
\\ bx %[reset_pc]
:
: [reset_pc] "{r0}" (reset_pc),
[reset_sp] "{r1}" (reset_sp)
);
}
fn prepare() void {
missions = missions_buf[0..0];
}
fn register(vector_table_address: *allowzero u32, comptime title: []const u8, comptime source_file: []const u8) void {
missions = missions_buf[0 .. missions.len + 1];
var m = &missions[missions.len - 1];
const import = @import(source_file);
m.title = title;
m.panic = import.panic;
m.vector_table_address = vector_table_address;
}
};
comptime {
const mission_id = 0;
asm (typicalVectorTable(mission_id));
}
const release_tag = "0.4";
const status_display_lines = 6 + 5;
extern var mission1_vector_table: u32;
extern var mission2_vector_table: u32;
extern var mission3_vector_table: u32;
usingnamespace @import("lib_basics.zig").typical; | mission0_mission_selector.zig |
const std = @import("std");
const string = []const u8;
const range = @import("range").range;
const flag = @import("flag");
const linters = [_]fn (std.mem.Allocator, []const u8, *Source, std.fs.File.Writer) WorkError!void{
@import("./tools/dupe_import.zig").work,
@import("./tools/todo.zig").work,
@import("./tools/file_as_struct.zig").work,
@import("./tools/unused_decl.zig").work,
};
pub const WorkError = std.mem.Allocator.Error || std.fs.File.Writer.Error || error{};
const Rule = enum {
dupe_import,
todo,
file_as_struct,
unused_decl,
};
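// Note: doFile dispatches with `linters[@enumToInt(rule)]`, so the `linters` array and
// the `Rule` enum above must stay in the same declaration order.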
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const alloc = gpa.allocator();
defer _ = gpa.deinit();
//
flag.init(alloc);
defer flag.deinit();
try flag.addMulti("do");
try flag.addMulti("skip");
try flag.addMulti("file");
_ = try flag.parse(.single);
const do = flag.getMulti("do") orelse @as([]const string, &.{});
const skip = flag.getMulti("skip") orelse @as([]const string, &.{});
const files = flag.getMulti("file");
var rulestorun = std.ArrayList(Rule).init(alloc);
defer rulestorun.deinit();
if (do.len > 0 and skip.len > 0) {
std.log.err("-do and -skip are mutually exclusive", .{});
std.os.exit(1);
}
if (do.len > 0) {
for (do) |item| {
const r = std.meta.stringToEnum(Rule, item) orelse std.debug.panic("invalid rule name passed to -do: {s}", .{item});
try rulestorun.append(r);
}
} else {
try rulestorun.appendSlice(std.enums.values(Rule));
if (skip.len > 0) {
for (skip) |item| {
const r = std.meta.stringToEnum(Rule, item) orelse std.debug.panic("invalid rule name passed to -skip: {s}", .{item});
_ = removeItem(Rule, &rulestorun, r);
}
}
}
//
var dir = try std.fs.cwd().openDir("./", .{ .iterate = true });
defer dir.close();
const out = std.io.getStdOut().writer();
    if (files) |file_list| {
        for (file_list) |item| {
try doFile(alloc, dir, item, rulestorun.items, out);
}
} else {
var walker = try dir.walk(alloc);
defer walker.deinit();
while (try walker.next()) |item| {
if (item.kind != .File) continue;
try doFile(alloc, dir, item.path, rulestorun.items, out);
}
}
}
fn doFile(alloc: std.mem.Allocator, dir: std.fs.Dir, path: string, rules: []const Rule, out: std.fs.File.Writer) !void {
var arena = std.heap.ArenaAllocator.init(alloc);
const alloc2 = arena.allocator();
defer arena.deinit();
if (!std.mem.endsWith(u8, path, ".zig")) return;
// TODO eventually do .gitignore parsing
if (std.mem.startsWith(u8, path, "zig-cache")) return;
if (std.mem.startsWith(u8, path, "zig-bin")) return;
if (std.mem.startsWith(u8, path, ".zigmod")) return;
if (std.mem.startsWith(u8, path, ".gyro")) return;
const f = try dir.openFile(path, .{});
defer f.close();
const r = f.reader();
const content = try r.readAllAlloc(alloc2, 1 * 1024 * 1024 * 1024 * 4);
const nulcont = try negspan(alloc2, u8, content, 0);
var src = Source{
.alloc = alloc2,
.source = nulcont,
};
for (rules) |jtem| {
try linters[@enumToInt(jtem)](alloc2, path, &src, out);
}
}
fn negspan(alloc: std.mem.Allocator, comptime T: type, input: []const T, comptime term: T) ![:term]const T {
var list = std.ArrayList(T).init(alloc);
defer list.deinit();
for (input) |c| try list.append(c);
try list.append(term);
const res = list.toOwnedSlice();
return res[0 .. res.len - 1 :term];
}
pub const Loc = struct {
line: usize,
pos: usize,
};
pub fn locToLoc(source: [:0]const u8, loc: std.zig.Token.Loc) Loc {
var line: usize = 1;
var pos: usize = 1;
for (range(loc.start)) |_, i| {
pos += 1;
if (source[i] != '\n') continue;
line += 1;
pos = 1;
}
return Loc{ .line = line, .pos = pos };
}
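// A small sketch of locToLoc (not part of the original file): byte offset 5 in
// "a\nbc\nd" points at 'd', which sits on line 3, column 1 (both 1-indexed).
test "locToLoc sketch" {
    const src: [:0]const u8 = "a\nbc\nd";
    const loc = locToLoc(src, .{ .start = 5, .end = 6 });
    try std.testing.expectEqual(@as(usize, 3), loc.line);
    try std.testing.expectEqual(@as(usize, 1), loc.pos);
}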
pub const Source = struct {
alloc: std.mem.Allocator,
source: [:0]const u8,
_tokens: ?[]const std.zig.Token = null,
_ast: ?std.zig.Ast = null,
pub fn tokens(self: *Source) ![]const std.zig.Token {
if (self._tokens) |_| {
return self._tokens.?;
}
var tks = std.zig.Tokenizer.init(self.source);
var list = std.ArrayList(std.zig.Token).init(self.alloc);
while (true) {
const tok = tks.next();
if (tok.tag == .eof) break;
try list.append(tok);
}
self._tokens = list.toOwnedSlice();
return self._tokens.?;
}
pub fn ast(self: *Source) !std.zig.Ast {
if (self._ast) |_| {
return self._ast.?;
}
self._ast = try std.zig.parse(self.alloc, self.source);
return self._ast.?;
}
};
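// Rough usage sketch for Source (not part of the original file): tokens() lazily
// tokenizes and caches, so a second call is assumed to return the same slice. An
// arena is used so the owned token slice is freed along with everything else.
test "Source sketch" {
    var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena.deinit();
    var src = Source{ .alloc = arena.allocator(), .source = "const x = 1;" };
    const toks = try src.tokens();
    try std.testing.expect(toks.len > 0);
    try std.testing.expectEqual(toks.ptr, (try src.tokens()).ptr);
}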
fn removeItem(comptime T: type, haystack: *std.ArrayList(T), needle: T) ?T {
for (haystack.items) |item, i| {
if (item == needle) return haystack.orderedRemove(i);
}
return null;
} | src/main.zig |
const std = @import("std");
const pike = @import("pike");
const sync = @import("sync.zig");
const os = std.os;
const net = std.net;
const mem = std.mem;
const meta = std.meta;
const testing = std.testing;
usingnamespace @import("socket.zig");
pub fn Client(comptime opts: Options) type {
return struct {
const Self = @This();
const Node = struct {
ptr: *Connection,
next: ?*Node = null,
};
const ClientSocket = Socket(.client, opts);
const Protocol = opts.protocol_type;
pub const Connection = struct {
node: Node,
socket: ClientSocket,
frame: @Frame(Self.runConnection),
};
protocol: Protocol,
notifier: *const pike.Notifier,
allocator: *mem.Allocator,
address: net.Address,
lock: sync.Mutex = .{},
done: bool = false,
pool: [opts.max_connections_per_client]*Connection = undefined,
pool_len: usize = 0,
cleanup_counter: sync.Counter = .{},
cleanup_queue: ?*Node = null,
pub fn init(protocol: Protocol, allocator: *mem.Allocator, notifier: *const pike.Notifier, address: net.Address) Self {
return Self{ .protocol = protocol, .allocator = allocator, .notifier = notifier, .address = address };
}
pub fn deinit(self: *Self) void {
var pool: [opts.max_connections_per_client]*Connection = undefined;
var pool_len: usize = 0;
{
const held = self.lock.acquire();
defer held.release();
if (self.done) {
return;
} else {
self.done = true;
}
pool = self.pool;
pool_len = self.pool_len;
self.pool = undefined;
self.pool_len = 0;
}
for (pool[0..pool_len]) |conn| {
conn.socket.deinit();
if (comptime meta.trait.hasFn("close")(meta.Child(Protocol))) {
self.protocol.close(.client, &conn.socket);
}
}
self.cleanup_counter.wait();
self.purge();
}
pub fn purge(self: *Self) void {
const held = self.lock.acquire();
defer held.release();
while (self.cleanup_queue) |head| {
await head.ptr.frame catch {};
self.cleanup_queue = head.next;
if (comptime meta.trait.hasFn("purge")(meta.Child(Protocol))) {
var items: [opts.write_queue_size]opts.message_type = undefined;
const queue = &head.ptr.socket.write_queue;
const remaining = queue.tail -% queue.head;
var i: usize = 0;
while (i < remaining) : (i += 1) {
items[i] = queue.items[(queue.head + i) % queue.items.len];
}
queue.head = queue.tail;
self.protocol.purge(.client, &head.ptr.socket, items[0..remaining]);
}
self.allocator.destroy(head.ptr);
}
}
fn cleanup(self: *Self, node: *Node) void {
const held = self.lock.acquire();
defer held.release();
node.next = self.cleanup_queue;
self.cleanup_queue = node;
}
pub fn bootstrap(self: *Self) !void {
_ = try self.getConnection();
}
pub fn write(self: *Self, message: opts.message_type) !void {
const conn = try self.getConnection();
try conn.socket.write(message);
}
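        // Connection selection: prefer a connection with no pending writes; otherwise
        // open a new one while under max_connections_per_client; otherwise fall back
        // to the connection with the smallest write backlog.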
pub fn getConnection(self: *Self) !*Connection {
defer self.purge();
const held = self.lock.acquire();
defer held.release();
if (self.done) return error.OperationCancelled;
var pool = self.pool[0..self.pool_len];
if (pool.len == 0) return self.initConnection();
var min_conn = pool[0];
var min_pending = min_conn.socket.write_queue.pending();
if (min_pending == 0) return min_conn;
for (pool[1..]) |conn| {
const pending = conn.socket.write_queue.pending();
if (pending == 0) return conn;
if (pending < min_pending) {
min_conn = conn;
min_pending = pending;
}
}
if (pool.len < opts.max_connections_per_client) {
return self.initConnection();
}
return min_conn;
}
fn initConnection(self: *Self) !*Connection {
const conn = try self.allocator.create(Connection);
errdefer self.allocator.destroy(conn);
conn.node = .{ .ptr = conn };
conn.socket = ClientSocket.init(
try pike.Socket.init(os.AF_INET, os.SOCK_STREAM, os.IPPROTO_TCP, 0),
self.address,
);
errdefer conn.socket.deinit();
try conn.socket.unwrap().registerTo(self.notifier);
try conn.socket.unwrap().connect(conn.socket.address);
if (comptime meta.trait.hasFn("handshake")(meta.Child(Protocol))) {
conn.socket.context = try self.protocol.handshake(.client, &conn.socket.inner);
}
self.pool[self.pool_len] = conn;
self.pool_len += 1;
conn.frame = async self.runConnection(conn);
return conn;
}
fn deleteConnection(self: *Self, conn: *Connection) bool {
const held = self.lock.acquire();
defer held.release();
var pool = self.pool[0..self.pool_len];
if (mem.indexOfScalar(*Connection, pool, conn)) |i| {
mem.copy(*Connection, pool[i..], pool[i + 1 ..]);
self.pool_len -= 1;
return true;
}
return false;
}
fn runConnection(self: *Self, conn: *Connection) !void {
self.cleanup_counter.add(1);
defer {
if (self.deleteConnection(conn)) {
conn.socket.deinit();
if (comptime meta.trait.hasFn("close")(meta.Child(Protocol))) {
self.protocol.close(.client, &conn.socket);
}
}
self.cleanup(&conn.node);
self.cleanup_counter.add(-1);
}
yield();
try conn.socket.run(self.protocol);
}
};
} | client.zig |
const std = @import("std");
const gen3 = @import("../gen3.zig");
const rom = @import("../rom.zig");
const script = @import("../script.zig");
const mem = std.mem;
const lu16 = rom.int.lu16;
const lu32 = rom.int.lu32;
pub const CommandDecoder = script.CommandDecoder(Command, struct {
fn isEnd(cmd: Command) bool {
switch (cmd.tag) {
Command.Kind.end,
Command.Kind.@"return",
=> return true,
else => return false,
}
}
}.isEnd);
pub const STD_10 = 10;
pub const STD_FIND_ITEM = 1;
pub const STD_MSG_BOX_AUTO_CLOSE = 6;
pub const STD_MSG_BOX_DEFAULT = 4;
pub const STD_MSG_BOX_GET_POINTS = 9;
pub const STD_MSG_BOX_NPC = 2;
pub const STD_MSG_BOX_SIGN = 3;
pub const STD_MSG_BOX_YES_NO = 5;
pub const STD_OBTAIN_DECORATION = 7;
pub const STD_OBTAIN_ITEM = 0;
pub const STD_REGISTER_MATCH_CALL = 8;
pub const Command = packed struct {
tag: Kind,
_data: Data,
/// HACK: Zig crashes when trying to access `_data` during code generation. This
/// seem to happen because &cmd.data gives a bit aligned pointer, which then
/// does not get properly handled in codegen. This function works around this
/// by manually skipping the tag field to get the data field.
pub fn data(cmd: *Command) *Data {
const bytes = mem.asBytes(cmd);
return mem.bytesAsValue(Data, bytes[@sizeOf(Kind)..][0..@sizeOf(Data)]);
}
const Data = packed union {
// Does nothing.
nop: void,
// Does nothing.
nop1: void,
// Terminates script execution.
end: void,
// Jumps back to after the last-executed call statement, and continues script execution from there.
@"return": void,
// Jumps to destination and continues script execution from there. The location of the calling script is remembered and can be returned to later.
call: call,
// Jumps to destination and continues script execution from there.
goto: goto,
// If the result of the last comparison matches condition (see Comparison operators), jumps to destination and continues script execution from there.
goto_if: goto_if,
// If the result of the last comparison matches condition (see Comparison operators), calls destination.
call_if: call_if,
// Jumps to the standard function at index function.
gotostd: gotostd,
// Calls the standard function at index function.
callstd: callstd,
// If the result of the last comparison matches condition (see Comparison operators), jumps to the standard function at index function.
gotostd_if: gotostd_if,
// If the result of the last comparison matches condition (see Comparison operators), calls the standard function at index function.
callstd_if: callstd_if,
// Executes a script stored in a default RAM location.
gotoram: void,
// Terminates script execution and "resets the script RAM".
killscript: void,
// Sets some status related to Mystery Event.
setmysteryeventstatus: setmysteryeventstatus,
// Sets the specified script bank to value.
loadword: loadword,
// Sets the specified script bank to value.
loadbyte: loadbyte,
// Sets the byte at offset to value.
writebytetoaddr: writebytetoaddr,
// Copies the byte value at source into the specified script bank.
loadbytefromaddr: loadbytefromaddr,
// Not sure. Judging from XSE's description I think it takes the least-significant byte in bank source and writes it to destination.
setptrbyte: setptrbyte,
// Copies the contents of bank source into bank destination.
copylocal: copylocal,
// Copies the byte at source to destination, replacing whatever byte was previously there.
copybyte: copybyte,
// Changes the value of destination to value.
setvar: setvar,
        // Changes the value of destination by adding value to it. Overflow is not prevented (0xFFFF + 1 = 0x0000).
        addvar: addvar,
        // Changes the value of destination by subtracting value from it. Overflow is not prevented (0x0000 - 1 = 0xFFFF).
subvar: subvar,
// Copies the value of source into destination.
copyvar: copyvar,
// If source is not a variable, then this function acts like setvar. Otherwise, it acts like copyvar.
setorcopyvar: setorcopyvar,
// Compares the values of script banks a and b, after forcing the values to bytes.
compare_local_to_local: compare_local_to_local,
// Compares the least-significant byte of the value of script bank a to a fixed byte value (b).
compare_local_to_value: compare_local_to_value,
// Compares the least-significant byte of the value of script bank a to the byte located at offset b.
compare_local_to_addr: compare_local_to_addr,
// Compares the byte located at offset a to the least-significant byte of the value of script bank b.
compare_addr_to_local: compare_addr_to_local,
// Compares the byte located at offset a to a fixed byte value (b).
compare_addr_to_value: compare_addr_to_value,
// Compares the byte located at offset a to the byte located at offset b.
compare_addr_to_addr: compare_addr_to_addr,
// Compares the value of `var` to a fixed word value (b).
compare_var_to_value: compare_var_to_value,
// Compares the value of `var1` to the value of `var2`.
compare_var_to_var: compare_var_to_var,
// Calls the native C function stored at `func`.
callnative: callnative,
// Replaces the script with the function stored at `func`. Execution returns to the bytecode script when func returns TRUE.
gotonative: gotonative,
// Calls a special function; that is, a function designed for use by scripts and listed in a table of pointers.
special: special,
// Calls a special function. That function's output (if any) will be written to the variable you specify.
specialvar: specialvar,
// Blocks script execution until a command or ASM code manually unblocks it. Generally used with specific commands and specials. If this command runs, and a subsequent command or piece of ASM does not unblock state, the script will remain blocked indefinitely (essentially a hang).
waitstate: void,
// Blocks script execution for time (frames? milliseconds?).
delay: delay,
// Sets a to 1.
setflag: setflag,
// Sets a to 0.
clearflag: clearflag,
// Compares a to 1.
checkflag: checkflag,
        // Initializes the RTC's local time offset to the given hour and minute. In FireRed, this command is a nop.
initclock: initclock,
// Runs time based events. In FireRed, this command is a nop.
dodailyevents: void,
// Sets the values of variables 0x8000, 0x8001, and 0x8002 to the current hour, minute, and second. In FRLG, this command sets those variables to zero.
gettime: void,
// Plays the specified (sound_number) sound. Only one sound may play at a time, with newer ones interrupting older ones.
playse: playse,
// Blocks script execution until the currently-playing sound (triggered by playse) finishes playing.
waitse: void,
// Plays the specified (fanfare_number) fanfare.
playfanfare: playfanfare,
// Blocks script execution until all currently-playing fanfares finish.
waitfanfare: void,
// Plays the specified (song_number) song. The byte is apparently supposed to be 0x00.
playbgm: playbgm,
// Saves the specified (song_number) song to be played later.
savebgm: savebgm,
// Crossfades the currently-playing song into the map's default song.
fadedefaultbgm: void,
// Crossfades the currently-playng song into the specified (song_number) song.
fadenewbgm: fadenewbgm,
// Fades out the currently-playing song.
fadeoutbgm: fadeoutbgm,
// Fades the previously-playing song back in.
fadeinbgm: fadeinbgm,
// Sends the player to Warp warp on Map bank.map. If the specified warp is 0xFF, then the player will instead be sent to (X, Y) on the map.
warp: warp,
// Clone of warp that does not play a sound effect.
warpsilent: warpsilent,
// Clone of warp that plays a door opening animation before stepping upwards into it.
warpdoor: warpdoor,
// Warps the player to another map using a hole animation.
warphole: warphole,
// Clone of warp that uses a teleport effect. It is apparently only used in R/S/E.
warpteleport: warpteleport,
// Sets the warp destination to be used later.
setwarp: setwarp,
// Sets the warp destination that a warp to Warp 127 on Map 127.127 will connect to. Useful when a map has warps that need to go to script-controlled locations (i.e. elevators).
setdynamicwarp: setdynamicwarp,
// Sets the destination that diving or emerging from a dive will take the player to.
setdivewarp: setdivewarp,
// Sets the destination that falling into a hole will take the player to.
setholewarp: setholewarp,
// Retrieves the player's zero-indexed X- and Y-coordinates in the map, and stores them in the specified variables.
getplayerxy: getplayerxy,
// Retrieves the number of Pokemon in the player's party, and stores that number in variable 0x800D (LASTRESULT).
getpartysize: void,
// Attempts to add quantity of item index to the player's Bag. If the player has enough room, the item will be added and variable 0x800D (LASTRESULT) will be set to 0x0001; otherwise, LASTRESULT is set to 0x0000.
additem: additem,
// Removes quantity of item index from the player's Bag.
removeitem: removeitem,
        // Checks if the player has enough space in their Bag to hold quantity more of item index. Sets variable 0x800D (LASTRESULT) to 0x0001 if there is room, or 0x0000 if there is no room.
checkitemspace: checkitemspace,
// Checks if the player has quantity or more of item index in their Bag. Sets variable 0x800D (LASTRESULT) to 0x0001 if the player has enough of the item, or 0x0000 if they have fewer than quantity of the item.
checkitem: checkitem,
// Checks which Bag pocket the specified (index) item belongs in, and writes the value to variable 0x800D (LASTRESULT). This script is used to show the name of the proper Bag pocket when the player receives an item via callstd (simplified to giveitem in XSE).
checkitemtype: checkitemtype,
// Adds a quantity amount of item index to the player's PC. Both arguments can be variables.
givepcitem: givepcitem,
// Checks for quantity amount of item index in the player's PC. Both arguments can be variables.
checkpcitem: checkpcitem,
// Adds decoration to the player's PC. In FireRed, this command is a nop. (The argument is read, but not used for anything.)
givedecoration: givedecoration,
// Removes a decoration from the player's PC. In FireRed, this command is a nop. (The argument is read, but not used for anything.)
takedecoration: takedecoration,
// Checks for decoration in the player's PC. In FireRed, this command is a nop. (The argument is read, but not used for anything.)
checkdecor: checkdecor,
        // Checks if the player has enough space in their PC to hold decoration. Sets variable 0x800D (LASTRESULT) to 0x0001 if there is room, or 0x0000 if there is no room. In FireRed, this command is a nop. (The argument is read, but not used for anything.)
checkdecorspace: checkdecorspace,
// Applies the movement data at movements to the specified (index) Object. Also closes any standard message boxes that are still open.
// If no map is specified, then the current map is used.
applymovement: applymovement,
// Really only useful if the object has followed from one map to another (e.g. Wally during the catching event).
applymovementmap: applymovementmap,
// Blocks script execution until the movements being applied to the specified (index) Object finish. If the specified Object is 0x0000, then the command will block script execution until all Objects affected by applymovement finish their movements. If the specified Object is not currently being manipulated with applymovement, then this command does nothing.
// If no map is specified, then the current map is used.
waitmovement: waitmovement,
waitmovementmap: waitmovementmap,
// Attempts to hide the specified (index) Object on the specified (map_group, map_num) map, by setting its visibility flag if it has a valid one. If the Object does not have a valid visibility flag, this command does nothing.
// If no map is specified, then the current map is used.
removeobject: removeobject,
removeobjectmap: removeobjectmap,
// Unsets the specified (index) Object's visibility flag on the specified (map_group, map_num) map if it has a valid one. If the Object does not have a valid visibility flag, this command does nothing.
// If no map is specified, then the current map is used.
addobject: addobject,
addobjectmap: addobjectmap,
// Sets the specified (index) Object's position on the current map.
setobjectxy: setobjectxy,
showobjectat: showobjectat,
hideobjectat: hideobjectat,
// If the script was called by an Object, then that Object will turn to face toward the metatile that the player is standing on.
faceplayer: void,
turnobject: turnobject,
// If the Trainer flag for Trainer index is not set, this command does absolutely nothing.
trainerbattle: trainerbattle,
// Starts a trainer battle using the battle information stored in RAM (usually by trainerbattle, which actually calls this command behind-the-scenes), and blocks script execution until the battle finishes.
trainerbattlebegin: void,
// Goes to address after the trainerbattle command (called by the battle functions, see battle_setup.c)
gotopostbattlescript: void,
// Goes to address specified in the trainerbattle command (called by the battle functions, see battle_setup.c)
gotobeatenscript: void,
// Compares Flag (trainer + 0x500) to 1. (If the flag is set, then the trainer has been defeated by the player.)
checktrainerflag: checktrainerflag,
// Sets Flag (trainer + 0x500).
settrainerflag: settrainerflag,
// Clears Flag (trainer + 0x500).
cleartrainerflag: cleartrainerflag,
setobjectxyperm: setobjectxyperm,
moveobjectoffscreen: moveobjectoffscreen,
setobjectmovementtype: setobjectmovementtype,
// If a standard message box (or its text) is being drawn on-screen, this command blocks script execution until the box and its text have been fully drawn.
waitmessage: void,
// Starts displaying a standard message box containing the specified text. If text is a pointer, then the string at that offset will be loaded and used. If text is script bank 0, then the value of script bank 0 will be treated as a pointer to the text. (You can use loadpointer to place a string pointer in a script bank.)
message: message,
// Closes the current message box.
closemessage: void,
// Ceases movement for all Objects on-screen.
lockall: void,
// If the script was called by an Object, then that Object's movement will cease.
lock: void,
// Resumes normal movement for all Objects on-screen, and closes any standard message boxes that are still open.
releaseall: void,
// If the script was called by an Object, then that Object's movement will resume. This command also closes any standard message boxes that are still open.
release: void,
// Blocks script execution until the player presses any key.
waitbuttonpress: void,
// Displays a YES/NO multichoice box at the specified coordinates, and blocks script execution until the user makes a selection. Their selection is stored in variable 0x800D (LASTRESULT); 0x0000 for "NO" or if the user pressed B, and 0x0001 for "YES".
yesnobox: yesnobox,
// Displays a multichoice box from which the user can choose a selection, and blocks script execution until a selection is made. Lists of options are predefined and the one to be used is specified with list. If b is set to a non-zero value, then the user will not be allowed to back out of the multichoice with the B button.
multichoice: multichoice,
// Displays a multichoice box from which the user can choose a selection, and blocks script execution until a selection is made. Lists of options are predefined and the one to be used is specified with list. The default argument determines the initial position of the cursor when the box is first opened; it is zero-indexed, and if it is too large, it is treated as 0x00. If b is set to a non-zero value, then the user will not be allowed to back out of the multichoice with the B button.
multichoicedefault: multichoicedefault,
// Displays a multichoice box from which the user can choose a selection, and blocks script execution until a selection is made. Lists of options are predefined and the one to be used is specified with list. The per_row argument determines how many list items will be shown on a single row of the box.
multichoicegrid: multichoicegrid,
// Nopped in Emerald.
drawbox: void,
// Nopped in Emerald, but still consumes parameters.
erasebox: erasebox,
// Nopped in Emerald, but still consumes parameters.
drawboxtext: drawboxtext,
// Displays a box containing the front sprite for the specified (species) Pokemon species.
drawmonpic: drawmonpic,
// Hides all boxes displayed with drawmonpic.
erasemonpic: void,
// Draws an image of the winner of the contest. In FireRed, this command is a nop. (The argument is discarded.)
drawcontestwinner: drawcontestwinner,
// Displays the string at pointer as braille text in a standard message box. The string must be formatted to use braille characters and needs to provide six extra starting characters that are skipped (in RS, these characters determined the box's size and position, but in Emerald these are calculated automatically).
braillemessage: braillemessage,
// Gives the player one of the specified (species) Pokemon at level level holding item. The unknown arguments should all be zeroes.
givemon: givemon,
giveegg: giveegg,
setmonmove: setmonmove,
// Checks if at least one Pokemon in the player's party knows the specified (index) attack. If so, variable 0x800D (LASTRESULT) is set to the (zero-indexed) slot number of the first Pokemon that knows the move. If not, LASTRESULT is set to 0x0006. Variable 0x8004 is also set to this Pokemon's species.
checkpartymove: checkpartymove,
// Writes the name of the Pokemon at index species to the specified buffer.
bufferspeciesname: bufferspeciesname,
// Writes the name of the species of the first Pokemon in the player's party to the specified buffer.
bufferleadmonspeciesname: bufferleadmonspeciesname,
// Writes the nickname of the Pokemon in slot slot (zero-indexed) of the player's party to the specified buffer. If an empty or invalid slot is specified, ten spaces ("") are written to the buffer.
bufferpartymonnick: bufferpartymonnick,
// Writes the name of the item at index item to the specified buffer. If the specified index is larger than the number of items in the game (0x176), the name of item 0 ("????????") is buffered instead.
bufferitemname: bufferitemname,
// Writes the name of the decoration at index decoration to the specified buffer. In FireRed, this command is a nop.
bufferdecorationname: bufferdecorationname,
// Writes the name of the move at index move to the specified buffer.
buffermovename: buffermovename,
// Converts the value of input to a decimal string, and writes that string to the specified buffer.
buffernumberstring: buffernumberstring,
        // Writes the standard string identified by index to the specified buffer. This command has no protections in place at all, so specifying an invalid standard string (e.g. 0x2B) can and usually will cause data corruption.
bufferstdstring: bufferstdstring,
// Copies the string at offset to the specified buffer.
bufferstring: bufferstring,
// Opens the Pokemart system, offering the specified products for sale.
pokemart: pokemart,
// Opens the Pokemart system and treats the list of items as decorations.
pokemartdecoration: pokemartdecoration,
// Apparent clone of pokemartdecoration.
pokemartdecoration2: pokemartdecoration2,
// Starts up the slot machine minigame.
playslotmachine: playslotmachine,
// Sets a berry tree's specific berry and growth stage. In FireRed, this command is a nop.
setberrytree: setberrytree,
// This allows you to choose a Pokemon to use in a contest. In FireRed, this command sets the byte at 0x03000EA8 to 0x01.
choosecontestmon: void,
// Starts a contest. In FireRed, this command is a nop.
startcontest: void,
// Shows the results of a contest. In FireRed, this command is a nop.
showcontestresults: void,
// Starts a contest over a link connection. In FireRed, this command is a nop.
contestlinktransfer: void,
// Stores a random integer between 0 and limit in variable 0x800D (LASTRESULT).
random: random,
// If check is 0x00, this command adds value to the player's money.
givemoney: givemoney,
// If check is 0x00, this command subtracts value from the player's money.
takemoney: takemoney,
// If check is 0x00, this command will check if the player has value or more money; script variable 0x800D (LASTRESULT) is set to 0x0001 if the player has enough money, or 0x0000 if the do not.
checkmoney: checkmoney,
// Spawns a secondary box showing how much money the player has.
showmoneybox: showmoneybox,
// Hides the secondary box spawned by showmoney.
hidemoneybox: void,
// Updates the secondary box spawned by showmoney. Consumes but does not use arguments.
updatemoneybox: updatemoneybox,
// Gets the price reduction for the index given. In FireRed, this command is a nop.
getpricereduction: getpricereduction,
// Fades the screen to and from black and white. Mode 0x00 fades from black, mode 0x01 fades out to black, mode 0x2 fades in from white, and mode 0x3 fades out to white.
fadescreen: fadescreen,
// Fades the screen to and from black and white. Mode 0x00 fades from black, mode 0x01 fades out to black, mode 0x2 fades in from white, and mode 0x3 fades out to white. Other modes may exist.
fadescreenspeed: fadescreenspeed,
setflashradius: setflashradius,
animateflash: animateflash,
messageautoscroll: messageautoscroll,
// Executes the specified field move animation.
dofieldeffect: dofieldeffect,
// Sets up the field effect argument argument with the value value.
setfieldeffectargument: setfieldeffectargument,
// Blocks script execution until all playing field move animations complete.
waitfieldeffect: waitfieldeffect,
// Sets which healing place the player will return to if all of the Pokemon in their party faint.
setrespawn: setrespawn,
// Checks the player's gender. If male, then 0x0000 is stored in variable 0x800D (LASTRESULT). If female, then 0x0001 is stored in LASTRESULT.
checkplayergender: void,
// Plays the specified (species) Pokemon's cry. You can use waitcry to block script execution until the sound finishes.
playmoncry: playmoncry,
// Changes the metatile at (x, y) on the current map.
setmetatile: setmetatile,
// Queues a weather change to the default weather for the map.
resetweather: void,
// Queues a weather change to type weather.
setweather: setweather,
// Executes the weather change queued with resetweather or setweather. The current weather will smoothly fade into the queued weather.
doweather: void,
// This command manages cases in which maps have tiles that change state when stepped on (specifically, cracked/breakable floors).
setstepcallback: setstepcallback,
setmaplayoutindex: setmaplayoutindex,
setobjectpriority: setobjectpriority,
resetobjectpriority: resetobjectpriority,
createvobject: createvobject,
turnvobject: turnvobject,
// Opens the door metatile at (X, Y) with an animation.
opendoor: opendoor,
// Closes the door metatile at (X, Y) with an animation.
closedoor: closedoor,
// Waits for the door animation started with opendoor or closedoor to finish.
waitdooranim: void,
// Sets the door tile at (x, y) to be open without an animation.
setdooropen: setdooropen,
// Sets the door tile at (x, y) to be closed without an animation.
setdoorclosed: setdoorclosed,
// In Emerald, this command consumes its parameters and does nothing. In FireRed, this command is a nop.
addelevmenuitem: addelevmenuitem,
// In FireRed and Emerald, this command is a nop.
showelevmenu: void,
checkcoins: checkcoins,
givecoins: givecoins,
takecoins: takecoins,
// Prepares to start a wild battle against a species at Level level holding item. Running this command will not affect normal wild battles. You start the prepared battle with dowildbattle.
setwildbattle: setwildbattle,
// Starts a wild battle against the Pokemon generated by setwildbattle. Blocks script execution until the battle finishes.
dowildbattle: void,
setvaddress: setvaddress,
vgoto: vgoto,
vcall: vcall,
vgoto_if: vgoto_if,
vcall_if: vcall_if,
vmessage: vmessage,
vloadptr: vloadptr,
vbufferstring: vbufferstring,
// Spawns a secondary box showing how many Coins the player has.
showcoinsbox: showcoinsbox,
// Hides the secondary box spawned by showcoins. It consumes its arguments but doesn't use them.
hidecoinsbox: hidecoinsbox,
// Updates the secondary box spawned by showcoins. It consumes its arguments but doesn't use them.
updatecoinsbox: updatecoinsbox,
// Increases the value of the specified game stat by 1. The stat's value will not be allowed to exceed 0x00FFFFFF.
incrementgamestat: incrementgamestat,
// Sets the destination that using an Escape Rope or Dig will take the player to.
setescapewarp: setescapewarp,
// Blocks script execution until cry finishes.
waitmoncry: void,
// Writes the name of the specified (box) PC box to the specified buffer.
bufferboxname: bufferboxname,
// Sets the color of the text in standard message boxes. 0x00 produces blue (male) text, 0x01 produces red (female) text, 0xFF resets the color to the default for the current OW's gender, and all other values produce black text.
textcolor: textcolor,
// The exact purpose of this command is unknown, but it is related to the blue help-text box that appears on the bottom of the screen when the Main Menu is opened.
loadhelp: loadhelp,
// The exact purpose of this command is unknown, but it is related to the blue help-text box that appears on the bottom of the screen when the Main Menu is opened.
unloadhelp: void,
// After using this command, all standard message boxes will use the signpost frame.
signmsg: void,
// Ends the effects of signmsg, returning message box frames to normal.
normalmsg: void,
// Compares the value of a hidden variable to a dword.
comparehiddenvar: comparehiddenvar,
// Makes the Pokemon in the specified slot of the player's party obedient. It will not randomly disobey orders in battle.
setmonobedient: setmonobedient,
// Checks if the Pokemon in the specified slot of the player's party is obedient. If the Pokemon is disobedient, 0x0001 is written to script variable 0x800D (LASTRESULT). If the Pokemon is obedient (or if the specified slot is empty or invalid), 0x0000 is written.
checkmonobedience: checkmonobedience,
// Depending on factors I haven't managed to understand yet, this command may cause script execution to jump to the offset specified by the pointer at 0x020375C0.
execram: void,
// Sets worldmapflag to 1. This allows the player to Fly to the corresponding map, if that map has a flightspot.
setworldmapflag: setworldmapflag,
// Clone of warpteleport? It is apparently only used in FR/LG, and only with specials.[source]
warpteleport2: warpteleport2,
// Changes the location where the player caught the Pokemon in the specified slot of their party.
setmonmetlocation: setmonmetlocation,
mossdeepgym1: mossdeepgym1,
mossdeepgym2: void,
// In FireRed, this command is a nop.
mossdeepgym3: mossdeepgym3,
mossdeepgym4: void,
warp7: warp7,
cmd_d8: void,
cmd_d9: void,
hidebox2: void,
message3: message3,
fadescreenswapbuffers: fadescreenswapbuffers,
buffertrainerclassname: buffertrainerclassname,
buffertrainername: buffertrainername,
pokenavcall: pokenavcall,
warp8: warp8,
buffercontesttypestring: buffercontesttypestring,
// Writes the name of the specified (item) item to the specified buffer. If the specified item is a Berry (0x85 - 0xAE) or Poke Ball (0x4) and if the quantity is 2 or more, the buffered string will be pluralized ("IES" or "S" appended). If the specified item is the Enigma Berry, I have no idea what this command does (but testing showed no pluralization). If the specified index is larger than the number of items in the game (0x176), the name of item 0 ("????????") is buffered instead.
bufferitemnameplural: bufferitemnameplural,
};
pub const Kind = enum(u8) {
nop = 0x00,
nop1 = 0x01,
end = 0x02,
@"return" = 0x03,
call = 0x04,
goto = 0x05,
goto_if = 0x06,
call_if = 0x07,
gotostd = 0x08,
callstd = 0x09,
gotostd_if = 0x0a,
callstd_if = 0x0b,
gotoram = 0x0c,
killscript = 0x0d,
setmysteryeventstatus = 0x0e,
loadword = 0x0f,
loadbyte = 0x10,
writebytetoaddr = 0x11,
loadbytefromaddr = 0x12,
setptrbyte = 0x13,
copylocal = 0x14,
copybyte = 0x15,
setvar = 0x16,
addvar = 0x17,
subvar = 0x18,
copyvar = 0x19,
setorcopyvar = 0x1a,
compare_local_to_local = 0x1b,
compare_local_to_value = 0x1c,
compare_local_to_addr = 0x1d,
compare_addr_to_local = 0x1e,
compare_addr_to_value = 0x1f,
compare_addr_to_addr = 0x20,
compare_var_to_value = 0x21,
compare_var_to_var = 0x22,
callnative = 0x23,
gotonative = 0x24,
special = 0x25,
specialvar = 0x26,
waitstate = 0x27,
delay = 0x28,
setflag = 0x29,
clearflag = 0x2a,
checkflag = 0x2b,
initclock = 0x2c,
dodailyevents = 0x2d,
gettime = 0x2e,
playse = 0x2f,
waitse = 0x30,
playfanfare = 0x31,
waitfanfare = 0x32,
playbgm = 0x33,
savebgm = 0x34,
fadedefaultbgm = 0x35,
fadenewbgm = 0x36,
fadeoutbgm = 0x37,
fadeinbgm = 0x38,
warp = 0x39,
warpsilent = 0x3a,
warpdoor = 0x3b,
warphole = 0x3c,
warpteleport = 0x3d,
setwarp = 0x3e,
setdynamicwarp = 0x3f,
setdivewarp = 0x40,
setholewarp = 0x41,
getplayerxy = 0x42,
getpartysize = 0x43,
additem = 0x44,
removeitem = 0x45,
checkitemspace = 0x46,
checkitem = 0x47,
checkitemtype = 0x48,
givepcitem = 0x49,
checkpcitem = 0x4a,
givedecoration = 0x4b,
takedecoration = 0x4c,
checkdecor = 0x4d,
checkdecorspace = 0x4e,
applymovement = 0x4f,
applymovementmap = 0x50,
waitmovement = 0x51,
waitmovementmap = 0x52,
removeobject = 0x53,
removeobjectmap = 0x54,
addobject = 0x55,
addobjectmap = 0x56,
setobjectxy = 0x57,
showobjectat = 0x58,
hideobjectat = 0x59,
faceplayer = 0x5a,
turnobject = 0x5b,
trainerbattle = 0x5c,
trainerbattlebegin = 0x5d,
gotopostbattlescript = 0x5e,
gotobeatenscript = 0x5f,
checktrainerflag = 0x60,
settrainerflag = 0x61,
cleartrainerflag = 0x62,
setobjectxyperm = 0x63,
moveobjectoffscreen = 0x64,
setobjectmovementtype = 0x65,
waitmessage = 0x66,
message = 0x67,
closemessage = 0x68,
lockall = 0x69,
lock = 0x6a,
releaseall = 0x6b,
release = 0x6c,
waitbuttonpress = 0x6d,
yesnobox = 0x6e,
multichoice = 0x6f,
multichoicedefault = 0x70,
multichoicegrid = 0x71,
drawbox = 0x72,
erasebox = 0x73,
drawboxtext = 0x74,
drawmonpic = 0x75,
erasemonpic = 0x76,
drawcontestwinner = 0x77,
braillemessage = 0x78,
givemon = 0x79,
giveegg = 0x7a,
setmonmove = 0x7b,
checkpartymove = 0x7c,
bufferspeciesname = 0x7d,
bufferleadmonspeciesname = 0x7e,
bufferpartymonnick = 0x7f,
bufferitemname = 0x80,
bufferdecorationname = 0x81,
buffermovename = 0x82,
buffernumberstring = 0x83,
bufferstdstring = 0x84,
bufferstring = 0x85,
pokemart = 0x86,
pokemartdecoration = 0x87,
pokemartdecoration2 = 0x88,
playslotmachine = 0x89,
setberrytree = 0x8a,
choosecontestmon = 0x8b,
startcontest = 0x8c,
showcontestresults = 0x8d,
contestlinktransfer = 0x8e,
random = 0x8f,
givemoney = 0x90,
takemoney = 0x91,
checkmoney = 0x92,
showmoneybox = 0x93,
hidemoneybox = 0x94,
updatemoneybox = 0x95,
getpricereduction = 0x96,
fadescreen = 0x97,
fadescreenspeed = 0x98,
setflashradius = 0x99,
animateflash = 0x9a,
messageautoscroll = 0x9b,
dofieldeffect = 0x9c,
setfieldeffectargument = 0x9d,
waitfieldeffect = 0x9e,
setrespawn = 0x9f,
checkplayergender = 0xa0,
playmoncry = 0xa1,
setmetatile = 0xa2,
resetweather = 0xa3,
setweather = 0xa4,
doweather = 0xa5,
setstepcallback = 0xa6,
setmaplayoutindex = 0xa7,
setobjectpriority = 0xa8,
resetobjectpriority = 0xa9,
createvobject = 0xaa,
turnvobject = 0xab,
opendoor = 0xac,
closedoor = 0xad,
waitdooranim = 0xae,
setdooropen = 0xaf,
setdoorclosed = 0xb0,
addelevmenuitem = 0xb1,
showelevmenu = 0xb2,
checkcoins = 0xb3,
givecoins = 0xb4,
takecoins = 0xb5,
setwildbattle = 0xb6,
dowildbattle = 0xb7,
setvaddress = 0xb8,
vgoto = 0xb9,
vcall = 0xba,
vgoto_if = 0xbb,
vcall_if = 0xbc,
vmessage = 0xbd,
vloadptr = 0xbe,
vbufferstring = 0xbf,
showcoinsbox = 0xc0,
hidecoinsbox = 0xc1,
updatecoinsbox = 0xc2,
incrementgamestat = 0xc3,
setescapewarp = 0xc4,
waitmoncry = 0xc5,
bufferboxname = 0xc6,
textcolor = 0xc7,
loadhelp = 0xc8,
unloadhelp = 0xc9,
signmsg = 0xca,
normalmsg = 0xcb,
comparehiddenvar = 0xcc,
setmonobedient = 0xcd,
checkmonobedience = 0xce,
execram = 0xcf,
setworldmapflag = 0xd0,
warpteleport2 = 0xd1,
setmonmetlocation = 0xd2,
mossdeepgym1 = 0xd3,
mossdeepgym2 = 0xd4,
mossdeepgym3 = 0xd5,
mossdeepgym4 = 0xd6,
warp7 = 0xd7,
cmd_d8 = 0xd8,
cmd_d9 = 0xd9,
hidebox2 = 0xda,
message3 = 0xdb,
fadescreenswapbuffers = 0xdc,
buffertrainerclassname = 0xdd,
buffertrainername = 0xde,
pokenavcall = 0xdf,
warp8 = 0xe0,
buffercontesttypestring = 0xe1,
bufferitemnameplural = 0xe2,
};
pub const call = packed struct {
destination: lu32,
};
pub const goto = packed struct {
destination: lu32,
};
pub const goto_if = packed struct {
condition: u8,
destination: lu32,
};
pub const call_if = packed struct {
condition: u8,
destination: lu32,
};
pub const gotostd = packed struct {
function: u8,
};
pub const callstd = packed struct {
function: u8,
};
pub const gotostd_if = packed struct {
condition: u8,
function: u8,
};
pub const callstd_if = packed struct {
condition: u8,
function: u8,
};
pub const setmysteryeventstatus = packed struct {
value: u8,
};
pub const loadword = packed struct {
destination: u8,
value: gen3.Ptr([*:0xff]u8),
};
pub const loadbyte = packed struct {
destination: u8,
value: u8,
};
pub const writebytetoaddr = packed struct {
value: u8,
offset: lu32,
};
pub const loadbytefromaddr = packed struct {
destination: u8,
source: lu32,
};
pub const setptrbyte = packed struct {
source: u8,
destination: lu32,
};
pub const copylocal = packed struct {
destination: u8,
source: u8,
};
pub const copybyte = packed struct {
destination: lu32,
source: lu32,
};
pub const setvar = packed struct {
destination: lu16,
value: lu16,
};
pub const addvar = packed struct {
destination: lu16,
value: lu16,
};
pub const subvar = packed struct {
destination: lu16,
value: lu16,
};
pub const copyvar = packed struct {
destination: lu16,
source: lu16,
};
pub const setorcopyvar = packed struct {
destination: lu16,
source: lu16,
};
pub const compare_local_to_local = packed struct {
byte1: u8,
byte2: u8,
};
pub const compare_local_to_value = packed struct {
a: u8,
b: u8,
};
pub const compare_local_to_addr = packed struct {
a: u8,
b: lu32,
};
pub const compare_addr_to_local = packed struct {
a: lu32,
b: u8,
};
pub const compare_addr_to_value = packed struct {
a: lu32,
b: u8,
};
pub const compare_addr_to_addr = packed struct {
a: lu32,
b: lu32,
};
pub const compare_var_to_value = packed struct {
@"var": lu16,
value: lu16,
};
pub const compare_var_to_var = packed struct {
var1: lu16,
var2: lu16,
};
pub const callnative = packed struct {
func: lu32,
};
pub const gotonative = packed struct {
func: lu32,
};
pub const special = packed struct {
special_function: lu16,
};
pub const specialvar = packed struct {
output: lu16,
special_function: lu16,
};
pub const delay = packed struct {
time: lu16,
};
pub const setflag = packed struct {
a: lu16,
};
pub const clearflag = packed struct {
a: lu16,
};
pub const checkflag = packed struct {
a: lu16,
};
pub const initclock = packed struct {
hour: lu16,
minute: lu16,
};
pub const playse = packed struct {
sound_number: lu16,
};
pub const playfanfare = packed struct {
fanfare_number: lu16,
};
pub const playbgm = packed struct {
song_number: lu16,
unknown: u8,
};
pub const savebgm = packed struct {
song_number: lu16,
};
pub const fadenewbgm = packed struct {
song_number: lu16,
};
pub const fadeoutbgm = packed struct {
speed: u8,
};
pub const fadeinbgm = packed struct {
speed: u8,
};
pub const warp = packed struct {
map: lu16,
warp: u8,
x: lu16,
y: lu16,
};
pub const warpsilent = packed struct {
map: lu16,
warp: u8,
x: lu16,
y: lu16,
};
pub const warpdoor = packed struct {
map: lu16,
warp: u8,
x: lu16,
y: lu16,
};
pub const warphole = packed struct {
map: lu16,
};
pub const warpteleport = packed struct {
map: lu16,
warp: u8,
x: lu16,
y: lu16,
};
pub const setwarp = packed struct {
map: lu16,
warp: u8,
x: lu16,
y: lu16,
};
pub const setdynamicwarp = packed struct {
map: lu16,
warp: u8,
x: lu16,
y: lu16,
};
pub const setdivewarp = packed struct {
map: lu16,
warp: u8,
x: lu16,
y: lu16,
};
pub const setholewarp = packed struct {
map: lu16,
warp: u8,
x: lu16,
y: lu16,
};
pub const getplayerxy = packed struct {
x: lu16,
y: lu16,
};
pub const additem = packed struct {
index: lu16,
quantity: lu16,
};
pub const removeitem = packed struct {
index: lu16,
quantity: lu16,
};
pub const checkitemspace = packed struct {
index: lu16,
quantity: lu16,
};
pub const checkitem = packed struct {
index: lu16,
quantity: lu16,
};
pub const checkitemtype = packed struct {
index: lu16,
};
pub const givepcitem = packed struct {
index: lu16,
quantity: lu16,
};
pub const checkpcitem = packed struct {
index: lu16,
quantity: lu16,
};
pub const givedecoration = packed struct {
decoration: lu16,
};
pub const takedecoration = packed struct {
decoration: lu16,
};
pub const checkdecor = packed struct {
decoration: lu16,
};
pub const checkdecorspace = packed struct {
decoration: lu16,
};
pub const applymovement = packed struct {
index: lu16,
movements: lu32,
};
pub const applymovementmap = packed struct {
index: lu16,
movements: lu32,
map: lu16,
};
pub const waitmovement = packed struct {
index: lu16,
};
pub const waitmovementmap = packed struct {
index: lu16,
map: lu16,
};
pub const removeobject = packed struct {
index: lu16,
};
pub const removeobjectmap = packed struct {
index: lu16,
map: lu16,
};
pub const addobject = packed struct {
index: lu16,
};
pub const addobjectmap = packed struct {
index: lu16,
map: lu16,
};
pub const setobjectxy = packed struct {
index: lu16,
x: lu16,
y: lu16,
};
pub const showobjectat = packed struct {
index: lu16,
map: lu16,
};
pub const hideobjectat = packed struct {
index: lu16,
map: lu16,
};
pub const turnobject = packed struct {
index: lu16,
direction: u8,
};
pub const TrainerBattleType = enum(u8) {
trainer_battle_single = 0,
trainer_battle_continue_script_no_music = 1,
trainer_battle_continue_script = 2,
trainer_battle_single_no_intro_text = 3,
trainer_battle_double = 4,
trainer_battle_rematch = 5,
trainer_battle_continue_script_double = 6,
trainer_battle_rematch_double = 7,
trainer_battle_continue_script_double_no_music = 8,
trainer_battle_pyramid = 9,
trainer_battle_set_trainer_a = 10,
trainer_battle_set_trainer_b = 11,
trainer_battle12 = 12,
};
pub const trainerbattle = packed struct {
type: TrainerBattleType,
trainer: lu16,
local_id: lu16,
pointers: packed union {
trainer_battle_single: packed struct {
pointer1: lu32, // text
pointer2: lu32, // text
},
trainer_battle_continue_script_no_music: packed struct {
pointer1: lu32, // text
pointer2: lu32, // text
pointer3: lu32, // event script
},
trainer_battle_continue_script: packed struct {
pointer1: lu32, // text
pointer2: lu32, // text
pointer3: lu32, // event script
},
trainer_battle_single_no_intro_text: packed struct {
pointer1: lu32, // text
},
trainer_battle_double: packed struct {
pointer1: lu32, // text
pointer2: lu32, // text
pointer3: lu32, // text
},
trainer_battle_rematch: packed struct {
pointer1: lu32, // text
pointer2: lu32, // text
},
trainer_battle_continue_script_double: packed struct {
pointer1: lu32, // text
pointer2: lu32, // text
pointer3: lu32, // text
pointer4: lu32, // event script
},
trainer_battle_rematch_double: packed struct {
pointer1: lu32, // text
pointer2: lu32, // text
pointer3: lu32, // text
},
trainer_battle_continue_script_double_no_music: packed struct {
pointer1: lu32, // text
pointer2: lu32, // text
pointer3: lu32, // text
pointer4: lu32, // event script
},
trainer_battle_pyramid: packed struct {
pointer1: lu32, // text
pointer2: lu32, // text
},
trainer_battle_set_trainer_a: packed struct {
pointer1: lu32, // text
pointer2: lu32, // text
},
trainer_battle_set_trainer_b: packed struct {
pointer1: lu32, // text
pointer2: lu32, // text
},
trainer_battle12: packed struct {
pointer1: lu32, // text
pointer2: lu32, // text
},
},
};
pub const checktrainerflag = packed struct {
trainer: lu16,
};
pub const settrainerflag = packed struct {
trainer: lu16,
};
pub const cleartrainerflag = packed struct {
trainer: lu16,
};
pub const setobjectxyperm = packed struct {
index: lu16,
x: lu16,
y: lu16,
};
pub const moveobjectoffscreen = packed struct {
index: lu16,
};
pub const setobjectmovementtype = packed struct {
word: lu16,
byte: u8,
};
pub const message = packed struct {
text: gen3.Ptr([*:0xff]u8),
};
pub const yesnobox = packed struct {
x: u8,
y: u8,
};
pub const multichoice = packed struct {
x: u8,
y: u8,
list: u8,
b: u8,
};
pub const multichoicedefault = packed struct {
x: u8,
y: u8,
list: u8,
default: u8,
b: u8,
};
pub const multichoicegrid = packed struct {
x: u8,
y: u8,
list: u8,
per_row: u8,
b: u8,
};
pub const erasebox = packed struct {
byte1: u8,
byte2: u8,
byte3: u8,
byte4: u8,
};
pub const drawboxtext = packed struct {
byte1: u8,
byte2: u8,
byte3: u8,
byte4: u8,
};
pub const drawmonpic = packed struct {
species: lu16,
x: u8,
y: u8,
};
pub const drawcontestwinner = packed struct {
a: u8,
};
pub const braillemessage = packed struct {
text: lu32,
};
pub const givemon = packed struct {
species: lu16,
level: u8,
item: lu16,
unknown1: lu32,
unknown2: lu32,
unknown3: u8,
};
pub const giveegg = packed struct {
species: lu16,
};
pub const setmonmove = packed struct {
index: u8,
slot: u8,
move: lu16,
};
pub const checkpartymove = packed struct {
index: lu16,
};
pub const bufferspeciesname = packed struct {
out: u8,
species: lu16,
};
pub const bufferleadmonspeciesname = packed struct {
out: u8,
};
pub const bufferpartymonnick = packed struct {
out: u8,
slot: lu16,
};
pub const bufferitemname = packed struct {
out: u8,
item: lu16,
};
pub const bufferdecorationname = packed struct {
out: u8,
decoration: lu16,
};
pub const buffermovename = packed struct {
out: u8,
move: lu16,
};
pub const buffernumberstring = packed struct {
out: u8,
input: lu16,
};
pub const bufferstdstring = packed struct {
out: u8,
index: lu16,
};
pub const bufferstring = packed struct {
out: u8,
offset: lu32,
};
pub const pokemart = packed struct {
products: lu32,
};
pub const pokemartdecoration = packed struct {
products: lu32,
};
pub const pokemartdecoration2 = packed struct {
products: lu32,
};
pub const playslotmachine = packed struct {
word: lu16,
};
pub const setberrytree = packed struct {
tree_id: u8,
berry: u8,
growth_stage: u8,
};
pub const random = packed struct {
limit: lu16,
};
pub const givemoney = packed struct {
value: lu32,
check: u8,
};
pub const takemoney = packed struct {
value: lu32,
check: u8,
};
pub const checkmoney = packed struct {
value: lu32,
check: u8,
};
pub const showmoneybox = packed struct {
x: u8,
y: u8,
check: u8,
};
pub const updatemoneybox = packed struct {
x: u8,
y: u8,
};
pub const getpricereduction = packed struct {
index: lu16,
};
pub const fadescreen = packed struct {
effect: u8,
};
pub const fadescreenspeed = packed struct {
effect: u8,
speed: u8,
};
pub const setflashradius = packed struct {
word: lu16,
};
pub const animateflash = packed struct {
byte: u8,
};
pub const messageautoscroll = packed struct {
pointer: lu32,
};
pub const dofieldeffect = packed struct {
animation: lu16,
};
pub const setfieldeffectargument = packed struct {
argument: u8,
param: lu16,
};
pub const waitfieldeffect = packed struct {
animation: lu16,
};
pub const setrespawn = packed struct {
heallocation: lu16,
};
pub const playmoncry = packed struct {
species: lu16,
effect: lu16,
};
pub const setmetatile = packed struct {
x: lu16,
y: lu16,
metatile_number: lu16,
tile_attrib: lu16,
};
pub const setweather = packed struct {
type: lu16,
};
pub const setstepcallback = packed struct {
subroutine: u8,
};
pub const setmaplayoutindex = packed struct {
index: lu16,
};
pub const setobjectpriority = packed struct {
index: lu16,
map: lu16,
priority: u8,
};
pub const resetobjectpriority = packed struct {
index: lu16,
map: lu16,
};
pub const createvobject = packed struct {
sprite: u8,
byte2: u8,
x: lu16,
y: lu16,
elevation: u8,
direction: u8,
};
pub const turnvobject = packed struct {
index: u8,
direction: u8,
};
pub const opendoor = packed struct {
x: lu16,
y: lu16,
};
pub const closedoor = packed struct {
x: lu16,
y: lu16,
};
pub const setdooropen = packed struct {
x: lu16,
y: lu16,
};
pub const setdoorclosed = packed struct {
x: lu16,
y: lu16,
};
pub const addelevmenuitem = packed struct {
a: u8,
b: lu16,
c: lu16,
d: lu16,
};
pub const checkcoins = packed struct {
out: lu16,
};
pub const givecoins = packed struct {
count: lu16,
};
pub const takecoins = packed struct {
count: lu16,
};
pub const setwildbattle = packed struct {
species: lu16,
level: u8,
item: lu16,
};
pub const setvaddress = packed struct {
pointer: lu32,
};
pub const vgoto = packed struct {
pointer: lu32,
};
pub const vcall = packed struct {
pointer: lu32,
};
pub const vgoto_if = packed struct {
byte: u8,
pointer: lu32,
};
pub const vcall_if = packed struct {
byte: u8,
pointer: lu32,
};
pub const vmessage = packed struct {
pointer: lu32,
};
pub const vloadptr = packed struct {
pointer: lu32,
};
pub const vbufferstring = packed struct {
byte: u8,
pointer: lu32,
};
pub const showcoinsbox = packed struct {
x: u8,
y: u8,
};
pub const hidecoinsbox = packed struct {
x: u8,
y: u8,
};
pub const updatecoinsbox = packed struct {
x: u8,
y: u8,
};
pub const incrementgamestat = packed struct {
stat: u8,
};
pub const setescapewarp = packed struct {
map: lu16,
warp: u8,
x: lu16,
y: lu16,
};
pub const bufferboxname = packed struct {
out: u8,
box: lu16,
};
pub const textcolor = packed struct {
color: u8,
};
pub const loadhelp = packed struct {
pointer: lu32,
};
pub const comparehiddenvar = packed struct {
a: u8,
value: lu32,
};
pub const setmonobedient = packed struct {
slot: lu16,
};
pub const checkmonobedience = packed struct {
slot: lu16,
};
pub const setworldmapflag = packed struct {
worldmapflag: lu16,
};
pub const warpteleport2 = packed struct {
map: lu16,
warp: u8,
x: lu16,
y: lu16,
};
pub const setmonmetlocation = packed struct {
slot: lu16,
location: u8,
};
pub const mossdeepgym1 = packed struct {
unknown: lu16,
};
pub const mossdeepgym3 = packed struct {
@"var": lu16,
};
pub const warp7 = packed struct {
map: lu16,
byte: u8,
word1: lu16,
word2: lu16,
};
pub const message3 = packed struct {
pointer: lu32,
};
pub const fadescreenswapbuffers = packed struct {
byte: u8,
};
pub const buffertrainerclassname = packed struct {
out: u8,
class: lu16,
};
pub const buffertrainername = packed struct {
out: u8,
trainer: lu16,
};
pub const pokenavcall = packed struct {
pointer: lu32,
};
pub const warp8 = packed struct {
map: lu16,
byte: u8,
word1: lu16,
word2: lu16,
};
pub const buffercontesttypestring = packed struct {
out: u8,
word: lu16,
};
pub const bufferitemnameplural = packed struct {
out: u8,
item: lu16,
quantity: lu16,
};
}; | src/core/gen3/script.zig |
const std = @import("std");
const use_test_input = false;
const filename = if (use_test_input) "day-11_test-input" else "day-11_real-input";
const edge_length = 10;
pub fn main() !void {
std.debug.print("--- Day 11 ---\n", .{});
var file = try std.fs.cwd().openFile(filename, .{});
defer file.close();
var grid: [edge_length * edge_length]u8 = undefined;
{
var i: usize = 0;
while (i < grid.len):(i += 1) {
const char = try file.reader().readByte();
grid[i] = char - '0';
if (i % edge_length == edge_length - 1) {
try file.reader().skipUntilDelimiterOrEof('\n');
}
}
}
var iteration: u32 = 1;
while (iteration <= 1000):(iteration += 1) {
var flash_buffer: [3 * edge_length * edge_length * @sizeOf(usize)]u8 = undefined;
var flash_allocator = std.heap.FixedBufferAllocator.init(flash_buffer[0..]);
var flash_indices = try std.ArrayList(usize).initCapacity(flash_allocator.allocator(), edge_length * edge_length);
var handles_indices = try std.ArrayList(usize).initCapacity(flash_allocator.allocator(), edge_length * edge_length);
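        // Sentinel values used within a single step: 100 marks an octopus queued to
        // flash, 200 marks one that has already flashed; every flashed octopus is
        // reset to 0 once the step has been fully processed.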
for (grid) |*value, i| {
value.* += 1;
if (value.* > 9) {
value.* = 100;
try flash_indices.append(i);
}
}
while (flash_indices.items.len > 0) {
const flash_index = flash_indices.pop();
if (grid[flash_index] == 200) continue;
grid[flash_index] = 200;
try handles_indices.append(flash_index);
var coords = getCoords(flash_index);
for ([_]i8 { -1, 0, 1 }) |y_offset| {
for ([_]i8 { -1, 0, 1 }) |x_offset| {
if (x_offset == 0 and y_offset == 0) continue;
const x = coords.x + x_offset;
const y = coords.y + y_offset;
if (x < 0 or x >= edge_length) continue;
if (y < 0 or y >= edge_length) continue;
const i = getIndex(x, y);
if (grid[i] < 100) {
grid[i] += 1;
if (grid[i] > 9) {
grid[i] = 100;
try flash_indices.append(i);
}
}
}
}
}
if (handles_indices.items.len == edge_length * edge_length) {
std.debug.print("all octopuses flashed in iteration {}\n", .{ iteration });
break;
}
for (handles_indices.items) |i| {
grid[i] = 0;
}
}
}
fn printGrid(grid: []u8) void {
for (grid) |value, i| {
std.debug.print("{}", .{ value });
if (i % edge_length == edge_length - 1) {
std.debug.print("\n", .{});
}
}
}
fn getCoords(index: usize) struct { x: i32, y: i32, } {
return .{
.x = @intCast(i32, index % edge_length),
.y = @intCast(i32, index / edge_length),
};
}
fn getIndex(x: i32, y: i32) usize {
return @intCast(usize, y * edge_length + x);
} | day-11.zig |
pub const MDM_REGISTRATION_FACILITY_CODE = @as(u32, 25);
pub const DEVICE_ENROLLER_FACILITY_CODE = @as(u32, 24);
pub const MREGISTER_E_DEVICE_MESSAGE_FORMAT_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845247));
pub const MENROLL_E_DEVICE_MESSAGE_FORMAT_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910783));
pub const MREGISTER_E_DEVICE_AUTHENTICATION_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845246));
pub const MENROLL_E_DEVICE_AUTHENTICATION_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910782));
pub const MREGISTER_E_DEVICE_AUTHORIZATION_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845245));
pub const MENROLL_E_DEVICE_AUTHORIZATION_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910781));
pub const MREGISTER_E_DEVICE_CERTIFCATEREQUEST_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845244));
pub const MENROLL_E_DEVICE_CERTIFCATEREQUEST_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910780));
pub const MREGISTER_E_DEVICE_CONFIGMGRSERVER_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845243));
pub const MENROLL_E_DEVICE_CONFIGMGRSERVER_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910779));
pub const MREGISTER_E_DEVICE_INTERNALSERVICE_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845242));
pub const MENROLL_E_DEVICE_INTERNALSERVICE_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910778));
pub const MREGISTER_E_DEVICE_INVALIDSECURITY_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845241));
pub const MENROLL_E_DEVICE_INVALIDSECURITY_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910777));
pub const MREGISTER_E_DEVICE_UNKNOWN_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845240));
pub const MENROLL_E_DEVICE_UNKNOWN_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910776));
pub const MREGISTER_E_REGISTRATION_IN_PROGRESS = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845239));
pub const MENROLL_E_ENROLLMENT_IN_PROGRESS = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910775));
pub const MREGISTER_E_DEVICE_ALREADY_REGISTERED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845238));
pub const MENROLL_E_DEVICE_ALREADY_ENROLLED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910774));
pub const MREGISTER_E_DEVICE_NOT_REGISTERED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845237));
pub const MENROLL_E_DEVICE_NOT_ENROLLED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910773));
pub const MREGISTER_E_DISCOVERY_REDIRECTED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845236));
pub const MREGISTER_E_DEVICE_NOT_AD_REGISTERED_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845235));
pub const MENROLL_E_DISCOVERY_SEC_CERT_DATE_INVALID = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910771));
pub const MREGISTER_E_DISCOVERY_FAILED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145845234));
pub const MENROLL_E_PASSWORD_NEEDED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910770));
pub const MENROLL_E_WAB_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910769));
pub const MENROLL_E_CONNECTIVITY = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910768));
pub const MENROLL_S_ENROLLMENT_SUSPENDED = @import("../zig.zig").typedConst(HRESULT, @as(i32, 1572881));
pub const MENROLL_E_INVALIDSSLCERT = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910766));
pub const MENROLL_E_DEVICECAPREACHED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910765));
pub const MENROLL_E_DEVICENOTSUPPORTED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910764));
pub const MENROLL_E_NOTSUPPORTED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910763));
pub const MENROLL_E_NOTELIGIBLETORENEW = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910762));
pub const MENROLL_E_INMAINTENANCE = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910761));
pub const MENROLL_E_USERLICENSE = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910760));
pub const MENROLL_E_ENROLLMENTDATAINVALID = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910759));
pub const MENROLL_E_INSECUREREDIRECT = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910758));
pub const MENROLL_E_PLATFORM_WRONG_STATE = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910757));
pub const MENROLL_E_PLATFORM_LICENSE_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910756));
pub const MENROLL_E_PLATFORM_UNKNOWN_ERROR = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910755));
pub const MENROLL_E_PROV_CSP_CERTSTORE = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910754));
pub const MENROLL_E_PROV_CSP_W7 = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910753));
pub const MENROLL_E_PROV_CSP_DMCLIENT = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910752));
pub const MENROLL_E_PROV_CSP_PFW = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910751));
pub const MENROLL_E_PROV_CSP_MISC = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910750));
pub const MENROLL_E_PROV_UNKNOWN = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910749));
pub const MENROLL_E_PROV_SSLCERTNOTFOUND = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910748));
pub const MENROLL_E_PROV_CSP_APPMGMT = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910747));
pub const MENROLL_E_DEVICE_MANAGEMENT_BLOCKED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910746));
pub const MENROLL_E_CERTPOLICY_PRIVATEKEYCREATION_FAILED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910745));
pub const MENROLL_E_CERTAUTH_FAILED_TO_FIND_CERT = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910744));
pub const MENROLL_E_EMPTY_MESSAGE = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910743));
pub const MENROLL_E_USER_CANCELED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910742));
pub const MENROLL_E_MDM_NOT_CONFIGURED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2145910741));
pub const DEVICEREGISTRATIONTYPE_MDM_ONLY = @as(u32, 0);
pub const DEVICEREGISTRATIONTYPE_MAM = @as(u32, 5);
pub const DEVICEREGISTRATIONTYPE_MDM_DEVICEWIDE_WITH_AAD = @as(u32, 6);
pub const DEVICEREGISTRATIONTYPE_MDM_USERSPECIFIC_WITH_AAD = @as(u32, 13);
//--------------------------------------------------------------------------------
// Section: Types (3)
//--------------------------------------------------------------------------------
pub const MANAGEMENT_SERVICE_INFO = extern struct {
pszMDMServiceUri: ?PWSTR,
pszAuthenticationUri: ?PWSTR,
};
pub const MANAGEMENT_REGISTRATION_INFO = extern struct {
fDeviceRegisteredWithManagement: BOOL,
dwDeviceRegistionKind: u32,
pszUPN: ?PWSTR,
pszMDMServiceUri: ?PWSTR,
};
pub const REGISTRATION_INFORMATION_CLASS = enum(i32) {
DeviceRegistrationBasicInfo = 1,
MaxDeviceInfoClass = 2,
};
pub const DeviceRegistrationBasicInfo = REGISTRATION_INFORMATION_CLASS.DeviceRegistrationBasicInfo;
pub const MaxDeviceInfoClass = REGISTRATION_INFORMATION_CLASS.MaxDeviceInfoClass;
//--------------------------------------------------------------------------------
// Section: Functions (12)
//--------------------------------------------------------------------------------
// TODO: this type is limited to platform 'windows8.1'
pub extern "MDMRegistration" fn GetDeviceRegistrationInfo(
DeviceInformationClass: REGISTRATION_INFORMATION_CLASS,
ppDeviceRegistrationInfo: ?*?*c_void,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.1'
pub extern "MDMRegistration" fn IsDeviceRegisteredWithManagement(
pfIsDeviceRegisteredWithManagement: ?*BOOL,
cchUPN: u32,
pszUPN: ?[*:0]u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.1'
pub extern "MDMRegistration" fn IsManagementRegistrationAllowed(
pfIsManagementRegistrationAllowed: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
pub extern "MDMRegistration" fn IsMdmUxWithoutAadAllowed(
isEnrollmentAllowed: ?*BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.1'
pub extern "MDMRegistration" fn SetManagedExternally(
IsManagedExternally: BOOL,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.1'
pub extern "MDMRegistration" fn DiscoverManagementService(
pszUPN: ?[*:0]const u16,
ppMgmtInfo: ?*?*MANAGEMENT_SERVICE_INFO,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.1'
pub extern "MDMRegistration" fn RegisterDeviceWithManagementUsingAADCredentials(
UserToken: ?HANDLE,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.1'
pub extern "MDMRegistration" fn RegisterDeviceWithManagementUsingAADDeviceCredentials(
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.1'
pub extern "MDMRegistration" fn RegisterDeviceWithManagement(
pszUPN: ?[*:0]const u16,
ppszMDMServiceUri: ?[*:0]const u16,
ppzsAccessToken: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.1'
pub extern "MDMRegistration" fn UnregisterDeviceWithManagement(
enrollmentID: ?[*:0]const u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.1'
pub extern "MDMRegistration" fn GetManagementAppHyperlink(
cchHyperlink: u32,
pszHyperlink: [*:0]u16,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// TODO: this type is limited to platform 'windows8.1'
pub extern "MDMRegistration" fn DiscoverManagementServiceEx(
pszUPN: ?[*:0]const u16,
pszDiscoveryServiceCandidate: ?[*:0]const u16,
ppMgmtInfo: ?*?*MANAGEMENT_SERVICE_INFO,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
.ansi => struct {
},
.wide => struct {
},
.unspecified => if (@import("builtin").is_test) struct {
} else struct {
},
};
//--------------------------------------------------------------------------------
// Section: Imports (4)
//--------------------------------------------------------------------------------
const BOOL = @import("../foundation.zig").BOOL;
const HANDLE = @import("../foundation.zig").HANDLE;
const HRESULT = @import("../foundation.zig").HRESULT;
const PWSTR = @import("../foundation.zig").PWSTR;
test {
@setEvalBranchQuota(
@import("std").meta.declarations(@This()).len * 3
);
// reference all the pub declarations
if (!@import("builtin").is_test) return;
inline for (@import("std").meta.declarations(@This())) |decl| {
if (decl.is_pub) {
_ = decl;
}
}
} | deps/zigwin32/win32/management/mobile_device_management_registration.zig |
const std = @import("std");
const json = std.json;
const mem = std.mem;
const Allocator = mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const dict_module = @import("dict.zig");
const DictArrayUnmanaged = dict_module.DictArrayUnmanaged;
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const logger = std.log.scoped(.ft);
const util = @import("util.zig");
// probably only StrMgmt.copy should ever be used
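// .copy duplicates every string with the given allocator, .move takes ownership of
// the strings in the parsed JSON tree (blanking them in place), and .weak merely
// borrows them without owning.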
pub const StrMgmt = enum {
copy, move, weak,
pub fn asText(
comptime options: StrMgmt
) switch (options) {
.copy => @TypeOf("copy"),
.move => @TypeOf("move"),
        .weak => @TypeOf("weak"),
} {
return switch (options) {
.copy => "copy",
.move => "move",
.weak => "weak",
};
}
};
pub const Notes = struct {
/// comptime interfaces: [ deinit, readFromJson, clone/swap/move, ]
text: []const u8 = "",
child_nodes: DictArrayUnmanaged(Notes) = .{},
depth: u8 = 0,
const max_depth = 255;
pub const FromJsonError = error {
bad_type, bad_field,
allocator_required, max_depth_reached,
};
    pub const Error = util.ToJsonError || FromJsonError || DictArrayUnmanaged(Notes).Error;
pub fn deinit(this: *Notes, ator: Allocator) void {
logger.debug("Notes.deinit() w/ depth={d}, ator={*}", .{this.depth, ator.vtable});
ator.free(this.text);
this.deinitChildNodes(ator);
}
pub fn deinitChildNodes(this: *Notes, ator: Allocator) void {
logger.debug("Notes.deinitChildNotes() w/ depth={d}, ator={*}", .{this.depth, ator.vtable});
// var val_it = this.child_nodes.data.valueIterator();
// while (val_it.next()) |val_ptr| {
// val_ptr.deinit(ator);
// }
for (this.child_nodes.data.values()) |*val| {
val.deinit(ator);
}
this.child_nodes.deinit(ator); // child_nodes keys are freed here
}
pub fn readFromJson(
this: *Notes,
json_notes: *json.Value,
allocator: anytype,
comptime options: StrMgmt,
) Error!void {
AOCheck(allocator, options);
logger.debug("Notes.readFromJson() w/ depth={d}, options={s}", .{this.depth, options.asText()});
switch (json_notes.*) {
json.Value.Object => |map| {
inline for (@typeInfo(Notes).Struct.fields) |field| {
if (map.get(field.name)) |*val| {
switch (field.field_type) {
u8 => {
                                logger.warn("in Notes.readFromJson() found \"{s}\" field", .{field.name});
},
[]const u8 => {
switch (val.*) {
json.Value.String, json.Value.NumberString => {
try this.readFromJson(val, allocator, options);
},
else => {
logger.err(
"in Notes.readFromJson()" ++
" j_notes.get(\"text\")" ++
" is not of type {s}"
, .{"json.String"}
);
return FromJsonError.bad_field;
},
}
},
DictArrayUnmanaged(Notes) => {
if (allocatorCapture(allocator)) |ator| {
switch (val.*) {
json.Value.Object => |*nmap| {
this.child_nodes = try dictArrayFromJsonObj(
nmap, ator, options, this.depth
);
},
else => {
logger.err(
"in Notes.readFromJson()" ++
" j_notes.get(\"child_nodes\")" ++
" is not of type {s}"
, .{"json.ObjectMap"}
);
return FromJsonError.bad_field;
},
}
} else {
logger.err(
\\in Notes.readFromJson()
\\ allocator required
, .{}
);
return FromJsonError.allocator_required;
}
},
else => {
@compileError("Notes.readFromJson() nonexhaustive switch on field_type");
},
}
}
}
},
json.Value.String, json.Value.NumberString => |*str| {
switch (options) {
.copy => {
if (allocatorCapture(allocator)) |ator| {
this.text = try strCopyAlloc(str.*, ator);
} else {
unreachable; // AOCheck()
}
},
.move => {
this.text = str.*;
str.* = "";
},
.weak => {
this.text = str.*;
},
}
},
else => {
logger.err(
"in Notes.readFromJson() j_notes is of neither type" ++
" {s} nor {s}"
, .{"json.ObjectMap", "json.String"}
);
return FromJsonError.bad_type;
},
}
}
fn dictArrayFromJsonObj(
obj: *json.ObjectMap,
ator: Allocator,
comptime options: StrMgmt,
depth: u8,
) Error!DictArrayUnmanaged(Notes) {
        logger.debug("Notes.dictArrayFromJsonObj() on depth={d}", .{depth});
if (max_depth == depth) {
logger.err(
\\in Notes.readFromJson() max depth reached
, .{}
);
return FromJsonError.max_depth_reached;
}
var res = DictArrayUnmanaged(Notes){};
errdefer {
var val_it = res.valueIterator();
while (val_it.next()) |val_ptr| {
val_ptr.deinit(ator);
}
res.deinit(ator);
}
var j_it = obj.iterator();
while (j_it.next()) |entry| {
if (res.contains(entry.key_ptr.*)) {
logger.warn(
\\in Notes.readFromJson() repeated key '{s}' in j_note_dict
\\ skipping...
, .{entry.key_ptr.*}
);
} else {
var notes = Notes{};
errdefer notes.deinit(ator);
notes.depth = depth + 1;
try notes.readFromJson(entry.value_ptr, ator, options);
switch (options) {
.copy => {
try res.putNoClobber(entry.key_ptr.*, notes, ator, .{.kopy=true});
},
.move => {
try res.putNoClobber(entry.key_ptr.*, notes, ator, .{.kopy=false});
entry.key_ptr.* = "";
},
.weak => {
try res.putNoClobber(entry.key_ptr.*, notes, ator, .{.kopy=false});
},
}
}
}
return res;
}
    pub fn clone(self: Notes, ator: Allocator) !Notes {
        logger.debug("Notes.clone() w/ ator={*}", .{ator.vtable});
        var copy = Notes{};
        errdefer copy.deinit(ator);
        copy.depth = self.depth;
        copy.text = try strCopyAlloc(self.text, ator);
        // errdefer copy.deinit() handles copy.text deallocation
        var s_it = self.child_nodes.iterator();
        while (s_it.next()) |s_entry| {
            var copy_notes = try s_entry.value_ptr.clone(ator);
            errdefer copy_notes.deinit(ator);
            // .kopy=true lets the dict store its own copy of the key string
            try copy.child_nodes.putNoClobber(s_entry.key_ptr.*, copy_notes, ator, .{ .kopy = true });
        }
        return copy;
    }
pub fn swap(this: *Notes, other: *Notes) void {
logger.debug("Notes.swap()", .{});
var swapper = other.move();
other.* = this.*;
this.* = swapper.move();
}
    pub fn move(this: *Notes) Notes {
        logger.debug("Notes.move()", .{});
        var res = Notes{ .text = this.text, .child_nodes = this.child_nodes, .depth = this.depth };
        this.* = Notes{};
        return res;
    }
fn AOCheck(allocator: anytype, comptime options: StrMgmt) void {
switch (options) {
.copy => switch (@TypeOf(allocator)) {
Allocator => {},
@TypeOf(null) => @compileError("Notes: can't .copy strings w\\o allocator, did you mean .weak?"),
else => @compileError("Notes: nonexhaustive switch in AOCheck()"),
},
.move, .weak => {},
}
}
fn allocatorCapture(allocator: anytype) ?Allocator {
switch (@TypeOf(allocator)) {
Allocator => return allocator,
@TypeOf(null) => return null,
else => @compileError("Notes: nonexhaustive switch in allocatorCapture()"),
}
}
};
const testing = std.testing;
const expect = testing.expect;
const expectError = testing.expectError;
const tator = testing.allocator;
const simple_text_source =
\\"simple text"
;
const nested_1_source =
\\{
\\ "text": "depth 0",
\\ "child_nodes": {
\\ "node1": "simple dimple"
\\ }
\\}
;
const nested_2_source =
\\{
\\ "text": "depth 0",
\\ "child_nodes": {
\\ "node1": {
\\ "text": "depth 1",
\\ "child_nodes": {
\\ "node2": "pop it"
\\ }
\\ }
\\ }
\\}
;
fn testBasicAnswers(notes: *Notes) !void {
if (notes.child_nodes.count() == 0) {
try expect(strEqual("simple text", notes.text));
} else {
try expect(strEqual("depth 0", notes.text));
if (notes.child_nodes.get("node1")) |*node1| {
try expect(strEqual("simple dimple", node1.text) or strEqual("depth 1", node1.text));
if (node1.child_nodes.get("node2")) |*node2| {
try expect(strEqual("pop it", node2.text));
}
} else {
unreachable;
}
}
}
fn testBasic(src: []const u8, comptime options: StrMgmt) !void {
var parser = json.Parser.init(tator, false);
defer parser.deinit();
var tree = try parser.parse(src);
defer tree.deinit();
var notes = Notes{};
try notes.readFromJson(&tree.root, tator, options);
try testBasicAnswers(¬es);
switch (options) {
.copy => {
defer notes.deinit(tator);
},
.weak, .move => {
defer notes.child_nodes.data.deinit(tator);
defer {
if (notes.child_nodes.count() > 0) {
var n_it = notes.child_nodes.iterator();
while (n_it.next()) |node_entry| {
node_entry.value_ptr.child_nodes.data.deinit(tator);
}
}
}
defer {
if (notes.child_nodes.count() > 0) {
var n_it = notes.child_nodes.iterator();
while (n_it.next()) |node_entry| {
if (node_entry.value_ptr.child_nodes.count() > 0) {
var nn_it = node_entry.value_ptr.child_nodes.iterator();
while (nn_it.next()) |nnode_entry| {
nnode_entry.value_ptr.child_nodes.data.deinit(tator);
}
}
}
}
}
},
}
}
test "basic" {
try testBasic(simple_text_source, .copy);
try testBasic(simple_text_source, .move);
try testBasic(simple_text_source, .weak);
try testBasic(nested_1_source, .copy);
try testBasic(nested_1_source, .move);
try testBasic(nested_1_source, .weak);
try testBasic(nested_2_source, .copy);
try testBasic(nested_2_source, .move);
try testBasic(nested_2_source, .weak);
}
const bad_type_0_source =
\\1
;
const bad_type_1_source =
\\{"child_nodes": {"node": 1}}
;
const bad_field_0_source =
\\{"text": 1}
;
const bad_field_1_source =
\\{"text": "simple text", "child_nodes": 1}
;
fn testError(eerr: anyerror, src: []const u8) !void {
var parser = json.Parser.init(tator, false);
defer parser.deinit();
var tree = try parser.parse(src);
defer tree.deinit();
var notes = Notes{};
defer notes.deinit(tator);
try expectError(eerr, notes.readFromJson(&tree.root, tator, .copy));
}
test "errors" {
try testError(Notes.FromJsonError.bad_type, bad_type_0_source);
try testError(Notes.FromJsonError.bad_type, bad_type_1_source);
try testError(Notes.FromJsonError.bad_field, bad_field_0_source);
try testError(Notes.FromJsonError.bad_field, bad_field_1_source);
}
test "to json" {
var parser = json.Parser.init(tator, false);
defer parser.deinit();
var tree = try parser.parse(nested_2_source);
defer tree.deinit();
var notes = Notes{};
try notes.readFromJson(&tree.root, tator, .copy);
defer notes.deinit(tator);
var j_notes_ = try util.toJson(notes, tator, .{});
defer j_notes_.deinit();
var j_notes = j_notes_.value.Object;
try expect(util.strEqual(notes.text, j_notes.get("text").?.String));
var subnotes = notes.child_nodes.get("node1").?;
var j_subnotes = j_notes.get("child_nodes").?.Object;
try expect(util.strEqual(subnotes.text, j_subnotes.get("node1").?.Object.get("text").?.String));
var subsubnotes = subnotes.child_nodes.get("node2").?;
var j_subsubnotes = j_subnotes.get("node1").?.Object.get("child_nodes").?.Object;
try expect(util.strEqual(subsubnotes.text, j_subsubnotes.get("node2").?.Object.get("text").?.String));
}
fn strCopyAlloc(from: []const u8, ator: Allocator) ![]u8 {
var res = try ator.alloc(u8, from.len);
for (from) |c, i| {
res[i] = c;
}
return res;
}
fn strEqual(lhs: []const u8, rhs: []const u8) bool {
if (lhs.len != rhs.len)
return false;
for (lhs) |c, i| {
if (c != rhs[i])
return false;
}
return true;
} | src/notes.zig |
const std = @import("std");
const ir = @import("ir.zig");
const trace = @import("tracy.zig").trace;
/// Perform Liveness Analysis over the `Body`. Each `Inst` will have its `deaths` field populated.
pub fn analyze(
/// Used for temporary storage during the analysis.
gpa: *std.mem.Allocator,
/// Used to tack on extra allocations in the same lifetime as the existing instructions.
arena: *std.mem.Allocator,
body: ir.Body,
) error{OutOfMemory}!void {
const tracy = trace(@src());
defer tracy.end();
var table = std.AutoHashMap(*ir.Inst, void).init(gpa);
defer table.deinit();
try table.ensureCapacity(body.instructions.len);
try analyzeWithTable(arena, &table, body);
}
fn analyzeWithTable(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void), body: ir.Body) error{OutOfMemory}!void {
var i: usize = body.instructions.len;
while (i != 0) {
i -= 1;
const base = body.instructions[i];
try analyzeInst(arena, table, base);
}
}
fn analyzeInst(arena: *std.mem.Allocator, table: *std.AutoHashMap(*ir.Inst, void), base: *ir.Inst) error{OutOfMemory}!void {
if (table.contains(base)) {
base.deaths = 0;
} else {
// No tombstone for this instruction means it is never referenced,
// and its birth marks its own death. Very metal 🤘
base.deaths = 1 << ir.Inst.unreferenced_bit_index;
}
switch (base.tag) {
.constant => return,
.block => {
const inst = base.castTag(.block).?;
try analyzeWithTable(arena, table, inst.body);
// We let this continue so that it can possibly mark the block as
// unreferenced below.
},
.condbr => {
const inst = base.castTag(.condbr).?;
var true_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
defer true_table.deinit();
try true_table.ensureCapacity(inst.then_body.instructions.len);
try analyzeWithTable(arena, &true_table, inst.then_body);
var false_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator);
defer false_table.deinit();
try false_table.ensureCapacity(inst.else_body.instructions.len);
try analyzeWithTable(arena, &false_table, inst.else_body);
// Each death that occurs inside one branch, but not the other, needs
// to be added as a death immediately upon entering the other branch.
// During the iteration of the table, we additionally propagate the
// deaths to the parent table.
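            // E.g. an operand whose last use is inside the else body (a death present
            // only in false_table) is appended to true_entry_deaths so the then body
            // treats it as dead on entry; the mirrored case fills false_entry_deaths.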
var true_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
defer true_entry_deaths.deinit();
var false_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator);
defer false_entry_deaths.deinit();
{
var it = false_table.iterator();
while (it.next()) |entry| {
const false_death = entry.key;
if (!true_table.contains(false_death)) {
try true_entry_deaths.append(false_death);
// Here we are only adding to the parent table if the following iteration
// would miss it.
try table.putNoClobber(false_death, {});
}
}
}
{
var it = true_table.iterator();
while (it.next()) |entry| {
const true_death = entry.key;
try table.putNoClobber(true_death, {});
if (!false_table.contains(true_death)) {
try false_entry_deaths.append(true_death);
}
}
}
inst.true_death_count = std.math.cast(@TypeOf(inst.true_death_count), true_entry_deaths.items.len) catch return error.OutOfMemory;
inst.false_death_count = std.math.cast(@TypeOf(inst.false_death_count), false_entry_deaths.items.len) catch return error.OutOfMemory;
const allocated_slice = try arena.alloc(*ir.Inst, true_entry_deaths.items.len + false_entry_deaths.items.len);
inst.deaths = allocated_slice.ptr;
// Continue on with the instruction analysis. The following code will find the condition
// instruction, and the deaths flag for the CondBr instruction will indicate whether the
// condition's lifetime ends immediately before entering any branch.
},
else => {},
}
const needed_bits = base.operandCount();
if (needed_bits <= ir.Inst.deaths_bits) {
var bit_i: ir.Inst.DeathsBitIndex = 0;
while (base.getOperand(bit_i)) |operand| : (bit_i += 1) {
const prev = try table.fetchPut(operand, {});
if (prev == null) {
// Death.
base.deaths |= @as(ir.Inst.DeathsInt, 1) << bit_i;
}
}
} else {
@panic("Handle liveness analysis for instructions with many parameters");
}
std.log.debug(.liveness, "analyze {}: 0b{b}\n", .{ base.tag, base.deaths });
} | src-self-hosted/liveness.zig |
const Output = @This();
const std = @import("std");
const log = std.log;
const math = std.math;
const mem = std.mem;
const os = std.os;
const wayland = @import("wayland");
const wl = wayland.client.wl;
const wp = wayland.client.wp;
const ext = wayland.client.ext;
const Lock = @import("Lock.zig");
const gpa = std.heap.c_allocator;
lock: *Lock,
name: u32,
wl_output: *wl.Output,
surface: ?*wl.Surface = null,
viewport: ?*wp.Viewport = null,
lock_surface: ?*ext.SessionLockSurfaceV1 = null,
// These fields are not used before the first configure is received.
width: u31 = undefined,
height: u31 = undefined,
pub fn create_surface(output: *Output) !void {
const surface = try output.lock.compositor.?.createSurface();
output.surface = surface;
const lock_surface = try output.lock.session_lock.?.getLockSurface(surface, output.wl_output);
lock_surface.setListener(*Output, lock_surface_listener, output);
output.lock_surface = lock_surface;
output.viewport = try output.lock.viewporter.?.getViewport(surface);
}
pub fn destroy(output: *Output) void {
output.wl_output.release();
if (output.viewport) |viewport| viewport.destroy();
if (output.surface) |surface| surface.destroy();
if (output.lock_surface) |lock_surface| lock_surface.destroy();
const node = @fieldParentPtr(std.SinglyLinkedList(Output).Node, "data", output);
output.lock.outputs.remove(node);
gpa.destroy(node);
}
fn lock_surface_listener(
_: *ext.SessionLockSurfaceV1,
event: ext.SessionLockSurfaceV1.Event,
output: *Output,
) void {
const lock = output.lock;
switch (event) {
.configure => |ev| {
output.width = @truncate(u31, ev.width);
output.height = @truncate(u31, ev.height);
output.lock_surface.?.ackConfigure(ev.serial);
output.attach_buffer(lock.buffers[@enumToInt(lock.color)]);
},
}
}
pub fn attach_buffer(output: *Output, buffer: *wl.Buffer) void {
output.surface.?.attach(buffer, 0, 0);
output.surface.?.damageBuffer(0, 0, math.maxInt(i32), math.maxInt(i32));
output.viewport.?.setDestination(output.width, output.height);
output.surface.?.commit();
} | src/Output.zig |
const std = @import("std");
const with_trace = true;
const assert = std.debug.assert;
fn trace(comptime fmt: []const u8, args: anytype) void {
if (with_trace) std.debug.print(fmt, args);
}
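// Returns the two numbers that follow `pattern` in `line` (tokenized on the chars in
// `sep`), or null when `line` does not start with `pattern`.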
fn matches(line: []const u8, pattern: []const u8, sep: []const u8) ?[2]u32 {
if (line.len < pattern.len)
return null;
if (!std.mem.eql(u8, line[0..pattern.len], pattern))
return null;
var it = std.mem.tokenize(u8, line[pattern.len..], sep);
const v1 = it.next().?;
const v2 = it.next().?;
return [_]u32{
std.fmt.parseInt(u32, v1, 10) catch unreachable,
std.fmt.parseInt(u32, v2, 10) catch unreachable,
};
}
pub fn main() anyerror!void {
const stdout = std.io.getStdOut().writer();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = arena.allocator();
const limit = 1 * 1024 * 1024 * 1024;
const text = try std.fs.cwd().readFileAlloc(allocator, "day8.txt", limit);
defer allocator.free(text);
const width = 50;
const height = 6;
const stride = width + 1;
var screen = ([1]u8{' '} ** width ++ [1]u8{'\n'}) ** height;
    trace("screen=\n{s}\n", .{screen});
{
var it = std.mem.tokenize(u8, text, "\n");
while (it.next()) |line_full| {
const line = std.mem.trim(u8, line_full, " \n\r\t");
if (line.len == 0)
continue;
if (matches(line, "rect ", " x")) |size| {
var y: u32 = 0;
while (y < size[1]) : (y += 1) {
std.mem.set(u8, screen[y * stride + 0 .. y * stride + size[0]], '#');
}
} else if (matches(line, "rotate row y=", " by")) |rotrow| {
const y = rotrow[0];
const r = rotrow[1];
const row = screen[y * stride + 0 .. y * stride + width];
var oldrow: [width]u8 = undefined;
std.mem.copy(u8, &oldrow, row);
var x: u32 = 0;
while (x < width) : (x += 1) {
row[x] = oldrow[(x + width - r) % width];
}
} else if (matches(line, "rotate column x=", " by")) |rotcol| {
const x = rotcol[0];
const r = rotcol[1];
var oldcol: [height]u8 = undefined;
var y: u32 = 0;
while (y < height) : (y += 1) {
oldcol[y] = screen[x + y * stride];
}
y = 0;
while (y < height) : (y += 1) {
screen[x + y * stride] = oldcol[(y + height - r) % height];
}
}
            trace("line={s}, screen=\n{s}\n", .{ line, screen });
}
}
const count = blk: {
var c: u32 = 0;
for (screen) |p| {
if (p == '#') c += 1;
}
break :blk c;
};
try stdout.print("answer='{}'\n", .{count});
} | 2016/day8.zig |
const std = @import("std");
const tvg = @import("tvg.zig");
fn JoinLength(comptime T: type) comptime_int {
const info = @typeInfo(T);
var len: usize = 0;
inline for (info.Struct.fields) |fld| {
len += @typeInfo(fld.field_type).Array.len;
}
return len;
}
fn join(list: anytype) [JoinLength(@TypeOf(list))]u8 {
const T = @TypeOf(list);
const info = @typeInfo(T);
var array = [1]u8{0x55} ** JoinLength(T);
comptime var offset: usize = 0;
inline for (info.Struct.fields) |fld, i| {
const len = @typeInfo(fld.field_type).Array.len;
std.mem.copy(u8, array[offset .. offset + len], &list[i]);
offset += len;
}
return array;
}
fn writeU16(buf: *[2]u8, value: u16) void {
buf[0] = @truncate(u8, value >> 0);
buf[1] = @truncate(u8, value >> 8);
}
pub const Gradient = struct {
point_0: tvg.Point,
point_1: tvg.Point,
color_0: u7,
color_1: u7,
};
pub const StyleSpec = enum(u2) {
flat = 0,
linear = 1,
radial = 2,
fn byteSize(self: @This()) usize {
return switch (self) {
.flat => 1,
.linear, .radial => 10,
};
}
};
fn StyleType(comptime spec: StyleSpec) type {
return switch (spec) {
.flat => u7,
.linear => Gradient,
.radial => Gradient,
};
}
// TODO: Add 8 or 16 bit precision option
pub fn create(comptime scale: tvg.Scale) type {
return struct {
pub fn unit(value: f32) [2]u8 {
var buf: [2]u8 = undefined;
writeU16(&buf, @bitCast(u16, scale.map(value).raw()));
return buf;
}
pub fn byte(val: u8) [1]u8 {
return [1]u8{val};
}
fn command(cmd: tvg.format.Command) [1]u8 {
return [1]u8{@enumToInt(cmd)};
}
pub fn point(x: f32, y: f32) [4]u8 {
return join(.{ unit(x), unit(y) });
}
pub fn header(width: f32, height: f32) [8]u8 {
return join(.{
tvg.magic_number,
byte(tvg.current_version),
byte(@enumToInt(scale)),
unit(width),
unit(height),
});
}
pub fn colorTable(comptime colors: []const tvg.Color) [2 + 4 * colors.len]u8 {
var buf: [2 + 4 * colors.len]u8 = undefined;
std.mem.set(u8, &buf, 0x55);
writeU16(buf[0..2], @intCast(u16, colors.len));
for (colors) |c, i| {
buf[2 + 4 * i + 0] = c.r;
buf[2 + 4 * i + 1] = c.g;
buf[2 + 4 * i + 2] = c.b;
buf[2 + 4 * i + 3] = c.a;
}
return buf;
}
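        // Packs the style selector into the top two bits and the item count into the
        // low six bits; a full count of 64 is encoded as 0 (a count of 0 is invalid).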
fn countAndStyle(items: usize, style_type: StyleSpec) [1]u8 {
std.debug.assert(items > 0);
std.debug.assert(items <= 64);
const style = @enumToInt(style_type);
return .{(@as(u8, style) << 6) | if (items == 64) @as(u6, 0) else @truncate(u6, items)};
}
fn gradient(grad: Gradient) [10]u8 {
return join(.{
point(grad.point_0.x, grad.point_0.y),
point(grad.point_1.x, grad.point_1.y),
byte(grad.color_0),
byte(grad.color_1),
});
}
fn encodeStyle(comptime style_type: StyleSpec, value: StyleType(style_type)) [style_type.byteSize()]u8 {
return switch (style_type) {
.flat => byte(value),
.linear, .radial => gradient(value),
};
}
pub fn fillPolygon(num_items: usize, comptime style_type: StyleSpec, style: StyleType(style_type)) [2 + style_type.byteSize()]u8 {
return join(.{ command(.fill_polygon), countAndStyle(num_items, style_type), encodeStyle(style_type, style) });
}
pub fn fillRectangles(num_items: usize, comptime style_type: StyleSpec, style: StyleType(style_type)) [2 + style_type.byteSize()]u8 {
return join(.{ command(.fill_rectangles), countAndStyle(num_items, style_type), encodeStyle(style_type, style) });
}
pub fn fillPath(num_items: usize, comptime style_type: StyleSpec, style: StyleType(style_type)) [2 + style_type.byteSize()]u8 {
return join(.{ command(.fill_path), countAndStyle(num_items, style_type), encodeStyle(style_type, style) });
}
pub fn drawLines(num_items: usize, line_width: f32, comptime style_type: StyleSpec, style: StyleType(style_type)) [4 + style_type.byteSize()]u8 {
return join(.{ command(.draw_lines), countAndStyle(num_items, style_type), encodeStyle(style_type, style), unit(line_width) });
}
pub fn drawLineLoop(num_items: usize, line_width: f32, comptime style_type: StyleSpec, style: StyleType(style_type)) [4 + style_type.byteSize()]u8 {
return join(.{ command(.draw_line_loop), countAndStyle(num_items - 1, style_type), encodeStyle(style_type, style), unit(line_width) });
}
pub fn drawLineStrip(num_items: usize, line_width: f32, comptime style_type: StyleSpec, style: StyleType(style_type)) [4 + style_type.byteSize()]u8 {
return join(.{ command(.draw_line_strip), countAndStyle(num_items - 1, style_type), encodeStyle(style_type, style), unit(line_width) });
}
pub fn drawPath(num_items: usize, line_width: f32, comptime style_type: StyleSpec, style: StyleType(style_type)) [4 + style_type.byteSize()]u8 {
return join(.{ command(.draw_line_path), countAndStyle(num_items, style_type), encodeStyle(style_type, style), unit(line_width) });
}
pub fn outlineFillPolygon(
num_items: usize,
line_width: f32,
comptime fill_style_type: StyleSpec,
fill_style: StyleType(fill_style_type),
comptime line_style_type: StyleSpec,
line_style: StyleType(line_style_type),
) [5 + fill_style_type.byteSize() + line_style_type.byteSize()]u8 {
return join(.{ command(.outline_fill_polygon), countAndStyle(num_items, fill_style_type), byte(@enumToInt(line_style_type)), encodeStyle(line_style_type, line_style), encodeStyle(fill_style_type, fill_style), unit(line_width) });
}
pub fn outlineFillRectangles(
num_items: usize,
line_width: f32,
comptime fill_style_type: StyleSpec,
fill_style: StyleType(fill_style_type),
comptime line_style_type: StyleSpec,
line_style: StyleType(line_style_type),
) [5 + fill_style_type.byteSize() + line_style_type.byteSize()]u8 {
return join(.{ command(.outline_fill_rectangles), countAndStyle(num_items, fill_style_type), byte(@enumToInt(line_style_type)), encodeStyle(line_style_type, line_style), encodeStyle(fill_style_type, fill_style), unit(line_width) });
}
pub fn outlineFillPath(
num_items: usize,
line_width: f32,
comptime fill_style_type: StyleSpec,
fill_style: StyleType(fill_style_type),
comptime line_style_type: StyleSpec,
line_style: StyleType(line_style_type),
) [5 + fill_style_type.byteSize() + line_style_type.byteSize()]u8 {
return join(.{ command(.outline_fill_path), countAndStyle(num_items, fill_style_type), byte(@enumToInt(line_style_type)), encodeStyle(line_style_type, line_style), encodeStyle(fill_style_type, fill_style), unit(line_width) });
}
pub fn rectangle(x: f32, y: f32, w: f32, h: f32) [8]u8 {
return join(.{ unit(x), unit(y), unit(w), unit(h) });
}
pub const path = struct {
pub fn line(x: f32, y: f32) [5]u8 {
return join(.{ byte(0), point(x, y) });
}
pub fn horiz(x: f32) [3]u8 {
return join(.{ byte(1), unit(x) });
}
pub fn vert(y: f32) [3]u8 {
return join(.{ byte(2), unit(y) });
}
pub fn bezier(c0x: f32, c0y: f32, c1x: f32, c1y: f32, p1x: f32, p1y: f32) [13]u8 {
return join(.{ byte(3), point(c0x, c0y), point(c1x, c1y), point(p1x, p1y) });
}
pub fn arc_circle(radius: f32, large_arc: bool, sweep: bool, p1x: f32, p1y: f32) [8]u8 {
const flag: u8 = (if (large_arc) @as(u8, 1) else 0) | (if (sweep) @as(u8, 2) else 0);
return join(.{ byte(4), byte(flag), unit(radius), point(p1x, p1y) });
}
pub fn arc_ellipse(radius_x: f32, radius_y: f32, rotation: f32, large_arc: bool, sweep: bool, p1x: f32, p1y: f32) [12]u8 {
const flag: u8 = (if (large_arc) @as(u8, 1) else 0) | (if (sweep) @as(u8, 2) else 0);
return join(.{ byte(5), byte(flag), unit(radius_x), unit(radius_y), unit(rotation), point(p1x, p1y) });
}
pub fn close() [1]u8 {
return byte(6);
}
};
pub const end_of_document = [1]u8{0x00};
};
}
const test_builder = create(.@"1/256");
test "join" {
std.testing.expectEqualSlices(
u8,
&[_]u8{ 1, 2, 3, 4, 5, 6, 7 },
&join(.{ [_]u8{ 1, 2 }, [_]u8{ 3, 4, 5, 6 }, [_]u8{7} }),
);
}
test "Builder.unit" {
std.testing.expectEqualSlices(u8, &[_]u8{ 0, 1 }, &create(.@"1/256").unit(1));
std.testing.expectEqualSlices(u8, &[_]u8{ 0, 1 }, &create(.@"1/16").unit(16));
std.testing.expectEqualSlices(u8, &[_]u8{ 0, 2 }, &create(.@"1/16").unit(32));
std.testing.expectEqualSlices(u8, &[_]u8{ 1, 0 }, &create(.@"1/1").unit(1));
}
test "Builder.byte" {
std.testing.expectEqual([_]u8{1}, test_builder.byte(1));
std.testing.expectEqual([_]u8{4}, test_builder.byte(4));
std.testing.expectEqual([_]u8{255}, test_builder.byte(255));
} | src/lib/builder.zig |
pub const HeaderType = enum {
Accept,
AcceptCharset,
AcceptEncoding,
AcceptLanguage,
AcceptRanges,
AccessControlAllowCredentials,
AccessControlAllowHeaders,
AccessControlAllowMethods,
AccessControlAllowOrigin,
AccessControlExposeHeaders,
AccessControlMaxAge,
AccessControlRequestHeaders,
AccessControlRequestMethod,
Age,
Allow,
AltSvc,
Authorization,
CacheControl,
Connection,
ContentDisposition,
ContentEncoding,
ContentLanguage,
ContentLength,
ContentLocation,
ContentRange,
ContentSecurityPolicy,
ContentSecurityPolicyReportOnly,
ContentType,
Cookie,
Custom,
Dnt,
Date,
Etag,
Expect,
Expires,
Forwarded,
From,
Host,
IfMatch,
IfModifiedSince,
IfNoneMatch,
IfRange,
IfUnmodifiedSince,
LastModified,
Link,
Location,
MaxForwards,
Origin,
Pragma,
ProxyAuthenticate,
ProxyAuthorization,
PublicKeyPins,
Range,
Referer,
ReferrerPolicy,
Refresh,
RetryAfter,
SecWebSocketAccept,
SecWebSocketExtensions,
SecWebSocketKey,
SecWebSocketProtocol,
SecWebSocketVersion,
Server,
SetCookie,
StrictTransportSecurity,
Te,
Trailer,
TransferEncoding,
UserAgent,
Upgrade,
UpgradeInsecureRequests,
Vary,
Via,
Warning,
WwwAuthenticate,
XContentTypeOptions,
XDnsPrefetchControl,
XFrameOptions,
XXssProtection,
fn lowercased_equals(lowered: []const u8, value: []const u8) callconv(.Inline) bool {
if (lowered.len != value.len) {
return false;
}
for (value) |char, i| {
if (HEADER_NAME_MAP[char] != lowered[i]) {
return false;
}
}
return true;
}
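    // Resolves a header name by dispatching on its length and then doing a
    // case-insensitive comparison against the known lowercase names (HEADER_NAME_MAP
    // canonicalizes each input byte); anything unrecognized becomes .Custom.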
pub fn from_bytes(value: []const u8) HeaderType {
switch (value.len) {
2 => {
if (lowercased_equals("te", value)) {
return .Te;
}
},
3 => {
if (lowercased_equals("age", value)) {
return .Age;
} else if (lowercased_equals("dnt", value)) {
return .Dnt;
} else if (lowercased_equals("via", value)) {
return .Via;
}
},
4 => {
if (lowercased_equals("host", value)) {
return .Host;
} else if (lowercased_equals("date", value)) {
return .Date;
} else if (lowercased_equals("etag", value)) {
return .Etag;
} else if (lowercased_equals("from", value)) {
return .From;
} else if (lowercased_equals("link", value)) {
return .Link;
} else if (lowercased_equals("vary", value)) {
return .Vary;
}
},
5 => {
if (lowercased_equals("allow", value)) {
return .Allow;
} else if (lowercased_equals("range", value)) {
return .Range;
}
},
6 => {
if (lowercased_equals("accept", value)) {
return .Accept;
} else if (lowercased_equals("cookie", value)) {
return .Cookie;
} else if (lowercased_equals("expect", value)) {
return .Expect;
} else if (lowercased_equals("origin", value)) {
return .Origin;
} else if (lowercased_equals("pragma", value)) {
return .Pragma;
} else if (lowercased_equals("server", value)) {
return .Server;
}
},
7 => {
if (lowercased_equals("alt-svc", value)) {
return .AltSvc;
} else if (lowercased_equals("expires", value)) {
return .Expires;
} else if (lowercased_equals("referer", value)) {
return .Referer;
} else if (lowercased_equals("refresh", value)) {
return .Refresh;
} else if (lowercased_equals("trailer", value)) {
return .Trailer;
} else if (lowercased_equals("upgrade", value)) {
return .Upgrade;
} else if (lowercased_equals("warning", value)) {
return .Warning;
}
},
8 => {
if (lowercased_equals("if-match", value)) {
return .IfMatch;
} else if (lowercased_equals("if-range", value)) {
return .IfRange;
} else if (lowercased_equals("location", value)) {
return .Location;
}
},
9 => {
if (lowercased_equals("forwarded", value)) {
return .Forwarded;
}
},
10 => {
if (lowercased_equals("connection", value)) {
return .Connection;
} else if (lowercased_equals("set-cookie", value)) {
return .SetCookie;
} else if (lowercased_equals("user-agent", value)) {
return .UserAgent;
}
},
11 => {
if (lowercased_equals("retry-after", value)) {
return .RetryAfter;
}
},
12 => {
if (lowercased_equals("content-type", value)) {
return .ContentType;
} else if (lowercased_equals("max-forwards", value)) {
return .MaxForwards;
}
},
13 => {
if (lowercased_equals("accept-ranges", value)) {
return .AcceptRanges;
} else if (lowercased_equals("authorization", value)) {
return .Authorization;
} else if (lowercased_equals("cache-control", value)) {
return .CacheControl;
} else if (lowercased_equals("content-range", value)) {
return .ContentRange;
} else if (lowercased_equals("if-none-match", value)) {
return .IfNoneMatch;
} else if (lowercased_equals("last-modified", value)) {
return .LastModified;
}
},
14 => {
if (lowercased_equals("content-length", value)) {
return .ContentLength;
} else if (lowercased_equals("accept-charset", value)) {
return .AcceptCharset;
}
},
15 => {
if (lowercased_equals("accept-encoding", value)) {
return .AcceptEncoding;
} else if (lowercased_equals("accept-language", value)) {
return .AcceptLanguage;
} else if (lowercased_equals("public-key-pins", value)) {
return .PublicKeyPins;
} else if (lowercased_equals("referrer-policy", value)) {
return .ReferrerPolicy;
} else if (lowercased_equals("x-frame-options", value)) {
return .XFrameOptions;
}
},
16 => {
if (lowercased_equals("content-encoding", value)) {
return .ContentEncoding;
} else if (lowercased_equals("content-language", value)) {
return .ContentLanguage;
} else if (lowercased_equals("content-location", value)) {
return .ContentLocation;
} else if (lowercased_equals("www-authenticate", value)) {
return .WwwAuthenticate;
} else if (lowercased_equals("x-xss-protection", value)) {
return .XXssProtection;
}
},
17 => {
if (lowercased_equals("if-modified-since", value)) {
return .IfModifiedSince;
} else if (lowercased_equals("sec-websocket-key", value)) {
return .SecWebSocketKey;
} else if (lowercased_equals("transfer-encoding", value)) {
return .TransferEncoding;
}
},
18 => {
if (lowercased_equals("proxy-authenticate", value)) {
return .ProxyAuthenticate;
}
},
19 => {
if (lowercased_equals("content-disposition", value)) {
return .ContentDisposition;
} else if (lowercased_equals("if-unmodified-since", value)) {
return .IfUnmodifiedSince;
} else if (lowercased_equals("proxy-authorization", value)) {
return .ProxyAuthorization;
}
},
20 => {
if (lowercased_equals("sec-websocket-accept", value)) {
return .SecWebSocketAccept;
}
},
21 => {
if (lowercased_equals("sec-websocket-version", value)) {
return .SecWebSocketVersion;
}
},
22 => {
if (lowercased_equals("access-control-max-age", value)) {
return .AccessControlMaxAge;
} else if (lowercased_equals("sec-websocket-protocol", value)) {
return .SecWebSocketProtocol;
} else if (lowercased_equals("x-content-type-options", value)) {
return .XContentTypeOptions;
} else if (lowercased_equals("x-dns-prefetch-control", value)) {
return .XDnsPrefetchControl;
}
},
23 => {
if (lowercased_equals("content-security-policy", value)) {
return .ContentSecurityPolicy;
}
},
24 => {
if (lowercased_equals("sec-websocket-extensions", value)) {
return .SecWebSocketExtensions;
}
},
25 => {
if (lowercased_equals("strict-transport-security", value)) {
return .StrictTransportSecurity;
} else if (lowercased_equals("upgrade-insecure-requests", value)) {
return .UpgradeInsecureRequests;
}
},
27 => {
if (lowercased_equals("access-control-allow-origin", value)) {
return .AccessControlAllowOrigin;
}
},
28 => {
if (lowercased_equals("access-control-allow-headers", value)) {
return .AccessControlAllowHeaders;
} else if (lowercased_equals("access-control-allow-methods", value)) {
return .AccessControlAllowMethods;
}
},
29 => {
if (lowercased_equals("access-control-expose-headers", value)) {
return .AccessControlExposeHeaders;
} else if (lowercased_equals("access-control-request-method", value)) {
return .AccessControlRequestMethod;
}
},
30 => {
if (lowercased_equals("access-control-request-headers", value)) {
return .AccessControlRequestHeaders;
}
},
32 => {
if (lowercased_equals("access-control-allow-credentials", value)) {
return .AccessControlAllowCredentials;
}
},
35 => {
if (lowercased_equals("content-security-policy-report-only", value)) {
return .ContentSecurityPolicyReportOnly;
}
},
else => {
return .Custom;
},
}
return .Custom;
}
pub fn as_http1(self: HeaderType, value: []const u8) []const u8 {
return switch (self) {
.Accept => "Accept",
.AcceptCharset => "Accept-Charset",
.AcceptEncoding => "Accept-Encoding",
.AcceptLanguage => "Accept-Language",
.AcceptRanges => "Accept-Ranges",
.AccessControlAllowCredentials => "Access-Control-Allow-Credentials",
.AccessControlAllowHeaders => "Access-Control-Allow-Headers",
.AccessControlAllowMethods => "Access-Control-Allow-Methods",
.AccessControlAllowOrigin => "Access-Control-Allow-Origin",
.AccessControlExposeHeaders => "Access-Control-Expose-Headers",
.AccessControlMaxAge => "Access-Control-Max-Age",
.AccessControlRequestHeaders => "Access-Control-Request-Headers",
.AccessControlRequestMethod => "Access-Control-Request-Method",
.Age => "Age",
.Allow => "Allow",
.AltSvc => "Alt-Svc",
.Authorization => "Authorization",
.CacheControl => "Cache-Control",
.Connection => "Connection",
.ContentDisposition => "Content-Disposition",
.ContentEncoding => "Content-Encoding",
.ContentLanguage => "Content-Language",
.ContentLength => "Content-Length",
.ContentLocation => "Content-Location",
.ContentRange => "Content-Range",
.ContentSecurityPolicy => "Content-Security-Policy",
.ContentSecurityPolicyReportOnly => "Content-Security-Policy-Report-Only",
.ContentType => "Content-Type",
.Cookie => "Cookie",
.Custom => value,
.Date => "Date",
.Dnt => "Dnt",
.Etag => "Etag",
.Expect => "Expect",
.Expires => "Expires",
.Forwarded => "Forwarded",
.From => "From",
.Host => "Host",
.IfMatch => "If-Match",
.IfModifiedSince => "If-Modified-Since",
.IfNoneMatch => "If-None-Match",
.IfRange => "If-Range",
.IfUnmodifiedSince => "If-Unmodified-Since",
.LastModified => "Last-Modified",
.Link => "Link",
.Location => "Location",
.MaxForwards => "Max-Forwards",
.Origin => "Origin",
.Pragma => "Pragma",
.ProxyAuthenticate => "Proxy-Authenticate",
.ProxyAuthorization => "Proxy-Authorization",
.PublicKeyPins => "Public-Key-Pins",
.Range => "Range",
.Referer => "Referer",
.ReferrerPolicy => "Referrer-Policy",
.Refresh => "Refresh",
.RetryAfter => "Retry-After",
.SecWebSocketAccept => "Sec-WebSocket-Accept",
.SecWebSocketExtensions => "Sec-WebSocket-Extensions",
.SecWebSocketKey => "Sec-WebSocket-Key",
.SecWebSocketProtocol => "Sec-WebSocket-Protocol",
.SecWebSocketVersion => "Sec-WebSocket-Version",
.Server => "Server",
.SetCookie => "Set-Cookie",
.StrictTransportSecurity => "Strict-Transport-Security",
.Te => "Te",
.Trailer => "Trailer",
.TransferEncoding => "Transfer-Encoding",
.UserAgent => "User-Agent",
.Upgrade => "Upgrade",
.UpgradeInsecureRequests => "Upgrade-Insecure-Requests",
.Vary => "Vary",
.Via => "Via",
.Warning => "Warning",
.WwwAuthenticate => "WWW-Authenticate",
.XContentTypeOptions => "X-Content-Type-Options",
.XDnsPrefetchControl => "X-DNS-Prefetch-Control",
.XFrameOptions => "X-Frame-Options",
.XXssProtection => "X-XSS-Protection",
};
}
pub fn as_http2(self: HeaderType, value: []const u8) []const u8 {
return switch (self) {
.Accept => "accept",
.AcceptCharset => "accept-charset",
.AcceptEncoding => "accept-encoding",
.AcceptLanguage => "accept-language",
.AcceptRanges => "accept-ranges",
.AccessControlAllowCredentials => "access-control-allow-credentials",
.AccessControlAllowHeaders => "access-control-allow-headers",
.AccessControlAllowMethods => "access-control-allow-methods",
.AccessControlAllowOrigin => "access-control-allow-origin",
.AccessControlExposeHeaders => "access-control-expose-headers",
.AccessControlMaxAge => "access-control-max-age",
.AccessControlRequestHeaders => "access-control-request-headers",
.AccessControlRequestMethod => "access-control-request-method",
.Age => "age",
.Allow => "allow",
.AltSvc => "alt-svc",
.Authorization => "authorization",
.CacheControl => "cache-control",
.Connection => "connection",
.ContentDisposition => "content-disposition",
.ContentEncoding => "content-encoding",
.ContentLanguage => "content-language",
.ContentLength => "content-length",
.ContentLocation => "content-location",
.ContentRange => "content-range",
.ContentSecurityPolicy => "content-security-policy",
.ContentSecurityPolicyReportOnly => "content-security-policy-report-only",
.ContentType => "content-type",
.Cookie => "cookie",
.Custom => value,
.Date => "date",
.Dnt => "dnt",
.Etag => "etag",
.Expect => "expect",
.Expires => "expires",
.Forwarded => "forwarded",
.From => "from",
.Host => "host",
.IfMatch => "if-match",
.IfModifiedSince => "if-modified-since",
.IfNoneMatch => "if-none-match",
.IfRange => "if-range",
.IfUnmodifiedSince => "if-unmodified-since",
.LastModified => "last-modified",
.Link => "link",
.Location => "location",
.MaxForwards => "max-forwards",
.Origin => "origin",
.Pragma => "pragma",
.ProxyAuthenticate => "proxy-authenticate",
.ProxyAuthorization => "proxy-authorization",
.PublicKeyPins => "public-key-pins",
.Range => "range",
.Referer => "referer",
.ReferrerPolicy => "referrer-policy",
.Refresh => "refresh",
.RetryAfter => "retry-after",
.SecWebSocketAccept => "sec-websocket-accept",
.SecWebSocketExtensions => "sec-websocket-extensions",
.SecWebSocketKey => "sec-websocket-key",
.SecWebSocketProtocol => "sec-websocket-protocol",
.SecWebSocketVersion => "sec-websocket-version",
.Server => "server",
.SetCookie => "set-cookie",
.StrictTransportSecurity => "strict-transport-security",
.Te => "te",
.Trailer => "trailer",
.TransferEncoding => "transfer-encoding",
.UserAgent => "user-agent",
.Upgrade => "upgrade",
.UpgradeInsecureRequests => "upgrade-insecure-requests",
.Vary => "vary",
.Via => "via",
.Warning => "warning",
.WwwAuthenticate => "www-authenticate",
.XContentTypeOptions => "x-content-type-options",
.XDnsPrefetchControl => "x-dns-prefetch-control",
.XFrameOptions => "x-frame-options",
.XXssProtection => "x-xss-protection",
};
}
};
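/// A validated header name: `type` tags names recognized as standard HTTP headers
/// (falling back to `.Custom`), while `value` keeps the caller's original bytes.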
pub const HeaderName = struct {
type: HeaderType,
value: []const u8,
const Error = error{
Invalid,
};
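    /// Rejects empty names and any byte not allowed by HEADER_NAME_MAP, then tags
    /// the name with its HeaderType.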
pub fn parse(name: []const u8) Error!HeaderName {
if (name.len == 0) {
return error.Invalid;
}
for (name) |char| {
if (HEADER_NAME_MAP[char] == 0) {
return error.Invalid;
}
}
return HeaderName{ .type = HeaderType.from_bytes(name), .value = name };
}
pub fn raw(self: HeaderName) callconv(.Inline) []const u8 {
return self.value;
}
pub fn as_http1(self: HeaderName) callconv(.Inline) []const u8 {
return self.type.as_http1(self.value);
}
pub fn as_http2(self: HeaderName) callconv(.Inline) []const u8 {
return self.type.as_http2(self.value);
}
pub fn type_of(name: []const u8) HeaderType {
return HeaderType.from_bytes(name);
}
};
// ASCII codes accepted for a header's name
// Cf: Borrowed from seanmonstar's httparse library
// https://github.com/seanmonstar/httparse/blob/01e68542605d8a24a707536561c27a336d4090dc/src/lib.rs#L96
const HEADER_NAME_MAP = [_]u8{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
// \0 \t \n \r
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
// commands
0, '!', 0, '#', '$', '%', '&', '\'', 0, 0, '*', '+', 0, '-', '.', 0,
// \s " ( ) , /
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 0, 0, 0, 0, 0, 0,
// : ; < = > ?
0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
// @ A B C D E F G H I J K L M N O
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 0, 0, 0, '^', '_',
// P Q R S T U V W X Y Z [ \ ]
'`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
//
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 0, '|', 0, '~', 0,
// { } del
// ====== Extended ASCII (aka. obs-text) ======
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
const std = @import("std");
const expect = std.testing.expect;
const expectError = std.testing.expectError;
test "Parse - Standard header names have a lower-cased representation" {
var name = try HeaderName.parse("Content-Length");
expect(std.mem.eql(u8, name.raw(), "Content-Length"));
expect(name.type == .ContentLength);
}
test "Parse - Standard header names tagging is case insensitive" {
var name = try HeaderName.parse("CoNtEnT-LeNgTh");
expect(std.mem.eql(u8, name.raw(), "CoNtEnT-LeNgTh"));
expect(name.type == .ContentLength);
}
test "Parse - Custom header names have no lower-cased representation" {
var name = try HeaderName.parse("Gotta-Go-Fast");
expect(std.mem.eql(u8, name.raw(), "Gotta-Go-Fast"));
expect(name.type == .Custom);
}
test "Parse - Invalid character returns an error" {
const fail = HeaderName.parse("Cont(ent-Length");
expectError(error.Invalid, fail);
}
test "Parse - Empty name is invalid" {
const fail = HeaderName.parse("");
expectError(error.Invalid, fail);
}
test "TypeOf - Standard header name" {
expect(HeaderName.type_of("Content-Length") == .ContentLength);
expect(HeaderName.type_of("Host") == .Host);
}
test "TypeOf - Standard headers matching is case insensitive" {
expect(HeaderName.type_of("CoNTeNt-LeNgTh") == .ContentLength);
}
test "TypeOf - Custom header" {
expect(HeaderName.type_of("Gotta-Go-Fast") == .Custom);
}
test "AsHttp1 - Standard headers are titled" {
var name = try HeaderName.parse("Content-Length");
expect(std.mem.eql(u8, name.as_http1(), "Content-Length"));
}
test "AsHttp1 - Custom headers keeps their case" {
var name = try HeaderName.parse("Gotta-Go-Fast");
expect(std.mem.eql(u8, name.as_http1(), "Gotta-Go-Fast"));
}
test "AsHttp2 - Standard headers are lowercased" {
var name = try HeaderName.parse("Content-Length");
expect(std.mem.eql(u8, name.as_http2(), "content-length"));
}
test "AsHttp2 - Custom headers keeps their case" {
var name = try HeaderName.parse("Gotta-Go-Fast");
expect(std.mem.eql(u8, name.as_http2(), "Gotta-Go-Fast"));
}
test "Parse" {
const Case = struct {
value: []const u8,
type: HeaderType,
http1: []const u8,
http2: []const u8,
};
var cases = [_]Case{
.{ .value = "Accept", .type = .Accept, .http1 = "Accept", .http2 = "accept" },
.{ .value = "Accept-Charset", .type = .AcceptCharset, .http1 = "Accept-Charset", .http2 = "accept-charset" },
.{ .value = "Accept-Encoding", .type = .AcceptEncoding, .http1 = "Accept-Encoding", .http2 = "accept-encoding" },
.{ .value = "Accept-Language", .type = .AcceptLanguage, .http1 = "Accept-Language", .http2 = "accept-language" },
.{ .value = "Accept-Ranges", .type = .AcceptRanges, .http1 = "Accept-Ranges", .http2 = "accept-ranges" },
.{ .value = "Access-Control-Allow-Credentials", .type = .AccessControlAllowCredentials, .http1 = "Access-Control-Allow-Credentials", .http2 = "access-control-allow-credentials" },
.{ .value = "Access-Control-Allow-Headers", .type = .AccessControlAllowHeaders, .http1 = "Access-Control-Allow-Headers", .http2 = "access-control-allow-headers" },
.{ .value = "Access-Control-Allow-Methods", .type = .AccessControlAllowMethods, .http1 = "Access-Control-Allow-Methods", .http2 = "access-control-allow-methods" },
.{ .value = "Access-Control-Allow-Origin", .type = .AccessControlAllowOrigin, .http1 = "Access-Control-Allow-Origin", .http2 = "access-control-allow-origin" },
.{ .value = "Access-Control-Expose-Headers", .type = .AccessControlExposeHeaders, .http1 = "Access-Control-Expose-Headers", .http2 = "access-control-expose-headers" },
.{ .value = "Access-Control-Max-Age", .type = .AccessControlMaxAge, .http1 = "Access-Control-Max-Age", .http2 = "access-control-max-age" },
.{ .value = "Access-Control-Request-Headers", .type = .AccessControlRequestHeaders, .http1 = "Access-Control-Request-Headers", .http2 = "access-control-request-headers" },
.{ .value = "Access-Control-Request-Method", .type = .AccessControlRequestMethod, .http1 = "Access-Control-Request-Method", .http2 = "access-control-request-method" },
.{ .value = "Age", .type = .Age, .http1 = "Age", .http2 = "age" },
.{ .value = "Allow", .type = .Allow, .http1 = "Allow", .http2 = "allow" },
.{ .value = "Alt-Svc", .type = .AltSvc, .http1 = "Alt-Svc", .http2 = "alt-svc" },
.{ .value = "Authorization", .type = .Authorization, .http1 = "Authorization", .http2 = "authorization" },
.{ .value = "Cache-Control", .type = .CacheControl, .http1 = "Cache-Control", .http2 = "cache-control" },
.{ .value = "Connection", .type = .Connection, .http1 = "Connection", .http2 = "connection" },
.{ .value = "Content-Length", .type = .ContentLength, .http1 = "Content-Length", .http2 = "content-length" },
.{ .value = "Content-Location", .type = .ContentLocation, .http1 = "Content-Location", .http2 = "content-location" },
.{ .value = "Content-Range", .type = .ContentRange, .http1 = "Content-Range", .http2 = "content-range" },
.{ .value = "Content-Security-Policy", .type = .ContentSecurityPolicy, .http1 = "Content-Security-Policy", .http2 = "content-security-policy" },
.{ .value = "Content-Security-Policy-Report-Only", .type = .ContentSecurityPolicyReportOnly, .http1 = "Content-Security-Policy-Report-Only", .http2 = "content-security-policy-report-only" },
.{ .value = "Content-Type", .type = .ContentType, .http1 = "Content-Type", .http2 = "content-type" },
.{ .value = "Cookie", .type = .Cookie, .http1 = "Cookie", .http2 = "cookie" },
.{ .value = "I-Am-A-Custom-Header", .type = .Custom, .http1 = "I-Am-A-Custom-Header", .http2 = "I-Am-A-Custom-Header" },
.{ .value = "Dnt", .type = .Dnt, .http1 = "Dnt", .http2 = "dnt" },
.{ .value = "Date", .type = .Date, .http1 = "Date", .http2 = "date" },
.{ .value = "Etag", .type = .Etag, .http1 = "Etag", .http2 = "etag" },
.{ .value = "Expect", .type = .Expect, .http1 = "Expect", .http2 = "expect" },
.{ .value = "Expires", .type = .Expires, .http1 = "Expires", .http2 = "expires" },
.{ .value = "Forwarded", .type = .Forwarded, .http1 = "Forwarded", .http2 = "forwarded" },
.{ .value = "From", .type = .From, .http1 = "From", .http2 = "from" },
.{ .value = "Host", .type = .Host, .http1 = "Host", .http2 = "host" },
.{ .value = "If-Match", .type = .IfMatch, .http1 = "If-Match", .http2 = "if-match" },
.{ .value = "If-Modified-Since", .type = .IfModifiedSince, .http1 = "If-Modified-Since", .http2 = "if-modified-since" },
.{ .value = "If-None-Match", .type = .IfNoneMatch, .http1 = "If-None-Match", .http2 = "if-none-match" },
.{ .value = "If-Range", .type = .IfRange, .http1 = "If-Range", .http2 = "if-range" },
.{ .value = "If-Unmodified-Since", .type = .IfUnmodifiedSince, .http1 = "If-Unmodified-Since", .http2 = "if-unmodified-since" },
.{ .value = "Last-Modified", .type = .LastModified, .http1 = "Last-Modified", .http2 = "last-modified" },
.{ .value = "Link", .type = .Link, .http1 = "Link", .http2 = "link" },
.{ .value = "Location", .type = .Location, .http1 = "Location", .http2 = "location" },
.{ .value = "Max-Forwards", .type = .MaxForwards, .http1 = "Max-Forwards", .http2 = "max-forwards" },
.{ .value = "Origin", .type = .Origin, .http1 = "Origin", .http2 = "origin" },
.{ .value = "Pragma", .type = .Pragma, .http1 = "Pragma", .http2 = "pragma" },
.{ .value = "Proxy-Authenticate", .type = .ProxyAuthenticate, .http1 = "Proxy-Authenticate", .http2 = "proxy-authenticate" },
.{ .value = "Proxy-Authorization", .type = .ProxyAuthorization, .http1 = "Proxy-Authorization", .http2 = "proxy-authorization" },
.{ .value = "Public-Key-Pins", .type = .PublicKeyPins, .http1 = "Public-Key-Pins", .http2 = "public-key-pins" },
.{ .value = "Range", .type = .Range, .http1 = "Range", .http2 = "range" },
.{ .value = "Referer", .type = .Referer, .http1 = "Referer", .http2 = "referer" },
.{ .value = "Referrer-Policy", .type = .ReferrerPolicy, .http1 = "Referrer-Policy", .http2 = "referrer-policy" },
.{ .value = "Refresh", .type = .Refresh, .http1 = "Refresh", .http2 = "refresh" },
.{ .value = "Retry-After", .type = .RetryAfter, .http1 = "Retry-After", .http2 = "retry-after" },
.{ .value = "Sec-WebSocket-Accept", .type = .SecWebSocketAccept, .http1 = "Sec-WebSocket-Accept", .http2 = "sec-websocket-accept" },
.{ .value = "Sec-WebSocket-Extensions", .type = .SecWebSocketExtensions, .http1 = "Sec-WebSocket-Extensions", .http2 = "sec-websocket-extensions" },
.{ .value = "Sec-WebSocket-Key", .type = .SecWebSocketKey, .http1 = "Sec-WebSocket-Key", .http2 = "sec-websocket-key" },
.{ .value = "Sec-WebSocket-Protocol", .type = .SecWebSocketProtocol, .http1 = "Sec-WebSocket-Protocol", .http2 = "sec-websocket-protocol" },
.{ .value = "Sec-WebSocket-Version", .type = .SecWebSocketVersion, .http1 = "Sec-WebSocket-Version", .http2 = "sec-websocket-version" },
.{ .value = "Server", .type = .Server, .http1 = "Server", .http2 = "server" },
.{ .value = "Set-Cookie", .type = .SetCookie, .http1 = "Set-Cookie", .http2 = "set-cookie" },
.{ .value = "Strict-Transport-Security", .type = .StrictTransportSecurity, .http1 = "Strict-Transport-Security", .http2 = "strict-transport-security" },
.{ .value = "Te", .type = .Te, .http1 = "Te", .http2 = "te" },
.{ .value = "Trailer", .type = .Trailer, .http1 = "Trailer", .http2 = "trailer" },
.{ .value = "Transfer-Encoding", .type = .TransferEncoding, .http1 = "Transfer-Encoding", .http2 = "transfer-encoding" },
.{ .value = "User-Agent", .type = .UserAgent, .http1 = "User-Agent", .http2 = "user-agent" },
.{ .value = "Upgrade", .type = .Upgrade, .http1 = "Upgrade", .http2 = "upgrade" },
.{ .value = "Upgrade-Insecure-Requests", .type = .UpgradeInsecureRequests, .http1 = "Upgrade-Insecure-Requests", .http2 = "upgrade-insecure-requests" },
.{ .value = "Vary", .type = .Vary, .http1 = "Vary", .http2 = "vary" },
.{ .value = "Via", .type = .Via, .http1 = "Via", .http2 = "via" },
.{ .value = "Warning", .type = .Warning, .http1 = "Warning", .http2 = "warning" },
.{ .value = "WWW-Authenticate", .type = .WwwAuthenticate, .http1 = "WWW-Authenticate", .http2 = "www-authenticate" },
.{ .value = "X-Content-Type-Options", .type = .XContentTypeOptions, .http1 = "X-Content-Type-Options", .http2 = "x-content-type-options" },
.{ .value = "X-DNS-Prefetch-Control", .type = .XDnsPrefetchControl, .http1 = "X-DNS-Prefetch-Control", .http2 = "x-dns-prefetch-control" },
.{ .value = "X-Frame-Options", .type = .XFrameOptions, .http1 = "X-Frame-Options", .http2 = "x-frame-options" },
.{ .value = "X-XSS-Protection", .type = .XXssProtection, .http1 = "X-XSS-Protection", .http2 = "x-xss-protection" },
};
for (cases) |case| {
var name = try HeaderName.parse(case.value);
expect(name.type == case.type);
expect(std.mem.eql(u8, name.as_http1(), case.http1));
expect(std.mem.eql(u8, name.as_http2(), case.http2));
}
} | src/headers/name.zig |
const std = @import("std");
const logger = std.log.scoped(.day02);
const real_data = @embedFile("../data/day02.txt");
pub fn main() !void {
logger.info("Part one: {}", .{partOne(real_data)});
logger.info("Part two: {}", .{partTwo(real_data)});
}
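/// Part one: "forward" advances the horizontal position, "down"/"up" adjust the
/// depth; the answer is position * depth.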
fn partOne(data: []const u8) !u64 {
var lines = std.mem.tokenize(u8, data, "\n");
var position: u64 = 0;
var depth: u64 = 0;
while (lines.next()) |line| {
var parts = std.mem.split(u8, line, " ");
const command = parts.next().?;
const amount = try std.fmt.parseInt(u64, parts.next().?, 10);
if (std.mem.eql(u8, command, "forward")) {
position += amount;
} else if (std.mem.eql(u8, command, "down")) {
depth += amount;
} else if (std.mem.eql(u8, command, "up")) {
depth -= amount;
} else unreachable;
}
return position * depth;
}
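/// Part two: "down"/"up" adjust the aim, "forward" advances the position and
/// changes the depth by aim * amount; the answer is again position * depth.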
fn partTwo(data: []const u8) !u64 {
var lines = std.mem.tokenize(u8, data, "\n");
var position: u64 = 0;
var depth: u64 = 0;
var aim: u64 = 0;
while (lines.next()) |line| {
var parts = std.mem.split(u8, line, " ");
const command = parts.next().?;
const amount = try std.fmt.parseInt(u64, parts.next().?, 10);
if (std.mem.eql(u8, command, "forward")) {
position += amount;
depth += aim * amount;
} else if (std.mem.eql(u8, command, "down")) {
aim += amount;
} else if (std.mem.eql(u8, command, "up")) {
aim -= amount;
} else unreachable;
}
return position * depth;
}
test "part one works with explanation input" {
const test_data =
\\forward 5
\\down 5
\\forward 8
\\up 3
\\down 8
\\forward 2
;
try std.testing.expectEqual(try partOne(test_data), 150);
}
test "part two works with explanation input" {
const test_data =
\\forward 5
\\down 5
\\forward 8
\\up 3
\\down 8
\\forward 2
;
try std.testing.expectEqual(try partTwo(test_data), 900);
} | src/day02.zig |
const std = @import("std");
usingnamespace @import("zalgebra");
const gl = @import("c.zig").gl;
const c_allocator = std.heap.c_allocator;
const panic = std.debug.panic;
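/// Thin wrapper around an OpenGL shader program and the ids of its compiled stages.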
pub const Shader = struct {
name: []const u8,
program_id: u32,
vertex_id: u32,
fragment_id: u32,
geometry_id: ?u32,
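    /// Compiles the vertex and fragment sources, links them into a program, and
    /// panics with the driver's info log on any compile or link failure.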
pub fn create(
name: []const u8,
vert_content: []const u8,
frag_content: []const u8,
) !Shader {
var sp: Shader = undefined;
sp.name = name;
{
sp.vertex_id = gl.glCreateShader(gl.GL_VERTEX_SHADER);
const source_ptr: ?[*]const u8 = vert_content.ptr;
const source_len = @intCast(gl.GLint, vert_content.len);
gl.glShaderSource(sp.vertex_id, 1, &source_ptr, &source_len);
gl.glCompileShader(sp.vertex_id);
var ok: gl.GLint = undefined;
gl.glGetShaderiv(sp.vertex_id, gl.GL_COMPILE_STATUS, &ok);
if (ok == 0) {
var error_size: gl.GLint = undefined;
gl.glGetShaderiv(sp.vertex_id, gl.GL_INFO_LOG_LENGTH, &error_size);
const message = try c_allocator.alloc(u8, @intCast(usize, error_size));
gl.glGetShaderInfoLog(sp.vertex_id, error_size, &error_size, message.ptr);
panic("Error compiling vertex shader:\n{s}\n", .{message});
}
}
{
sp.fragment_id = gl.glCreateShader(gl.GL_FRAGMENT_SHADER);
const source_ptr: ?[*]const u8 = frag_content.ptr;
const source_len = @intCast(gl.GLint, frag_content.len);
gl.glShaderSource(sp.fragment_id, 1, &source_ptr, &source_len);
gl.glCompileShader(sp.fragment_id);
var ok: gl.GLint = undefined;
gl.glGetShaderiv(sp.fragment_id, gl.GL_COMPILE_STATUS, &ok);
if (ok == 0) {
var error_size: gl.GLint = undefined;
gl.glGetShaderiv(sp.fragment_id, gl.GL_INFO_LOG_LENGTH, &error_size);
const message = try c_allocator.alloc(u8, @intCast(usize, error_size));
gl.glGetShaderInfoLog(sp.fragment_id, error_size, &error_size, message.ptr);
panic("Error compiling fragment shader:\n{s}\n", .{message});
}
}
sp.program_id = gl.glCreateProgram();
gl.glAttachShader(sp.program_id, sp.vertex_id);
gl.glAttachShader(sp.program_id, sp.fragment_id);
gl.glLinkProgram(sp.program_id);
var ok: gl.GLint = undefined;
gl.glGetProgramiv(sp.program_id, gl.GL_LINK_STATUS, &ok);
if (ok == 0) {
var error_size: gl.GLint = undefined;
gl.glGetProgramiv(sp.program_id, gl.GL_INFO_LOG_LENGTH, &error_size);
const message = try c_allocator.alloc(u8, @intCast(usize, error_size));
gl.glGetProgramInfoLog(sp.program_id, error_size, &error_size, message.ptr);
panic("Error linking shader program: {s}\n", .{message});
}
// Cleanup shaders (from gl doc).
gl.glDeleteShader(sp.vertex_id);
gl.glDeleteShader(sp.fragment_id);
return sp;
}
pub fn setMat4(sp: Shader, name: [*c]const u8, value: *const mat4) void {
const id = gl.glGetUniformLocation(sp.program_id, name);
gl.glUniformMatrix4fv(id, 1, gl.GL_FALSE, value.get_data());
}
pub fn setInteger(sp: Shader, name: [*c]const u8, value: i32) void {
const id = gl.glGetUniformLocation(sp.program_id, name);
gl.glUniform1i(id, value);
}
    // pub fn setBool(sp: Shader, name: [*c]const u8, value: bool) void {
// const id = gl.glGetUniformLocation(sp.program_id, name);
// gl.glUniform1i(id, @boolToInt(value));
// }
    // pub fn setFloat(sp: Shader, name: [*c]const u8, value: f32) void {
// const id = gl.glGetUniformLocation(sp.program_id, name);
// gl.glUniform1f(id, value);
// }
    // pub fn setRgb(sp: Shader, name: [*c]const u8, value: *const vec3) void {
// const id = gl.glGetUniformLocation(sp.program_id, name);
// gl.glUniform3f(id, value.x / 255.0, value.y / 255.0, value.z / 255.0);
// }
    // pub fn setRgba(sp: Shader, name: [*c]const u8, value: *const vec4) void {
// const id = gl.glGetUniformLocation(sp.program_id, name);
// gl.glUniform4f(id, value.x / 255.0, value.y / 255.0, value.z / 255.0, value.w);
// }
}; | demo/common/shader.zig |
const std = @import("std");
const Arena = std.heap.ArenaAllocator;
const expectEqual = std.testing.expectEqual;
const expectEqualStrings = std.testing.expectEqualStrings;
const yeti = @import("yeti");
const initCodebase = yeti.initCodebase;
const MockFileSystem = yeti.FileSystem;
const components = yeti.components;
const tokenize = yeti.tokenize;
const analyzeSemantics = yeti.analyzeSemantics;
const literalOf = yeti.query.literalOf;
const typeOf = yeti.query.typeOf;
const parentType = yeti.query.parentType;
const valueType = yeti.query.valueType;
const Entity = yeti.ecs.Entity;
test "tokenize uniform function call syntax" {
var arena = Arena.init(std.heap.page_allocator);
defer arena.deinit();
var codebase = try initCodebase(&arena);
const module = try codebase.createEntity(.{});
const code = "10.min(20)";
var tokens = try tokenize(module, code);
{
const token = tokens.next().?;
try expectEqual(token.get(components.TokenKind), .int);
try expectEqualStrings(literalOf(token), "10");
try expectEqual(token.get(components.Span), .{
.begin = .{ .column = 0, .row = 0 },
.end = .{ .column = 2, .row = 0 },
});
}
{
const token = tokens.next().?;
try expectEqual(token.get(components.TokenKind), .dot);
try expectEqual(token.get(components.Span), .{
.begin = .{ .column = 2, .row = 0 },
.end = .{ .column = 3, .row = 0 },
});
}
{
const token = tokens.next().?;
try expectEqual(token.get(components.TokenKind), .symbol);
try expectEqualStrings(literalOf(token), "min");
try expectEqual(token.get(components.Span), .{
.begin = .{ .column = 3, .row = 0 },
.end = .{ .column = 6, .row = 0 },
});
}
{
const token = tokens.next().?;
try expectEqual(token.get(components.TokenKind), .left_paren);
try expectEqual(token.get(components.Span), .{
.begin = .{ .column = 6, .row = 0 },
.end = .{ .column = 7, .row = 0 },
});
}
{
const token = tokens.next().?;
try expectEqual(token.get(components.TokenKind), .int);
try expectEqualStrings(literalOf(token), "20");
try expectEqual(token.get(components.Span), .{
.begin = .{ .column = 7, .row = 0 },
.end = .{ .column = 9, .row = 0 },
});
}
{
const token = tokens.next().?;
try expectEqual(token.get(components.TokenKind), .right_paren);
try expectEqual(token.get(components.Span), .{
.begin = .{ .column = 9, .row = 0 },
.end = .{ .column = 10, .row = 0 },
});
}
try expectEqual(tokens.next(), null);
}
test "analyze semantics of uniform function call syntax" {
var arena = Arena.init(std.heap.page_allocator);
defer arena.deinit();
var codebase = try initCodebase(&arena);
const builtins = codebase.get(components.Builtins);
var fs = try MockFileSystem.init(&arena);
_ = try fs.newFile("foo.yeti",
\\min(x: i64, y: i64) i64 {
\\ if x < y { x } else { y }
\\}
\\
\\start() i64 {
\\ 10.min(20)
\\}
);
_ = try analyzeSemantics(codebase, fs, "foo.yeti");
const module = try analyzeSemantics(codebase, fs, "foo.yeti");
const top_level = module.get(components.TopLevel);
const start = top_level.findString("start").get(components.Overloads).slice()[0];
try expectEqualStrings(literalOf(start.get(components.Module).entity), "foo");
try expectEqualStrings(literalOf(start.get(components.Name).entity), "start");
try expectEqual(start.get(components.Parameters).len(), 0);
try expectEqual(start.get(components.ReturnType).entity, builtins.I64);
const body = start.get(components.Body).slice();
try expectEqual(body.len, 1);
const call = body[0];
try expectEqual(call.get(components.AstKind), .call);
const arguments = call.get(components.Arguments).slice();
try expectEqual(arguments.len, 2);
try expectEqualStrings(literalOf(arguments[0]), "10");
try expectEqualStrings(literalOf(arguments[1]), "20");
try expectEqual(typeOf(call), builtins.I64);
const min = call.get(components.Callable).entity;
try expectEqualStrings(literalOf(min.get(components.Name).entity), "min");
}
test "analyze semantics of uniform function call syntax omit parenthesis" {
var arena = Arena.init(std.heap.page_allocator);
defer arena.deinit();
var codebase = try initCodebase(&arena);
const builtins = codebase.get(components.Builtins);
var fs = try MockFileSystem.init(&arena);
_ = try fs.newFile("foo.yeti",
\\square(x: i64) i64 {
\\ x * x
\\}
\\
\\start() i64 {
\\ 10.square
\\}
);
_ = try analyzeSemantics(codebase, fs, "foo.yeti");
const module = try analyzeSemantics(codebase, fs, "foo.yeti");
const top_level = module.get(components.TopLevel);
const start = top_level.findString("start").get(components.Overloads).slice()[0];
try expectEqualStrings(literalOf(start.get(components.Module).entity), "foo");
try expectEqualStrings(literalOf(start.get(components.Name).entity), "start");
try expectEqual(start.get(components.Parameters).len(), 0);
try expectEqual(start.get(components.ReturnType).entity, builtins.I64);
const body = start.get(components.Body).slice();
try expectEqual(body.len, 1);
const call = body[0];
try expectEqual(call.get(components.AstKind), .call);
const arguments = call.get(components.Arguments).slice();
try expectEqual(arguments.len, 1);
try expectEqualStrings(literalOf(arguments[0]), "10");
try expectEqual(typeOf(call), builtins.I64);
const square = call.get(components.Callable).entity;
try expectEqualStrings(literalOf(square.get(components.Name).entity), "square");
}
test "analyze semantics of uniform function call syntax on locals" {
var arena = Arena.init(std.heap.page_allocator);
defer arena.deinit();
var codebase = try initCodebase(&arena);
const builtins = codebase.get(components.Builtins);
var fs = try MockFileSystem.init(&arena);
_ = try fs.newFile("foo.yeti",
\\square(x: i64) i64 {
\\ x * x
\\}
\\
\\start() i64 {
\\ x = 10
\\ x.square()
\\}
);
_ = try analyzeSemantics(codebase, fs, "foo.yeti");
const module = try analyzeSemantics(codebase, fs, "foo.yeti");
const top_level = module.get(components.TopLevel);
const start = top_level.findString("start").get(components.Overloads).slice()[0];
try expectEqualStrings(literalOf(start.get(components.Module).entity), "foo");
try expectEqualStrings(literalOf(start.get(components.Name).entity), "start");
try expectEqual(start.get(components.Parameters).len(), 0);
try expectEqual(start.get(components.ReturnType).entity, builtins.I64);
const body = start.get(components.Body).slice();
try expectEqual(body.len, 2);
const define = body[0];
try expectEqual(define.get(components.AstKind), .define);
try expectEqual(typeOf(define), builtins.Void);
try expectEqualStrings(literalOf(define.get(components.Value).entity), "10");
const x = define.get(components.Local).entity;
try expectEqualStrings(literalOf(x.get(components.Name).entity), "x");
try expectEqual(typeOf(x), builtins.I64);
const call = body[1];
try expectEqual(call.get(components.AstKind), .call);
const arguments = call.get(components.Arguments).slice();
try expectEqual(arguments.len, 1);
try expectEqual(arguments[0], x);
try expectEqual(typeOf(call), builtins.I64);
const square = call.get(components.Callable).entity;
try expectEqualStrings(literalOf(square.get(components.Name).entity), "square");
}
test "analyze semantics of uniform function call syntax on structs" {
var arena = Arena.init(std.heap.page_allocator);
defer arena.deinit();
var codebase = try initCodebase(&arena);
const builtins = codebase.get(components.Builtins);
var fs = try MockFileSystem.init(&arena);
_ = try fs.newFile("foo.yeti",
\\struct Square {
\\ length: f64
\\}
\\
\\area(s: Square) f64 {
\\ s.length * s.length
\\}
\\
\\start() f64 {
\\ s = Square(10)
\\ s.area()
\\}
);
_ = try analyzeSemantics(codebase, fs, "foo.yeti");
const module = try analyzeSemantics(codebase, fs, "foo.yeti");
const top_level = module.get(components.TopLevel);
const start = top_level.findString("start").get(components.Overloads).slice()[0];
try expectEqualStrings(literalOf(start.get(components.Module).entity), "foo");
try expectEqualStrings(literalOf(start.get(components.Name).entity), "start");
try expectEqual(start.get(components.Parameters).len(), 0);
try expectEqual(start.get(components.ReturnType).entity, builtins.F64);
const body = start.get(components.Body).slice();
try expectEqual(body.len, 2);
const define = body[0];
try expectEqual(define.get(components.AstKind), .define);
try expectEqual(typeOf(define), builtins.Void);
const Square = typeOf(define.get(components.Value).entity);
try expectEqualStrings(literalOf(Square), "Square");
const s = define.get(components.Local).entity;
try expectEqualStrings(literalOf(s.get(components.Name).entity), "s");
try expectEqual(typeOf(s), Square);
const call = body[1];
try expectEqual(call.get(components.AstKind), .call);
const arguments = call.get(components.Arguments).slice();
try expectEqual(arguments.len, 1);
try expectEqual(arguments[0], s);
try expectEqual(typeOf(call), builtins.F64);
const area = call.get(components.Callable).entity;
try expectEqualStrings(literalOf(area.get(components.Name).entity), "area");
}
test "analyze semantics of uniform function call syntax on structs omit parenthesis" {
var arena = Arena.init(std.heap.page_allocator);
defer arena.deinit();
var codebase = try initCodebase(&arena);
const builtins = codebase.get(components.Builtins);
var fs = try MockFileSystem.init(&arena);
_ = try fs.newFile("foo.yeti",
\\struct Square {
\\ length: f64
\\}
\\
\\area(s: Square) f64 {
\\ s.length * s.length
\\}
\\
\\start() f64 {
\\ s = Square(10)
\\ s.area
\\}
);
_ = try analyzeSemantics(codebase, fs, "foo.yeti");
const module = try analyzeSemantics(codebase, fs, "foo.yeti");
const top_level = module.get(components.TopLevel);
const start = top_level.findString("start").get(components.Overloads).slice()[0];
try expectEqualStrings(literalOf(start.get(components.Module).entity), "foo");
try expectEqualStrings(literalOf(start.get(components.Name).entity), "start");
try expectEqual(start.get(components.Parameters).len(), 0);
try expectEqual(start.get(components.ReturnType).entity, builtins.F64);
const body = start.get(components.Body).slice();
try expectEqual(body.len, 2);
const define = body[0];
try expectEqual(define.get(components.AstKind), .define);
try expectEqual(typeOf(define), builtins.Void);
const Square = typeOf(define.get(components.Value).entity);
try expectEqualStrings(literalOf(Square), "Square");
const s = define.get(components.Local).entity;
try expectEqualStrings(literalOf(s.get(components.Name).entity), "s");
try expectEqual(typeOf(s), Square);
const call = body[1];
try expectEqual(call.get(components.AstKind), .call);
const arguments = call.get(components.Arguments).slice();
try expectEqual(arguments.len, 1);
try expectEqual(arguments[0], s);
try expectEqual(typeOf(call), builtins.F64);
const area = call.get(components.Callable).entity;
try expectEqualStrings(literalOf(area.get(components.Name).entity), "area");
} | src/tests/test_uniform_function_call_syntax.zig |
const c = @cImport({
@cInclude("cfl_image.h");
});
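/// Wrapper over FLTK's Fl_Image; the format-specific wrappers below all convert
/// to this type through `asImage`.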
pub const Image = struct {
inner: ?*c.Fl_Image,
pub fn scale(self: *Image, width: i32, height: i32, proportional: bool, can_expand: bool) void {
c.Fl_Image_scale(self.inner, width, height, @boolToInt(proportional), @boolToInt(can_expand));
}
pub fn raw(self: *Image) ?*c.Fl_Image {
return self.inner;
}
pub fn fromRaw(ptr: ?*c.Fl_Image) Image {
return Image{
.inner = ptr,
};
}
pub fn fromImagePtr(img: ?*c.Fl_Image) Image {
return Image{
.inner = @ptrCast(?*c.Fl_Image, img),
};
}
pub fn fromVoidPtr(ptr: ?*c_void) Image {
return Image{
.inner = @ptrCast(?*c.Fl_Image, ptr),
};
}
pub fn toVoidPtr(self: *Image) ?*c_void {
return @ptrCast(?*c_void, self.inner);
}
pub fn delete(self: *Image) void {
c.Fl_Image_delete(self.inner);
self.inner = null;
}
pub fn copy(self: *const Image) Image {
const img = c.Fl_Image_copy(self.inner);
return Image{
.inner = img,
};
}
pub fn draw(self: *Image, arg2: i32, arg3: i32, arg4: i32, arg5: i32) void {
return c.Fl_Image_draw(self.inner, arg2, arg3, arg4, arg5);
}
pub fn w(self: *const Image) i32 {
return c.Fl_Image_width(self.inner);
}
pub fn h(self: *const Image) i32 {
return c.Fl_Image_height(self.inner);
}
pub fn count(self: *const Image) u32 {
return c.Fl_Image_count(self.inner);
}
pub fn dataW(self: *const Image) u32 {
return c.Fl_Image_data_w(self.inner);
}
pub fn dataH(self: *const Image) u32 {
return c.Fl_Image_data_h(self.inner);
}
pub fn depth(self: *const Image) u32 {
return c.Fl_Image_d(self.inner);
}
pub fn ld(self: *const Image) u32 {
return c.Fl_Image_ld(self.inner);
}
};
pub const SharedImage = struct {
inner: ?*c.Fl_Shared_Image,
pub fn load(path: [*c]const u8) !SharedImage {
const ptr = c.Fl_Shared_Image_get(path, 0, 0);
if (ptr == null or c.Fl_Shared_Image_fail(ptr) < 0) return error.InvalidParemeter;
return SharedImage{ .inner = ptr };
}
pub fn fromImage(img: *const Image) !SharedImage {
const x = c.Fl_Shared_Image_from_rgb(img.inner, 0);
if (x == null or c.Fl_Shared_Image_fail(x) < 0) return error.InvalidParemeter;
        return SharedImage{ .inner = x };
}
pub fn raw(self: *SharedImage) ?*c.Fl_Shared_Image {
return self.inner;
}
pub fn fromRaw(ptr: ?*c.Fl_Shared_Image) SharedImage {
return SharedImage{
.inner = ptr,
};
}
pub fn fromImagePtr(img: ?*c.Fl_Image) SharedImage {
return SharedImage{
.inner = @ptrCast(?*c.Fl_Shared_Image, img),
};
}
pub fn fromVoidPtr(ptr: ?*c_void) SharedImage {
return SharedImage{
.inner = @ptrCast(?*c.Fl_Shared_Image, ptr),
};
}
pub fn toVoidPtr(self: *SharedImage) ?*c_void {
return @ptrCast(?*c_void, self.inner);
}
pub fn asImage(self: *const SharedImage) Image {
return Image{ .inner = @ptrCast(?*c.Fl_Image, self.inner) };
}
};
pub const SvgImage = struct {
inner: ?*c.Fl_SVG_Image,
pub fn load(path: [*c]const u8) !SvgImage {
const x = c.Fl_SVG_Image_new(path);
if (x == null or c.Fl_SVG_Image_fail(x) < 0) return error.InvalidParemeter;
return SvgImage{ .inner = x };
}
pub fn fromData(data: [*c]const u8) !SvgImage {
const x = c.Fl_SVG_Image_from(data);
if (x == null or c.Fl_SVG_Image_fail(x) < 0) return error.InvalidParemeter;
        return SvgImage{ .inner = x };
}
pub fn raw(self: *SvgImage) ?*c.Fl_SVG_Image {
return self.inner;
}
pub fn fromRaw(ptr: ?*c.Fl_SVG_Image) SvgImage {
return SvgImage{
.inner = ptr,
};
}
pub fn fromImagePtr(img: ?*c.Fl_Image) SvgImage {
return SvgImage{
.inner = @ptrCast(?*c.Fl_SVG_Image, img),
};
}
pub fn fromVoidPtr(ptr: ?*c_void) SvgImage {
return SvgImage{
.inner = @ptrCast(?*c.Fl_SVG_Image, ptr),
};
}
pub fn toVoidPtr(self: *SvgImage) ?*c_void {
return @ptrCast(?*c_void, self.inner);
}
pub fn asImage(self: *const SvgImage) Image {
return Image{ .inner = @ptrCast(?*c.Fl_Image, self.inner) };
}
};
pub const JpegImage = struct {
inner: ?*c.Fl_JPEG_Image,
pub fn load(path: [*c]const u8) !JpegImage {
const x = c.Fl_JPEG_Image_new(path);
if (x == null or c.Fl_JPEG_Image_fail(x) < 0) return error.InvalidParemeter;
return JpegImage{ .inner = x };
}
pub fn fromData(data: [*c]const u8) !JpegImage {
const x = c.Fl_JPEG_Image_from(data);
if (x == null or c.Fl_JPEG_Image_fail(x) < 0) return error.InvalidParemeter;
        return JpegImage{ .inner = x };
}
pub fn raw(self: *JpegImage) ?*c.Fl_JPEG_Image {
return self.inner;
}
pub fn fromRaw(ptr: ?*c.Fl_JPEG_Image) JpegImage {
return JpegImage{
.inner = ptr,
};
}
pub fn fromImagePtr(img: ?*c.Fl_Image) JpegImage {
return JpegImage{
.inner = @ptrCast(?*c.Fl_JPEG_Image, img),
};
}
pub fn fromVoidPtr(ptr: ?*c_void) JpegImage {
return JpegImage{
.inner = @ptrCast(?*c.Fl_JPEG_Image, ptr),
};
}
pub fn toVoidPtr(self: *JpegImage) ?*c_void {
return @ptrCast(?*c_void, self.inner);
}
pub fn asImage(self: *const JpegImage) Image {
return Image{ .inner = @ptrCast(?*c.Fl_Image, self.inner) };
}
};
pub const BmpImage = struct {
inner: ?*c.Fl_BMP_Image,
pub fn load(path: [*c]const u8) !BmpImage {
const x = c.Fl_BMP_Image_new(path);
if (x == null or c.Fl_BMP_Image_fail(x) < 0) return error.InvalidParemeter;
return BmpImage{ .inner = x };
}
pub fn fromData(data: [*c]const u8) !BmpImage {
const x = c.Fl_BMP_Image_from(data);
if (x == null or c.Fl_BMP_Image_fail(x) < 0) return error.InvalidParemeter;
        return BmpImage{ .inner = x };
}
pub fn raw(self: *BmpImage) ?*c.Fl_BMP_Image {
return self.inner;
}
pub fn fromRaw(ptr: ?*c.Fl_BMP_Image) BmpImage {
return BmpImage{
.inner = ptr,
};
}
pub fn fromImagePtr(img: ?*c.Fl_Image) BmpImage {
return BmpImage{
.inner = @ptrCast(?*c.Fl_BMP_Image, img),
};
}
pub fn fromVoidPtr(ptr: ?*c_void) BmpImage {
return BmpImage{
.inner = @ptrCast(?*c.Fl_BMP_Image, ptr),
};
}
pub fn toVoidPtr(self: *BmpImage) ?*c_void {
return @ptrCast(?*c_void, self.inner);
}
pub fn asImage(self: *const BmpImage) Image {
return Image{ .inner = @ptrCast(?*c.Fl_Image, self.inner) };
}
};
pub const RgbImage = struct {
inner: ?*c.Fl_RGB_Image,
pub fn new(data: [*c]const u8, w: u32, h: u32, depth: u32) !RgbImage {
const ptr = c.Fl_RGB_Image_new(data, w, h, depth);
if (ptr == null or c.Fl_RGB_Image_fail(ptr) < 0) return error.InvalidParemeter;
return RgbImage{ .inner = ptr };
}
pub fn fromData(data: [*c]const u8, w: u32, h: u32, depth: u32) !RgbImage {
const ptr = c.Fl_RGB_Image_from_data(data, w, h, depth);
if (ptr == null or c.Fl_RGB_Image_fail(ptr) < 0) return error.InvalidParemeter;
return RgbImage{ .inner = ptr };
}
pub fn raw(self: *RgbImage) ?*c.Fl_RGB_Image {
return self.inner;
}
pub fn fromRaw(ptr: ?*c.Fl_RGB_Image) RgbImage {
return RgbImage{
.inner = ptr,
};
}
pub fn fromImagePtr(img: ?*c.Fl_Image) RgbImage {
return RgbImage{
.inner = @ptrCast(?*c.Fl_RGB_Image, img),
};
}
pub fn fromVoidPtr(ptr: ?*c_void) RgbImage {
return RgbImage{
.inner = @ptrCast(?*c.Fl_RGB_Image, ptr),
};
}
pub fn toVoidPtr(self: *RgbImage) ?*c_void {
return @ptrCast(?*c_void, self.inner);
}
pub fn asImage(self: *const RgbImage) Image {
return Image{ .inner = @ptrCast(?*c.Fl_Image, self.inner) };
}
};
pub const PngImage = struct {
inner: ?*c.Fl_PNG_Image,
pub fn load(path: [*c]const u8) !PngImage {
const x = c.Fl_PNG_Image_new(path);
if (x == null or c.Fl_PNG_Image_fail(x) < 0) return error.InvalidParemeter;
return PngImage{ .inner = x };
}
pub fn fromData(data: [*c]const u8) !PngImage {
const x = c.Fl_PNG_Image_from(data);
if (x == null or c.Fl_PNG_Image_fail(x) < 0) return error.InvalidParemeter;
        return PngImage{ .inner = x };
}
pub fn raw(self: *PngImage) ?*c.Fl_PNG_Image {
return self.inner;
}
pub fn fromRaw(ptr: ?*c.Fl_PNG_Image) PngImage {
return PngImage{
.inner = ptr,
};
}
pub fn fromImagePtr(img: ?*c.Fl_Image) PngImage {
return PngImage{
.inner = @ptrCast(?*c.Fl_PNG_Image, img),
};
}
pub fn fromVoidPtr(ptr: ?*c_void) PngImage {
return PngImage{
.inner = @ptrCast(?*c.Fl_PNG_Image, ptr),
};
}
pub fn toVoidPtr(self: *PngImage) ?*c_void {
return @ptrCast(?*c_void, self.inner);
}
pub fn asImage(self: *const PngImage) Image {
return Image{ .inner = @ptrCast(?*c.Fl_Image, self.inner) };
}
};
pub const GifImage = struct {
inner: ?*c.Fl_GIF_Image,
pub fn load(path: [*c]const u8) !GifImage {
const x = c.Fl_GIF_Image_new(path);
if (x == null or c.Fl_GIF_Image_fail(x) < 0) return error.InvalidParemeter;
return GifImage{ .inner = x };
}
pub fn fromData(data: [*c]const u8) !GifImage {
const x = c.Fl_GIF_Image_from(data);
if (x == null or c.Fl_GIF_Image_fail(x) < 0) return error.InvalidParemeter;
        return GifImage{ .inner = x };
}
pub fn raw(self: *GifImage) ?*c.Fl_GIF_Image {
return self.inner;
}
pub fn fromRaw(ptr: ?*c.Fl_GIF_Image) GifImage {
return GifImage{
.inner = ptr,
};
}
pub fn fromImagePtr(img: ?*c.Fl_Image) GifImage {
return GifImage{
.inner = @ptrCast(?*c.Fl_GIF_Image, img),
};
}
pub fn fromVoidPtr(ptr: ?*c_void) GifImage {
return GifImage{
.inner = @ptrCast(?*c.Fl_GIF_Image, ptr),
};
}
pub fn toVoidPtr(self: *GifImage) ?*c_void {
return @ptrCast(?*c_void, self.inner);
}
pub fn asImage(self: *const GifImage) Image {
return Image{ .inner = @ptrCast(?*c.Fl_Image, self.inner) };
}
};
pub const TiledImage = struct {
inner: ?*c.Fl_Tiled_Image,
pub fn new(img: *const Image, w: i32, h: i32) TiledImage {
const ptr = c.Fl_Tiled_Image_new(img.inner, w, h);
return TiledImage{ .inner = ptr };
}
pub fn raw(self: *TiledImage) ?*c.Fl_Tiled_Image {
return self.inner;
}
pub fn fromRaw(ptr: ?*c.Fl_Tiled_Image) TiledImage {
return TiledImage{
.inner = ptr,
};
}
pub fn fromImagePtr(img: ?*c.Fl_Image) TiledImage {
return TiledImage{
.inner = @ptrCast(?*c.Fl_Tiled_Image, img),
};
}
pub fn fromVoidPtr(ptr: ?*c_void) TiledImage {
return TiledImage{
.inner = @ptrCast(?*c.Fl_Tiled_Image, ptr),
};
}
pub fn toVoidPtr(self: *TiledImage) ?*c_void {
return @ptrCast(?*c_void, self.inner);
}
pub fn asImage(self: *const TiledImage) Image {
return Image{ .inner = @ptrCast(?*c.Fl_Image, self.inner) };
}
};
test "" {
@import("std").testing.refAllDecls(@This());
} | src/image.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const command = @import("command.zig");
const help = @import("./help.zig");
const argp = @import("./arg.zig");
const iterators = @import("./iterators.zig");
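/// Result of a successful parse: the resolved action and the positional arguments
/// captured for it.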
pub const ParseResult = struct {
action: command.Action,
args: []const []const u8,
};
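/// Parses the process arguments against `cmd` and invokes the resolved command's
/// action with the captured positional arguments.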
pub fn run(cmd: *const command.Command, alloc: Allocator) anyerror!void {
var iter = std.process.args();
var it = iterators.SystemArgIterator{
.iter = &iter,
.alloc = alloc,
};
var cr = try Parser(iterators.SystemArgIterator).init(cmd, it, alloc);
var result = try cr.parse();
cr.deinit();
iter.deinit();
return result.action(result.args);
}
var help_option = command.Option{
.long_name = "help",
.help = "Show this help output.",
.short_alias = 'h',
.value = command.OptionValue{ .bool = false },
};
const ValueList = std.ArrayList([]const u8);
const ValueListMap = std.AutoHashMap(*command.Option, ValueList);
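/// Argument parser generic over the iterator type, so the same logic can consume
/// system arguments or any custom iterator.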
pub fn Parser(comptime Iterator: type) type {
return struct {
const Self = @This();
alloc: Allocator,
arg_iterator: Iterator,
current_command: *const command.Command,
command_path: std.ArrayList(*const command.Command),
captured_arguments: std.ArrayList([]const u8),
value_lists: ?ValueListMap,
pub fn init(cmd: *const command.Command, it: Iterator, alloc: Allocator) !*Self {
var s = try alloc.create(Parser(Iterator));
s.alloc = alloc;
s.arg_iterator = it;
s.current_command = cmd;
s.command_path = try std.ArrayList(*const command.Command).initCapacity(alloc, 16);
s.captured_arguments = try std.ArrayList([]const u8).initCapacity(alloc, 16);
s.value_lists = null;
return s;
}
pub fn deinit(self: *Self) void {
self.captured_arguments.deinit();
self.command_path.deinit();
self.alloc.destroy(self);
}
pub fn parse(self: *Self) anyerror!ParseResult {
validate_command(self.current_command);
_ = self.next_arg();
var args_only = false;
while (self.next_arg()) |arg| {
if (args_only) {
try self.captured_arguments.append(arg);
} else if (argp.interpret(arg)) |int| {
args_only = try self.process_interpretation(&int);
} else |err| {
switch (err) {
error.MissingOptionArgument => fail("missing argument: '{s}'", .{arg}),
}
}
}
return self.finalize();
}
fn finalize(self: *Self) ParseResult {
ensure_all_required_set(self.current_command);
var args = self.captured_arguments.toOwnedSlice();
if (self.value_lists) |vl| {
var it = vl.iterator();
while (it.next()) |entry| {
var option: *command.Option = entry.key_ptr.*;
option.value.string_list = entry.value_ptr.toOwnedSlice();
}
self.value_lists.?.deinit();
}
if (self.current_command.action) |action| {
return ParseResult{ .action = action, .args = args };
} else {
fail("command '{s}': no subcommand provided", .{self.current_command.name});
unreachable;
}
}
fn process_interpretation(self: *Self, int: *const argp.ArgumentInterpretation) !bool {
var args_only = false;
try switch (int.*) {
.option => |opt| self.process_option(&opt),
.double_dash => {
args_only = true;
},
.other => |some_name| {
if (find_subcommand(self.current_command, some_name)) |cmd| {
ensure_all_required_set(self.current_command);
validate_command(cmd);
try self.command_path.append(self.current_command);
self.current_command = cmd;
} else {
try self.captured_arguments.append(some_name);
args_only = true;
}
},
};
return args_only;
}
fn next_arg(self: *Self) ?[]const u8 {
return self.arg_iterator.next();
}
fn process_option(self: *Self, option: *const argp.OptionInterpretation) !void {
var opt: *command.Option = switch (option.option_type) {
.long => find_option_by_name(self.current_command, option.name),
.short => a: {
set_boolean_options(self.current_command, option.name[0 .. option.name.len - 1]);
break :a find_option_by_alias(self.current_command, option.name[option.name.len - 1]);
},
};
if (opt == &help_option) {
try help.print_command_help(self.current_command, self.command_path.toOwnedSlice());
std.os.exit(0);
}
switch (opt.value) {
.bool => opt.value = command.OptionValue{ .bool = true },
else => {
const arg = option.value orelse self.next_arg() orelse {
fail("missing argument for {s}", .{opt.long_name});
unreachable;
};
try self.parse_and_set_option_value(arg, opt);
},
}
}
fn parse_and_set_option_value(self: *Self, text: []const u8, option: *command.Option) !void {
switch (option.value) {
.bool => unreachable,
.string => option.value = command.OptionValue{ .string = text },
.int => {
if (std.fmt.parseInt(i64, text, 10)) |iv| {
option.value = command.OptionValue{ .int = iv };
} else |_| {
fail("option({s}): cannot parse int value", .{option.long_name});
unreachable;
}
},
.float => {
if (std.fmt.parseFloat(f64, text)) |fv| {
option.value = command.OptionValue{ .float = fv };
} else |_| {
fail("option({s}): cannot parse float value", .{option.long_name});
unreachable;
}
},
.string_list => {
if (self.value_lists == null) {
self.value_lists = ValueListMap.init(self.alloc);
}
var res = try self.value_lists.?.getOrPut(option);
if (!res.found_existing) {
res.value_ptr.* = try ValueList.initCapacity(self.alloc, 16);
}
try res.value_ptr.append(text);
},
}
}
};
}
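/// Prints an error message to stderr and terminates the process with exit code 1.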
fn fail(comptime fmt: []const u8, args: anytype) void {
var w = std.io.getStdErr().writer();
std.fmt.format(w, "ERROR: ", .{}) catch unreachable;
std.fmt.format(w, fmt, args) catch unreachable;
std.fmt.format(w, "\n", .{}) catch unreachable;
std.os.exit(1);
}
fn find_subcommand(cmd: *const command.Command, subcommand_name: []const u8) ?*const command.Command {
if (cmd.subcommands) |sc_list| {
for (sc_list) |sc| {
if (std.mem.eql(u8, sc.name, subcommand_name)) {
return sc;
}
}
}
return null;
}
fn find_option_by_name(cmd: *const command.Command, option_name: []const u8) *command.Option {
if (std.mem.eql(u8, "help", option_name)) {
return &help_option;
}
if (cmd.options) |option_list| {
for (option_list) |option| {
if (std.mem.eql(u8, option.long_name, option_name)) {
return option;
}
}
}
fail("no such option '--{s}'", .{option_name});
unreachable;
}
fn find_option_by_alias(cmd: *const command.Command, option_alias: u8) *command.Option {
if (option_alias == 'h') {
return &help_option;
}
if (cmd.options) |option_list| {
for (option_list) |option| {
if (option.short_alias) |alias| {
if (alias == option_alias) {
return option;
}
}
}
}
fail("no such option alias '-{c}'", .{option_alias});
unreachable;
}
fn validate_command(cmd: *const command.Command) void {
if (cmd.subcommands == null) {
if (cmd.action == null) {
fail("command '{s}' has neither subcommands no an aciton assigned", .{cmd.name});
}
} else {
if (cmd.action != null) {
fail("command '{s}' has subcommands and an action assigned. Commands with subcommands are not allowed to have action.", .{cmd.name});
}
}
}
fn set_boolean_options(cmd: *const command.Command, options: []const u8) void {
for (options) |alias| {
var opt = find_option_by_alias(cmd, alias);
if (opt.value == command.OptionValue.bool) {
opt.value.bool = true;
} else {
fail("'-{c}' is not a boolean option", .{alias});
}
}
}
fn ensure_all_required_set(cmd: *const command.Command) void {
if (cmd.options) |list| {
for (list) |option| {
if (option.required) {
var not_set = switch (option.value) {
.bool => false,
.string => |x| x == null,
.int => |x| x == null,
.float => |x| x == null,
.string_list => |x| x == null,
};
if (not_set) {
fail("option '{s}' is required", .{option.long_name});
}
}
}
}
} | src/parser.zig |
const std = @import("std");
const builtin = @import("builtin");
const IsWasm = builtin.target.isWasm();
const stdx = @import("stdx");
const fatal = stdx.fatal;
const platform = @import("platform");
const graphics = @import("graphics");
const Color = graphics.Color;
const ui = @import("ui");
const Row = ui.widgets.Row;
const Text = ui.widgets.Text;
const TextButton = ui.widgets.TextButton;
const Padding = ui.widgets.Padding;
const Center = ui.widgets.Center;
const Column = ui.widgets.Column;
const ColorPicker = ui.widgets.ColorPicker;
const Sized = ui.widgets.Sized;
const helper = @import("helper.zig");
const log = stdx.log.scoped(.main);
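/// Root UI widget: two color pickers that control the colors used to render the
/// box and duck models in the 3D scene.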
pub const App = struct {
box_color: Color,
duck_color: Color,
const Self = @This();
pub fn init(self: *Self, _: *ui.InitContext) void {
self.box_color = Color.Blue;
self.duck_color = Color.Yellow;
}
pub fn build(self: *Self, c: *ui.BuildContext) ui.FrameId {
const S = struct {
fn onBoxColorPreview(self_: *Self, color: Color) void {
self_.box_color = color;
}
fn onBoxColor(self_: *Self, color: Color, save: bool) void {
_ = save;
self_.box_color = color;
}
fn onDuckColorPreview(self_: *Self, color: Color) void {
self_.duck_color = color;
}
fn onDuckColor(self_: *Self, color: Color, save: bool) void {
_ = save;
self_.duck_color = color;
}
};
return c.decl(Sized, .{
.width = 250,
.child = c.decl(Column, .{
.expand = false,
.children = c.list(.{
c.decl(ColorPicker, .{
.label = "Box Color",
.init_val = self.box_color,
.onPreviewChange = c.funcExt(self, S.onBoxColorPreview),
.onResult = c.funcExt(self, S.onBoxColor),
}),
c.decl(ColorPicker, .{
.label = "Duck Color",
.init_val = self.duck_color,
.onPreviewChange = c.funcExt(self, S.onDuckColorPreview),
.onResult = c.funcExt(self, S.onDuckColor),
}),
}),
}),
});
}
};
var app: helper.App = undefined;
var main_cam: graphics.Camera = undefined;
var cam_mod: graphics.CameraModule = undefined;
const box = @embedFile("../../examples/assets/models/box.gltf");
const box_bin = @embedFile("../../examples/assets/models/Box0.bin");
var box_node: graphics.NodeGLTF = undefined;
const duck = @embedFile("../../examples/assets/models/duck.gltf");
const duck_bin = @embedFile("../../examples/assets/models/Duck0.bin");
var duck_node: graphics.NodeGLTF = undefined;
var app_root: *App = undefined;
pub fn main() !void {
// This is the app loop for desktop. For web/wasm see wasm exports below.
app.init("3d");
defer app.deinit();
// Setup model buffers.
var buffers = std.StringHashMap([]const u8).init(app.alloc);
defer buffers.deinit();
var box_bin_aligned = try stdx.mem.dupeAlign(app.alloc, u8, 2, box_bin);
defer app.alloc.free(box_bin_aligned);
var duck_bin_aligned = try stdx.mem.dupeAlign(app.alloc, u8, 2, duck_bin);
defer app.alloc.free(duck_bin_aligned);
try buffers.put("Box0.bin", box_bin_aligned);
try buffers.put("Duck0.bin", duck_bin_aligned);
// Load models.
var box_h = try app.gctx.loadGLTF(box);
defer box_h.deinit();
try box_h.loadBuffers(.{
.static_buffer_map = buffers,
});
box_node = try box_h.loadNode(app.alloc, 0);
defer box_node.deinit(app.alloc);
var duck_h = try app.gctx.loadGLTF(duck);
defer duck_h.deinit();
try duck_h.loadBuffers(.{
.static_buffer_map = buffers,
});
duck_node = try duck_h.loadNode(app.alloc, 0);
defer duck_node.deinit(app.alloc);
const aspect = app.win.getAspectRatio();
main_cam.initPerspective3D(60, aspect, 0.1, 1000);
main_cam.moveForward(90);
main_cam.moveUp(10);
main_cam.setRotation(0, 0);
cam_mod.init(&main_cam, &app.dispatcher);
// Update ui once to bind user root.
const ui_width = @intToFloat(f32, app.win.getWidth());
const ui_height = @intToFloat(f32, app.win.getHeight());
app.ui_mod.update(0, {}, buildRoot, ui_width, ui_height) catch unreachable;
app_root = app.ui_mod.getUserRoot(App).?;
app.runEventLoop(update);
}
fn buildRoot(_: void, c: *ui.BuildContext) ui.FrameId {
return c.decl(App, .{});
}
fn update(delta_ms: f32) void {
cam_mod.update(delta_ms);
// Render 3D scene.
const gctx = app.gctx;
gctx.setCamera(main_cam);
gctx.drawPlane();
gctx.setFillColor(Color.Red);
gctx.fillTriangle3D(0, 0, 0, 20, 0, 0, 0, 20, 0);
var box_xform = graphics.Transform.initIdentity();
box_xform.translate3D(-1, 1, 0);
box_xform.scale3D(20, 20, 20);
gctx.setFillColor(app_root.box_color);
gctx.fillMesh3D(box_xform, box_node.verts, box_node.indexes);
gctx.setStrokeColor(Color.Black);
gctx.strokeMesh3D(box_xform, box_node.verts, box_node.indexes);
var duck_xform = graphics.Transform.initIdentity();
duck_xform.translate3D(-150, 0, 0);
gctx.setFillColor(app_root.duck_color);
gctx.fillMesh3D(duck_xform, duck_node.verts, duck_node.indexes);
gctx.setStrokeColor(Color.Black);
gctx.strokeMesh3D(duck_xform, duck_node.verts, duck_node.indexes);
// Render ui.
gctx.setCamera(app.cam);
gctx.setFillColor(Color.White);
gctx.fillTextFmt(10, 710, "cam pos: ({d:.1},{d:.1},{d:.1})", .{main_cam.world_pos.x, main_cam.world_pos.y, main_cam.world_pos.z});
gctx.fillTextFmt(10, 730, "forward: ({d:.1},{d:.1},{d:.1})", .{main_cam.forward_nvec.x, main_cam.forward_nvec.y, main_cam.forward_nvec.z});
gctx.fillTextFmt(10, 750, "up: ({d:.1},{d:.1},{d:.1})", .{main_cam.up_nvec.x, main_cam.up_nvec.y, main_cam.up_nvec.z});
gctx.fillTextFmt(10, 770, "right: ({d:.1},{d:.1},{d:.1})", .{main_cam.right_nvec.x, main_cam.right_nvec.y, main_cam.right_nvec.z});
const ui_width = @intToFloat(f32, app.win.getWidth());
const ui_height = @intToFloat(f32, app.win.getHeight());
app.ui_mod.updateAndRender(delta_ms, {}, buildRoot, ui_width, ui_height) catch unreachable;
}
pub usingnamespace if (IsWasm) struct {
export fn wasmInit() *const u8 {
        return helper.wasmInit(&app, "3d");
}
export fn wasmUpdate(cur_time_ms: f64, input_buffer_len: u32) *const u8 {
return helper.wasmUpdate(cur_time_ms, input_buffer_len, &app, update);
}
/// Not that useful since it's a long lived process in the browser.
export fn wasmDeinit() void {
app.deinit();
stdx.wasm.deinit();
}
} else struct {}; | ui/examples/3d.zig |
const std = @import("std");
const os = std.os;
const mem = std.mem;
const elf = std.elf;
const math = std.math;
const assert = std.debug.assert;
const native_arch = std.Target.current.cpu.arch;
// This file implements the two TLS variants [1] used by ELF-based systems.
//
// Variant I has the following layout in memory:
// -------------------------------------------------------
// | DTV | Zig | DTV | Alignment | TLS |
// | storage | thread data | pointer | | block |
// ------------------------^------------------------------
// `-- The thread pointer register points here
//
// In this case we allocate additional space for our control structure that's
// placed _before_ the DTV pointer together with the DTV.
//
// NOTE: Some systems such as power64 or mips use this variant with a twist: the
// alignment is not present and the tp and DTV addresses are offset by a
// constant.
//
// On the other hand, Variant II has the following layout in memory:
// ---------------------------------------
// | TLS | TCB | Zig | DTV |
// | block | | thread data | storage |
// --------^------------------------------
// `-- The thread pointer register points here
//
// The structure of the TCB is not defined by the ABI so we reserve enough space
// for a single pointer as some architectures such as i386 and x86_64 need a
// pointer to the TCB block itself at the address pointed by the tp.
//
// In this case the control structure and DTV are placed one after another right
// after the TLS block data.
//
// At the moment the DTV is very simple since we only support static TLS, all we
// need is a two word vector to hold the number of entries (1) and the address
// of the first TLS block.
//
// [1] https://www.akkadia.org/drepper/tls.pdf
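//
// A worked example of the Variant II offset math implemented in initTLS below
// (illustrative numbers, assuming x86_64): with a 48-byte TLS block and p_align = 16,
// data_offset = 0 and tcb_offset = 48 (the block size rounded up to p_align); the
// single-pointer TCB and the CustomData struct follow, so the DTV lands at offset
// 48 + 8 + 8 = 64, which already satisfies the DTV's alignment.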
const TLSVariant = enum {
VariantI,
VariantII,
};
const tls_variant = switch (native_arch) {
.arm, .armeb, .thumb, .aarch64, .aarch64_be, .riscv32, .riscv64, .mips, .mipsel, .powerpc, .powerpc64, .powerpc64le => TLSVariant.VariantI,
.x86_64, .i386, .sparcv9 => TLSVariant.VariantII,
else => @compileError("undefined tls_variant for this architecture"),
};
// Controls how many bytes are reserved for the Thread Control Block
const tls_tcb_size = switch (native_arch) {
// ARM EABI mandates enough space for two pointers: the first one points to
// the DTV while the second one is unspecified but reserved
.arm, .armeb, .thumb, .aarch64, .aarch64_be => 2 * @sizeOf(usize),
// One pointer-sized word that points either to the DTV or the TCB itself
else => @sizeOf(usize),
};
// Controls if the TP points to the end of the TCB instead of its beginning
const tls_tp_points_past_tcb = switch (native_arch) {
.riscv32, .riscv64, .mips, .mipsel, .powerpc, .powerpc64, .powerpc64le => true,
else => false,
};
// Some architectures add some offset to the tp and dtv addresses in order to
// make the generated code more efficient
const tls_tp_offset = switch (native_arch) {
.mips, .mipsel, .powerpc, .powerpc64, .powerpc64le => 0x7000,
else => 0,
};
const tls_dtv_offset = switch (native_arch) {
.mips, .mipsel, .powerpc, .powerpc64, .powerpc64le => 0x8000,
.riscv32, .riscv64 => 0x800,
else => 0,
};
// Per-thread storage for Zig's use
const CustomData = struct {
dummy: usize,
};
// Dynamic Thread Vector
const DTV = extern struct {
entries: usize,
tls_block: [1][*]u8,
};
// Holds all the information about the process TLS image
const TLSImage = struct {
init_data: []const u8,
alloc_size: usize,
alloc_align: usize,
tcb_offset: usize,
dtv_offset: usize,
data_offset: usize,
data_size: usize,
// Only used on the i386 architecture
gdt_entry_number: usize,
};
pub var tls_image: TLSImage = undefined;
pub fn setThreadPointer(addr: usize) void {
switch (native_arch) {
.i386 => {
var user_desc = std.os.linux.user_desc{
.entry_number = tls_image.gdt_entry_number,
.base_addr = addr,
.limit = 0xfffff,
.seg_32bit = 1,
.contents = 0, // Data
.read_exec_only = 0,
.limit_in_pages = 1,
.seg_not_present = 0,
.useable = 1,
};
const rc = std.os.linux.syscall1(.set_thread_area, @ptrToInt(&user_desc));
assert(rc == 0);
const gdt_entry_number = user_desc.entry_number;
// We have to keep track of our slot as it's also needed for clone()
tls_image.gdt_entry_number = gdt_entry_number;
// Update the %gs selector
asm volatile ("movl %[gs_val], %%gs"
:
: [gs_val] "r" (gdt_entry_number << 3 | 3),
);
},
.x86_64 => {
const rc = std.os.linux.syscall2(.arch_prctl, std.os.linux.ARCH_SET_FS, addr);
assert(rc == 0);
},
.aarch64 => {
asm volatile (
\\ msr tpidr_el0, %[addr]
:
: [addr] "r" (addr),
);
},
.arm, .thumb => {
const rc = std.os.linux.syscall1(.set_tls, addr);
assert(rc == 0);
},
.riscv64 => {
asm volatile (
\\ mv tp, %[addr]
:
: [addr] "r" (addr),
);
},
.mips, .mipsel => {
const rc = std.os.linux.syscall1(.set_thread_area, addr);
assert(rc == 0);
},
.powerpc => {
asm volatile (
\\ mr 2, %[addr]
:
: [addr] "r" (addr),
);
},
.powerpc64, .powerpc64le => {
asm volatile (
\\ mr 13, %[addr]
:
: [addr] "r" (addr),
);
},
.sparcv9 => {
asm volatile (
\\ mov %[addr], %%g7
:
: [addr] "r" (addr),
);
},
else => @compileError("Unsupported architecture"),
}
}
fn initTLS(phdrs: []elf.Phdr) void {
var tls_phdr: ?*elf.Phdr = null;
var img_base: usize = 0;
for (phdrs) |*phdr| {
switch (phdr.p_type) {
elf.PT_PHDR => img_base = @ptrToInt(phdrs.ptr) - phdr.p_vaddr,
elf.PT_TLS => tls_phdr = phdr,
else => {},
}
}
var tls_align_factor: usize = undefined;
var tls_data: []const u8 = undefined;
var tls_data_alloc_size: usize = undefined;
if (tls_phdr) |phdr| {
// The effective size in memory is represented by p_memsz, the length of
// the data stored in the PT_TLS segment is p_filesz and may be less
// than the former
tls_align_factor = phdr.p_align;
tls_data = @intToPtr([*]u8, img_base + phdr.p_vaddr)[0..phdr.p_filesz];
tls_data_alloc_size = phdr.p_memsz;
} else {
tls_align_factor = @alignOf(usize);
tls_data = &[_]u8{};
tls_data_alloc_size = 0;
}
// Offsets into the allocated TLS area
var tcb_offset: usize = undefined;
var dtv_offset: usize = undefined;
var data_offset: usize = undefined;
// Compute the total size of the ABI-specific data plus our own control
    // structures. All the offsets calculated here assume a well-aligned base
// address.
const alloc_size = switch (tls_variant) {
.VariantI => blk: {
var l: usize = 0;
dtv_offset = l;
l += @sizeOf(DTV);
// Add some padding here so that the thread pointer (tcb_offset) is
// aligned to p_align and the CustomData structure can be found by
// simply subtracting its @sizeOf from the tp value
const delta = (l + @sizeOf(CustomData)) & (tls_align_factor - 1);
if (delta > 0)
l += tls_align_factor - delta;
l += @sizeOf(CustomData);
tcb_offset = l;
l += mem.alignForward(tls_tcb_size, tls_align_factor);
data_offset = l;
l += tls_data_alloc_size;
break :blk l;
},
.VariantII => blk: {
var l: usize = 0;
data_offset = l;
l += mem.alignForward(tls_data_alloc_size, tls_align_factor);
// The thread pointer is aligned to p_align
tcb_offset = l;
l += tls_tcb_size;
// The CustomData structure is right after the TCB with no padding
// in between so it can be easily found
l += @sizeOf(CustomData);
l = mem.alignForward(l, @alignOf(DTV));
dtv_offset = l;
l += @sizeOf(DTV);
break :blk l;
},
};
tls_image = TLSImage{
.init_data = tls_data,
.alloc_size = alloc_size,
.alloc_align = tls_align_factor,
.tcb_offset = tcb_offset,
.dtv_offset = dtv_offset,
.data_offset = data_offset,
.data_size = tls_data_alloc_size,
.gdt_entry_number = @bitCast(usize, @as(isize, -1)),
};
}
inline fn alignPtrCast(comptime T: type, ptr: [*]u8) *T {
return @ptrCast(*T, @alignCast(@alignOf(T), ptr));
}
/// Initializes all the fields of the static TLS area and returns the computed
/// architecture-specific value of the thread-pointer register
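/// The caller is expected to pass a buffer of tls_image.alloc_size bytes, aligned to
/// tls_image.alloc_align (see initStaticTLS below for how such a buffer is obtained).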
pub fn prepareTLS(area: []u8) usize {
// Clear the area we're going to use, just to be safe
mem.set(u8, area, 0);
// Prepare the DTV
const dtv = alignPtrCast(DTV, area.ptr + tls_image.dtv_offset);
dtv.entries = 1;
dtv.tls_block[0] = area.ptr + tls_dtv_offset + tls_image.data_offset;
// Prepare the TCB
const tcb_ptr = alignPtrCast([*]u8, area.ptr + tls_image.tcb_offset);
tcb_ptr.* = switch (tls_variant) {
.VariantI => area.ptr + tls_image.dtv_offset,
.VariantII => area.ptr + tls_image.tcb_offset,
};
// Copy the data
mem.copy(u8, area[tls_image.data_offset..], tls_image.init_data);
// Return the corrected (if needed) value for the tp register
return @ptrToInt(area.ptr) + tls_tp_offset +
if (tls_tp_points_past_tcb) tls_image.data_offset else tls_image.tcb_offset;
}
// The main motivation for the size chosen here is that this is how much ends up being
// requested for the thread local variables of the std.crypto.random implementation.
// I'm not sure why it ends up being so much; the struct itself is only 64 bytes.
// I think it has to do with being page aligned and LLVM or LLD is not smart enough
// to lay out the TLS data in a space conserving way. Anyway I think it's fine
// because it's less than 3 pages of memory, and putting it in the ELF like this
// is equivalent to moving the mmap call below into the kernel, avoiding syscall
// overhead.
var main_thread_tls_buffer: [0x2100]u8 align(mem.page_size) = undefined;
pub fn initStaticTLS(phdrs: []elf.Phdr) void {
initTLS(phdrs);
const tls_area = blk: {
// Fast path for the common case where the TLS data is really small,
// avoid an allocation and use our local buffer.
if (tls_image.alloc_align <= mem.page_size and
tls_image.alloc_size <= main_thread_tls_buffer.len)
{
break :blk main_thread_tls_buffer[0..tls_image.alloc_size];
}
const alloc_tls_area = os.mmap(
null,
tls_image.alloc_size + tls_image.alloc_align - 1,
os.PROT_READ | os.PROT_WRITE,
os.MAP_PRIVATE | os.MAP_ANONYMOUS,
-1,
0,
) catch os.abort();
// Make sure the slice is correctly aligned.
const begin_addr = @ptrToInt(alloc_tls_area.ptr);
const begin_aligned_addr = mem.alignForward(begin_addr, tls_image.alloc_align);
const start = begin_aligned_addr - begin_addr;
break :blk alloc_tls_area[start .. start + tls_image.alloc_size];
};
const tp_value = prepareTLS(tls_area);
setThreadPointer(tp_value);
} | lib/std/os/linux/tls.zig |
const std = @import("std");
const math = std.math;
const mem = std.mem;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Air = @import("Air.zig");
const Type = @import("type.zig").Type;
const Module = @import("Module.zig");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectEqualSlices = std.testing.expectEqualSlices;
const log = std.log.scoped(.register_manager);
pub fn RegisterManager(
comptime Function: type,
comptime Register: type,
comptime callee_preserved_regs: []const Register,
) type {
// architectures which do not have a concept of registers should
// refrain from using RegisterManager
assert(callee_preserved_regs.len > 0); // see note above
return struct {
/// Tracks the AIR instruction allocated to every register or
/// `null` if no instruction is allocated to a register
///
        /// The key must be a canonical register.
registers: [callee_preserved_regs.len]?Air.Inst.Index = [_]?Air.Inst.Index{null} ** callee_preserved_regs.len,
/// Tracks which registers are free (in which case the
/// corresponding bit is set to 1)
free_registers: FreeRegInt = math.maxInt(FreeRegInt),
/// Tracks all registers allocated in the course of this
/// function
allocated_registers: FreeRegInt = 0,
/// Tracks registers which are temporarily blocked from being
/// allocated
frozen_registers: FreeRegInt = 0,
const Self = @This();
/// An integer whose bits represent all the registers and
/// whether they are free.
const FreeRegInt = std.meta.Int(.unsigned, callee_preserved_regs.len);
const ShiftInt = math.Log2Int(FreeRegInt);
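        // Example of the bit layout (using MockRegister1 from the tests below, where
        // callee_preserved_regs = .{ .r2, .r3 }): FreeRegInt is a u2, bit 0 tracks r2
        // and bit 1 tracks r3, so free_registers == 0b11 means both are free and
        // getRegisterMask(.r3) == 0b10.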
fn getFunction(self: *Self) *Function {
return @fieldParentPtr(Function, "register_manager", self);
}
fn getRegisterMask(reg: Register) ?FreeRegInt {
const index = reg.allocIndex() orelse return null;
const shift = @intCast(ShiftInt, index);
const mask = @as(FreeRegInt, 1) << shift;
return mask;
}
fn markRegAllocated(self: *Self, reg: Register) void {
const mask = getRegisterMask(reg) orelse return;
self.allocated_registers |= mask;
}
fn markRegUsed(self: *Self, reg: Register) void {
const mask = getRegisterMask(reg) orelse return;
self.free_registers &= ~mask;
}
fn markRegFree(self: *Self, reg: Register) void {
const mask = getRegisterMask(reg) orelse return;
self.free_registers |= mask;
}
/// Returns true when this register is not tracked
pub fn isRegFree(self: Self, reg: Register) bool {
const mask = getRegisterMask(reg) orelse return true;
return self.free_registers & mask != 0;
}
/// Returns whether this register was allocated in the course
/// of this function.
///
/// Returns false when this register is not tracked
pub fn isRegAllocated(self: Self, reg: Register) bool {
const mask = getRegisterMask(reg) orelse return false;
return self.allocated_registers & mask != 0;
}
/// Returns whether this register is frozen
///
/// Returns false when this register is not tracked
pub fn isRegFrozen(self: Self, reg: Register) bool {
const mask = getRegisterMask(reg) orelse return false;
return self.frozen_registers & mask != 0;
}
/// Prevents the registers from being allocated until they are
/// unfrozen again
pub fn freezeRegs(self: *Self, regs: []const Register) void {
for (regs) |reg| {
const mask = getRegisterMask(reg) orelse continue;
self.frozen_registers |= mask;
}
}
/// Enables the allocation of the registers
pub fn unfreezeRegs(self: *Self, regs: []const Register) void {
for (regs) |reg| {
const mask = getRegisterMask(reg) orelse continue;
self.frozen_registers &= ~mask;
}
}
/// Returns true when at least one register is frozen
pub fn frozenRegsExist(self: Self) bool {
return self.frozen_registers != 0;
}
/// Allocates a specified number of registers, optionally
/// tracking them. Returns `null` if not enough registers are
/// free.
pub fn tryAllocRegs(
self: *Self,
comptime count: comptime_int,
insts: [count]?Air.Inst.Index,
) ?[count]Register {
comptime assert(count > 0 and count <= callee_preserved_regs.len);
const free_registers = @popCount(FreeRegInt, self.free_registers);
if (free_registers < count) return null;
var regs: [count]Register = undefined;
var i: usize = 0;
for (callee_preserved_regs) |reg| {
if (i >= count) break;
if (self.isRegFrozen(reg)) continue;
if (self.isRegFree(reg)) {
regs[i] = reg;
i += 1;
}
}
assert(i == count);
for (regs) |reg, j| {
self.markRegAllocated(reg);
if (insts[j]) |inst| {
// Track the register
const index = reg.allocIndex().?; // allocIndex() on a callee-preserved reg should never return null
self.registers[index] = inst;
self.markRegUsed(reg);
}
}
return regs;
}
/// Allocates a register and optionally tracks it with a
/// corresponding instruction. Returns `null` if all registers
/// are allocated.
pub fn tryAllocReg(self: *Self, inst: ?Air.Inst.Index) ?Register {
return if (tryAllocRegs(self, 1, .{inst})) |regs| regs[0] else null;
}
/// Allocates a specified number of registers, optionally
/// tracking them. Asserts that count is not
/// larger than the total number of registers available.
pub fn allocRegs(
self: *Self,
comptime count: comptime_int,
insts: [count]?Air.Inst.Index,
) ![count]Register {
comptime assert(count > 0 and count <= callee_preserved_regs.len);
const result = self.tryAllocRegs(count, insts) orelse blk: {
                // We'll take over the first count registers. Spill
                // the instructions that were previously there to
                // stack allocations.
var regs: [count]Register = undefined;
var i: usize = 0;
for (callee_preserved_regs) |reg| {
if (i >= count) break;
if (self.isRegFrozen(reg)) continue;
regs[i] = reg;
self.markRegAllocated(reg);
const index = reg.allocIndex().?; // allocIndex() on a callee-preserved reg should never return null
if (insts[i]) |inst| {
// Track the register
if (self.isRegFree(reg)) {
self.markRegUsed(reg);
} else {
const spilled_inst = self.registers[index].?;
try self.getFunction().spillInstruction(reg, spilled_inst);
}
self.registers[index] = inst;
} else {
// Don't track the register
if (!self.isRegFree(reg)) {
const spilled_inst = self.registers[index].?;
try self.getFunction().spillInstruction(reg, spilled_inst);
self.freeReg(reg);
}
}
i += 1;
}
break :blk regs;
};
log.debug("allocated registers {any} for insts {any}", .{ result, insts });
return result;
}
/// Allocates a register and optionally tracks it with a
/// corresponding instruction.
pub fn allocReg(self: *Self, inst: ?Air.Inst.Index) !Register {
return (try self.allocRegs(1, .{inst}))[0];
}
/// Spills the register if it is currently allocated. If a
/// corresponding instruction is passed, will also track this
/// register.
pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) !void {
const index = reg.allocIndex() orelse return;
self.markRegAllocated(reg);
if (inst) |tracked_inst|
if (!self.isRegFree(reg)) {
// Move the instruction that was previously there to a
// stack allocation.
const spilled_inst = self.registers[index].?;
self.registers[index] = tracked_inst;
try self.getFunction().spillInstruction(reg, spilled_inst);
} else {
self.getRegAssumeFree(reg, tracked_inst);
}
else {
if (!self.isRegFree(reg)) {
// Move the instruction that was previously there to a
// stack allocation.
const spilled_inst = self.registers[index].?;
try self.getFunction().spillInstruction(reg, spilled_inst);
self.freeReg(reg);
}
}
}
/// Allocates the specified register with the specified
/// instruction. Asserts that the register is free and no
/// spilling is necessary.
pub fn getRegAssumeFree(self: *Self, reg: Register, inst: Air.Inst.Index) void {
const index = reg.allocIndex() orelse return;
self.markRegAllocated(reg);
assert(self.registers[index] == null);
self.registers[index] = inst;
self.markRegUsed(reg);
}
/// Marks the specified register as free
pub fn freeReg(self: *Self, reg: Register) void {
const index = reg.allocIndex() orelse return;
log.debug("freeing register {}", .{reg});
self.registers[index] = null;
self.markRegFree(reg);
}
};
}
const MockRegister1 = enum(u2) {
r0,
r1,
r2,
r3,
pub fn allocIndex(self: MockRegister1) ?u2 {
inline for (callee_preserved_regs) |cpreg, i| {
if (self == cpreg) return i;
}
return null;
}
const callee_preserved_regs = [_]MockRegister1{ .r2, .r3 };
};
const MockRegister2 = enum(u2) {
r0,
r1,
r2,
r3,
pub fn allocIndex(self: MockRegister2) ?u2 {
inline for (callee_preserved_regs) |cpreg, i| {
if (self == cpreg) return i;
}
return null;
}
const callee_preserved_regs = [_]MockRegister2{ .r0, .r1, .r2, .r3 };
};
fn MockFunction(comptime Register: type) type {
return struct {
allocator: Allocator,
register_manager: RegisterManager(Self, Register, &Register.callee_preserved_regs) = .{},
spilled: std.ArrayListUnmanaged(Register) = .{},
const Self = @This();
pub fn deinit(self: *Self) void {
self.spilled.deinit(self.allocator);
}
pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
_ = inst;
try self.spilled.append(self.allocator, reg);
}
};
}
const MockFunction1 = MockFunction(MockRegister1);
const MockFunction2 = MockFunction(MockRegister2);
test "default state" {
const allocator = std.testing.allocator;
var function = MockFunction1{
.allocator = allocator,
};
defer function.deinit();
try expect(!function.register_manager.isRegAllocated(.r2));
try expect(!function.register_manager.isRegAllocated(.r3));
try expect(function.register_manager.isRegFree(.r2));
try expect(function.register_manager.isRegFree(.r3));
}
test "tryAllocReg: no spilling" {
const allocator = std.testing.allocator;
var function = MockFunction1{
.allocator = allocator,
};
defer function.deinit();
const mock_instruction: Air.Inst.Index = 1;
try expectEqual(@as(?MockRegister1, .r2), function.register_manager.tryAllocReg(mock_instruction));
try expectEqual(@as(?MockRegister1, .r3), function.register_manager.tryAllocReg(mock_instruction));
try expectEqual(@as(?MockRegister1, null), function.register_manager.tryAllocReg(mock_instruction));
try expect(function.register_manager.isRegAllocated(.r2));
try expect(function.register_manager.isRegAllocated(.r3));
try expect(!function.register_manager.isRegFree(.r2));
try expect(!function.register_manager.isRegFree(.r3));
function.register_manager.freeReg(.r2);
function.register_manager.freeReg(.r3);
try expect(function.register_manager.isRegAllocated(.r2));
try expect(function.register_manager.isRegAllocated(.r3));
try expect(function.register_manager.isRegFree(.r2));
try expect(function.register_manager.isRegFree(.r3));
}
test "allocReg: spilling" {
const allocator = std.testing.allocator;
var function = MockFunction1{
.allocator = allocator,
};
defer function.deinit();
const mock_instruction: Air.Inst.Index = 1;
try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(mock_instruction));
try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction));
// Spill a register
try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(mock_instruction));
try expectEqualSlices(MockRegister1, &[_]MockRegister1{.r2}, function.spilled.items);
// No spilling necessary
function.register_manager.freeReg(.r3);
try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction));
try expectEqualSlices(MockRegister1, &[_]MockRegister1{.r2}, function.spilled.items);
// Frozen registers
function.register_manager.freeReg(.r3);
{
function.register_manager.freezeRegs(&.{.r2});
defer function.register_manager.unfreezeRegs(&.{.r2});
try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction));
}
try expect(!function.register_manager.frozenRegsExist());
}
test "tryAllocRegs" {
const allocator = std.testing.allocator;
var function = MockFunction2{
.allocator = allocator,
};
defer function.deinit();
try expectEqual([_]MockRegister2{ .r0, .r1, .r2 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }).?);
try expect(function.register_manager.isRegAllocated(.r0));
try expect(function.register_manager.isRegAllocated(.r1));
try expect(function.register_manager.isRegAllocated(.r2));
try expect(!function.register_manager.isRegAllocated(.r3));
// Frozen registers
function.register_manager.freeReg(.r0);
function.register_manager.freeReg(.r2);
function.register_manager.freeReg(.r3);
{
function.register_manager.freezeRegs(&.{.r1});
defer function.register_manager.unfreezeRegs(&.{.r1});
try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }).?);
}
try expect(!function.register_manager.frozenRegsExist());
try expect(function.register_manager.isRegAllocated(.r0));
try expect(function.register_manager.isRegAllocated(.r1));
try expect(function.register_manager.isRegAllocated(.r2));
try expect(function.register_manager.isRegAllocated(.r3));
}
test "allocRegs" {
const allocator = std.testing.allocator;
var function = MockFunction2{
.allocator = allocator,
};
defer function.deinit();
const mock_instruction: Air.Inst.Index = 1;
try expectEqual([_]MockRegister2{ .r0, .r1, .r2 }, try function.register_manager.allocRegs(3, .{
mock_instruction,
mock_instruction,
mock_instruction,
}));
try expect(function.register_manager.isRegAllocated(.r0));
try expect(function.register_manager.isRegAllocated(.r1));
try expect(function.register_manager.isRegAllocated(.r2));
try expect(!function.register_manager.isRegAllocated(.r3));
// Frozen registers
function.register_manager.freeReg(.r0);
function.register_manager.freeReg(.r2);
function.register_manager.freeReg(.r3);
{
function.register_manager.freezeRegs(&.{.r1});
defer function.register_manager.unfreezeRegs(&.{.r1});
try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, try function.register_manager.allocRegs(3, .{ null, null, null }));
}
try expect(!function.register_manager.frozenRegsExist());
try expect(function.register_manager.isRegAllocated(.r0));
try expect(function.register_manager.isRegAllocated(.r1));
try expect(function.register_manager.isRegAllocated(.r2));
try expect(function.register_manager.isRegAllocated(.r3));
}
test "getReg" {
const allocator = std.testing.allocator;
var function = MockFunction1{
.allocator = allocator,
};
defer function.deinit();
const mock_instruction: Air.Inst.Index = 1;
try function.register_manager.getReg(.r3, mock_instruction);
try expect(!function.register_manager.isRegAllocated(.r2));
try expect(function.register_manager.isRegAllocated(.r3));
try expect(function.register_manager.isRegFree(.r2));
try expect(!function.register_manager.isRegFree(.r3));
// Spill r3
try function.register_manager.getReg(.r3, mock_instruction);
try expect(!function.register_manager.isRegAllocated(.r2));
try expect(function.register_manager.isRegAllocated(.r3));
try expect(function.register_manager.isRegFree(.r2));
try expect(!function.register_manager.isRegFree(.r3));
try expectEqualSlices(MockRegister1, &[_]MockRegister1{.r3}, function.spilled.items);
} | src/register_manager.zig |
const std = @import("std.zig");
fn ok(comptime s: []const u8) void {
std.testing.expect(std.json.validate(s));
}
fn err(comptime s: []const u8) void {
std.testing.expect(!std.json.validate(s));
}
fn any(comptime s: []const u8) void {
std.testing.expect(true);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Additional tests not part of JSONTestSuite.
test "json.test.y_trailing_comma_after_empty" {
ok(
\\{"1":[],"2":{},"3":"4"}
);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
test "json.test.y_array_arraysWithSpaces" {
ok(
\\[[] ]
);
}
test "json.test.y_array_empty" {
ok(
\\[]
);
}
test "json.test.y_array_empty-string" {
ok(
\\[""]
);
}
test "json.test.y_array_ending_with_newline" {
ok(
\\["a"]
);
}
test "json.test.y_array_false" {
ok(
\\[false]
);
}
test "json.test.y_array_heterogeneous" {
ok(
\\[null, 1, "1", {}]
);
}
test "json.test.y_array_null" {
ok(
\\[null]
);
}
test "json.test.y_array_with_1_and_newline" {
ok(
\\[1
\\]
);
}
test "json.test.y_array_with_leading_space" {
ok(
\\ [1]
);
}
test "json.test.y_array_with_several_null" {
ok(
\\[1,null,null,null,2]
);
}
test "json.test.y_array_with_trailing_space" {
ok("[2] ");
}
test "json.test.y_number_0e+1" {
ok(
\\[0e+1]
);
}
test "json.test.y_number_0e1" {
ok(
\\[0e1]
);
}
test "json.test.y_number_after_space" {
ok(
\\[ 4]
);
}
test "json.test.y_number_double_close_to_zero" {
ok(
\\[-0.000000000000000000000000000000000000000000000000000000000000000000000000000001]
);
}
test "json.test.y_number_int_with_exp" {
ok(
\\[20e1]
);
}
test "json.test.y_number" {
ok(
\\[123e65]
);
}
test "json.test.y_number_minus_zero" {
ok(
\\[-0]
);
}
test "json.test.y_number_negative_int" {
ok(
\\[-123]
);
}
test "json.test.y_number_negative_one" {
ok(
\\[-1]
);
}
test "json.test.y_number_negative_zero" {
ok(
\\[-0]
);
}
test "json.test.y_number_real_capital_e" {
ok(
\\[1E22]
);
}
test "json.test.y_number_real_capital_e_neg_exp" {
ok(
\\[1E-2]
);
}
test "json.test.y_number_real_capital_e_pos_exp" {
ok(
\\[1E+2]
);
}
test "json.test.y_number_real_exponent" {
ok(
\\[123e45]
);
}
test "json.test.y_number_real_fraction_exponent" {
ok(
\\[123.456e78]
);
}
test "json.test.y_number_real_neg_exp" {
ok(
\\[1e-2]
);
}
test "json.test.y_number_real_pos_exponent" {
ok(
\\[1e+2]
);
}
test "json.test.y_number_simple_int" {
ok(
\\[123]
);
}
test "json.test.y_number_simple_real" {
ok(
\\[123.456789]
);
}
test "json.test.y_object_basic" {
ok(
\\{"asd":"sdf"}
);
}
test "json.test.y_object_duplicated_key_and_value" {
ok(
\\{"a":"b","a":"b"}
);
}
test "json.test.y_object_duplicated_key" {
ok(
\\{"a":"b","a":"c"}
);
}
test "json.test.y_object_empty" {
ok(
\\{}
);
}
test "json.test.y_object_empty_key" {
ok(
\\{"":0}
);
}
test "json.test.y_object_escaped_null_in_key" {
ok(
\\{"foo\u0000bar": 42}
);
}
test "json.test.y_object_extreme_numbers" {
ok(
\\{ "min": -1.0e+28, "max": 1.0e+28 }
);
}
test "json.test.y_object" {
ok(
\\{"asd":"sdf", "dfg":"fgh"}
);
}
test "json.test.y_object_long_strings" {
ok(
\\{"x":[{"id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}], "id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}
);
}
test "json.test.y_object_simple" {
ok(
\\{"a":[]}
);
}
test "json.test.y_object_string_unicode" {
ok(
\\{"title":"\u041f\u043e\u043b\u0442\u043e\u0440\u0430 \u0417\u0435\u043c\u043b\u0435\u043a\u043e\u043f\u0430" }
);
}
test "json.test.y_object_with_newlines" {
ok(
\\{
\\"a": "b"
\\}
);
}
test "json.test.y_string_1_2_3_bytes_UTF-8_sequences" {
ok(
\\["\u0060\u012a\u12AB"]
);
}
test "json.test.y_string_accepted_surrogate_pair" {
ok(
\\["\uD801\udc37"]
);
}
test "json.test.y_string_accepted_surrogate_pairs" {
ok(
\\["\ud83d\ude39\ud83d\udc8d"]
);
}
test "json.test.y_string_allowed_escapes" {
ok(
\\["\"\\\/\b\f\n\r\t"]
);
}
test "json.test.y_string_backslash_and_u_escaped_zero" {
ok(
\\["\\u0000"]
);
}
test "json.test.y_string_backslash_doublequotes" {
ok(
\\["\""]
);
}
test "json.test.y_string_comments" {
ok(
\\["a/*b*/c/*d//e"]
);
}
test "json.test.y_string_double_escape_a" {
ok(
\\["\\a"]
);
}
test "json.test.y_string_double_escape_n" {
ok(
\\["\\n"]
);
}
test "json.test.y_string_escaped_control_character" {
ok(
\\["\u0012"]
);
}
test "json.test.y_string_escaped_noncharacter" {
ok(
\\["\uFFFF"]
);
}
test "json.test.y_string_in_array" {
ok(
\\["asd"]
);
}
test "json.test.y_string_in_array_with_leading_space" {
ok(
\\[ "asd"]
);
}
test "json.test.y_string_last_surrogates_1_and_2" {
ok(
\\["\uDBFF\uDFFF"]
);
}
test "json.test.y_string_nbsp_uescaped" {
ok(
\\["new\u00A0line"]
);
}
test "json.test.y_string_nonCharacterInUTF-8_U+10FFFF" {
ok(
\\[""]
);
}
test "json.test.y_string_nonCharacterInUTF-8_U+FFFF" {
ok(
\\[""]
);
}
test "json.test.y_string_null_escape" {
ok(
\\["\u0000"]
);
}
test "json.test.y_string_one-byte-utf-8" {
ok(
\\["\u002c"]
);
}
test "json.test.y_string_pi" {
ok(
\\["π"]
);
}
test "json.test.y_string_reservedCharacterInUTF-8_U+1BFFF" {
ok(
\\[""]
);
}
test "json.test.y_string_simple_ascii" {
ok(
\\["asd "]
);
}
test "json.test.y_string_space" {
ok(
\\" "
);
}
test "json.test.y_string_surrogates_U+1D11E_MUSICAL_SYMBOL_G_CLEF" {
ok(
\\["\uD834\uDd1e"]
);
}
test "json.test.y_string_three-byte-utf-8" {
ok(
\\["\u0821"]
);
}
test "json.test.y_string_two-byte-utf-8" {
ok(
\\["\u0123"]
);
}
test "json.test.y_string_u+2028_line_sep" {
ok("[\"\xe2\x80\xa8\"]");
}
test "json.test.y_string_u+2029_par_sep" {
ok("[\"\xe2\x80\xa9\"]");
}
test "json.test.y_string_uescaped_newline" {
ok(
\\["new\u000Aline"]
);
}
test "json.test.y_string_uEscape" {
ok(
\\["\u0061\u30af\u30EA\u30b9"]
);
}
test "json.test.y_string_unescaped_char_delete" {
ok("[\"\x7f\"]");
}
test "json.test.y_string_unicode_2" {
ok(
\\["⍂㈴⍂"]
);
}
test "json.test.y_string_unicodeEscapedBackslash" {
ok(
\\["\u005C"]
);
}
test "json.test.y_string_unicode_escaped_double_quote" {
ok(
\\["\u0022"]
);
}
test "json.test.y_string_unicode" {
ok(
\\["\uA66D"]
);
}
test "json.test.y_string_unicode_U+10FFFE_nonchar" {
ok(
\\["\uDBFF\uDFFE"]
);
}
test "json.test.y_string_unicode_U+1FFFE_nonchar" {
ok(
\\["\uD83F\uDFFE"]
);
}
test "json.test.y_string_unicode_U+200B_ZERO_WIDTH_SPACE" {
ok(
\\["\u200B"]
);
}
test "json.test.y_string_unicode_U+2064_invisible_plus" {
ok(
\\["\u2064"]
);
}
test "json.test.y_string_unicode_U+FDD0_nonchar" {
ok(
\\["\uFDD0"]
);
}
test "json.test.y_string_unicode_U+FFFE_nonchar" {
ok(
\\["\uFFFE"]
);
}
test "json.test.y_string_utf8" {
ok(
\\["€𝄞"]
);
}
test "json.test.y_string_with_del_character" {
ok("[\"a\x7fa\"]");
}
test "json.test.y_structure_lonely_false" {
ok(
\\false
);
}
test "json.test.y_structure_lonely_int" {
ok(
\\42
);
}
test "json.test.y_structure_lonely_negative_real" {
ok(
\\-0.1
);
}
test "json.test.y_structure_lonely_null" {
ok(
\\null
);
}
test "json.test.y_structure_lonely_string" {
ok(
\\"asd"
);
}
test "json.test.y_structure_lonely_true" {
ok(
\\true
);
}
test "json.test.y_structure_string_empty" {
ok(
\\""
);
}
test "json.test.y_structure_trailing_newline" {
ok(
\\["a"]
);
}
test "json.test.y_structure_true_in_array" {
ok(
\\[true]
);
}
test "json.test.y_structure_whitespace_array" {
ok(" [] ");
}
////////////////////////////////////////////////////////////////////////////////////////////////////
test "json.test.n_array_1_true_without_comma" {
err(
\\[1 true]
);
}
test "json.test.n_array_a_invalid_utf8" {
err(
\\[aå]
);
}
test "json.test.n_array_colon_instead_of_comma" {
err(
\\["": 1]
);
}
test "json.test.n_array_comma_after_close" {
//err(
// \\[""],
//);
}
test "json.test.n_array_comma_and_number" {
err(
\\[,1]
);
}
test "json.test.n_array_double_comma" {
err(
\\[1,,2]
);
}
test "json.test.n_array_double_extra_comma" {
err(
\\["x",,]
);
}
test "json.test.n_array_extra_close" {
err(
\\["x"]]
);
}
test "json.test.n_array_extra_comma" {
//err(
// \\["",]
//);
}
test "json.test.n_array_incomplete_invalid_value" {
err(
\\[x
);
}
test "json.test.n_array_incomplete" {
err(
\\["x"
);
}
test "json.test.n_array_inner_array_no_comma" {
err(
\\[3[4]]
);
}
test "json.test.n_array_invalid_utf8" {
err(
\\[ÿ]
);
}
test "json.test.n_array_items_separated_by_semicolon" {
err(
\\[1:2]
);
}
test "json.test.n_array_just_comma" {
err(
\\[,]
);
}
test "json.test.n_array_just_minus" {
err(
\\[-]
);
}
test "json.test.n_array_missing_value" {
err(
\\[ , ""]
);
}
test "json.test.n_array_newlines_unclosed" {
err(
\\["a",
\\4
\\,1,
);
}
test "json.test.n_array_number_and_comma" {
err(
\\[1,]
);
}
test "json.test.n_array_number_and_several_commas" {
err(
\\[1,,]
);
}
test "json.test.n_array_spaces_vertical_tab_formfeed" {
err("[\"\x0aa\"\\f]");
}
test "json.test.n_array_star_inside" {
err(
\\[*]
);
}
test "json.test.n_array_unclosed" {
err(
\\[""
);
}
test "json.test.n_array_unclosed_trailing_comma" {
err(
\\[1,
);
}
test "json.test.n_array_unclosed_with_new_lines" {
err(
\\[1,
\\1
\\,1
);
}
test "json.test.n_array_unclosed_with_object_inside" {
err(
\\[{}
);
}
test "json.test.n_incomplete_false" {
err(
\\[fals]
);
}
test "json.test.n_incomplete_null" {
err(
\\[nul]
);
}
test "json.test.n_incomplete_true" {
err(
\\[tru]
);
}
test "json.test.n_multidigit_number_then_00" {
err("123\x00");
}
test "json.test.n_number_0.1.2" {
err(
\\[0.1.2]
);
}
test "json.test.n_number_-01" {
err(
\\[-01]
);
}
test "json.test.n_number_0.3e" {
err(
\\[0.3e]
);
}
test "json.test.n_number_0.3e+" {
err(
\\[0.3e+]
);
}
test "json.test.n_number_0_capital_E" {
err(
\\[0E]
);
}
test "json.test.n_number_0_capital_E+" {
err(
\\[0E+]
);
}
test "json.test.n_number_0.e1" {
err(
\\[0.e1]
);
}
test "json.test.n_number_0e" {
err(
\\[0e]
);
}
test "json.test.n_number_0e+" {
err(
\\[0e+]
);
}
test "json.test.n_number_1_000" {
err(
\\[1 000.0]
);
}
test "json.test.n_number_1.0e-" {
err(
\\[1.0e-]
);
}
test "json.test.n_number_1.0e" {
err(
\\[1.0e]
);
}
test "json.test.n_number_1.0e+" {
err(
\\[1.0e+]
);
}
test "json.test.n_number_-1.0." {
err(
\\[-1.0.]
);
}
test "json.test.n_number_1eE2" {
err(
\\[1eE2]
);
}
test "json.test.n_number_.-1" {
err(
\\[.-1]
);
}
test "json.test.n_number_+1" {
err(
\\[+1]
);
}
test "json.test.n_number_.2e-3" {
err(
\\[.2e-3]
);
}
test "json.test.n_number_2.e-3" {
err(
\\[2.e-3]
);
}
test "json.test.n_number_2.e+3" {
err(
\\[2.e+3]
);
}
test "json.test.n_number_2.e3" {
err(
\\[2.e3]
);
}
test "json.test.n_number_-2." {
err(
\\[-2.]
);
}
test "json.test.n_number_9.e+" {
err(
\\[9.e+]
);
}
test "json.test.n_number_expression" {
err(
\\[1+2]
);
}
test "json.test.n_number_hex_1_digit" {
err(
\\[0x1]
);
}
test "json.test.n_number_hex_2_digits" {
err(
\\[0x42]
);
}
test "json.test.n_number_infinity" {
err(
\\[Infinity]
);
}
test "json.test.n_number_+Inf" {
err(
\\[+Inf]
);
}
test "json.test.n_number_Inf" {
err(
\\[Inf]
);
}
test "json.test.n_number_invalid+-" {
err(
\\[0e+-1]
);
}
test "json.test.n_number_invalid-negative-real" {
err(
\\[-123.123foo]
);
}
test "json.test.n_number_invalid-utf-8-in-bigger-int" {
err(
\\[123å]
);
}
test "json.test.n_number_invalid-utf-8-in-exponent" {
err(
\\[1e1å]
);
}
test "json.test.n_number_invalid-utf-8-in-int" {
err(
\\[0å]
);
}
test "json.test.n_number_++" {
err(
\\[++1234]
);
}
test "json.test.n_number_minus_infinity" {
err(
\\[-Infinity]
);
}
test "json.test.n_number_minus_sign_with_trailing_garbage" {
err(
\\[-foo]
);
}
test "json.test.n_number_minus_space_1" {
err(
\\[- 1]
);
}
test "json.test.n_number_-NaN" {
err(
\\[-NaN]
);
}
test "json.test.n_number_NaN" {
err(
\\[NaN]
);
}
test "json.test.n_number_neg_int_starting_with_zero" {
err(
\\[-012]
);
}
test "json.test.n_number_neg_real_without_int_part" {
err(
\\[-.123]
);
}
test "json.test.n_number_neg_with_garbage_at_end" {
err(
\\[-1x]
);
}
test "json.test.n_number_real_garbage_after_e" {
err(
\\[1ea]
);
}
test "json.test.n_number_real_with_invalid_utf8_after_e" {
err(
\\[1eå]
);
}
test "json.test.n_number_real_without_fractional_part" {
err(
\\[1.]
);
}
test "json.test.n_number_starting_with_dot" {
err(
\\[.123]
);
}
test "json.test.n_number_U+FF11_fullwidth_digit_one" {
err(
\\[ï¼]
);
}
test "json.test.n_number_with_alpha_char" {
err(
\\[1.8011670033376514H-308]
);
}
test "json.test.n_number_with_alpha" {
err(
\\[1.2a-3]
);
}
test "json.test.n_number_with_leading_zero" {
err(
\\[012]
);
}
test "json.test.n_object_bad_value" {
err(
\\["x", truth]
);
}
test "json.test.n_object_bracket_key" {
err(
\\{[: "x"}
);
}
test "json.test.n_object_comma_instead_of_colon" {
err(
\\{"x", null}
);
}
test "json.test.n_object_double_colon" {
err(
\\{"x"::"b"}
);
}
test "json.test.n_object_emoji" {
err(
\\{ð¨ð}
);
}
test "json.test.n_object_garbage_at_end" {
err(
\\{"a":"a" 123}
);
}
test "json.test.n_object_key_with_single_quotes" {
err(
\\{key: 'value'}
);
}
test "json.test.n_object_lone_continuation_byte_in_key_and_trailing_comma" {
err(
\\{"¹":"0",}
);
}
test "json.test.n_object_missing_colon" {
err(
\\{"a" b}
);
}
test "json.test.n_object_missing_key" {
err(
\\{:"b"}
);
}
test "json.test.n_object_missing_semicolon" {
err(
\\{"a" "b"}
);
}
test "json.test.n_object_missing_value" {
err(
\\{"a":
);
}
test "json.test.n_object_no-colon" {
err(
\\{"a"
);
}
test "json.test.n_object_non_string_key_but_huge_number_instead" {
err(
\\{9999E9999:1}
);
}
test "json.test.n_object_non_string_key" {
err(
\\{1:1}
);
}
test "json.test.n_object_repeated_null_null" {
err(
\\{null:null,null:null}
);
}
test "json.test.n_object_several_trailing_commas" {
err(
\\{"id":0,,,,,}
);
}
test "json.test.n_object_single_quote" {
err(
\\{'a':0}
);
}
test "json.test.n_object_trailing_comma" {
err(
\\{"id":0,}
);
}
test "json.test.n_object_trailing_comment" {
err(
\\{"a":"b"}/**/
);
}
test "json.test.n_object_trailing_comment_open" {
err(
\\{"a":"b"}/**//
);
}
test "json.test.n_object_trailing_comment_slash_open_incomplete" {
err(
\\{"a":"b"}/
);
}
test "json.test.n_object_trailing_comment_slash_open" {
err(
\\{"a":"b"}//
);
}
test "json.test.n_object_two_commas_in_a_row" {
err(
\\{"a":"b",,"c":"d"}
);
}
test "json.test.n_object_unquoted_key" {
err(
\\{a: "b"}
);
}
test "json.test.n_object_unterminated-value" {
err(
\\{"a":"a
);
}
test "json.test.n_object_with_single_string" {
err(
\\{ "foo" : "bar", "a" }
);
}
test "json.test.n_object_with_trailing_garbage" {
err(
\\{"a":"b"}#
);
}
test "json.test.n_single_space" {
err(" ");
}
test "json.test.n_string_1_surrogate_then_escape" {
err(
\\["\uD800\"]
);
}
test "json.test.n_string_1_surrogate_then_escape_u1" {
err(
\\["\uD800\u1"]
);
}
test "json.test.n_string_1_surrogate_then_escape_u1x" {
err(
\\["\uD800\u1x"]
);
}
test "json.test.n_string_1_surrogate_then_escape_u" {
err(
\\["\uD800\u"]
);
}
test "json.test.n_string_accentuated_char_no_quotes" {
err(
\\[é]
);
}
test "json.test.n_string_backslash_00" {
err("[\"\x00\"]");
}
test "json.test.n_string_escaped_backslash_bad" {
err(
\\["\\\"]
);
}
test "json.test.n_string_escaped_ctrl_char_tab" {
err("\x5b\x22\x5c\x09\x22\x5d");
}
test "json.test.n_string_escaped_emoji" {
err("[\"\x5c\xc3\xb0\xc2\x9f\xc2\x8c\xc2\x80\"]");
}
test "json.test.n_string_escape_x" {
err(
\\["\x00"]
);
}
test "json.test.n_string_incomplete_escaped_character" {
err(
\\["\u00A"]
);
}
test "json.test.n_string_incomplete_escape" {
err(
\\["\"]
);
}
test "json.test.n_string_incomplete_surrogate_escape_invalid" {
err(
\\["\uD800\uD800\x"]
);
}
test "json.test.n_string_incomplete_surrogate" {
err(
\\["\uD834\uDd"]
);
}
test "json.test.n_string_invalid_backslash_esc" {
err(
\\["\a"]
);
}
test "json.test.n_string_invalid_unicode_escape" {
err(
\\["\uqqqq"]
);
}
test "json.test.n_string_invalid_utf8_after_escape" {
err("[\"\\\x75\xc3\xa5\"]");
}
test "json.test.n_string_invalid-utf-8-in-escape" {
err(
\\["\uå"]
);
}
test "json.test.n_string_leading_uescaped_thinspace" {
err(
\\[\u0020"asd"]
);
}
test "json.test.n_string_no_quotes_with_bad_escape" {
err(
\\[\n]
);
}
test "json.test.n_string_single_doublequote" {
err(
\\"
);
}
test "json.test.n_string_single_quote" {
err(
\\['single quote']
);
}
test "json.test.n_string_single_string_no_double_quotes" {
err(
\\abc
);
}
test "json.test.n_string_start_escape_unclosed" {
err(
\\["\
);
}
test "json.test.n_string_unescaped_crtl_char" {
err("[\"a\x00a\"]");
}
test "json.test.n_string_unescaped_newline" {
err(
\\["new
\\line"]
);
}
test "json.test.n_string_unescaped_tab" {
err("[\"\t\"]");
}
test "json.test.n_string_unicode_CapitalU" {
err(
\\"\UA66D"
);
}
test "json.test.n_string_with_trailing_garbage" {
err(
\\""x
);
}
test "json.test.n_structure_100000_opening_arrays" {
err("[" ** 100000);
}
test "json.test.n_structure_angle_bracket_." {
err(
\\<.>
);
}
test "json.test.n_structure_angle_bracket_null" {
err(
\\[<null>]
);
}
test "json.test.n_structure_array_trailing_garbage" {
err(
\\[1]x
);
}
test "json.test.n_structure_array_with_extra_array_close" {
err(
\\[1]]
);
}
test "json.test.n_structure_array_with_unclosed_string" {
err(
\\["asd]
);
}
test "json.test.n_structure_ascii-unicode-identifier" {
err(
\\aå
);
}
test "json.test.n_structure_capitalized_True" {
err(
\\[True]
);
}
test "json.test.n_structure_close_unopened_array" {
err(
\\1]
);
}
test "json.test.n_structure_comma_instead_of_closing_brace" {
err(
\\{"x": true,
);
}
test "json.test.n_structure_double_array" {
err(
\\[][]
);
}
test "json.test.n_structure_end_array" {
err(
\\]
);
}
test "json.test.n_structure_incomplete_UTF8_BOM" {
err(
\\ï»{}
);
}
test "json.test.n_structure_lone-invalid-utf-8" {
err(
\\å
);
}
test "json.test.n_structure_lone-open-bracket" {
err(
\\[
);
}
test "json.test.n_structure_no_data" {
err(
\\
);
}
test "json.test.n_structure_null-byte-outside-string" {
err("[\x00]");
}
test "json.test.n_structure_number_with_trailing_garbage" {
err(
\\2@
);
}
test "json.test.n_structure_object_followed_by_closing_object" {
err(
\\{}}
);
}
test "json.test.n_structure_object_unclosed_no_value" {
err(
\\{"":
);
}
test "json.test.n_structure_object_with_comment" {
err(
\\{"a":/*comment*/"b"}
);
}
test "json.test.n_structure_object_with_trailing_garbage" {
err(
\\{"a": true} "x"
);
}
test "json.test.n_structure_open_array_apostrophe" {
err(
\\['
);
}
test "json.test.n_structure_open_array_comma" {
err(
\\[,
);
}
test "json.test.n_structure_open_array_object" {
err("[{\"\":" ** 50000);
}
test "json.test.n_structure_open_array_open_object" {
err(
\\[{
);
}
test "json.test.n_structure_open_array_open_string" {
err(
\\["a
);
}
test "json.test.n_structure_open_array_string" {
err(
\\["a"
);
}
test "json.test.n_structure_open_object_close_array" {
err(
\\{]
);
}
test "json.test.n_structure_open_object_comma" {
err(
\\{,
);
}
test "json.test.n_structure_open_object" {
err(
\\{
);
}
test "json.test.n_structure_open_object_open_array" {
err(
\\{[
);
}
test "json.test.n_structure_open_object_open_string" {
err(
\\{"a
);
}
test "json.test.n_structure_open_object_string_with_apostrophes" {
err(
\\{'a'
);
}
test "json.test.n_structure_open_open" {
err(
\\["\{["\{["\{["\{
);
}
test "json.test.n_structure_single_eacute" {
err(
\\é
);
}
test "json.test.n_structure_single_star" {
err(
\\*
);
}
test "json.test.n_structure_trailing_#" {
err(
\\{"a":"b"}#{}
);
}
test "json.test.n_structure_U+2060_word_joined" {
err(
\\[â ]
);
}
test "json.test.n_structure_uescaped_LF_before_string" {
err(
\\[\u000A""]
);
}
test "json.test.n_structure_unclosed_array" {
err(
\\[1
);
}
test "json.test.n_structure_unclosed_array_partial_null" {
err(
\\[ false, nul
);
}
test "json.test.n_structure_unclosed_array_unfinished_false" {
err(
\\[ true, fals
);
}
test "json.test.n_structure_unclosed_array_unfinished_true" {
err(
\\[ false, tru
);
}
test "json.test.n_structure_unclosed_object" {
err(
\\{"asd":"asd"
);
}
test "json.test.n_structure_unicode-identifier" {
err(
\\Ã¥
);
}
test "json.test.n_structure_UTF8_BOM_no_data" {
err(
\\
);
}
test "json.test.n_structure_whitespace_formfeed" {
err("[\x0c]");
}
test "json.test.n_structure_whitespace_U+2060_word_joiner" {
err(
\\[â ]
);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
test "json.test.i_number_double_huge_neg_exp" {
any(
\\[123.456e-789]
);
}
test "json.test.i_number_huge_exp" {
any(
\\[0.4e00669999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999969999999006]
);
}
test "json.test.i_number_neg_int_huge_exp" {
any(
\\[-1e+9999]
);
}
test "json.test.i_number_pos_double_huge_exp" {
any(
\\[1.5e+9999]
);
}
test "json.test.i_number_real_neg_overflow" {
any(
\\[-123123e100000]
);
}
test "json.test.i_number_real_pos_overflow" {
any(
\\[123123e100000]
);
}
test "json.test.i_number_real_underflow" {
any(
\\[123e-10000000]
);
}
test "json.test.i_number_too_big_neg_int" {
any(
\\[-123123123123123123123123123123]
);
}
test "json.test.i_number_too_big_pos_int" {
any(
\\[100000000000000000000]
);
}
test "json.test.i_number_very_big_negative_int" {
any(
\\[-237462374673276894279832749832423479823246327846]
);
}
test "json.test.i_object_key_lone_2nd_surrogate" {
any(
\\{"\uDFAA":0}
);
}
test "json.test.i_string_1st_surrogate_but_2nd_missing" {
any(
\\["\uDADA"]
);
}
test "json.test.i_string_1st_valid_surrogate_2nd_invalid" {
any(
\\["\uD888\u1234"]
);
}
test "json.test.i_string_incomplete_surrogate_and_escape_valid" {
any(
\\["\uD800\n"]
);
}
test "json.test.i_string_incomplete_surrogate_pair" {
any(
\\["\uDd1ea"]
);
}
test "json.test.i_string_incomplete_surrogates_escape_valid" {
any(
\\["\uD800\uD800\n"]
);
}
test "json.test.i_string_invalid_lonely_surrogate" {
any(
\\["\ud800"]
);
}
test "json.test.i_string_invalid_surrogate" {
any(
\\["\ud800abc"]
);
}
test "json.test.i_string_invalid_utf-8" {
any(
\\["ÿ"]
);
}
test "json.test.i_string_inverted_surrogates_U+1D11E" {
any(
\\["\uDd1e\uD834"]
);
}
test "json.test.i_string_iso_latin_1" {
any(
\\["é"]
);
}
test "json.test.i_string_lone_second_surrogate" {
any(
\\["\uDFAA"]
);
}
test "json.test.i_string_lone_utf8_continuation_byte" {
any(
\\[""]
);
}
test "json.test.i_string_not_in_unicode_range" {
any(
\\["ô¿¿¿"]
);
}
test "json.test.i_string_overlong_sequence_2_bytes" {
any(
\\["À¯"]
);
}
test "json.test.i_string_overlong_sequence_6_bytes" {
any(
\\["ü¿¿¿¿"]
);
}
test "json.test.i_string_overlong_sequence_6_bytes_null" {
any(
\\["ü"]
);
}
test "json.test.i_string_truncated-utf-8" {
any(
\\["àÿ"]
);
}
test "json.test.i_string_utf16BE_no_BOM" {
any("\x00\x5b\x00\x22\x00\xc3\xa9\x00\x22\x00\x5d");
}
test "json.test.i_string_utf16LE_no_BOM" {
any("\x5b\x00\x22\x00\xc3\xa9\x00\x22\x00\x5d\x00");
}
test "json.test.i_string_UTF-16LE_with_BOM" {
any("\xc3\xbf\xc3\xbe\x5b\x00\x22\x00\xc3\xa9\x00\x22\x00\x5d\x00");
}
test "json.test.i_string_UTF-8_invalid_sequence" {
any(
\\["æ¥Ñú"]
);
}
test "json.test.i_string_UTF8_surrogate_U+D800" {
any(
\\["í "]
);
}
test "json.test.i_structure_500_nested_arrays" {
any(("[" ** 500) ++ ("]" ** 500));
}
test "json.test.i_structure_UTF-8_BOM_empty_object" {
any(
\\{}
);
} | std/json_test.zig |
const std = @import("std");
const random = std.crypto.random;
const Stack = std.ArrayList(u16);
/// This is the default font found on Tobias' guide.
const default_font = [_]u8 {
0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
0x20, 0x60, 0x20, 0x20, 0x70, // 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
0x90, 0x90, 0xF0, 0x10, 0x10, // 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
0xF0, 0x10, 0x20, 0x40, 0x40, // 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
0xF0, 0x90, 0xF0, 0x90, 0x90, // A
0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
0xF0, 0x80, 0x80, 0x80, 0xF0, // C
0xE0, 0x90, 0x90, 0x90, 0xE0, // D
0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
0xF0, 0x80, 0xF0, 0x80, 0x80 // F
};
/// An enum for the possible key presses.
pub const Key = enum(u8){
Zero = 0,
One = 1,
Two = 2,
Three = 3,
Four = 4,
Five = 5,
Six = 6,
Seven = 7,
Eight = 8,
Nine = 9,
A = 0xA,
B = 0xB,
C = 0xC,
D = 0xD,
E = 0xE,
F = 0xF,
};
pub const ExecuteError = error {
UnknownInstruction,
Unsupported0x0NNN
};
/// The main structure for Chip8 emulation.
pub const ChipZ = struct {
memory: [4096]u8,
display: [64][32]bool,
stack: Stack,
timer_delay: u8,
timer_sound: u8,
program_counter: u16,
index_register: u16,
registers: [16]u8,
flags : struct {
display_update: bool,
current_key_pressed: ?Key,
},
configuration: struct {
shift_operations_sets_ry_into_rx: bool,
bnnn_is_bxnn: bool,
},
/// Inits a ChipZ structure with sensible defaults.
    /// Namely, it inits the memory and display to 0 and prepares the stack.
    /// It then sets the default font using set_font.
pub fn init(allocator: *std.mem.Allocator) ChipZ {
var chip = ChipZ{
.memory = [1]u8{0} ** 4096,
.display = [_][32]bool{[_]bool{false} ** 32} ** 64,
.stack = Stack.init(allocator),
.timer_delay = 0,
.timer_sound = 0,
.program_counter = 0,
.index_register = 0,
.registers = [_]u8{0} ** 16,
.flags = .{
.display_update = false,
.current_key_pressed = null,
},
.configuration = .{
.shift_operations_sets_ry_into_rx = false,
.bnnn_is_bxnn = false
}
};
chip.set_font(default_font);
return chip;
}
/// Loads a program in memory, starting at address 0x200.
pub fn load_program(self: *ChipZ, program: []u8) void {
for(program) |byte, index| {
self.memory[index+0x200] = byte;
}
self.program_counter = 0x200;
}
    /// Cleans up the structure.
pub fn deinit(self: *ChipZ) void {
self.stack.deinit();
}
/// quick tool for operation parameter type address.
fn get_address(opcode: u16) u12 {
return @intCast(u12 ,opcode & 0xFFF);
}
/// quick tool for operation parameter type 8 bit constant as the last byte.
fn get_8bitconstant(opcode: u16) u8 {
return @intCast(u8 ,opcode & 0xFF);
}
/// quick tool for operation parameter type 4 bit constant as the last nibble.
fn get_4bitconstant(opcode: u16) u4 {
return @intCast(u4 ,opcode & 0xF);
}
/// quick tool for operation parameter type "x", the second nibble.
fn get_x(opcode: u16) u4 {
return @intCast(u4 ,(opcode & 0x0F00) >> 8);
}
/// quick tool for operation parameter type "y", the third nibble.
fn get_y(opcode: u16) u4 {
return @intCast(u4 ,(opcode & 0x00F0) >> 4);
}
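    // Putting the helpers above together: for the opcode 0xD235, get_x gives 0x2,
    // get_y gives 0x3, get_4bitconstant gives 0x5, get_8bitconstant gives 0x35 and
    // get_address gives 0x235.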
    /// Sets the specified font at index 0x50.
pub fn set_font(self: *ChipZ, font: [16*5]u8) void {
for (font) |byte, index| {
self.memory[index+0x50] = byte;
}
}
/// Cycles and executes the next instruction.
/// This is what makes the emulation run.
/// If a display operation has been executed, the flag "display_update" will be set.
/// This allows updating the display only when necessary.
pub fn cycle(self: *ChipZ) !void {
self.flags.display_update = false;
try self.decode_and_execute(self.fetch());
}
/// Fetches the next instruction and moves the program counter by 2.
    /// The use of defer is absolutely unnecessary here, except if, like me, you enjoy having the return value at the end.
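    /// For example, with memory[pc] == 0x12 and memory[pc + 1] == 0x34 this returns
    /// 0x1234 (instructions are stored big-endian, most significant byte first).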
fn fetch(self: *ChipZ) u16 {
defer self.program_counter += 2;
return (@intCast(u16, self.memory[self.program_counter]) << 8) + self.memory[self.program_counter+1];
}
    // All functions starting with op_ are individual operations.
    // Some comments are directly from Tobias' guide.
/// Clears the screen.
fn op_00E0(self: *ChipZ) void {
for (self.display) |*row| {
for (row) |*column| {
column.* = false;
}
}
}
/// Jumps to address NNN.
fn op_1NNN(self: *ChipZ, address: u12) void {
self.program_counter = address;
}
/// Sets VX to NN.
fn op_6XNN(self: *ChipZ, register: u4, value: u8) void {
self.registers[register] = value;
}
/// Adds NN to VX. (Carry flag is not changed)
fn op_7XNN(self: *ChipZ, register: u4, value: u8) void {
_ = @addWithOverflow(u8, self.registers[register], value, &self.registers[register]);
}
/// Sets I to the address NNN.
fn op_ANNN(self: *ChipZ, address: u12) void {
self.index_register = address;
}
    /// Draws a sprite at coordinate (VX, VY) that has a width of 8 pixels and a height of N pixels.
/// Each row of 8 pixels is read as bit-coded starting from memory location I; I value doesn’t change after the execution of this instruction.
/// As described above, VF is set to 1 if any screen pixels are flipped from set to unset when the sprite is drawn, and to 0 if that doesn’t happen
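    /// For example, drawing the one-row sprite 0xF0 at VX=62, VY=0 toggles the pixels
    /// in columns 62, 63, 0 and 1 of row 0; this implementation wraps coordinates with
    /// %64 and %32 rather than clipping at the screen edge.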
fn op_DXYN(self: *ChipZ, r_col: u8, r_lin: u8, base_height: u4) void {
const col = self.registers[r_col];
const lin = self.registers[r_lin];
self.registers[0xF] = 0;
self.flags.display_update = true;
for (self.memory[self.index_register..self.index_register+base_height]) |sprite_line, index_sprite| {
var x: u4 = 0;
while (x < 8) : ( x += 1) {
if(((@intCast(usize, sprite_line) >> (7-x)) & 1) == 1) {
const coord_x = (col+x)%64;
const coord_y = (lin+index_sprite)%32;
if(self.display[coord_x][coord_y]) {
self.registers[0xF] = self.registers[0xF] | 1;
}
self.display[coord_x][coord_y] = !self.display[coord_x][coord_y];
}
}
}
}
/// return from subroutine
fn op_00EE(self: *ChipZ) void {
self.program_counter = self.stack.pop();
}
/// start subroutine
fn op_2NNN(self: *ChipZ, address: u12) void {
        self.stack.append(self.program_counter) catch @panic("Error on pushing on stack");
self.program_counter = address;
}
/// skip instruction if VX == value
fn op_3XNN(self: *ChipZ, register: u4, value: u8) void {
if(self.registers[register] == value) {
self.program_counter += 2;
}
}
/// skip instruction if VX != value
fn op_4XNN(self: *ChipZ, register: u4, value: u8) void {
if(self.registers[register] != value) {
self.program_counter += 2;
}
}
/// skip instruction if VX == VY
fn op_5XY0(self: *ChipZ, register_a: u4, register_b: u4) void {
if(self.registers[register_a] == self.registers[register_b]) {
self.program_counter += 2;
}
}
/// skip instruction if VX != VY
fn op_9XY0(self: *ChipZ, register_a: u4, register_b: u4) void {
if(self.registers[register_a] != self.registers[register_b]) {
self.program_counter += 2;
}
}
/// Sets VX to the value of VY.
fn op_8XY0(self: *ChipZ, x: u4, y: u4) void {
self.registers[x] = self.registers[y];
}
/// VX is set to the bitwise logical disjunction (OR) of VX and VY.
fn op_8XY1(self: *ChipZ, x: u4, y: u4) void {
self.registers[x] = self.registers[x] | self.registers[y];
}
/// VX is set to the bitwise logical conjunction (AND) of VX and VY
fn op_8XY2(self: *ChipZ, x: u4, y: u4) void {
self.registers[x] = self.registers[x] & self.registers[y];
}
/// VX is set to the bitwise exclusive OR (XOR) of VX and VY.
fn op_8XY3(self: *ChipZ, x: u4, y: u4) void {
self.registers[x] = self.registers[x] ^ self.registers[y];
}
/// VX is set to the value of VX plus the value of VY
/// Unlike 7XNN, this addition will affect the carry flag.
/// If the result is larger than 255 (and thus overflows the 8-bit register VX), the flag register VF is set to 1.
/// If it doesn’t overflow, VF is set to 0.
fn op_8XY4(self: *ChipZ, x: u4, y: u4) void {
const overflow = @addWithOverflow(u8, self.registers[x], self.registers[y], &self.registers[x]);
self.registers[0xF] = if(overflow) 1 else 0;
}
/// sets VX to the result of VX - VY.
/// This subtraction will also affect the carry flag, but note that it’s opposite from what you might think.
    /// If the minuend (the first operand) is larger than or equal to the subtrahend (second operand), VF will be set to 1.
/// If the subtrahend is larger, and we “underflow” the result, VF is set to 0.
/// Another way of thinking of it is that VF is set to 1 before the subtraction, and then the subtraction either borrows from VF (setting it to 0) or not.
fn op_8XY5(self: *ChipZ, x: u4, y: u4) void {
        const overflow = @subWithOverflow(u8, self.registers[x], self.registers[y], &self.registers[x]);
        self.registers[0xF] = if (overflow) 0 else 1;
}
/// sets VX to the result of VY - VX.
/// This subtraction will also affect the carry flag, but note that it’s opposite from what you might think.
    /// If the minuend (the first operand) is larger than or equal to the subtrahend (second operand), VF will be set to 1.
/// If the subtrahend is larger, and we “underflow” the result, VF is set to 0.
/// Another way of thinking of it is that VF is set to 1 before the subtraction, and then the subtraction either borrows from VF (setting it to 0) or not.
fn op_8XY7(self: *ChipZ, x: u4, y: u4) void {
        const overflow = @subWithOverflow(u8, self.registers[y], self.registers[x], &self.registers[x]);
        self.registers[0xF] = if (overflow) 0 else 1;
}
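    // Hedged illustration (not part of the original emulator): the borrow rule used
    // by op_8XY5/op_8XY7 above, expressed with the same @subWithOverflow builtin.
    // 10 - 20 wraps to 246 and borrows (VF becomes 0); 20 - 10 = 10 with no borrow
    // (VF becomes 1). Assumes `std` is imported at the top of this file.
    test "8XY5/8XY7 borrow semantics" {
        var result: u8 = undefined;
        const borrowed = @subWithOverflow(u8, 10, 20, &result);
        std.debug.assert(borrowed and result == 246);
        const no_borrow = @subWithOverflow(u8, 20, 10, &result);
        std.debug.assert(!no_borrow and result == 10);
    }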
/// shift 1 bit right for vx
fn op_8XY6(self: *ChipZ, x: u4, y: u4) void {
if(self.configuration.shift_operations_sets_ry_into_rx) {
self.registers[x] = self.registers[y];
}
self.registers[0xF] = if(self.registers[x] & 1 == 1) 1 else 0;
self.registers[x] = self.registers[x] >> 1;
}
/// shift 1 bit left for vx
fn op_8XYE(self: *ChipZ, x: u4, y: u4) void {
if(self.configuration.shift_operations_sets_ry_into_rx) {
self.registers[x] = self.registers[y];
}
const overflow = @shlWithOverflow(u8, self.registers[x], 1, &self.registers[x]);
self.registers[0xF] = if(overflow) 1 else 0;
}
/// BNNN: Jump with offset
fn op_BNNN(self: *ChipZ, opcode: u16) void {
        if (self.configuration.bnnn_is_bxnn) {
            // quirk: treat BNNN as BXNN and jump to XNN plus the value of VX
            const address = get_address(opcode);
            const x = get_x(opcode);
            self.program_counter = address + self.registers[x];
        }
else
{
const address = get_address(opcode);
self.program_counter = address + self.registers[0];
}
}
/// This instruction generates a random number, binary ANDs it with the value NN, and puts the result in VX.
fn op_CXNN(self: *ChipZ, x: u4, value: u8) void {
const rand = random.int(u8);
self.registers[x] = rand & value;
}
/// EX9E will skip one instruction (increment PC by 2) if the key corresponding to the value in VX is pressed.
fn op_EX9E(self: *ChipZ, x: u4) void {
if(self.flags.current_key_pressed) |key| {
if(@enumToInt(key) == self.registers[x])
{
self.program_counter += 2;
}
}
}
/// EXA1 skips if the key corresponding to the value in VX is not pressed.
fn op_EXA1(self: *ChipZ, x: u4) void {
if(self.flags.current_key_pressed) |key| {
if(@enumToInt(key) != self.registers[x])
{
self.program_counter += 2;
}
}
else
{
self.program_counter += 2;
}
}
/// FX07 sets VX to the current value of the delay timer
fn op_FX07(self: *ChipZ, x: u4) void {
self.registers[x] = self.timer_delay;
}
/// FX15 sets the delay timer to the value in VX
fn op_FX15(self: *ChipZ, x: u4) void {
self.timer_delay = self.registers[x];
}
/// FX18 sets the sound timer to the value in VX
fn op_FX18(self: *ChipZ, x: u4) void {
self.timer_sound = self.registers[x];
}
/// The index register I will get the value in VX added to it.
fn op_FX1E(self: *ChipZ, x: u4) void {
self.index_register = self.index_register + self.registers[x];
if(self.index_register > 0xFFF) {
self.registers[0xF] = 1;
self.index_register &= 0xFFF;
}
}
/// FX0A: Get key
/// This instruction “blocks”; it stops execution and waits for key input.
/// In other words, if you followed my advice earlier and increment PC after fetching each instruction, then it should be decremented again here unless a key is pressed.
/// Otherwise, PC should simply not be incremented.
/// If a key is pressed while this instruction is waiting for input, its hexadecimal value will be put in VX and execution continues.
fn op_FX0A(self: *ChipZ, x: u4) void {
if(self.flags.current_key_pressed) |key| {
self.registers[x] = @enumToInt(key);
}
else
{
self.program_counter -= 2;
}
}
/// The index register I is set to the address of the hexadecimal character in VX
fn op_FX29(self: *ChipZ, x: u4) void {
const value = self.registers[x] & 0xF;
self.index_register = 0x50 + (value * 5);
}
/// BCD conversion
fn op_FX33(self: *ChipZ, x: u4) void {
var value = self.registers[x];
self.memory[self.index_register] = @divFloor(value, 100);
self.memory[self.index_register + 1] = @divFloor(value, 10) % 10;
self.memory[self.index_register + 2] = value % 10;
}
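    // Hedged worked example (not from the original source): the BCD split used by
    // op_FX33 above. For VX = 156 the three memory cells receive 1, 5 and 6.
    // Assumes `std` is imported at the top of this file.
    test "FX33 BCD digit extraction" {
        const value: u8 = 156;
        std.debug.assert(@divFloor(value, 100) == 1);
        std.debug.assert(@divFloor(value, 10) % 10 == 5);
        std.debug.assert(value % 10 == 6);
    }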
/// store in memory
fn op_FX55(self: *ChipZ, x: u4) void {
var index : usize = 0;
while (index <= x) : (index += 1) {
self.memory[self.index_register+index] = self.registers[index];
}
}
/// load from memory
fn op_FX65(self: *ChipZ, x: u4) void {
var index : usize = 0;
while (index <= x) : (index += 1) {
self.registers[index] = self.memory[self.index_register+index];
}
}
/// Simple structure to decode a 2-byte instruction into potential parameters.
const OpDetails = struct {
opcode : u4,
x: u4,
y: u4,
n: u4,
nn: u8,
address: u12,
fn get(opcode: u16) OpDetails {
return OpDetails {
.opcode = @intCast(u4, (opcode & 0xF000) >> 12),
.x = get_x(opcode),
.y = get_y(opcode),
.n = get_4bitconstant(opcode),
.nn = get_8bitconstant(opcode),
.address = get_address(opcode)
};
}
};
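    // Hedged illustration (not in the original source): how an opcode such as
    // 0xD123 (draw at V1,V2 with height 3) decomposes, assuming the get_* helpers
    // follow the standard CHIP-8 nibble layout. `std` is assumed to be imported
    // at the top of this file.
    test "OpDetails.get nibble extraction" {
        const op = OpDetails.get(0xD123);
        std.debug.assert(op.opcode == 0xD);
        std.debug.assert(op.x == 0x1);
        std.debug.assert(op.y == 0x2);
        std.debug.assert(op.n == 0x3);
        std.debug.assert(op.nn == 0x23);
        std.debug.assert(op.address == 0x123);
    }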
/// Decodes the instruction, finds the appropriate function and execute it.
fn decode_and_execute(self: *ChipZ, opcode: u16) !void {
errdefer std.log.err("Faulting instruction {x} at program counter value {x}", .{opcode, self.program_counter});
const op = OpDetails.get(opcode);
switch(op.opcode) {
0x0 => {
switch(opcode) {
0x00E0 => self.op_00E0(),
0x00EE => self.op_00EE(),
else => {
return ExecuteError.Unsupported0x0NNN; // Calls machine code routine (RCA 1802 for COSMAC VIP) at address NNN. Not necessary for most ROMs.
}
}
},
0x1 => self.op_1NNN(op.address),
0x2 => self.op_2NNN(op.address),
0x3 => self.op_3XNN(op.x, op.nn),
0x4 => self.op_4XNN(op.x, op.nn),
0x5 => {
if((opcode & 0xF) == 0){
self.op_5XY0(op.x, op.y);
}
else return ExecuteError.UnknownInstruction;
},
0x6 => self.op_6XNN(op.x, op.nn),
0x7 => self.op_7XNN(op.x, op.nn),
0x8 => {
const last_nibble : u4 = @intCast(u4, opcode & 0xF);
switch (last_nibble) {
0x0 => self.op_8XY0(op.x, op.y),
0x1 => self.op_8XY1(op.x, op.y),
0x2 => self.op_8XY2(op.x, op.y),
0x3 => self.op_8XY3(op.x, op.y),
0x4 => self.op_8XY4(op.x, op.y),
0x5 => self.op_8XY5(op.x, op.y),
0x6 => self.op_8XY6(op.x, op.y),
0x7 => self.op_8XY7(op.x, op.y),
0xE => self.op_8XYE(op.x, op.y),
else => return ExecuteError.UnknownInstruction,
}
},
0x9 => {
if((opcode & 0xF) == 0){
self.op_9XY0(op.x, op.y);
}
else return ExecuteError.UnknownInstruction;
},
0xA => self.op_ANNN(op.address),
0xB => self.op_BNNN(opcode),
0xC => self.op_CXNN(op.x, op.nn),
0xD => self.op_DXYN(op.x, op.y, op.n),
0xE => {
const last_byte : u8 = @intCast(u8, opcode & 0xFF);
switch (last_byte) {
0x9E => self.op_EX9E(op.x),
0xA1 => self.op_EXA1(op.x),
else => return ExecuteError.UnknownInstruction,
}
},
0xF => {
switch (op.nn) {
0x07 => self.op_FX07(op.x),
0x0A => self.op_FX0A(op.x),
0x15 => self.op_FX15(op.x),
0x18 => self.op_FX18(op.x),
0x1E => self.op_FX1E(op.x),
0x29 => self.op_FX29(op.x),
0x33 => self.op_FX33(op.x),
0x55 => self.op_FX55(op.x),
0x65 => self.op_FX65(op.x),
else => return ExecuteError.UnknownInstruction,
}
},
}
}
}; | src/lib/chipz.zig |
const BlendOperation = @import("enums.zig").BlendOperation;
const BlendFactor = @import("enums.zig").BlendFactor;
const CompareFunction = @import("enums.zig").CompareFunction;
const StencilOperation = @import("enums.zig").StencilOperation;
const VertexFormat = @import("enums.zig").VertexFormat;
const VertexStepMode = @import("enums.zig").VertexStepMode;
pub const Limits = extern struct {
max_texture_dimension_1d: u32,
max_texture_dimension_2d: u32,
max_texture_dimension_3d: u32,
max_texture_array_layers: u32,
max_bind_groups: u32,
max_dynamic_uniform_buffers_per_pipeline_layout: u32,
max_dynamic_storage_buffers_per_pipeline_layout: u32,
max_sampled_textures_per_shader_stage: u32,
max_samplers_per_shader_stage: u32,
max_storage_buffers_per_shader_stage: u32,
max_storage_textures_per_shader_stage: u32,
max_uniform_buffers_per_shader_stage: u32,
max_uniform_buffer_binding_size: u64,
max_storage_buffer_binding_size: u64,
min_uniform_buffer_offset_alignment: u32,
min_storage_buffer_offset_alignment: u32,
max_vertex_buffers: u32,
max_vertex_attributes: u32,
max_vertex_buffer_array_stride: u32,
max_inter_stage_shader_components: u32,
max_compute_workgroup_storage_size: u32,
max_compute_invocations_per_workgroup: u32,
max_compute_workgroup_size_x: u32,
max_compute_workgroup_size_y: u32,
max_compute_workgroup_size_z: u32,
max_compute_workgroups_per_dimension: u32,
};
pub const Color = extern struct {
r: f64,
g: f64,
b: f64,
a: f64,
};
pub const Extent3D = extern struct {
width: u32,
height: u32,
depth_or_array_layers: u32,
};
pub const Origin3D = extern struct {
x: u32,
y: u32,
z: u32,
};
pub const StencilFaceState = extern struct {
compare: CompareFunction,
fail_op: StencilOperation,
depth_fail_op: StencilOperation,
pass_op: StencilOperation,
};
pub const VertexAttribute = extern struct {
format: VertexFormat,
offset: u64,
shader_location: u32,
};
pub const BlendComponent = extern struct {
operation: BlendOperation,
src_factor: BlendFactor,
dst_factor: BlendFactor,
};
pub const BlendState = extern struct {
color: BlendComponent,
alpha: BlendComponent,
};
pub const VertexBufferLayout = extern struct {
array_stride: u64,
step_mode: VertexStepMode,
attribute_count: u32,
attributes: *const VertexAttribute,
};
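// Hedged usage sketch (not part of the original bindings): `attribute_count`
// plus a pointer to the first element describe an array of attributes,
// mirroring the C-style count/pointer pairing of the underlying API. The
// format/step_mode values are left undefined here because the real enum
// variants live in enums.zig and are not assumed.
test "VertexBufferLayout count/pointer pairing" {
    const std = @import("std");
    const attrs = [_]VertexAttribute{
        .{ .format = undefined, .offset = 0, .shader_location = 0 },
        .{ .format = undefined, .offset = 16, .shader_location = 1 },
    };
    const layout = VertexBufferLayout{
        .array_stride = 32,
        .step_mode = undefined,
        .attribute_count = attrs.len,
        .attributes = &attrs[0],
    };
    std.debug.assert(layout.attribute_count == 2);
    std.debug.assert(layout.attributes == &attrs[0]);
}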
test {
_ = Limits;
_ = Color;
_ = Extent3D;
_ = Origin3D;
_ = StencilFaceState;
_ = VertexAttribute;
_ = BlendComponent;
_ = BlendState;
_ = VertexBufferLayout;
} | gpu/src/data.zig |
const std = @import("std");
const testing = std.testing;
const expect = testing.expect;
//;
fn assertIsNumberType(comptime T: type) void {
const type_info = @typeInfo(T);
std.debug.assert(type_info == .Float or type_info == .Int);
}
pub fn TVec2(comptime T: type) type {
return extern struct {
comptime {
assertIsNumberType(T);
}
const Self = @This();
x: T,
y: T,
pub fn init(x: T, y: T) Self {
return .{
.x = x,
.y = y,
};
}
pub fn zero() Self {
return Self.init(0, 0);
}
//;
pub fn cast(self: Self, comptime U: type) TVec2(U) {
comptime assertIsNumberType(U);
if (@typeInfo(T) == .Float) {
if (@typeInfo(U) == .Float) {
return .{
.x = @floatCast(U, self.x),
.y = @floatCast(U, self.y),
};
} else {
return .{
.x = @floatToInt(U, self.x),
.y = @floatToInt(U, self.y),
};
}
} else {
if (@typeInfo(U) == .Float) {
return .{
.x = @intToFloat(U, self.x),
.y = @intToFloat(U, self.y),
};
} else {
return .{
.x = @intCast(U, self.x),
.y = @intCast(U, self.y),
};
}
}
}
//;
pub fn isEqualTo(self: Self, other: Self) bool {
return self.x == other.x and
self.y == other.y;
}
//;
pub fn add(self: Self, other: Self) Self {
return .{
.x = self.x + other.x,
.y = self.y + other.y,
};
}
pub fn sub(self: Self, other: Self) Self {
return .{
.x = self.x - other.x,
.y = self.y - other.y,
};
}
pub fn neg(self: Self) Self {
return .{
.x = -self.x,
.y = -self.y,
};
}
pub fn recip(self: Self) Self {
return .{
.x = 1. / self.x,
.y = 1. / self.y,
};
}
pub fn multScalar(self: Self, scalar: T) Self {
return .{
.x = self.x * scalar,
.y = self.y * scalar,
};
}
pub fn divScalar(self: Self, scalar: T) Self {
return .{
.x = self.x / scalar,
.y = self.y / scalar,
};
}
//;
pub fn multMat3(self: Vec2, matr: Mat3) Vec2 {
return .{
.x = self.x * matr.data[0][0] + self.y * matr.data[1][0] + matr.data[2][0],
.y = self.x * matr.data[0][1] + self.y * matr.data[1][1] + matr.data[2][1],
};
}
};
}
pub const Vec2 = TVec2(f32);
pub const UVec2 = TVec2(u32);
pub const IVec2 = TVec2(i32);
test "Vec2" {
const f_zero = Vec2.zero();
const d_zero = TVec2(f64).zero();
const i_zero = TVec2(u8).zero();
const u_zero = TVec2(i8).zero();
}
pub fn TVec3(comptime T: type) type {
return extern struct {
comptime {
assertIsNumberType(T);
}
const Self = @This();
x: T,
y: T,
z: T,
pub fn init(x: T, y: T, z: T) Self {
return .{
.x = x,
.y = y,
.z = z,
};
}
pub fn zero() Self {
return Self.init(0, 0, 0);
}
pub fn one() Self {
return Self.init(1, 1, 1);
}
//;
pub fn isEqualTo(self: Self, other: Self) bool {
return self.x == other.x and
self.y == other.y and
self.z == other.z;
}
//;
pub fn add(self: Self, other: Self) Self {
return .{
.x = self.x + other.x,
.y = self.y + other.y,
.z = self.z + other.z,
};
}
pub fn sub(self: Self, other: Self) Self {
return .{
.x = self.x - other.x,
.y = self.y - other.y,
.z = self.z - other.z,
};
}
//;
        pub fn multMat3(self: Vec3, matr: Mat3) Vec3 {
return .{
.x = self.x * matr.data[0][0] + self.y * matr.data[1][0] + self.z * matr.data[2][0],
.y = self.x * matr.data[0][1] + self.y * matr.data[1][1] + self.z * matr.data[2][1],
.z = self.x * matr.data[0][2] + self.y * matr.data[1][2] + self.z * matr.data[2][2],
};
}
};
}
pub const Vec3 = TVec3(f32);
pub const UVec3 = TVec3(u32);
pub const IVec3 = TVec3(i32);
pub fn TVec4(comptime T: type) type {
return extern struct {
comptime {
assertIsNumberType(T);
}
const Self = @This();
x: T,
y: T,
z: T,
w: T,
pub fn init(x: T, y: T, z: T, w: T) Self {
return .{
.x = x,
.y = y,
.z = z,
.w = w,
};
}
//;
pub fn isEqualTo(self: Self, other: Self) bool {
return self.x == other.x and
self.y == other.y and
self.z == other.z and
self.w == other.w;
}
//;
pub fn add(self: Self, other: Self) Self {
return .{
.x = self.x + other.x,
.y = self.y + other.y,
.z = self.z + other.z,
.w = self.w + other.w,
};
}
pub fn sub(self: Self, other: Self) Self {
return .{
.x = self.x - other.x,
.y = self.y - other.y,
.z = self.z - other.z,
.w = self.w - other.w,
};
}
};
}
pub const Vec4 = TVec4(f32);
pub const UVec4 = TVec4(u32);
pub const IVec4 = TVec4(i32);
//;
// TODO generic mat3?
pub const Mat3 = struct {
const Self = @This();
// column major
data: [3][3]f32,
pub fn zero() Self {
var ret = Self{ .data = undefined };
var i: usize = 0;
var j: usize = 0;
while (i < 3) : (i += 1) {
j = 0;
while (j < 3) : (j += 1) {
ret.data[i][j] = 0.;
}
}
return ret;
}
pub fn identity() Self {
var ret = Self.zero();
ret.data[0][0] = 1;
ret.data[1][1] = 1;
ret.data[2][2] = 1;
return ret;
}
pub fn translation(vec: Vec2) Self {
var ret = Self.identity();
ret.data[2][0] = vec.x;
ret.data[2][1] = vec.y;
return ret;
}
pub fn rotation(rads: f32) Self {
var ret = Self.identity();
const rc = std.math.cos(rads);
const rs = std.math.sin(rads);
ret.data[0][0] = rc;
ret.data[0][1] = rs;
ret.data[1][0] = -rs;
ret.data[1][1] = rc;
return ret;
}
pub fn scaling(vec: Vec2) Self {
var ret = Self.identity();
ret.data[0][0] = vec.x;
ret.data[1][1] = vec.y;
return ret;
}
pub fn shearing(vec: Vec2) Self {
var ret = Self.identity();
ret.data[1][0] = vec.x;
ret.data[0][1] = vec.y;
return ret;
}
pub fn orthoScreen(dimensions: UVec2) Self {
var ret = Self.identity();
ret.data[0][0] = 2 / @intToFloat(f32, dimensions.x);
ret.data[1][1] = -2 / @intToFloat(f32, dimensions.y);
ret.data[2][0] = -1;
ret.data[2][1] = 1;
return ret;
}
pub fn fromTransform2d(t2d: Transform2d) Self {
var ret = Self.identity();
const sx = t2d.scale.x;
const sy = t2d.scale.y;
const rc = std.math.cos(t2d.rotation);
const rs = std.math.sin(t2d.rotation);
ret.data[0][0] = rc * sx;
ret.data[0][1] = rs * sx;
ret.data[1][0] = -rs * sy;
ret.data[1][1] = rc * sy;
ret.data[2][0] = t2d.position.x;
ret.data[2][1] = t2d.position.y;
return ret;
}
//;
pub fn isEqualTo(self: Self, other: Self) bool {
return self.data[0][0] == other.data[0][0] and
self.data[1][0] == other.data[1][0] and
self.data[2][0] == other.data[2][0] and
self.data[0][1] == other.data[0][1] and
self.data[1][1] == other.data[1][1] and
self.data[2][1] == other.data[2][1] and
self.data[0][2] == other.data[0][2] and
self.data[1][2] == other.data[1][2] and
self.data[2][2] == other.data[2][2];
}
//;
// TODO inverse / transpose
// test m * m' == iden
pub fn mult(self: Self, other: Self) Self {
const s = &self.data;
const o = &other.data;
return .{
.data = .{
.{
s[0][0] * o[0][0] + s[0][1] * o[1][0] + s[0][2] * o[2][0],
s[0][0] * o[0][1] + s[0][1] * o[1][1] + s[0][2] * o[2][1],
s[0][0] * o[0][2] + s[0][1] * o[1][2] + s[0][2] * o[2][2],
},
.{
s[1][0] * o[0][0] + s[1][1] * o[1][0] + s[1][2] * o[2][0],
s[1][0] * o[0][1] + s[1][1] * o[1][1] + s[1][2] * o[2][1],
s[1][0] * o[0][2] + s[1][1] * o[1][2] + s[1][2] * o[2][2],
},
.{
s[2][0] * o[0][0] + s[2][1] * o[1][0] + s[2][2] * o[2][0],
s[2][0] * o[0][1] + s[2][1] * o[1][1] + s[2][2] * o[2][1],
s[2][0] * o[0][2] + s[2][1] * o[1][2] + s[2][2] * o[2][2],
},
},
};
}
};
// TODO more tests
test "Mat3" {
const iden = Mat3.identity();
const trans = Mat3.translation(Vec2.init(10, 15));
expect(trans.isEqualTo(iden.mult(trans)));
expect(trans.isEqualTo(trans.mult(iden)));
const v2 = Vec2.init(0, 0);
const mul = v2.multMat3(trans);
expect(mul.isEqualTo(Vec2.init(10, 15)));
}
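// Hedged example (not from the original file): the screen-space projection from
// Mat3.orthoScreen maps pixel (0,0) to clip-space (-1, 1) and (width, height) to
// (1, -1). Power-of-two dimensions are used so the float comparisons stay exact.
test "Mat3.orthoScreen corners" {
    const proj = Mat3.orthoScreen(UVec2.init(512, 256));
    const top_left = Vec2.init(0, 0).multMat3(proj);
    const bottom_right = Vec2.init(512, 256).multMat3(proj);
    expect(top_left.isEqualTo(Vec2.init(-1, 1)));
    expect(bottom_right.isEqualTo(Vec2.init(1, -1)));
}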
//;
pub const Mat4 = struct {
const Self = @This();
// column major
data: [4][4]f32,
pub fn zero() Self {
var ret = Self{ .data = undefined };
var i: usize = 0;
var j: usize = 0;
while (i < 4) : (i += 1) {
j = 0;
while (j < 4) : (j += 1) {
ret.data[i][j] = 0.;
}
}
return ret;
}
pub fn identity() Self {
var ret = Self.zero();
ret.data[0][0] = 1;
ret.data[1][1] = 1;
ret.data[2][2] = 1;
ret.data[3][3] = 1;
return ret;
}
//;
pub fn isEqualTo(self: Self, other: Self) bool {
return self.data[0][0] == other.data[0][0] and
self.data[1][0] == other.data[1][0] and
self.data[2][0] == other.data[2][0] and
self.data[3][0] == other.data[3][0] and
self.data[0][1] == other.data[0][1] and
self.data[1][1] == other.data[1][1] and
self.data[2][1] == other.data[2][1] and
self.data[3][1] == other.data[3][1] and
self.data[0][2] == other.data[0][2] and
self.data[1][2] == other.data[1][2] and
self.data[2][2] == other.data[2][2] and
self.data[3][2] == other.data[3][2] and
self.data[0][3] == other.data[0][3] and
self.data[1][3] == other.data[1][3] and
self.data[2][3] == other.data[2][3] and
self.data[3][3] == other.data[3][3];
}
};
//;
pub fn AABB(comptime T: type) type {
return extern struct {
comptime {
assertIsNumberType(T);
}
const Self = @This();
c1: TVec2(T),
c2: TVec2(T),
pub fn init(x1: T, y1: T, x2: T, y2: T) Self {
return .{
.c1 = TVec2(T).init(x1, y1),
.c2 = TVec2(T).init(x2, y2),
};
}
pub fn identity() Self {
return Self.init(0, 0, 1, 1);
}
//;
pub fn width(self: Self) T {
return self.c2.x - self.c1.x;
}
pub fn height(self: Self) T {
return self.c2.y - self.c1.y;
}
// TODO could take float type param and let this return AABB(f64)
// not rly necessary for games
pub fn normalized(self: Self, vec: TVec2(T)) AABB(f32) {
if (@typeInfo(T) == .Float) {
const x = vec.x;
const y = vec.y;
const x1 = self.c1.x / x;
const y1 = self.c1.y / y;
const x2 = self.c2.x / x;
const y2 = self.c2.y / y;
return AABB(f32).init(x1, y1, x2, y2);
} else if (@typeInfo(T) == .Int) {
const x = @intToFloat(f32, vec.x);
const y = @intToFloat(f32, vec.y);
const x1 = @intToFloat(f32, self.c1.x) / x;
const y1 = @intToFloat(f32, self.c1.y) / y;
const x2 = @intToFloat(f32, self.c2.x) / x;
const y2 = @intToFloat(f32, self.c2.y) / y;
return AABB(f32).init(x1, y1, x2, y2);
}
}
pub fn reorient(self: *Self) void {
if (self.c1.x > self.c2.x) {
                std.mem.swap(T, &self.c1.x, &self.c2.x);
}
if (self.c1.y > self.c2.y) {
                std.mem.swap(T, &self.c1.y, &self.c2.y);
}
}
pub fn displace(self: *Self, offset: TVec2(T)) void {
self.c1.x += offset.x;
self.c1.y += offset.y;
self.c2.x += offset.x;
self.c2.y += offset.y;
}
};
}
pub const TextureRegion = AABB(i32);
pub const UV_Region = AABB(f32);
test "math AABB" {
// TODO
}
//;
pub const Transform2d = extern struct {
const Self = @This();
position: Vec2,
rotation: f32,
scale: Vec2,
pub fn init(x: f32, y: f32, r: f32, sx: f32, sy: f32) Self {
return .{
.position = Vec2.init(x, y),
.rotation = r,
.scale = Vec2.init(sx, sy),
};
}
pub fn identity() Self {
return Self.init(0, 0, 0, 1, 1);
}
};
//;
pub const Quaternion = extern struct {
const Self = @This();
x: f32,
y: f32,
z: f32,
w: f32,
pub fn init(x: f32, y: f32, z: f32, w: f32) Self {
return .{
.x = x,
.y = y,
.z = z,
.w = w,
};
}
pub fn identity() Self {
return Self.init(0, 0, 0, 1);
}
};
//;
pub const Transform3d = extern struct {
const Self = @This();
translation: Vec3,
rotation: Quaternion,
scale: Vec3,
pub fn identity() Self {
return .{
.translation = Vec3.zero(),
.rotation = Quaternion.identity(),
.scale = Vec3.one(),
};
}
};
//;
pub const Color = extern struct {
const Self = @This();
r: f32,
g: f32,
b: f32,
a: f32,
pub fn initRgba(r: f32, g: f32, b: f32, a: f32) Self {
return .{
.r = r,
.g = g,
.b = b,
.a = a,
};
}
pub fn white() Self {
return Self.initRgba(1, 1, 1, 1);
}
pub fn black() Self {
return Self.initRgba(0, 0, 0, 1);
}
}; | src/math.zig |
const std = @import("std");
const fs = std.fs;
const ArrayList = std.ArrayList;
const ArrayListSentineled = std.ArrayListSentineled;
const allocPrint0 = std.fmt.allocPrint0;
const err = @import("error.zig");
const errorAt = err.errorAt;
const setTargetString = err.setTargetString;
const setTargetFilename = err.setTargetFilename;
const allocator = @import("allocator.zig");
const getAllocator = allocator.getAllocator;
const stdout = std.io.getStdOut().outStream();
const print = stdout.print;
const Type = @import("type.zig").Type;
const SPACE_CHARS = " \n\t\x0b\x0c\r";
const PUNCT_CHARS = "+-*/()<>;={}&,[].";
const PUNCT_STRS = [_][:0]const u8{ "==", "!=", "<=", ">=", "->" };
const KEYWORDS = [_][:0]const u8{
"return", "if", "else", "for", "while", "sizeof",
"void", "char", "short", "int", "long", "struct",
"union", "typedef",
};
pub const TokenKind = enum {
    TkIdent, // identifier
    TkPunct, // punctuation mark
    TkKeyword, // keyword
    TkStr, // string literal
    TkNum, // number
};
pub const Token = struct {
    kind: TokenKind, // token kind
    val: [:0]u8, // token text
    loc: usize, // position in the original source string
    ty: ?*Type, // used when the token is a string literal
    line_no: usize, // line number in the original file
};
pub fn newToken(kind: TokenKind, val: []const u8, loc: usize, str: [:0]u8) !Token {
return Token{
.kind = kind,
.val = try allocPrint0(getAllocator(), "{}", .{val}),
.loc = loc,
.ty = null,
.line_no = getLineNo(str, loc),
};
}
pub fn tokenize(filename: [:0]u8, str: [:0]u8) !ArrayList(Token) {
setTargetFilename(filename);
setTargetString(str);
var tokens = ArrayList(Token).init(getAllocator());
var i: usize = 0;
while (str[i] != 0) {
const c = str[i];
if (startsWith(str, i, "//")) {
i += 2;
while (str[i] != '\n')
i += 1;
continue;
}
if (startsWith(str, i, "/*")) {
i += 2;
while (!startsWith(str, i, "*/")) {
i += 1;
if (i >= str.len)
errorAt(i, null, "コメントが閉じられていません");
}
i += 2;
continue;
}
if (isSpace(c)) {
i += 1;
continue;
}
if (isNumber(c)) {
const h = i;
i = expectNumber(str, i);
const num = try newToken(.TkNum, str[h..i], i, str);
try tokens.append(num);
continue;
}
if (c == '"') {
const tok = try readStringLiteral(tokens, str, &i);
try tokens.append(tok.*);
continue;
}
if (isIdentHead(c)) {
const h = i;
i = readIdent(str, i);
var tk = try newToken(.TkIdent, str[h..i], i, str);
if (isKeyword(str, h, i)) {
tk = try newToken(.TkKeyword, str[h..i], i, str);
}
try tokens.append(tk);
continue;
}
const puncts_end = readPuncts(str, i);
if (puncts_end > i) {
const punct = try newToken(.TkPunct, str[i..puncts_end], i, str);
try tokens.append(punct);
i = puncts_end;
continue;
}
if (isPunct(c)) {
const punct = try newToken(.TkPunct, str[i .. i + 1], i, str);
try tokens.append(punct);
i += 1;
continue;
}
errorAt(i, null, "トークナイズできませんでした");
}
return tokens;
}
fn isNumber(c: u8) bool {
return '0' <= c and c <= '9';
}
fn isSpace(c: u8) bool {
for (SPACE_CHARS) |k| {
if (c == k) {
return true;
}
}
return false;
}
fn isPunct(c: u8) bool {
for (PUNCT_CHARS) |k| {
if (c == k) {
return true;
}
}
return false;
}
fn isIdentHead(c: u8) bool {
return ('a' <= c and c <= 'z') or ('A' <= c and c <= 'Z') or c == '_';
}
fn isIdentTail(c: u8) bool {
return isIdentHead(c) or ('0' <= c and c <= '9');
}
fn readIdent(str: [*:0]const u8, i: usize) usize {
if (str[i] == 0 or !isIdentHead(str[i])) {
return i;
}
var h = i + 1;
while (str[h] != 0 and isIdentTail(str[h])) {
h += 1;
}
return h;
}
fn isKeyword(str: [*:0]const u8, startIndex: usize, endIndex: usize) bool {
const pstr = allocPrint0(getAllocator(), "{}", .{str[startIndex..endIndex]}) catch "";
for (KEYWORDS) |kwd| {
if (streq(kwd, pstr)) {
return true;
}
}
return false;
}
fn readPuncts(str: [*:0]const u8, i: usize) usize {
for (PUNCT_STRS) |pstr| {
const cut_str = allocPrint0(getAllocator(), "{}", .{str[i .. i + pstr.len]}) catch "";
if (streq(cut_str, pstr)) {
return i + pstr.len;
}
}
return i;
}
fn expectNumber(ptr: [*:0]const u8, index: usize) usize {
if (isNumber(ptr[index])) {
return consumeNumber(ptr, index);
} else {
errorAt(index, null, "数値ではありません");
}
}
fn consumeNumber(ptr: [*:0]const u8, index: usize) usize {
var end = index;
while (isNumber(ptr[end])) {
end += 1;
}
return end;
}
pub fn streq(a: [:0]const u8, b: [:0]const u8) bool {
return std.mem.eql(u8, a, b);
}
fn startsWith(str: [:0]u8, i: usize, target: [:0]const u8) bool {
    if (i + target.len > str.len)
        return false;
    var j: usize = 0;
    while (j < target.len) : (j += 1) {
        if (str[i + j] != target[j])
            return false;
    }
    return true;
}
pub fn atoi(s: [:0]u8) i64 {
var n: i64 = 0;
var neg = false;
var si: usize = 0;
if (s.len == 0)
return n;
while (si < s.len and isSpace(s[si]))
si += 1;
switch (s[si]) {
'-' => {
neg = true;
si += 1;
},
'+' => si += 1,
else => {},
}
while (si < s.len and isNumber(s[si])) : (si += 1) {
n = 10 * n - (@intCast(i64, s[si]) - '0');
}
if (neg) {
return n;
} else {
return (-1 * n);
}
}
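// Hedged illustrative test (not part of the original compiler): atoi above skips
// leading whitespace, accumulates the digits negatively so that the most negative
// i64 cannot overflow, and flips the sign at the end for positive input.
test "atoi sign and whitespace handling" {
    var negative = "-42".*;
    var positive = "  99".*;
    std.debug.assert(atoi(&negative) == -42);
    std.debug.assert(atoi(&positive) == 99);
}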
fn stringLiteralEnd(str: [*:0]const u8, index: usize) usize {
var h = index;
var c = str[h];
while (c != '"') : (c = str[h]) {
if (c == '\n' or c == 0)
errorAt(index, null, "文字列リテラルが閉じられていません");
if (c == '\\') {
h += 2;
} else {
h += 1;
}
}
return h;
}
fn readStringLiteral(tokens: ArrayList(Token), str: [:0]u8, index: *usize) !*Token {
const start = index.*;
const end = stringLiteralEnd(str, start + 1);
var buf: []u8 = try getAllocator().alloc(u8, end - start);
var len: usize = 0;
var i = start + 1;
while (i < end) {
if (str[i] == '\\') {
var j = i + 1;
buf[len] = readEscapedChar(str, &j);
i = j;
} else {
buf[len] = str[i];
i += 1;
}
len += 1;
}
index.* = end + 1;
const tokenVal = try allocPrint0(getAllocator(), "{}", .{buf[0..len]});
var tok = try getAllocator().create(Token);
tok.* = try newToken(.TkStr, tokenVal, i, str);
    // the string's length is +1 to account for the terminating null character
tok.*.ty = Type.arrayOf(Type.typeChar(), len + 1);
return tok;
}
fn readEscapedChar(str: [*:0]const u8, index: *usize) u8 {
var j = index.*;
var n: usize = 0;
var c = str[j];
while (n < 3 and '0' <= str[j] and str[j] <= '7') {
if (n == 0) {
c = str[j] - '0';
} else {
c = (c << 3) + (str[j] - '0');
}
j += 1;
n += 1;
}
if (n > 0) {
index.* = j;
return c;
}
index.* += 1;
return switch (c) {
'a' => '\x07',
'b' => '\x08',
't' => '\t',
'n' => '\n',
'v' => 11,
'f' => 12,
'r' => 13,
'e' => 27,
'x' => readHex(str, index),
else => c,
};
}
fn readHex(str: [*:0]const u8, index: *usize) u8 {
if (!isXdigit(str[index.*]))
errorAt(index.*, null, "16進数ではありません");
var j = index.*;
var c: u8 = 0;
while (isXdigit(str[j])) : (j += 1) {
c = (c << 4) + fromHex(str[j]);
}
    index.* = j;
return c;
}
fn fromHex(c: u8) u8 {
if ('0' <= c and c <= '9') {
return c - '0';
} else if ('a' <= c and c <= 'f') {
return c - 'a' + 10;
}
return c - 'A' + 10;
}
fn isXdigit(c: u8) bool {
return ('0' <= c and c <= '9') or ('a' <= c and c <= 'f') or ('A' <= c and c <= 'F');
}
fn readFile(filename: [:0]u8) ![:0]u8 {
var file: fs.File = undefined;
if (streq(filename, "-")) {
        // when the filename is "-", read from standard input
file = std.io.getStdIn();
} else {
const cwd = fs.cwd();
file = cwd.openFile(filename, .{}) catch |e| {
std.debug.panic("Unable to open file: {}\n", .{@errorName(e)});
};
}
defer file.close();
var buf: [1024 * 4]u8 = undefined;
var result = try ArrayListSentineled(u8, 0).init(getAllocator(), "");
defer result.deinit();
while (true) {
const bytes_read = file.read(buf[0..]) catch |e| {
std.debug.panic("Unable to read from stream: {}\n", .{@errorName(e)});
};
if (bytes_read == 0) {
break;
}
try result.appendSlice(buf[0..bytes_read]);
}
if (!result.endsWith("\n"))
try result.appendSlice("\n");
return result.toOwnedSlice();
}
pub fn tokenizeFile(filename: [:0]u8) !ArrayList(Token) {
return try tokenize(filename, try readFile(filename));
}
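// Hedged illustrative test (not in the original source) for getLineNo defined
// just below: line counting is 1-based and increments after every '\n'.
test "getLineNo is 1-based" {
    const src = "int main;\nreturn;\n";
    std.debug.assert(getLineNo(src, 0) == 1);
    std.debug.assert(getLineNo(src, 10) == 2);
}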
pub fn getLineNo(str: [:0]const u8, loc: usize) usize {
var i: usize = 0;
var n: usize = 1;
while (i < str.len) : (i += 1) {
if (loc == i) {
return n;
}
if (str[i] == '\n')
n += 1;
}
return 0;
} | src/tokenize.zig |
const IntOrFloat = @import("types.zig").IntOrFloat;
pub const PostscriptWindowsCharacterSet = enum(u32) {
Ansi = 1,
Default = 2,
Symbol = 3,
Macintosh = 4,
ShiftJis = 5,
Hangul = 6,
HangulJohab = 7,
Gb2312 = 8,
ChineseBig5 = 9,
Greek = 10,
Turkish = 11,
Vietnamese = 12,
Hebrew = 13,
Arabic = 14,
Baltic = 15,
Bitstream = 16,
Cyrillic = 17,
Thai = 18,
EasternEuropean = 19,
Oem = 20,
};
pub const WoffMetadataUniqueID = struct {
id: []const u8,
};
// Zig enums cannot carry string tag values, so the UFO attribute strings are
// recorded in comments (and in the helper below).
pub const WoffAttributeDirection = enum {
    LeftToRight, // "ltr"
    RightToLeft, // "rtl"
};
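// Hedged helper (not in the original source): recovers the UFO attribute strings
// that the enum declaration above originally tried to encode as tag values.
pub fn woffAttributeDirectionString(dir: WoffAttributeDirection) []const u8 {
    return switch (dir) {
        .LeftToRight => "ltr",
        .RightToLeft => "rtl",
    };
}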
pub const WoffMetadataVendor = struct {
name: []const u8,
url: ?[]const u8,
dir: ?WoffAttributeDirection,
class: ?[]const u8,
};
pub const WoffMetadataCredit = struct {
name: []const u8,
url: ?[]const u8,
role: ?[]const u8,
dir: ?WoffAttributeDirection,
class: ?[]const u8,
};
pub const WoffMetadataCredits = struct {
    credits: []WoffMetadataCredit,
};
pub const WoffMetadataText = struct {
text: []const u8,
language: ?[]const u8,
dir: ?WoffAttributeDirection,
class: ?[]const u8,
};
pub const WoffMetadataDescription = struct {
url: ?[]const u8,
text: []WoffMetadataText,
};
pub const WoffMetadataLicense = struct {
url: ?[]const u8,
id: ?[]const u8,
text: []WoffMetadataText,
};
pub const WoffMetadataCopyright = struct {
text: []WoffMetadataText,
};
pub const WoffMetadataTrademark = struct {
text: []WoffMetadataText,
};
pub const WoffMetadataLicensee = struct {
name: []const u8,
dir: ?WoffAttributeDirection,
class: ?[]const u8,
};
pub const WoffMetadataExtensionName = struct {
text: []const u8,
language: ?[]const u8,
dir: ?WoffAttributeDirection,
class: ?[]const u8,
};
pub const WoffMetadataExtensionValue = struct {
text: []const u8,
language: ?[]const u8,
dir: ?WoffAttributeDirection,
class: ?[]const u8,
};
pub const WoffMetadataExtensionItem = struct {
id: []const u8,
names: []WoffMetadataExtensionName,
values: []WoffMetadataExtensionValue,
};
pub const WoffMetadataExtension = struct {
id: ?[]const u8,
names: []WoffMetadataExtensionName,
items: []WoffMetadataExtensionItem,
};
pub const Guideline = struct {};
// Zig enums cannot carry string tag values, so the UFO style map strings are
// recorded in comments (and in the helper below).
pub const StyleMapStyle = enum {
    regular, // "regular"
    italic, // "italic"
    bold, // "bold"
    bold_italic, // "bold italic"
};
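// Hedged helper (not in the original source): the style map strings that the
// enum above originally tried to encode as tag values.
pub fn styleMapStyleString(style: StyleMapStyle) []const u8 {
    return switch (style) {
        .regular => "regular",
        .italic => "italic",
        .bold => "bold",
        .bold_italic => "bold italic",
    };
}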
pub const GaspBehavior = enum {
gridfit,
do_gray,
symmetric_gridfit,
symmetric_smoothing,
};
pub const GaspRangeRecords = struct {
range_max_ppem: u32,
range_gasp_behavior_flags: []GaspBehavior,
};
pub const NameRecords = struct {
name_id: IntOrFloat,
platform_id: IntOrFloat,
encoding_id: IntOrFloat,
language_id: IntOrFloat,
string: []const u8,
};
pub const Os2Panose = struct {
family_type: u32,
serif_style: u32,
weight: u32,
proportion: u32,
contrast: u32,
stroke_variation: u32,
arm_style: u32,
letter_form: u32,
midline: u32,
x_height: u32,
};
pub const Os2FamilyClass = struct {
// TODO: add verification.
class_id: u8,
subclass: u8,
};
pub const FontInfo = struct {
// Generic Identification Information
family_name: ?[]const u8,
style_name: ?[]const u8,
style_map_family_name: ?[]const u8,
style_map_style_name: ?StyleMapStyle,
version_major: ?i32,
version_minor: ?u32,
year: ?i32,
// Generic Legal Information
copyright: ?[]const u8,
trademark: ?[]const u8,
// Generic Dimension Information
units_per_em: ?IntOrFloat,
descender: ?IntOrFloat,
x_height: ?IntOrFloat,
cap_height: ?IntOrFloat,
ascender: ?IntOrFloat,
italic_angle: ?IntOrFloat,
// Generic Miscellaneous Information
note: ?[]const u8,
// OpenType gasp Table Fields
opentype_gasp_range_records: ?[]GaspRangeRecords,
// OpenType head Table Fields
opentype_head_created: ?[]const u8,
opentype_head_lowest_rec_ppem: ?u32,
opentype_head_flags: ?[]u8,
// OpenType hhea Table Fields
opentype_hhea_ascender: ?i32,
opentype_hhea_descender: ?i32,
opentype_hhea_linegap: ?i32,
opentype_hhea_caret_slope_rise: ?i32,
opentype_hhea_caret_slope_run: ?i32,
opentype_hhea_caret_offset: ?i32,
//OpenType Name Table Fields
opentype_name_designer: ?[]const u8,
opentype_name_designer_url: ?[]const u8,
opentype_name_manufacturer: ?[]const u8,
opentype_name_manufacturer_url: ?[]const u8,
opentype_name_license: ?[]const u8,
opentype_name_license_url: ?[]const u8,
opentype_name_version: ?[]const u8,
opentype_name_unique_id: ?[]const u8,
opentype_name_description: ?[]const u8,
opentype_name_preferred_family_name: ?[]const u8,
opentype_name_preferred_subfamily_name: ?[]const u8,
opentype_name_compatible_full_name: ?[]const u8,
opentype_name_sample_text: ?[]const u8,
opentype_name_wws_family_name: ?[]const u8,
opentype_name_wws_subfamily_name: ?[]const u8,
opentype_name_records: ?[]NameRecords,
// OpenType OS/2 Table Fields
opentype_os2_width_class: ?IntOrFloat,
opentype_os2_weight_class: ?IntOrFloat,
opentype_os2_selection: ?[]u8,
opentype_os2_vendor_id: ?[]const u8,
opentype_os2_panose: ?Os2Panose,
opentype_os2_family_class: ?Os2FamilyClass,
opentype_os2_unicode_ranges: ?[]u8,
opentype_os2_code_page_ranges: ?[]u8,
opentype_os2_typo_ascender: ?i32,
opentype_os2_typo_descender: ?i32,
opentype_os2_typo_line_gap: ?i32,
opentype_os2_win_ascent: ?u32,
opentype_os2_win_descent: ?u32,
opentype_os2_type: ?[]u8,
opentype_os2_subscript_x_size: ?i32,
opentype_os2_subscript_y_size: ?i32,
opentype_os2_subscript_x_offset: ?i32,
opentype_os2_subscript_y_offset: ?i32,
opentype_os2_superscript_x_size: ?i32,
opentype_os2_superscript_y_size: ?i32,
opentype_os2_superscript_x_offset: ?i32,
opentype_os2_superscript_y_offset: ?i32,
opentype_os2_strikeout_size: ?i32,
opentype_os2_strikeout_position: ?i32,
// OpenType vhea Table Fields
opentype_v_hea_vert_typo_ascender: ?i32,
opentype_v_hea_vert_typo_descender: ?i32,
opentype_v_hea_vert_typo_line_gap: ?i32,
opentype_v_hea_caret_slope_rise: ?i32,
opentype_v_hea_caret_slope_run: ?i32,
opentype_v_hea_caret_offset: ?i32,
// PostScript Specific Data
postscript_font_name: ?[]const u8,
postscript_full_name: ?[]const u8,
postscript_slant_angle: ?IntOrFloat,
postscript_unique_id: ?u32,
postscript_underline_thickness: ?IntOrFloat,
postscript_underline_position: ?IntOrFloat,
postscript_is_fixed_pitch: ?bool,
postscript_blue_values: ?[]IntOrFloat,
postscript_other_blues: ?[]IntOrFloat,
postscript_family_blues: ?[]IntOrFloat,
postscript_family_other_blues: ?[]IntOrFloat,
postscript_stem_snap_h: ?[]IntOrFloat,
postscript_stem_snap_v: ?[]IntOrFloat,
postscript_blue_fuzz: ?IntOrFloat,
postscript_blue_shift: ?IntOrFloat,
postscript_blue_scale: ?f64,
postscript_force_bold: ?bool,
postscript_default_width_x: ?IntOrFloat,
postscript_nominal_width_x: ?IntOrFloat,
postscript_weight_name: ?[]const u8,
postscript_default_character: ?[]const u8,
postscript_windows_character_set: ?PostscriptWindowsCharacterSet,
// Macintosh FOND Resource Data
macintosh_fond_family_id: ?i32,
macintosh_fond_name: ?[]const u8,
// WOFF Data
woff_major_version: ?IntOrFloat,
woff_minor_version: ?IntOrFloat,
woff_metadata_unique_id: ?WoffMetadataUniqueID,
woff_metadata_vendor: ?WoffMetadataVendor,
woff_metadata_credits: ?WoffMetadataCredits,
woff_metadata_description: ?WoffMetadataDescription,
woff_metadata_license: ?WoffMetadataLicense,
woff_metadata_copyright: ?WoffMetadataCopyright,
woff_metadata_trademark: ?WoffMetadataTrademark,
woff_metadata_licensee: ?WoffMetadataLicensee,
woff_metadata_extensions: ?[]WoffMetadataExtension,
// Guidelines
guidelines: ?[]Guideline,
}; | src/font_info.zig |
const addv = @import("addo.zig");
const testing = @import("std").testing;
fn test__addosi4(a: i32, b: i32) !void {
var result_ov: c_int = undefined;
var expected_ov: c_int = undefined;
var result = addv.__addosi4(a, b, &result_ov);
var expected: i32 = simple_addosi4(a, b, &expected_ov);
try testing.expectEqual(expected, result);
try testing.expectEqual(expected_ov, result_ov);
}
fn simple_addosi4(a: i32, b: i32, overflow: *c_int) i32 {
overflow.* = 0;
const min: i32 = -2147483648;
const max: i32 = 2147483647;
if (((a > 0) and (b > max - a)) or
((a < 0) and (b < min - a)))
overflow.* = 1;
return a +% b;
}
test "addosi4" {
// -2^31 <= i32 <= 2^31-1
// 2^31 = 2147483648
// 2^31-1 = 2147483647
const min: i32 = -2147483648;
const max: i32 = 2147483647;
var i: i32 = 1;
while (i < max) : (i *|= 2) {
try test__addosi4(i, i);
try test__addosi4(-i, -i);
try test__addosi4(i, -i);
try test__addosi4(-i, i);
}
// edge cases
// 0 + 0 = 0
// MIN + MIN overflow
// MAX + MAX overflow
// 0 + MIN MIN
// 0 + MAX MAX
// MIN + 0 MIN
// MAX + 0 MAX
// MIN + MAX -1
// MAX + MIN -1
try test__addosi4(0, 0);
try test__addosi4(min, min);
try test__addosi4(max, max);
try test__addosi4(0, min);
try test__addosi4(0, max);
try test__addosi4(min, 0);
try test__addosi4(max, 0);
try test__addosi4(min, max);
try test__addosi4(max, min);
// derived edge cases
// MIN+1 + MIN overflow
// MAX-1 + MAX overflow
// 1 + MIN = MIN+1
// -1 + MIN overflow
// -1 + MAX = MAX-1
// +1 + MAX overflow
// MIN + 1 = MIN+1
// MIN + -1 overflow
// MAX + 1 overflow
// MAX + -1 = MAX-1
try test__addosi4(min + 1, min);
try test__addosi4(max - 1, max);
try test__addosi4(1, min);
try test__addosi4(-1, min);
try test__addosi4(-1, max);
try test__addosi4(1, max);
try test__addosi4(min, 1);
try test__addosi4(min, -1);
try test__addosi4(max, -1);
try test__addosi4(max, 1);
} | lib/std/special/compiler_rt/addosi4_test.zig |
const std = @import("std");
const __truncsfhf2 = @import("truncXfYf2.zig").__truncsfhf2;
fn test__truncsfhf2(a: u32, expected: u16) !void {
const actual = @bitCast(u16, __truncsfhf2(@bitCast(f32, a)));
if (actual == expected) {
return;
}
return error.TestFailure;
}
test "truncsfhf2" {
try test__truncsfhf2(0x7fc00000, 0x7e00); // qNaN
    try test__truncsfhf2(0x7fe00000, 0x7f00); // NaN with payload
try test__truncsfhf2(0, 0); // 0
try test__truncsfhf2(0x80000000, 0x8000); // -0
try test__truncsfhf2(0x7f800000, 0x7c00); // inf
try test__truncsfhf2(0xff800000, 0xfc00); // -inf
try test__truncsfhf2(0x477ff000, 0x7c00); // 65520 -> inf
try test__truncsfhf2(0xc77ff000, 0xfc00); // -65520 -> -inf
try test__truncsfhf2(0x71cc3892, 0x7c00); // 0x1.987124876876324p+100 -> inf
try test__truncsfhf2(0xf1cc3892, 0xfc00); // -0x1.987124876876324p+100 -> -inf
try test__truncsfhf2(0x38800000, 0x0400); // normal (min), 2**-14
try test__truncsfhf2(0xb8800000, 0x8400); // normal (min), -2**-14
try test__truncsfhf2(0x477fe000, 0x7bff); // normal (max), 65504
try test__truncsfhf2(0xc77fe000, 0xfbff); // normal (max), -65504
try test__truncsfhf2(0x477fe100, 0x7bff); // normal, 65505 -> 65504
try test__truncsfhf2(0xc77fe100, 0xfbff); // normal, -65505 -> -65504
try test__truncsfhf2(0x477fef00, 0x7bff); // normal, 65519 -> 65504
try test__truncsfhf2(0xc77fef00, 0xfbff); // normal, -65519 -> -65504
try test__truncsfhf2(0x3f802000, 0x3c01); // normal, 1 + 2**-10
try test__truncsfhf2(0xbf802000, 0xbc01); // normal, -1 - 2**-10
try test__truncsfhf2(0x3eaaa000, 0x3555); // normal, approx. 1/3
try test__truncsfhf2(0xbeaaa000, 0xb555); // normal, approx. -1/3
try test__truncsfhf2(0x40490fdb, 0x4248); // normal, 3.1415926535
try test__truncsfhf2(0xc0490fdb, 0xc248); // normal, -3.1415926535
try test__truncsfhf2(0x45cc3892, 0x6e62); // normal, 0x1.987124876876324p+12
try test__truncsfhf2(0x3f800000, 0x3c00); // normal, 1
try test__truncsfhf2(0x38800000, 0x0400); // normal, 0x1.0p-14
try test__truncsfhf2(0x33800000, 0x0001); // denormal (min), 2**-24
try test__truncsfhf2(0xb3800000, 0x8001); // denormal (min), -2**-24
try test__truncsfhf2(0x387fc000, 0x03ff); // denormal (max), 2**-14 - 2**-24
try test__truncsfhf2(0xb87fc000, 0x83ff); // denormal (max), -2**-14 + 2**-24
try test__truncsfhf2(0x35800000, 0x0010); // denormal, 0x1.0p-20
try test__truncsfhf2(0x33280000, 0x0001); // denormal, 0x1.5p-25 -> 0x1.0p-24
try test__truncsfhf2(0x33000000, 0x0000); // 0x1.0p-25 -> zero
}
const __truncdfhf2 = @import("truncXfYf2.zig").__truncdfhf2;
fn test__truncdfhf2(a: f64, expected: u16) void {
const rep = @bitCast(u16, __truncdfhf2(a));
if (rep == expected) {
return;
}
// test other possible NaN representation(signal NaN)
else if (expected == 0x7e00) {
if ((rep & 0x7c00) == 0x7c00 and (rep & 0x3ff) > 0) {
return;
}
}
@panic("__truncdfhf2 test failure");
}
fn test__truncdfhf2_raw(a: u64, expected: u16) void {
const actual = @bitCast(u16, __truncdfhf2(@bitCast(f64, a)));
if (actual == expected) {
return;
}
@panic("__truncdfhf2 test failure");
}
test "truncdfhf2" {
test__truncdfhf2_raw(0x7ff8000000000000, 0x7e00); // qNaN
test__truncdfhf2_raw(0x7ff0000000008000, 0x7e00); // NaN
test__truncdfhf2_raw(0x7ff0000000000000, 0x7c00); //inf
test__truncdfhf2_raw(0xfff0000000000000, 0xfc00); // -inf
test__truncdfhf2(0.0, 0x0); // zero
test__truncdfhf2_raw(0x80000000 << 32, 0x8000); // -zero
test__truncdfhf2(3.1415926535, 0x4248);
test__truncdfhf2(-3.1415926535, 0xc248);
test__truncdfhf2(0x1.987124876876324p+1000, 0x7c00);
test__truncdfhf2(0x1.987124876876324p+12, 0x6e62);
test__truncdfhf2(0x1.0p+0, 0x3c00);
test__truncdfhf2(0x1.0p-14, 0x0400);
// denormal
test__truncdfhf2(0x1.0p-20, 0x0010);
test__truncdfhf2(0x1.0p-24, 0x0001);
test__truncdfhf2(-0x1.0p-24, 0x8001);
test__truncdfhf2(0x1.5p-25, 0x0001);
// and back to zero
test__truncdfhf2(0x1.0p-25, 0x0000);
test__truncdfhf2(-0x1.0p-25, 0x8000);
// max (precise)
test__truncdfhf2(65504.0, 0x7bff);
// max (rounded)
test__truncdfhf2(65519.0, 0x7bff);
// max (to +inf)
test__truncdfhf2(65520.0, 0x7c00);
test__truncdfhf2(-65520.0, 0xfc00);
test__truncdfhf2(65536.0, 0x7c00);
}
const __trunctfsf2 = @import("truncXfYf2.zig").__trunctfsf2;
fn test__trunctfsf2(a: f128, expected: u32) void {
const x = __trunctfsf2(a);
const rep = @bitCast(u32, x);
if (rep == expected) {
return;
}
// test other possible NaN representation(signal NaN)
else if (expected == 0x7fc00000) {
if ((rep & 0x7f800000) == 0x7f800000 and (rep & 0x7fffff) > 0) {
return;
}
}
@panic("__trunctfsf2 test failure");
}
test "trunctfsf2" {
// qnan
test__trunctfsf2(@bitCast(f128, @as(u128, 0x7fff800000000000 << 64)), 0x7fc00000);
// nan
test__trunctfsf2(@bitCast(f128, @as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7fc08000);
// inf
test__trunctfsf2(@bitCast(f128, @as(u128, 0x7fff000000000000 << 64)), 0x7f800000);
// zero
test__trunctfsf2(0.0, 0x0);
test__trunctfsf2(0x1.23a2abb4a2ddee355f36789abcdep+5, 0x4211d156);
test__trunctfsf2(0x1.e3d3c45bd3abfd98b76a54cc321fp-9, 0x3b71e9e2);
test__trunctfsf2(0x1.234eebb5faa678f4488693abcdefp+4534, 0x7f800000);
test__trunctfsf2(0x1.edcba9bb8c76a5a43dd21f334634p-435, 0x0);
}
const __trunctfdf2 = @import("truncXfYf2.zig").__trunctfdf2;
fn test__trunctfdf2(a: f128, expected: u64) void {
const x = __trunctfdf2(a);
const rep = @bitCast(u64, x);
if (rep == expected) {
return;
}
// test other possible NaN representation(signal NaN)
else if (expected == 0x7ff8000000000000) {
if ((rep & 0x7ff0000000000000) == 0x7ff0000000000000 and (rep & 0xfffffffffffff) > 0) {
return;
}
}
@panic("__trunctfsf2 test failure");
}
test "trunctfdf2" {
// qnan
test__trunctfdf2(@bitCast(f128, @as(u128, 0x7fff800000000000 << 64)), 0x7ff8000000000000);
// nan
test__trunctfdf2(@bitCast(f128, @as(u128, (0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7ff8100000000000);
// inf
test__trunctfdf2(@bitCast(f128, @as(u128, 0x7fff000000000000 << 64)), 0x7ff0000000000000);
// zero
test__trunctfdf2(0.0, 0x0);
test__trunctfdf2(0x1.af23456789bbaaab347645365cdep+5, 0x404af23456789bbb);
test__trunctfdf2(0x1.dedafcff354b6ae9758763545432p-9, 0x3f6dedafcff354b7);
test__trunctfdf2(0x1.2f34dd5f437e849b4baab754cdefp+4534, 0x7ff0000000000000);
test__trunctfdf2(0x1.edcbff8ad76ab5bf46463233214fp-435, 0x24cedcbff8ad76ab);
}
const __truncdfsf2 = @import("truncXfYf2.zig").__truncdfsf2;
fn test__truncdfsf2(a: f64, expected: u32) void {
const x = __truncdfsf2(a);
const rep = @bitCast(u32, x);
if (rep == expected) {
return;
}
// test other possible NaN representation(signal NaN)
else if (expected == 0x7fc00000) {
if ((rep & 0x7f800000) == 0x7f800000 and (rep & 0x7fffff) > 0) {
return;
}
}
std.debug.print("got 0x{x} wanted 0x{x}\n", .{ rep, expected });
@panic("__trunctfsf2 test failure");
}
test "truncdfsf2" {
// nan & qnan
test__truncdfsf2(@bitCast(f64, @as(u64, 0x7ff8000000000000)), 0x7fc00000);
test__truncdfsf2(@bitCast(f64, @as(u64, 0x7ff0000000000001)), 0x7fc00000);
// inf
test__truncdfsf2(@bitCast(f64, @as(u64, 0x7ff0000000000000)), 0x7f800000);
test__truncdfsf2(@bitCast(f64, @as(u64, 0xfff0000000000000)), 0xff800000);
test__truncdfsf2(0.0, 0x0);
test__truncdfsf2(1.0, 0x3f800000);
test__truncdfsf2(-1.0, 0xbf800000);
// huge number becomes inf
test__truncdfsf2(340282366920938463463374607431768211456.0, 0x7f800000);
}
const __trunctfhf2 = @import("truncXfYf2.zig").__trunctfhf2;
fn test__trunctfhf2(a: f128, expected: u16) void {
const x = __trunctfhf2(a);
const rep = @bitCast(u16, x);
if (rep == expected) {
return;
}
std.debug.print("got 0x{x} wanted 0x{x}\n", .{ rep, expected });
@panic("__trunctfhf2 test failure");
}
test "trunctfhf2" {
// qNaN
test__trunctfhf2(@bitCast(f128, @as(u128, 0x7fff8000000000000000000000000000)), 0x7e00);
// NaN
test__trunctfhf2(@bitCast(f128, @as(u128, 0x7fff0000000000000000000000000001)), 0x7e00);
// inf
test__trunctfhf2(@bitCast(f128, @as(u128, 0x7fff0000000000000000000000000000)), 0x7c00);
test__trunctfhf2(-@bitCast(f128, @as(u128, 0x7fff0000000000000000000000000000)), 0xfc00);
// zero
test__trunctfhf2(0.0, 0x0);
test__trunctfhf2(-0.0, 0x8000);
test__trunctfhf2(3.1415926535, 0x4248);
test__trunctfhf2(-3.1415926535, 0xc248);
test__trunctfhf2(0x1.987124876876324p+100, 0x7c00);
test__trunctfhf2(0x1.987124876876324p+12, 0x6e62);
test__trunctfhf2(0x1.0p+0, 0x3c00);
test__trunctfhf2(0x1.0p-14, 0x0400);
// denormal
test__trunctfhf2(0x1.0p-20, 0x0010);
test__trunctfhf2(0x1.0p-24, 0x0001);
test__trunctfhf2(-0x1.0p-24, 0x8001);
test__trunctfhf2(0x1.5p-25, 0x0001);
// and back to zero
test__trunctfhf2(0x1.0p-25, 0x0000);
test__trunctfhf2(-0x1.0p-25, 0x8000);
// max (precise)
test__trunctfhf2(65504.0, 0x7bff);
// max (rounded)
test__trunctfhf2(65519.0, 0x7bff);
// max (to +inf)
test__trunctfhf2(65520.0, 0x7c00);
test__trunctfhf2(65536.0, 0x7c00);
test__trunctfhf2(-65520.0, 0xfc00);
test__trunctfhf2(0x1.23a2abb4a2ddee355f36789abcdep+5, 0x508f);
test__trunctfhf2(0x1.e3d3c45bd3abfd98b76a54cc321fp-9, 0x1b8f);
test__trunctfhf2(0x1.234eebb5faa678f4488693abcdefp+453, 0x7c00);
test__trunctfhf2(0x1.edcba9bb8c76a5a43dd21f334634p-43, 0x0);
} | lib/std/special/compiler_rt/truncXfYf2_test.zig |
const std = @import("../std.zig");
const builtin = @import("builtin");
const maxInt = std.math.maxInt;
const iovec = std.os.iovec;
const iovec_const = std.os.iovec_const;
extern "c" fn _errnop() *c_int;
pub const _errno = _errnop;
pub extern "c" fn find_directory(which: c_int, volume: i32, createIt: bool, path_ptr: [*]u8, length: i32) u64;
pub extern "c" fn find_thread(thread_name: ?*anyopaque) i32;
pub extern "c" fn get_system_info(system_info: *system_info) usize;
pub extern "c" fn _get_team_info(team: c_int, team_info: *team_info, size: usize) i32;
pub extern "c" fn _get_next_area_info(team: c_int, cookie: *i64, area_info: *area_info, size: usize) i32;
// TODO revisit if abi changes or better option becomes apparent
pub extern "c" fn _get_next_image_info(team: c_int, cookie: *i32, image_info: *image_info, size: usize) i32;
pub extern "c" fn _kern_read_dir(fd: c_int, buf_ptr: [*]u8, nbytes: usize, maxcount: u32) usize;
pub extern "c" fn _kern_read_stat(fd: c_int, path_ptr: [*]u8, traverse_link: bool, st: *Stat, stat_size: i32) usize;
pub extern "c" fn _kern_get_current_team() i32;
pub const sem_t = extern struct {
type: i32,
u: extern union {
named_sem_id: ?i32,
unnamed_sem: ?i32,
},
padding: [2]i32,
};
pub const pthread_attr_t = extern struct {
__detach_state: i32,
__sched_priority: i32,
__stack_size: i32,
__guard_size: i32,
__stack_address: ?*anyopaque,
};
pub const pthread_mutex_t = extern struct {
flags: u32 = 0,
lock: i32 = 0,
unused: i32 = -42,
owner: i32 = -1,
owner_count: i32 = 0,
};
pub const pthread_cond_t = extern struct {
flags: u32 = 0,
unused: i32 = -42,
mutex: ?*anyopaque = null,
waiter_count: i32 = 0,
lock: i32 = 0,
};
pub const EAI = enum(c_int) {
/// address family for hostname not supported
ADDRFAMILY = 1,
/// name could not be resolved at this time
AGAIN = 2,
/// flags parameter had an invalid value
BADFLAGS = 3,
/// non-recoverable failure in name resolution
FAIL = 4,
/// address family not recognized
FAMILY = 5,
/// memory allocation failure
MEMORY = 6,
/// no address associated with hostname
NODATA = 7,
/// name does not resolve
NONAME = 8,
/// service not recognized for socket type
SERVICE = 9,
/// intended socket type was not recognized
SOCKTYPE = 10,
/// system error returned in errno
SYSTEM = 11,
/// invalid value for hints
BADHINTS = 12,
/// resolved protocol is unknown
PROTOCOL = 13,
/// argument buffer overflow
OVERFLOW = 14,
_,
};
pub const EAI_MAX = 15;
pub const AI = struct {
pub const NUMERICSERV = 0x00000008;
};
pub const AI_NUMERICSERV = AI.NUMERICSERV;
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const uid_t = u32;
pub const gid_t = u32;
pub const mode_t = c_uint;
pub const socklen_t = u32;
// Modes and flags for dlopen()
// include/dlfcn.h
pub const POLL = struct {
/// input available
pub const IN = 70;
/// output available
pub const OUT = 71;
/// input message available
pub const MSG = 72;
/// I/O error
pub const ERR = 73;
/// high priority input available
pub const PRI = 74;
/// device disconnected
pub const HUP = 75;
};
pub const RTLD = struct {
/// relocations are performed as needed
pub const LAZY = 0;
/// the file gets relocated at load time
pub const NOW = 1;
/// all symbols are available
pub const GLOBAL = 2;
/// symbols are not available for relocating any other object
pub const LOCAL = 0;
};
pub const dl_phdr_info = extern struct {
dlpi_addr: usize,
dlpi_name: ?[*:0]const u8,
dlpi_phdr: [*]std.elf.Phdr,
dlpi_phnum: u16,
};
pub const Flock = extern struct {
type: c_short,
whence: c_short,
start: off_t,
len: off_t,
pid: pid_t,
};
pub const msghdr = extern struct {
/// optional address
msg_name: ?*sockaddr,
/// size of address
msg_namelen: socklen_t,
/// scatter/gather array
msg_iov: [*]iovec,
/// # elements in msg_iov
msg_iovlen: i32,
/// ancillary data
msg_control: ?*anyopaque,
/// ancillary data buffer len
msg_controllen: socklen_t,
/// flags on received message
msg_flags: i32,
};
pub const off_t = i64;
pub const ino_t = u64;
pub const nfds_t = u32;
pub const pollfd = extern struct {
fd: i32,
events: i16,
revents: i16,
};
pub const Stat = extern struct {
dev: i32,
ino: u64,
mode: u32,
nlink: i32,
uid: i32,
gid: i32,
size: i64,
rdev: i32,
blksize: i32,
atim: timespec,
mtim: timespec,
ctim: timespec,
crtim: timespec,
st_type: u32,
blocks: i64,
pub fn atime(self: @This()) timespec {
return self.atim;
}
pub fn mtime(self: @This()) timespec {
return self.mtim;
}
pub fn ctime(self: @This()) timespec {
return self.ctim;
}
pub fn birthtime(self: @This()) timespec {
return self.crtim;
}
};
pub const timespec = extern struct {
tv_sec: isize,
tv_nsec: isize,
};
pub const dirent = extern struct {
d_dev: i32,
d_pdev: i32,
d_ino: i64,
d_pino: i64,
d_reclen: u16,
d_name: [256]u8,
pub fn reclen(self: dirent) u16 {
return self.d_reclen;
}
};
pub const B_OS_NAME_LENGTH = 32; // OS.h
pub const area_info = extern struct {
area: u32,
name: [B_OS_NAME_LENGTH]u8,
size: usize,
lock: u32,
protection: u32,
team_id: i32,
ram_size: u32,
copy_count: u32,
in_count: u32,
out_count: u32,
address: *anyopaque,
};
pub const MAXPATHLEN = PATH_MAX;
pub const image_info = extern struct {
id: u32,
image_type: u32,
sequence: i32,
init_order: i32,
init_routine: *anyopaque,
term_routine: *anyopaque,
device: i32,
node: i64,
name: [MAXPATHLEN]u8,
text: *anyopaque,
data: *anyopaque,
text_size: i32,
data_size: i32,
api_version: i32,
abi: i32,
};
pub const system_info = extern struct {
boot_time: i64,
cpu_count: u32,
max_pages: u64,
used_pages: u64,
cached_pages: u64,
block_cache_pages: u64,
ignored_pages: u64,
needed_memory: u64,
free_memory: u64,
max_swap_pages: u64,
free_swap_pages: u64,
page_faults: u32,
max_sems: u32,
used_sems: u32,
max_ports: u32,
used_ports: u32,
max_threads: u32,
used_threads: u32,
max_teams: u32,
used_teams: u32,
kernel_name: [256]u8,
kernel_build_date: [32]u8,
kernel_build_time: [32]u8,
kernel_version: i64,
abi: u32,
};
pub const team_info = extern struct {
team_id: i32,
thread_count: i32,
image_count: i32,
area_count: i32,
debugger_nub_thread: i32,
debugger_nub_port: i32,
argc: i32,
args: [64]u8,
uid: uid_t,
gid: gid_t,
};
pub const in_port_t = u16;
pub const sa_family_t = u8;
pub const sockaddr = extern struct {
/// total length
len: u8,
/// address family
family: sa_family_t,
/// actually longer; address value
data: [14]u8,
pub const SS_MAXSIZE = 128;
pub const storage = std.x.os.Socket.Address.Native.Storage;
pub const in = extern struct {
len: u8 = @sizeOf(in),
family: sa_family_t = AF.INET,
port: in_port_t,
addr: u32,
zero: [8]u8 = [8]u8{ 0, 0, 0, 0, 0, 0, 0, 0 },
};
pub const in6 = extern struct {
len: u8 = @sizeOf(in6),
family: sa_family_t = AF.INET6,
port: in_port_t,
flowinfo: u32,
addr: [16]u8,
scope_id: u32,
};
pub const un = extern struct {
len: u8 = @sizeOf(un),
family: sa_family_t = AF.UNIX,
path: [104]u8,
};
};
pub const CTL = struct {};
pub const KERN = struct {};
pub const IOV_MAX = 1024;
pub const PATH_MAX = 1024;
pub const STDIN_FILENO = 0;
pub const STDOUT_FILENO = 1;
pub const STDERR_FILENO = 2;
pub const PROT = struct {
pub const READ = 0x01;
pub const WRITE = 0x02;
pub const EXEC = 0x04;
pub const NONE = 0x00;
};
pub const CLOCK = struct {
/// system-wide monotonic clock (aka system time)
pub const MONOTONIC = 0;
/// system-wide real time clock
pub const REALTIME = -1;
/// clock measuring the used CPU time of the current process
pub const PROCESS_CPUTIME_ID = -2;
/// clock measuring the used CPU time of the current thread
pub const THREAD_CPUTIME_ID = -3;
};
pub const MAP = struct {
/// mmap() error return code
pub const FAILED = @intToPtr(*anyopaque, maxInt(usize));
/// changes are seen by others
pub const SHARED = 0x01;
/// changes are only seen by caller
pub const PRIVATE = 0x02;
/// require mapping to specified addr
pub const FIXED = 0x04;
/// no underlying object
pub const ANONYMOUS = 0x0008;
pub const ANON = ANONYMOUS;
/// don't commit memory
pub const NORESERVE = 0x10;
};
pub const MSF = struct {
pub const ASYNC = 1;
pub const INVALIDATE = 2;
pub const SYNC = 4;
};
pub const W = struct {
pub const NOHANG = 0x1;
pub const UNTRACED = 0x2;
pub const CONTINUED = 0x4;
pub const EXITED = 0x08;
pub const STOPPED = 0x10;
pub const NOWAIT = 0x20;
pub fn EXITSTATUS(s: u32) u8 {
return @intCast(u8, s & 0xff);
}
pub fn TERMSIG(s: u32) u32 {
return (s >> 8) & 0xff;
}
pub fn STOPSIG(s: u32) u32 {
return (s >> 16) & 0xff;
}
pub fn IFEXITED(s: u32) bool {
return (s & ~@as(u32, 0xff)) == 0;
}
pub fn IFSTOPPED(s: u32) bool {
return ((s >> 16) & 0xff) != 0;
}
pub fn IFSIGNALED(s: u32) bool {
return ((s >> 8) & 0xff) != 0;
}
};
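// Hedged usage sketch (not part of the original bindings): decoding a wait()
// status word with the helpers above. The status value is made up for
// illustration; it only exercises the bit arithmetic defined in this file.
test "W status decoding helpers" {
    const status: u32 = 0x0005; // low byte carries the exit code, no signal bits set
    std.debug.assert(W.IFEXITED(status));
    std.debug.assert(W.EXITSTATUS(status) == 5);
    std.debug.assert(!W.IFSIGNALED(status));
}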
pub const SA = struct {
pub const ONSTACK = 0x20;
pub const RESTART = 0x10;
pub const RESETHAND = 0x04;
pub const NOCLDSTOP = 0x01;
pub const NODEFER = 0x08;
pub const NOCLDWAIT = 0x02;
pub const SIGINFO = 0x40;
pub const NOMASK = NODEFER;
pub const STACK = ONSTACK;
pub const ONESHOT = RESETHAND;
};
pub const SIG = struct {
pub const ERR = @intToPtr(fn (i32) callconv(.C) void, maxInt(usize));
pub const DFL = @intToPtr(fn (i32) callconv(.C) void, 0);
pub const IGN = @intToPtr(fn (i32) callconv(.C) void, 1);
pub const HUP = 1;
pub const INT = 2;
pub const QUIT = 3;
pub const ILL = 4;
pub const CHLD = 5;
pub const ABRT = 6;
pub const IOT = ABRT;
pub const PIPE = 7;
pub const FPE = 8;
pub const KILL = 9;
pub const STOP = 10;
pub const SEGV = 11;
pub const CONT = 12;
pub const TSTP = 13;
pub const ALRM = 14;
pub const TERM = 15;
pub const TTIN = 16;
pub const TTOU = 17;
pub const USR1 = 18;
pub const USR2 = 19;
pub const WINCH = 20;
pub const KILLTHR = 21;
pub const TRAP = 22;
pub const POLL = 23;
pub const PROF = 24;
pub const SYS = 25;
pub const URG = 26;
pub const VTALRM = 27;
pub const XCPU = 28;
pub const XFSZ = 29;
pub const BUS = 30;
pub const RESERVED1 = 31;
pub const RESERVED2 = 32;
pub const BLOCK = 1;
pub const UNBLOCK = 2;
pub const SETMASK = 3;
};
// access function
pub const F_OK = 0; // test for existence of file
pub const X_OK = 1; // test for execute or search permission
pub const W_OK = 2; // test for write permission
pub const R_OK = 4; // test for read permission
pub const O = struct {
pub const RDONLY = 0x0000;
pub const WRONLY = 0x0001;
pub const RDWR = 0x0002;
pub const ACCMODE = 0x0003;
pub const RWMASK = ACCMODE;
pub const EXCL = 0x0100;
pub const CREAT = 0x0200;
pub const TRUNC = 0x0400;
pub const NOCTTY = 0x1000;
pub const NOTRAVERSE = 0x2000;
pub const CLOEXEC = 0x00000040;
pub const NONBLOCK = 0x00000080;
pub const NDELAY = NONBLOCK;
pub const APPEND = 0x00000800;
pub const SYNC = 0x00010000;
pub const RSYNC = 0x00020000;
pub const DSYNC = 0x00040000;
pub const NOFOLLOW = 0x00080000;
pub const DIRECT = 0x00100000;
pub const NOCACHE = DIRECT;
pub const DIRECTORY = 0x00200000;
};
pub const F = struct {
pub const DUPFD = 0x0001;
pub const GETFD = 0x0002;
pub const SETFD = 0x0004;
pub const GETFL = 0x0008;
pub const SETFL = 0x0010;
pub const GETLK = 0x0020;
pub const SETLK = 0x0080;
pub const SETLKW = 0x0100;
pub const DUPFD_CLOEXEC = 0x0200;
pub const RDLCK = 0x0040;
pub const UNLCK = 0x0200;
pub const WRLCK = 0x0400;
};
pub const LOCK = struct {
pub const SH = 0x01;
pub const EX = 0x02;
pub const NB = 0x04;
pub const UN = 0x08;
};
pub const FD_CLOEXEC = 1;
pub const SEEK = struct {
pub const SET = 0;
pub const CUR = 1;
pub const END = 2;
};
pub const SOCK = struct {
pub const STREAM = 1;
pub const DGRAM = 2;
pub const RAW = 3;
pub const SEQPACKET = 5;
};
pub const SO = struct {
pub const ACCEPTCONN = 0x00000001;
pub const BROADCAST = 0x00000002;
pub const DEBUG = 0x00000004;
pub const DONTROUTE = 0x00000008;
pub const KEEPALIVE = 0x00000010;
pub const OOBINLINE = 0x00000020;
pub const REUSEADDR = 0x00000040;
pub const REUSEPORT = 0x00000080;
pub const USELOOPBACK = 0x00000100;
pub const LINGER = 0x00000200;
pub const SNDBUF = 0x40000001;
pub const SNDLOWAT = 0x40000002;
pub const SNDTIMEO = 0x40000003;
pub const RCVBUF = 0x40000004;
pub const RCVLOWAT = 0x40000005;
pub const RCVTIMEO = 0x40000006;
pub const ERROR = 0x40000007;
pub const TYPE = 0x40000008;
pub const NONBLOCK = 0x40000009;
pub const BINDTODEVICE = 0x4000000a;
pub const PEERCRED = 0x4000000b;
};
pub const SOL = struct {
pub const SOCKET = -1;
};
pub const PF = struct {
pub const UNSPEC = AF.UNSPEC;
pub const INET = AF.INET;
pub const ROUTE = AF.ROUTE;
pub const LINK = AF.LINK;
pub const INET6 = AF.INET6;
pub const LOCAL = AF.LOCAL;
pub const UNIX = AF.UNIX;
pub const BLUETOOTH = AF.BLUETOOTH;
};
pub const AF = struct {
pub const UNSPEC = 0;
pub const INET = 1;
pub const APPLETALK = 2;
pub const ROUTE = 3;
pub const LINK = 4;
pub const INET6 = 5;
pub const DLI = 6;
pub const IPX = 7;
pub const NOTIFY = 8;
pub const LOCAL = 9;
pub const UNIX = LOCAL;
pub const BLUETOOTH = 10;
pub const MAX = 11;
};
pub const DT = struct {};
/// add event to kq (implies enable)
pub const EV_ADD = 0x0001;
/// delete event from kq
pub const EV_DELETE = 0x0002;
/// enable event
pub const EV_ENABLE = 0x0004;
/// disable event (not reported)
pub const EV_DISABLE = 0x0008;
/// only report one occurrence
pub const EV_ONESHOT = 0x0010;
/// clear event state after reporting
pub const EV_CLEAR = 0x0020;
/// force immediate event output
/// ... with or without EV_ERROR
/// ... use KEVENT_FLAG_ERROR_EVENTS
/// on syscalls supporting flags
pub const EV_RECEIPT = 0x0040;
/// disable event after reporting
pub const EV_DISPATCH = 0x0080;
pub const EVFILT_READ = -1;
pub const EVFILT_WRITE = -2;
/// attached to aio requests
pub const EVFILT_AIO = -3;
/// attached to vnodes
pub const EVFILT_VNODE = -4;
/// attached to struct proc
pub const EVFILT_PROC = -5;
/// attached to struct proc
pub const EVFILT_SIGNAL = -6;
/// timers
pub const EVFILT_TIMER = -7;
/// Process descriptors
pub const EVFILT_PROCDESC = -8;
/// Filesystem events
pub const EVFILT_FS = -9;
pub const EVFILT_LIO = -10;
/// User events
pub const EVFILT_USER = -11;
/// Sendfile events
pub const EVFILT_SENDFILE = -12;
pub const EVFILT_EMPTY = -13;
pub const T = struct {
pub const CGETA = 0x8000;
pub const CSETA = 0x8001;
pub const CSETAF = 0x8002;
pub const CSETAW = 0x8003;
pub const CWAITEVENT = 0x8004;
pub const CSBRK = 0x8005;
pub const CFLSH = 0x8006;
pub const CXONC = 0x8007;
pub const CQUERYCONNECTED = 0x8008;
pub const CGETBITS = 0x8009;
pub const CSETDTR = 0x8010;
pub const CSETRTS = 0x8011;
pub const IOCGWINSZ = 0x8012;
pub const IOCSWINSZ = 0x8013;
pub const CVTIME = 0x8014;
pub const IOCGPGRP = 0x8015;
pub const IOCSPGRP = 0x8016;
pub const IOCSCTTY = 0x8017;
pub const IOCMGET = 0x8018;
pub const IOCMSET = 0x8019;
pub const IOCSBRK = 0x8020;
pub const IOCCBRK = 0x8021;
pub const IOCMBIS = 0x8022;
pub const IOCMBIC = 0x8023;
pub const IOCGSID = 0x8024;
pub const FIONREAD = 0xbe000001;
pub const FIONBIO = 0xbe000000;
};
pub const winsize = extern struct {
ws_row: u16,
ws_col: u16,
ws_xpixel: u16,
ws_ypixel: u16,
};
const NSIG = 32;
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
/// signal handler
__sigaction_u: extern union {
__sa_handler: fn (i32) callconv(.C) void,
},
/// see signal options
sa_flags: u32,
/// signal mask to apply
sa_mask: sigset_t,
};
pub const sigset_t = extern struct {
__bits: [SIG.WORDS]u32,
};
const B_POSIX_ERROR_BASE = -2147454976;
pub const E = enum(i32) {
@"2BIG" = B_POSIX_ERROR_BASE + 1,
CHILD = B_POSIX_ERROR_BASE + 2,
DEADLK = B_POSIX_ERROR_BASE + 3,
FBIG = B_POSIX_ERROR_BASE + 4,
MLINK = B_POSIX_ERROR_BASE + 5,
NFILE = B_POSIX_ERROR_BASE + 6,
NODEV = B_POSIX_ERROR_BASE + 7,
NOLCK = B_POSIX_ERROR_BASE + 8,
NOSYS = B_POSIX_ERROR_BASE + 9,
NOTTY = B_POSIX_ERROR_BASE + 10,
NXIO = B_POSIX_ERROR_BASE + 11,
SPIPE = B_POSIX_ERROR_BASE + 12,
SRCH = B_POSIX_ERROR_BASE + 13,
FPOS = B_POSIX_ERROR_BASE + 14,
SIGPARM = B_POSIX_ERROR_BASE + 15,
DOM = B_POSIX_ERROR_BASE + 16,
RANGE = B_POSIX_ERROR_BASE + 17,
PROTOTYPE = B_POSIX_ERROR_BASE + 18,
PROTONOSUPPORT = B_POSIX_ERROR_BASE + 19,
PFNOSUPPORT = B_POSIX_ERROR_BASE + 20,
AFNOSUPPORT = B_POSIX_ERROR_BASE + 21,
ADDRINUSE = B_POSIX_ERROR_BASE + 22,
ADDRNOTAVAIL = B_POSIX_ERROR_BASE + 23,
NETDOWN = B_POSIX_ERROR_BASE + 24,
NETUNREACH = B_POSIX_ERROR_BASE + 25,
NETRESET = B_POSIX_ERROR_BASE + 26,
CONNABORTED = B_POSIX_ERROR_BASE + 27,
CONNRESET = B_POSIX_ERROR_BASE + 28,
ISCONN = B_POSIX_ERROR_BASE + 29,
NOTCONN = B_POSIX_ERROR_BASE + 30,
SHUTDOWN = B_POSIX_ERROR_BASE + 31,
CONNREFUSED = B_POSIX_ERROR_BASE + 32,
HOSTUNREACH = B_POSIX_ERROR_BASE + 33,
NOPROTOOPT = B_POSIX_ERROR_BASE + 34,
NOBUFS = B_POSIX_ERROR_BASE + 35,
INPROGRESS = B_POSIX_ERROR_BASE + 36,
ALREADY = B_POSIX_ERROR_BASE + 37,
ILSEQ = B_POSIX_ERROR_BASE + 38,
NOMSG = B_POSIX_ERROR_BASE + 39,
STALE = B_POSIX_ERROR_BASE + 40,
OVERFLOW = B_POSIX_ERROR_BASE + 41,
MSGSIZE = B_POSIX_ERROR_BASE + 42,
OPNOTSUPP = B_POSIX_ERROR_BASE + 43,
NOTSOCK = B_POSIX_ERROR_BASE + 44,
HOSTDOWN = B_POSIX_ERROR_BASE + 45,
BADMSG = B_POSIX_ERROR_BASE + 46,
CANCELED = B_POSIX_ERROR_BASE + 47,
DESTADDRREQ = B_POSIX_ERROR_BASE + 48,
DQUOT = B_POSIX_ERROR_BASE + 49,
IDRM = B_POSIX_ERROR_BASE + 50,
MULTIHOP = B_POSIX_ERROR_BASE + 51,
NODATA = B_POSIX_ERROR_BASE + 52,
NOLINK = B_POSIX_ERROR_BASE + 53,
NOSR = B_POSIX_ERROR_BASE + 54,
NOSTR = B_POSIX_ERROR_BASE + 55,
NOTSUP = B_POSIX_ERROR_BASE + 56,
PROTO = B_POSIX_ERROR_BASE + 57,
TIME = B_POSIX_ERROR_BASE + 58,
TXTBSY = B_POSIX_ERROR_BASE + 59,
NOATTR = B_POSIX_ERROR_BASE + 60,
NOTRECOVERABLE = B_POSIX_ERROR_BASE + 61,
OWNERDEAD = B_POSIX_ERROR_BASE + 62,
ACCES = -0x7ffffffe, // Permission denied
INTR = -0x7ffffff6, // Interrupted system call
IO = -0x7fffffff, // Input/output error
BUSY = -0x7ffffff2, // Device busy
FAULT = -0x7fffecff, // Bad address
TIMEDOUT = -0x7ffffff7, // Operation timed out
AGAIN = -0x7ffffff5,
BADF = -0x7fffa000, // Bad file descriptor
EXIST = -0x7fff9ffe, // File exists
INVAL = -0x7ffffffb, // Invalid argument
NAMETOOLONG = -0x7fff9ffc, // File name too long
NOENT = -0x7fff9ffd, // No such file or directory
PERM = -0x7ffffff1, // Operation not permitted
NOTDIR = -0x7fff9ffb, // Not a directory
ISDIR = -0x7fff9ff7, // Is a directory
NOTEMPTY = -0x7fff9ffa, // Directory not empty
NOSPC = -0x7fff9ff9, // No space left on device
ROFS = -0x7fff9ff8, // Read-only filesystem
MFILE = -0x7fff9ff6, // Too many open files
XDEV = -0x7fff9ff5, // Cross-device link
NOEXEC = -0x7fffecfe, // Exec format error
PIPE = -0x7fff9ff3, // Broken pipe
NOMEM = -0x80000000, // Cannot allocate memory
LOOP = -0x7fff9ff4, // Too many levels of symbolic links
SUCCESS = 0,
_,
};
pub const MINSIGSTKSZ = 8192;
pub const SIGSTKSZ = 16384;
pub const SS_ONSTACK = 0x1;
pub const SS_DISABLE = 0x2;
pub const stack_t = extern struct {
sp: [*]u8,
size: isize,
flags: i32,
};
pub const S = struct {
pub const IFMT = 0o170000;
pub const IFSOCK = 0o140000;
pub const IFLNK = 0o120000;
pub const IFREG = 0o100000;
pub const IFBLK = 0o060000;
pub const IFDIR = 0o040000;
pub const IFCHR = 0o020000;
pub const IFIFO = 0o010000;
pub const INDEX_DIR = 0o4000000000;
pub const IUMSK = 0o7777;
pub const ISUID = 0o4000;
pub const ISGID = 0o2000;
pub const ISVTX = 0o1000;
pub const IRWXU = 0o700;
pub const IRUSR = 0o400;
pub const IWUSR = 0o200;
pub const IXUSR = 0o100;
pub const IRWXG = 0o070;
pub const IRGRP = 0o040;
pub const IWGRP = 0o020;
pub const IXGRP = 0o010;
pub const IRWXO = 0o007;
pub const IROTH = 0o004;
pub const IWOTH = 0o002;
pub const IXOTH = 0o001;
pub fn ISREG(m: u32) bool {
return m & IFMT == IFREG;
}
pub fn ISLNK(m: u32) bool {
return m & IFMT == IFLNK;
}
pub fn ISBLK(m: u32) bool {
return m & IFMT == IFBLK;
}
pub fn ISDIR(m: u32) bool {
return m & IFMT == IFDIR;
}
pub fn ISCHR(m: u32) bool {
return m & IFMT == IFCHR;
}
pub fn ISFIFO(m: u32) bool {
return m & IFMT == IFIFO;
}
pub fn ISSOCK(m: u32) bool {
return m & IFMT == IFSOCK;
}
pub fn ISINDEX(m: u32) bool {
return m & INDEX_DIR == INDEX_DIR;
}
};
pub const HOST_NAME_MAX = 255;
pub const AT = struct {
pub const FDCWD = -1;
pub const SYMLINK_NOFOLLOW = 0x01;
pub const SYMLINK_FOLLOW = 0x02;
pub const REMOVEDIR = 0x04;
pub const EACCESS = 0x08;
};
pub const addrinfo = extern struct {
flags: i32,
family: i32,
socktype: i32,
protocol: i32,
addrlen: socklen_t,
canonname: ?[*:0]u8,
addr: ?*sockaddr,
next: ?*addrinfo,
};
pub const IPPROTO = struct {
pub const IP = 0;
pub const HOPOPTS = 0;
pub const ICMP = 1;
pub const IGMP = 2;
pub const TCP = 6;
pub const UDP = 17;
pub const IPV6 = 41;
pub const ROUTING = 43;
pub const FRAGMENT = 44;
pub const ESP = 50;
pub const AH = 51;
pub const ICMPV6 = 58;
pub const NONE = 59;
pub const DSTOPTS = 60;
pub const ETHERIP = 97;
pub const RAW = 255;
pub const MAX = 256;
};
pub const rlimit_resource = enum(c_int) {
CORE = 0,
CPU = 1,
DATA = 2,
FSIZE = 3,
NOFILE = 4,
STACK = 5,
AS = 6,
NOVMON = 7,
_,
};
pub const rlim_t = i64;
pub const RLIM = struct {
/// No limit
pub const INFINITY: rlim_t = (1 << 63) - 1;
pub const SAVED_MAX = INFINITY;
pub const SAVED_CUR = INFINITY;
};
pub const rlimit = extern struct {
/// Soft limit
cur: rlim_t,
/// Hard limit
max: rlim_t,
};
pub const SHUT = struct {
pub const RD = 0;
pub const WR = 1;
pub const RDWR = 2;
};
// TODO fill out if needed
pub const directory_which = enum(c_int) {
B_USER_SETTINGS_DIRECTORY = 0xbbe,
_,
};
pub const cc_t = u8;
pub const speed_t = u8;
pub const tcflag_t = u32;
pub const NCCS = 11;
pub const termios = extern struct {
c_iflag: tcflag_t,
c_oflag: tcflag_t,
c_cflag: tcflag_t,
c_lflag: tcflag_t,
c_line: cc_t,
c_ispeed: speed_t,
c_ospeed: speed_t,
cc_t: [NCCS]cc_t,
};
pub const MSG_NOSIGNAL = 0x0800; | lib/std/c/haiku.zig |
const CodeSignature = @This();
const std = @import("std");
const assert = std.debug.assert;
const fs = std.fs;
const log = std.log.scoped(.link);
const macho = std.macho;
const mem = std.mem;
const testing = std.testing;
const Allocator = mem.Allocator;
const Sha256 = std.crypto.hash.sha2.Sha256;
const hash_size: u8 = 32;
const Blob = union(enum) {
code_directory: *CodeDirectory,
requirements: *Requirements,
entitlements: *Entitlements,
signature: *Signature,
fn slotType(self: Blob) u32 {
return switch (self) {
.code_directory => |x| x.slotType(),
.requirements => |x| x.slotType(),
.entitlements => |x| x.slotType(),
.signature => |x| x.slotType(),
};
}
fn size(self: Blob) u32 {
return switch (self) {
.code_directory => |x| x.size(),
.requirements => |x| x.size(),
.entitlements => |x| x.size(),
.signature => |x| x.size(),
};
}
fn write(self: Blob, writer: anytype) !void {
return switch (self) {
.code_directory => |x| x.write(writer),
.requirements => |x| x.write(writer),
.entitlements => |x| x.write(writer),
.signature => |x| x.write(writer),
};
}
};
const CodeDirectory = struct {
inner: macho.CodeDirectory,
ident: []const u8,
special_slots: [n_special_slots][hash_size]u8,
code_slots: std.ArrayListUnmanaged([hash_size]u8) = .{},
const n_special_slots: usize = 7;
fn init(page_size: u16) CodeDirectory {
var cdir: CodeDirectory = .{
.inner = .{
.magic = macho.CSMAGIC_CODEDIRECTORY,
.length = @sizeOf(macho.CodeDirectory),
.version = macho.CS_SUPPORTSEXECSEG,
.flags = macho.CS_ADHOC | macho.CS_LINKER_SIGNED,
.hashOffset = 0,
.identOffset = @sizeOf(macho.CodeDirectory),
.nSpecialSlots = 0,
.nCodeSlots = 0,
.codeLimit = 0,
.hashSize = hash_size,
.hashType = macho.CS_HASHTYPE_SHA256,
.platform = 0,
.pageSize = @truncate(u8, std.math.log2(page_size)),
.spare2 = 0,
.scatterOffset = 0,
.teamOffset = 0,
.spare3 = 0,
.codeLimit64 = 0,
.execSegBase = 0,
.execSegLimit = 0,
.execSegFlags = 0,
},
.ident = undefined,
.special_slots = undefined,
};
comptime var i = 0;
inline while (i < n_special_slots) : (i += 1) {
cdir.special_slots[i] = [_]u8{0} ** hash_size;
}
return cdir;
}
fn deinit(self: *CodeDirectory, allocator: Allocator) void {
self.code_slots.deinit(allocator);
}
fn addSpecialHash(self: *CodeDirectory, index: u32, hash: [hash_size]u8) void {
assert(index > 0);
self.inner.nSpecialSlots = std.math.max(self.inner.nSpecialSlots, index);
mem.copy(u8, &self.special_slots[index - 1], &hash);
}
fn slotType(self: CodeDirectory) u32 {
_ = self;
return macho.CSSLOT_CODEDIRECTORY;
}
fn size(self: CodeDirectory) u32 {
const code_slots = self.inner.nCodeSlots * hash_size;
const special_slots = self.inner.nSpecialSlots * hash_size;
return @sizeOf(macho.CodeDirectory) + @intCast(u32, self.ident.len + 1) + special_slots + code_slots;
}
fn write(self: CodeDirectory, writer: anytype) !void {
try writer.writeIntBig(u32, self.inner.magic);
try writer.writeIntBig(u32, self.inner.length);
try writer.writeIntBig(u32, self.inner.version);
try writer.writeIntBig(u32, self.inner.flags);
try writer.writeIntBig(u32, self.inner.hashOffset);
try writer.writeIntBig(u32, self.inner.identOffset);
try writer.writeIntBig(u32, self.inner.nSpecialSlots);
try writer.writeIntBig(u32, self.inner.nCodeSlots);
try writer.writeIntBig(u32, self.inner.codeLimit);
try writer.writeByte(self.inner.hashSize);
try writer.writeByte(self.inner.hashType);
try writer.writeByte(self.inner.platform);
try writer.writeByte(self.inner.pageSize);
try writer.writeIntBig(u32, self.inner.spare2);
try writer.writeIntBig(u32, self.inner.scatterOffset);
try writer.writeIntBig(u32, self.inner.teamOffset);
try writer.writeIntBig(u32, self.inner.spare3);
try writer.writeIntBig(u64, self.inner.codeLimit64);
try writer.writeIntBig(u64, self.inner.execSegBase);
try writer.writeIntBig(u64, self.inner.execSegLimit);
try writer.writeIntBig(u64, self.inner.execSegFlags);
try writer.writeAll(self.ident);
try writer.writeByte(0);
var i: isize = @intCast(isize, self.inner.nSpecialSlots);
while (i > 0) : (i -= 1) {
try writer.writeAll(&self.special_slots[@intCast(usize, i - 1)]);
}
for (self.code_slots.items) |slot| {
try writer.writeAll(&slot);
}
}
};
const Requirements = struct {
fn deinit(self: *Requirements, allocator: Allocator) void {
_ = self;
_ = allocator;
}
fn slotType(self: Requirements) u32 {
_ = self;
return macho.CSSLOT_REQUIREMENTS;
}
fn size(self: Requirements) u32 {
_ = self;
return 3 * @sizeOf(u32);
}
fn write(self: Requirements, writer: anytype) !void {
try writer.writeIntBig(u32, macho.CSMAGIC_REQUIREMENTS);
try writer.writeIntBig(u32, self.size());
try writer.writeIntBig(u32, 0);
}
};
const Entitlements = struct {
inner: []const u8,
fn deinit(self: *Entitlements, allocator: Allocator) void {
allocator.free(self.inner);
}
fn slotType(self: Entitlements) u32 {
_ = self;
return macho.CSSLOT_ENTITLEMENTS;
}
fn size(self: Entitlements) u32 {
return @intCast(u32, self.inner.len) + 2 * @sizeOf(u32);
}
fn write(self: Entitlements, writer: anytype) !void {
try writer.writeIntBig(u32, macho.CSMAGIC_EMBEDDED_ENTITLEMENTS);
try writer.writeIntBig(u32, self.size());
try writer.writeAll(self.inner);
}
};
const Signature = struct {
fn deinit(self: *Signature, allocator: Allocator) void {
_ = self;
_ = allocator;
}
fn slotType(self: Signature) u32 {
_ = self;
return macho.CSSLOT_SIGNATURESLOT;
}
fn size(self: Signature) u32 {
_ = self;
return 2 * @sizeOf(u32);
}
fn write(self: Signature, writer: anytype) !void {
try writer.writeIntBig(u32, macho.CSMAGIC_BLOBWRAPPER);
try writer.writeIntBig(u32, self.size());
}
};
page_size: u16,
code_directory: CodeDirectory,
requirements: ?Requirements = null,
entitlements: ?Entitlements = null,
signature: ?Signature = null,
pub fn init(page_size: u16) CodeSignature {
return .{
.page_size = page_size,
.code_directory = CodeDirectory.init(page_size),
};
}
pub fn deinit(self: *CodeSignature, allocator: Allocator) void {
self.code_directory.deinit(allocator);
if (self.requirements) |*req| {
req.deinit(allocator);
}
if (self.entitlements) |*ents| {
ents.deinit(allocator);
}
if (self.signature) |*sig| {
sig.deinit(allocator);
}
}
pub fn addEntitlements(self: *CodeSignature, allocator: Allocator, path: []const u8) !void {
const file = try fs.cwd().openFile(path, .{});
defer file.close();
const inner = try file.readToEndAlloc(allocator, std.math.maxInt(u32));
self.entitlements = .{ .inner = inner };
}
pub const WriteOpts = struct {
file: fs.File,
text_segment: macho.segment_command_64,
code_sig_cmd: macho.linkedit_data_command,
output_mode: std.builtin.OutputMode,
};
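// Layout produced by writeAdhocSignature (derived from the write calls below), all fields
// big-endian: a SuperBlob header {magic, length, count}, then `count` BlobIndex entries
// {slot type, offset from the start of the SuperBlob}, then the blob payloads themselves
// in the same order, starting with the code directory.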
pub fn writeAdhocSignature(
self: *CodeSignature,
allocator: Allocator,
opts: WriteOpts,
writer: anytype,
) !void {
var header: macho.SuperBlob = .{
.magic = macho.CSMAGIC_EMBEDDED_SIGNATURE,
.length = @sizeOf(macho.SuperBlob),
.count = 0,
};
var blobs = std.ArrayList(Blob).init(allocator);
defer blobs.deinit();
self.code_directory.inner.execSegBase = opts.text_segment.fileoff;
self.code_directory.inner.execSegLimit = opts.text_segment.filesize;
self.code_directory.inner.execSegFlags = if (opts.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0;
const file_size = opts.code_sig_cmd.dataoff;
self.code_directory.inner.codeLimit = file_size;
const total_pages = mem.alignForward(file_size, self.page_size) / self.page_size;
var buffer = try allocator.alloc(u8, self.page_size);
defer allocator.free(buffer);
try self.code_directory.code_slots.ensureTotalCapacityPrecise(allocator, total_pages);
// Calculate the hash of each page (of the file) and store it in the code slots
var hash: [hash_size]u8 = undefined;
var i: usize = 0;
while (i < total_pages) : (i += 1) {
const fstart = i * self.page_size;
const fsize = if (fstart + self.page_size > file_size) file_size - fstart else self.page_size;
const len = try opts.file.preadAll(buffer, fstart);
assert(fsize <= len);
Sha256.hash(buffer[0..fsize], &hash, .{});
self.code_directory.code_slots.appendAssumeCapacity(hash);
self.code_directory.inner.nCodeSlots += 1;
}
try blobs.append(.{ .code_directory = &self.code_directory });
header.length += @sizeOf(macho.BlobIndex);
header.count += 1;
if (self.requirements) |*req| {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
try req.write(buf.writer());
Sha256.hash(buf.items, &hash, .{});
self.code_directory.addSpecialHash(req.slotType(), hash);
try blobs.append(.{ .requirements = req });
header.count += 1;
header.length += @sizeOf(macho.BlobIndex) + req.size();
}
if (self.entitlements) |*ents| {
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
try ents.write(buf.writer());
Sha256.hash(buf.items, &hash, .{});
self.code_directory.addSpecialHash(ents.slotType(), hash);
try blobs.append(.{ .entitlements = ents });
header.count += 1;
header.length += @sizeOf(macho.BlobIndex) + ents.size();
}
if (self.signature) |*sig| {
try blobs.append(.{ .signature = sig });
header.count += 1;
header.length += @sizeOf(macho.BlobIndex) + sig.size();
}
self.code_directory.inner.hashOffset =
@sizeOf(macho.CodeDirectory) + @intCast(u32, self.code_directory.ident.len + 1) + self.code_directory.inner.nSpecialSlots * hash_size;
self.code_directory.inner.length = self.code_directory.size();
header.length += self.code_directory.size();
try writer.writeIntBig(u32, header.magic);
try writer.writeIntBig(u32, header.length);
try writer.writeIntBig(u32, header.count);
var offset: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) * @intCast(u32, blobs.items.len);
for (blobs.items) |blob| {
try writer.writeIntBig(u32, blob.slotType());
try writer.writeIntBig(u32, offset);
offset += blob.size();
}
for (blobs.items) |blob| {
try blob.write(writer);
}
}
pub fn size(self: CodeSignature) u32 {
var ssize: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) + self.code_directory.size();
if (self.requirements) |req| {
ssize += @sizeOf(macho.BlobIndex) + req.size();
}
if (self.entitlements) |ent| {
ssize += @sizeOf(macho.BlobIndex) + ent.size();
}
if (self.signature) |sig| {
ssize += @sizeOf(macho.BlobIndex) + sig.size();
}
return ssize;
}
pub fn estimateSize(self: CodeSignature, file_size: u64) u32 {
var ssize: u64 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) + self.code_directory.size();
// Approximate the code slots: one hash per page of the signed file
const total_pages = mem.alignForwardGeneric(u64, file_size, self.page_size) / self.page_size;
ssize += total_pages * hash_size;
var n_special_slots: u32 = 0;
if (self.requirements) |req| {
ssize += @sizeOf(macho.BlobIndex) + req.size();
n_special_slots = std.math.max(n_special_slots, req.slotType());
}
if (self.entitlements) |ent| {
ssize += @sizeOf(macho.BlobIndex) + ent.size() + hash_size;
n_special_slots = std.math.max(n_special_slots, ent.slotType());
}
if (self.signature) |sig| {
ssize += @sizeOf(macho.BlobIndex) + sig.size();
}
ssize += n_special_slots * hash_size;
return @intCast(u32, mem.alignForwardGeneric(u64, ssize, @sizeOf(u64)));
}
pub fn clear(self: *CodeSignature, allocator: Allocator) void {
self.code_directory.deinit(allocator);
self.code_directory = CodeDirectory.init(self.page_size);
} | src/link/MachO/CodeSignature.zig |
const std = @import("std");
const bits = @import("bits.zig");
const int = @import("int.zig");
const libpoke = @import("pokemon/index.zig");
const math = std.math;
const mem = std.mem;
const rand = std.rand;
const debug = std.debug;
const assert = debug.assert;
/// A generic enum for different randomization options.
pub const GenericOption = enum {
Same,
Random,
Best,
};
pub const Options = struct {
pub const Trainer = struct {
pub const Pokemon = enum {
/// Trainers' Pokémon won't be randomized.
Same,
/// Each trainer will be given random Pokémons.
Random,
/// Trainer Pokémon will be replaced with a random one of the same
/// type. For dual typing, the replacement ratio is 80% primary,
/// 20% secondary.
SameType,
/// Each trainer will have a type theme, and will be given random
/// Pokémons from that type.
TypeThemed,
/// All trainers will be given only random legendary Pokémons
Legendaries,
};
pub const HeldItems = enum {
/// Trainer Pokémons will have no held items.
None,
/// Trainer Pokémon held items will not change.
Same,
// TODO: Figure out how to implement these:
// Trainer Pokémons will have random held items.
//Random,
// Trainer Pokémons will have random, but useful, held items.
//RandomUseful,
// Trainer Pokémons will hold the best held items in the game.
//RandomBest,
};
pub const Moves = enum {
/// Trainer Pokémon moves will not change.
Same,
/// If possible, Trainer Pokémon will have random moves.
Random,
/// If possible, Trainer Pokémon will have random moves selected from
/// the pool of moves the Pokémon can already learn.
RandomWithinLearnset,
/// If possible, Trainer Pokémon will be given the most powerful moves
/// they can learn.
Best,
};
/// The way trainer Pokémons should be randomized.
pokemon: Pokemon,
/// Trainer Pokémons will be replaced by ones of similar strength (based
/// on the Pokémon's base stats).
same_total_stats: bool,
/// Which held items each trainer Pokémon will be given.
held_items: HeldItems,
/// Which moves each trainer Pokémon will be given.
moves: Moves,
/// Trainer Pokémon levels are scaled by this multiplier (1.0 keeps them unchanged).
level_modifier: f64,
pub fn default() Trainer {
return Trainer{
.pokemon = Pokemon.Same,
.same_total_stats = false,
.held_items = HeldItems.Same,
.moves = Moves.Same,
.level_modifier = 1.0,
};
}
};
trainer: Trainer,
pub fn default() Options {
return Options{ .trainer = Trainer.default() };
}
};
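// Illustrative only (not part of the original API surface): building an Options value that
// randomizes trainer Pokémons and raises levels by 20%, keeping the remaining defaults.
fn exampleOptions() Options {
var options = Options.default();
options.trainer.pokemon = Options.Trainer.Pokemon.Random;
options.trainer.level_modifier = 1.2;
return options;
}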
pub const Randomizer = struct {
fn hash_type(t: u8) u32 {
return t;
}
fn type_eql(t1: u8, t2: u8) bool {
return t1 == t2;
}
const SpeciesByType = std.HashMap(u8, std.ArrayList(u16), hash_type, type_eql);
game: libpoke.Game,
random: *rand.Random,
allocator: *mem.Allocator,
species_by_type: ?SpeciesByType,
pub fn init(game: libpoke.Game, random: *rand.Random, allocator: *mem.Allocator) Randomizer {
return Randomizer{
.game = game,
.allocator = allocator,
.random = random,
.species_by_type = null,
};
}
pub fn deinit(randomizer: *Randomizer) void {
if (randomizer.species_by_type) |*by_type| {
freeSpeciesByType(by_type);
}
}
pub fn randomize(randomizer: *Randomizer, options: Options) !void {
try randomizer.randomizeTrainers(options.trainer);
}
pub fn randomizeTrainers(randomizer: *Randomizer, options: Options.Trainer) !void {
const game = randomizer.game;
var by_type = try randomizer.speciesByType();
const pokemons = game.pokemons();
const trainers = game.trainers();
var trainer_it = trainers.iterator();
while (trainer_it.nextValid()) |trainer_item| {
const trainer = trainer_item.value;
const party = trainer.party();
var party_it = party.iterator();
while (party_it.next()) |party_item| {
const member = party_item.value;
const member_pokemon = try pokemons.at(member.species());
switch (options.pokemon) {
Options.Trainer.Pokemon.Same => {},
Options.Trainer.Pokemon.Random => {
// TODO: Types probably shouldn't be weighted equally, as
// there is a different number of Pokémons per type.
// TODO: If a Pokémon is dual type, it has a higher chance of
// being chosen. I think?
const pokemon_type = try randomizer.randomType();
const pick_from = (by_type.get(pokemon_type).?).value.toSliceConst();
const new_pokemon = try randomizer.randomTrainerPokemon(member_pokemon, options.same_total_stats, pick_from);
member.setSpecies(new_pokemon);
},
Options.Trainer.Pokemon.SameType => {
const pokemon_type = blk: {
const member_types = member_pokemon.types();
const roll = randomizer.random.float(f32);
break :blk if (roll < 0.80) member_types[0] else member_types[1];
};
const pick_from = (by_type.get(pokemon_type).?).value.toSliceConst();
const new_pokemon = try randomizer.randomTrainerPokemon(member_pokemon, options.same_total_stats, pick_from);
member.setSpecies(new_pokemon);
},
Options.Trainer.Pokemon.TypeThemed => {
const trainer_theme = try randomizer.randomType();
const pick_from = (by_type.get(trainer_theme).?).value.toSliceConst();
const new_pokemon = try randomizer.randomTrainerPokemon(member_pokemon, options.same_total_stats, pick_from);
member.setSpecies(new_pokemon);
},
Options.Trainer.Pokemon.Legendaries => {
const new_pokemon = try randomizer.randomTrainerPokemon(member_pokemon, options.same_total_stats, game.base.version.legendaries());
member.setSpecies(new_pokemon);
},
}
randomizer.randomizeTrainerPokemonHeldItem(member, options.held_items);
try randomizer.randomizeTrainerPokemonMoves(member, options);
const lvl = member.level();
const new_level = blk: {
var res = @intToFloat(f32, lvl) * options.level_modifier;
res = math.min(res, f32(100));
res = math.max(res, f32(1));
break :blk @floatToInt(u8, math.round(res));
};
member.setLevel(new_level);
}
}
}
fn randomTrainerPokemon(randomizer: *Randomizer, curr_pokemon: libpoke.Pokemon, same_total_stats: bool, pick_from: []const u16) !u16 {
const game = randomizer.game;
const pokemons = game.pokemons();
if (same_total_stats) {
var min_total = curr_pokemon.totalStats();
var max_total = min_total;
var matches = std.ArrayList(u16).init(randomizer.allocator);
defer matches.deinit();
// If we don't get 25 matches on the first try, we just loop again. This means matches
// will end up collecting some duplicates. This is fine, as it makes it so that
// Pokémons that are a better match have a higher chance of being picked.
while (matches.len < 25) {
min_total = math.sub(u16, min_total, 5) catch min_total;
max_total = math.add(u16, max_total, 5) catch max_total;
for (pick_from) |species| {
const pokemon = try pokemons.at(species);
const total = pokemon.totalStats();
if (min_total <= total and total <= max_total)
try matches.append(species);
}
}
return matches.toSlice()[randomizer.random.range(usize, 0, matches.len)];
} else {
return pick_from[randomizer.random.range(usize, 0, pick_from.len)];
}
}
fn randomizeTrainerPokemonHeldItem(randomizer: Randomizer, member: libpoke.PartyMember, option: Options.Trainer.HeldItems) void {
const game = randomizer.game;
switch (option) {
Options.Trainer.HeldItems.None => {
member.setItem(0) catch return;
},
Options.Trainer.HeldItems.Same => {},
//Options.Trainer.HeldItems.Random => {
// // TODO:
//},
//Options.Trainer.HeldItems.RandomUseful => {
// // TODO:
//},
//Options.Trainer.HeldItems.RandomBest => {
// // TODO:
//},
}
}
fn randomizeTrainerPokemonMoves(randomizer: *Randomizer, member: libpoke.PartyMember, option: Options.Trainer) !void {
const pokemons = randomizer.game.pokemons();
const member_moves = member.moves() orelse return;
switch (option.moves) {
Options.Trainer.Moves.Same => {
// If trainer Pokémons were randomized, then keeping the same moves makes no sense.
// We therefore reset them to something sensible.
if (option.pokemon != Options.Trainer.Pokemon.Same) {
const MoveLevelPair = struct {
level: u8,
move_id: u16,
};
const new_moves = blk: {
const pokemon = try pokemons.at(member.species());
const level_up_moves = pokemon.levelUpMoves();
var moves = []MoveLevelPair{MoveLevelPair{
.level = 0,
.move_id = 0,
}} ** 4;
var it = level_up_moves.iterator();
while (it.next()) |item| {
const level_up_move = item.value;
for (moves) |*move| {
const move_lvl = level_up_move.level();
const trainer_pkm_level = member.level();
if (move.level < move_lvl and move_lvl < trainer_pkm_level) {
move.level = move_lvl;
move.move_id = level_up_move.moveId();
break;
}
}
}
break :blk moves;
};
debug.assert(member_moves.len() == new_moves.len);
for (new_moves) |new_move, i| {
member_moves.atSet(i, new_move.move_id);
}
}
},
Options.Trainer.Moves.Random => {
var i: usize = 0;
while (i < member_moves.len()) : (i += 1) {
member_moves.atSet(i, try randomizer.randomMoveId());
}
},
Options.Trainer.Moves.RandomWithinLearnset => {
const learned_moves = try randomizer.movesLearned(member.species());
defer randomizer.allocator.free(learned_moves);
var i: usize = 0;
while (i < member_moves.len()) : (i += 1) {
const pick = learned_moves[randomizer.random.range(usize, 0, learned_moves.len)];
member_moves.atSet(i, pick);
}
},
Options.Trainer.Moves.Best => {
const moves = randomizer.game.moves();
const pokemon = try pokemons.at(member.species());
const learned_moves = try randomizer.movesLearned(member.species());
defer randomizer.allocator.free(learned_moves);
{
var i: usize = 0;
while (i < member_moves.len()) : (i += 1)
member_moves.atSet(i, 0);
}
for (learned_moves) |learned| {
const learned_move = try moves.at(learned);
const p_t1 = pokemon.types()[0];
const p_t2 = pokemon.types()[1];
const l_t = learned_move.types()[0];
// TODO: Rewrite to work with Pokémons that can have N types
const learned_stab = if (l_t == p_t1 or l_t == p_t2) f32(1.5) else f32(1.0);
const learned_power = @intToFloat(f32, learned_move.power().*) * learned_stab;
var i: usize = 0;
while (i < member_moves.len()) : (i += 1) {
const move_id = member_moves.at(i);
const move = try moves.at(move_id);
const m_t = move.types()[0];
// TODO: Rewrite to work with Pokémons that can have N types
const move_stab = if (m_t == p_t1 or m_t == p_t2) f32(1.5) else f32(1.0);
const move_power = @intToFloat(f32, move.power().*) * move_stab;
// TODO: We probably also want Pokémons to have varied types
// of moves, so it has good coverage.
// TODO: We probably want to pick attack vs sp_attack moves
// depending on the Pokémons stats.
if (move_power < learned_power) {
member_moves.atSet(i, learned);
break;
}
}
}
},
}
}
fn randomType(randomizer: *Randomizer) !u8 {
const species_by_type = try randomizer.speciesByType();
const choice = randomizer.random.range(usize, 0, species_by_type.size);
var it = species_by_type.iterator();
var i: usize = 0;
while (i < species_by_type.size) : (i += 1) {
var n = it.next().?;
if (i == choice)
return n.key;
}
unreachable;
}
fn randomMoveId(randomizer: *Randomizer) !u16 {
const game = randomizer.game;
const moves = game.moves();
while (true) {
const move_id = randomizer.random.range(u16, 0, @intCast(u16, moves.len()));
const move = try moves.at(move_id);
// A move with 0 pp is useless, so we will assume it's a dummy move.
if (move.pp().* == 0) continue;
return move_id;
}
}
/// Caller owns memory returned
fn movesLearned(randomizer: Randomizer, species: u16) ![]u16 {
const game = randomizer.game;
const pokemons = game.pokemons();
const pokemon = try pokemons.at(species);
const levelup_learnset = pokemon.levelUpMoves();
const tm_learnset = pokemon.tmLearnset();
const hm_learnset = pokemon.hmLearnset();
const tms = game.tms();
const hms = game.hms();
var res = std.ArrayList(u16).init(randomizer.allocator);
try res.ensureCapacity(levelup_learnset.len());
var lvl_it = levelup_learnset.iterator();
while (lvl_it.next()) |item| {
// HACK: TODO: Remove this when https://github.com/zig-lang/zig/issues/649 is a thing
try res.append(item.value.moveId());
}
var tm_learnset_it = tm_learnset.iterator();
while (tm_learnset_it.next()) |item| {
if (item.value) {
const move_id = tms.at(item.index);
try res.append(move_id);
}
}
var hm_learnset_it = hm_learnset.iterator();
while (hm_learnset_it.next()) |item| {
if (item.value) {
const move_id = hms.at(item.index);
try res.append(move_id);
}
}
return res.toOwnedSlice();
}
fn speciesByType(randomizer: *Randomizer) !*SpeciesByType {
if (randomizer.species_by_type) |*species_by_type|
return species_by_type;
var species_by_type = SpeciesByType.init(randomizer.allocator);
errdefer freeSpeciesByType(&species_by_type);
const game = randomizer.game;
const pokemons = game.pokemons();
var it = pokemons.iterator();
while (it.nextValid()) |item| {
const pokemon = item.value;
const species = item.index;
// Assume that Pokémons with 0 hp are dummy Pokémon
if (pokemon.hp().* == 0)
continue;
for (pokemon.types().*) |t| {
const entry = species_by_type.get(t) orelse blk: {
_ = try species_by_type.put(t, std.ArrayList(u16).init(randomizer.allocator));
break :blk species_by_type.get(t).?;
};
try entry.value.append(@intCast(u16, species));
}
}
randomizer.species_by_type = species_by_type;
return &randomizer.species_by_type.?;
}
fn freeSpeciesByType(by_type: *SpeciesByType) void {
var it = by_type.iterator();
while (it.next()) |entry|
entry.value.deinit();
by_type.deinit();
}
}; | src/randomizer.zig |
pub const NEUTRAL = 0x00;
pub const INVARIANT = 0x7f;
pub const AFRIKAANS = 0x36;
pub const ALBANIAN = 0x1c;
pub const ALSATIAN = 0x84;
pub const AMHARIC = 0x5e;
pub const ARABIC = 0x01;
pub const ARMENIAN = 0x2b;
pub const ASSAMESE = 0x4d;
pub const AZERI = 0x2c;
pub const AZERBAIJANI = 0x2c;
pub const BANGLA = 0x45;
pub const BASHKIR = 0x6d;
pub const BASQUE = 0x2d;
pub const BELARUSIAN = 0x23;
pub const BENGALI = 0x45;
pub const BRETON = 0x7e;
pub const BOSNIAN = 0x1a;
pub const BOSNIAN_NEUTRAL = 0x781a;
pub const BULGARIAN = 0x02;
pub const CATALAN = 0x03;
pub const CENTRAL_KURDISH = 0x92;
pub const CHEROKEE = 0x5c;
pub const CHINESE = 0x04;
pub const CHINESE_SIMPLIFIED = 0x04;
pub const CHINESE_TRADITIONAL = 0x7c04;
pub const CORSICAN = 0x83;
pub const CROATIAN = 0x1a;
pub const CZECH = 0x05;
pub const DANISH = 0x06;
pub const DARI = 0x8c;
pub const DIVEHI = 0x65;
pub const DUTCH = 0x13;
pub const ENGLISH = 0x09;
pub const ESTONIAN = 0x25;
pub const FAEROESE = 0x38;
pub const FARSI = 0x29;
pub const FILIPINO = 0x64;
pub const FINNISH = 0x0b;
pub const FRENCH = 0x0c;
pub const FRISIAN = 0x62;
pub const FULAH = 0x67;
pub const GALICIAN = 0x56;
pub const GEORGIAN = 0x37;
pub const GERMAN = 0x07;
pub const GREEK = 0x08;
pub const GREENLANDIC = 0x6f;
pub const GUJARATI = 0x47;
pub const HAUSA = 0x68;
pub const HAWAIIAN = 0x75;
pub const HEBREW = 0x0d;
pub const HINDI = 0x39;
pub const HUNGARIAN = 0x0e;
pub const ICELANDIC = 0x0f;
pub const IGBO = 0x70;
pub const INDONESIAN = 0x21;
pub const INUKTITUT = 0x5d;
pub const IRISH = 0x3c;
pub const ITALIAN = 0x10;
pub const JAPANESE = 0x11;
pub const KANNADA = 0x4b;
pub const KASHMIRI = 0x60;
pub const KAZAK = 0x3f;
pub const KHMER = 0x53;
pub const KICHE = 0x86;
pub const KINYARWANDA = 0x87;
pub const KONKANI = 0x57;
pub const KOREAN = 0x12;
pub const KYRGYZ = 0x40;
pub const LAO = 0x54;
pub const LATVIAN = 0x26;
pub const LITHUANIAN = 0x27;
pub const LOWER_SORBIAN = 0x2e;
pub const LUXEMBOURGISH = 0x6e;
pub const MACEDONIAN = 0x2f;
pub const MALAY = 0x3e;
pub const MALAYALAM = 0x4c;
pub const MALTESE = 0x3a;
pub const MANIPURI = 0x58;
pub const MAORI = 0x81;
pub const MAPUDUNGUN = 0x7a;
pub const MARATHI = 0x4e;
pub const MOHAWK = 0x7c;
pub const MONGOLIAN = 0x50;
pub const NEPALI = 0x61;
pub const NORWEGIAN = 0x14;
pub const OCCITAN = 0x82;
pub const ODIA = 0x48;
pub const ORIYA = 0x48;
pub const PASHTO = 0x63;
pub const PERSIAN = 0x29;
pub const POLISH = 0x15;
pub const PORTUGUESE = 0x16;
pub const PULAR = 0x67;
pub const PUNJABI = 0x46;
pub const QUECHUA = 0x6b;
pub const ROMANIAN = 0x18;
pub const ROMANSH = 0x17;
pub const RUSSIAN = 0x19;
pub const SAKHA = 0x85;
pub const SAMI = 0x3b;
pub const SANSKRIT = 0x4f;
pub const SCOTTISH_GAELIC = 0x91;
pub const SERBIAN = 0x1a;
pub const SERBIAN_NEUTRAL = 0x7c1a;
pub const SINDHI = 0x59;
pub const SINHALESE = 0x5b;
pub const SLOVAK = 0x1b;
pub const SLOVENIAN = 0x24;
pub const SOTHO = 0x6c;
pub const SPANISH = 0x0a;
pub const SWAHILI = 0x41;
pub const SWEDISH = 0x1d;
pub const SYRIAC = 0x5a;
pub const TAJIK = 0x28;
pub const TAMAZIGHT = 0x5f;
pub const TAMIL = 0x49;
pub const TATAR = 0x44;
pub const TELUGU = 0x4a;
pub const THAI = 0x1e;
pub const TIBETAN = 0x51;
pub const TIGRIGNA = 0x73;
pub const TIGRINYA = 0x73;
pub const TSWANA = 0x32;
pub const TURKISH = 0x1f;
pub const TURKMEN = 0x42;
pub const UIGHUR = 0x80;
pub const UKRAINIAN = 0x22;
pub const UPPER_SORBIAN = 0x2e;
pub const URDU = 0x20;
pub const UZBEK = 0x43;
pub const VALENCIAN = 0x03;
pub const VIETNAMESE = 0x2a;
pub const WELSH = 0x52;
pub const WOLOF = 0x88;
pub const XHOSA = 0x34;
pub const YAKUT = 0x85;
pub const YI = 0x78;
pub const YORUBA = 0x6a;
pub const ZULU = 0x35; | lib/std/os/windows/lang.zig |
const std = @import("std");
const cpu = @import("cpu.zig");
const c = @cImport({
@cInclude("SDL2/SDL.h");
});
const Allocator = std.mem.Allocator;
pub const log_level = .info;
var screen: [0x4000]u8 = undefined;
var running: bool = true;
fn readSlice(userdata: usize, addr: usize) u8 {
const slice = @intToPtr(*[]u8, userdata);
//std.debug.warn("read from 0x{x}\n", .{addr});
return slice.*[addr];
}
const GpuMode = enum {
/// 80x25
Text,
Graphics
};
/// A GPU operation is staged at 0xBD800 and executed (flushed) by writing to 0xBD7FF.
const GpuOp = union(enum) {
SetMode: union(GpuMode) {
Text,
Graphics: packed struct { width: u32, height: u32 }
},
Fill: packed struct {
x: u16, y: u16,
w: u16, h: u16,
rgb: u32
}
};
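// A minimal sketch (not in the original emulator) of how a Fill command flows through the
// MMIO protocol documented above: the op bytes are staged at 0xBD800 and then any write to
// 0xBD7FF executes it. It uses only this file's writeSlice helper; `ram` mirrors the slice
// handed to defaultMemory, and SDL must already be initialised before the flush.
fn submitGpuOpExample(ram: *[]u8, op: GpuOp) void {
const bytes = std.mem.asBytes(&op);
for (bytes) |b, i| {
writeSlice(@ptrToInt(ram), 0xBD800 + i, b);
}
writeSlice(@ptrToInt(ram), 0xBD7FF, 0); // flush: the value is ignored, the write triggers execution
}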
var currentMode = GpuMode.Text;
var gSurface: *c.SDL_Surface = undefined;
var gTexture: *c.SDL_Texture = undefined;
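// Guest-visible memory map implemented by writeSlice and main (as read from the code below):
// 0xB8000..0xBC000 text-mode screen (80x25, one byte per character cell),
// 0xBD7FF GPU flush register (any write executes the staged op),
// 0xBD800.. staged GpuOp payload,
// 0xD0000/0xD0002 mouse x/y, written by the host as little-endian u16.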
fn writeSlice(userdata: usize, addr: usize, value: u8) void {
var slice = @intToPtr(*[]u8, userdata);
if (addr == 0xBD7FF) {
const op = @ptrCast(*GpuOp, slice.*[0xBD800..].ptr);
switch (op.*) {
.SetMode => |sm| {
switch (sm) {
.Text => |t| {
std.log.info("set text to 80x25", .{});
_ = c.SDL_RenderClear(renderer);
},
.Graphics => |g| {
std.log.info("set graphics to {}x{}", .{g.width, g.height});
c.SDL_SetWindowSize(window, @intCast(c_int, g.width), @intCast(c_int, g.height));
const vp = c.SDL_Rect {
.x = 0, .y = 0,
.w = @intCast(c_int, g.width), .h = @intCast(c_int, g.height)
};
gTexture = c.SDL_CreateTexture(renderer, c.SDL_PIXELFORMAT_RGB24, c.SDL_TEXTUREACCESS_TARGET,
@intCast(c_int, g.width), @intCast(c_int, g.height)) orelse unreachable;
}
}
},
.Fill => |cmd| {
//std.log.info("fill {}x{} rectangle at {}x{} with color {x}", .{cmd.w, cmd.h, cmd.x, cmd.y, cmd.rgb});
const rect = c.SDL_Rect {
.x = cmd.x, .y = cmd.y,
.w = cmd.w, .h = cmd.h
};
_ = c.SDL_RenderClear(renderer);
_ = c.SDL_SetRenderTarget(renderer, gTexture);
_ = c.SDL_SetRenderDrawColor(renderer, @truncate(u8, cmd.rgb >> 16),
@truncate(u8, cmd.rgb >> 8), @truncate(u8, cmd.rgb), 0xFF);
_ = c.SDL_RenderFillRect(renderer, &rect);
_ = c.SDL_SetRenderTarget(renderer, null);
_ = c.SDL_RenderCopy(renderer, gTexture, null, null);
c.SDL_RenderPresent(renderer);
}
}
} else if (addr >= 0xB8000 and addr < 0xB8000+0x4000) {
screen[addr - 0xB8000] = value;
if (currentMode == .Text) {
_ = c.SDL_RenderClear(renderer);
var x: usize = 0;
while (x < 80) : (x += 1) {
var y: usize = 0;
while (y < 25) : (y += 1) {
const pos = x+y*80;
font.drawChar(x*8, y*16, screen[pos]);
}
}
c.SDL_RenderPresent(renderer);
}
} else {
slice.*[addr] = value;
}
}
pub fn defaultMemory(ram: *[]u8) cpu.Memory {
return .{
.readFn = readSlice,
.writeFn = writeSlice,
.userData = @ptrToInt(ram)
};
}
fn ecall(hart: *cpu.Hart) void {
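// RISC-V Linux-style syscall convention: a7 (x17) holds the syscall number,
// a0-a2 (x10-x12) hold the arguments, and the result is returned in a0 (x10).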
const number = hart.x[17];
switch (number) {
64 => { // write
const fd = hart.x[10];
const ptr = hart.x[11];
const count = hart.x[12];
std.log.scoped(.syscall).debug("write({}, ..., {})", .{fd, count});
if (fd == 1) { // == STDOUT_FILENO
var buf: [256]u8 = undefined;
var i: usize = 0;
while (i < count) : (i += 1) {
buf[i] = hart.memory.read(ptr + i);
}
const ret = std.io.getStdOut().write(buf[0..count]) catch unreachable; // TODO: handle error
hart.x[10] = @truncate(u32, ret); // it is impossible to have written more than count which is 32 bits.
}
},
else => std.log.crit("Unknown syscall: {}", .{number})
}
}
fn usage() !void {
var stderr = std.io.getStdErr().writer();
try stderr.writeAll("Usage: zerorisc {help | [file]}\n");
std.process.exit(1);
}
fn hartMain(hart: *cpu.Hart) !void {
while (running) {
hart.cycle();
std.time.sleep(1000000);
}
}
var window: *c.SDL_Window = undefined;
var renderer: *c.SDL_Renderer = undefined;
var font: Font = undefined;
pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}) {};
defer _ = gpa.deinit();
const allocator = &gpa.allocator;
const args = try std.process.argsAlloc(allocator);
defer std.process.argsFree(allocator, args);
if (args.len < 2) {
try usage();
}
const filePath = args[1];
if (std.mem.eql(u8, filePath, "help")) {
try std.io.getStdOut().writer().writeAll(
\\Zero RISC
\\
\\IO ports:
\\ 0x100: stdout
\\
\\Usage:
\\ zerorisc { help | [file] }
\\ Example: zerorisc tests/hello
\\
);
return;
}
var file = std.fs.cwd().openFile(filePath, .{ .read = true }) catch |err| {
std.debug.warn("Could not open '{s}', got {s} error.\n", .{filePath, @errorName(err)});
std.process.exit(1);
};
var useSDL: bool = false;
defer file.close();
// Allocate memory
var ram = try allocator.alignedAlloc(u8, 16, 64 * 128 * 1024); // big alignment for better performance
defer allocator.free(ram);
var memory = defaultMemory(&ram);
// Read the ELF file
var header = try std.elf.readHeader(file);
const entry = header.entry;
var pIterator = header.program_header_iterator(file);
while (try pIterator.next()) |phdr| {
if (phdr.p_type == std.elf.PT_LOAD) {
try file.seekTo(phdr.p_offset);
var data = try allocator.alloc(u8, phdr.p_filesz);
_ = try file.readAll(data);
for (data) |val, i| {
ram[phdr.p_vaddr + i] = val;
}
allocator.free(data);
}
}
// Initialise the RISC-V hart
var hart = cpu.Hart.init(memory, @intCast(u32, header.entry), 0);
hart.ecallFn = ecall;
hart.x[2] = 0x512; // set sp
_ = c.SDL_Init(c.SDL_INIT_VIDEO);
defer c.SDL_Quit();
window = c.SDL_CreateWindow("Zero RISC", c.SDL_WINDOWPOS_CENTERED, c.SDL_WINDOWPOS_CENTERED, 8*80, 16*25, 0).?;
defer c.SDL_DestroyWindow(window);
renderer = c.SDL_CreateRenderer(window, -1, 0).?;
defer c.SDL_DestroyRenderer(renderer);
const fontFile = try std.fs.cwd().openFile("unifont.hex", .{ .read = true });
font = try loadHexFont(allocator, fontFile);
fontFile.close();
var thread = try std.Thread.spawn(&hart, hartMain);
while (true) {
var event: c.SDL_Event = undefined;
if (c.SDL_PollEvent(&event) != 0) {
if (event.type == c.SDL_QUIT) {
break;
} else if (event.type == c.SDL_MOUSEMOTION) {
const mouse = event.motion;
memory.writeIntLittle(u16, 0xD0000, @intCast(u16, mouse.x));
memory.writeIntLittle(u16, 0xD0002, @intCast(u16, mouse.y));
}
}
//c.SDL_RenderPresent(renderer);
}
running = false;
thread.wait();
// Print the registers (x0-x31) as debug.
var i: usize = 0;
while (i < 32) : (i += 1) {
var str = [1]u8 {@intCast(u8, hart.x[i] & 0xFF)};
std.log.debug("x{} = 0x{x} = {s}", .{i, hart.x[i], str});
}
}
const Font = struct {
texture: *c.SDL_Texture,
renderer: *c.SDL_Renderer,
total: c_int,
data: []u8,
fn drawChar(self: *const Font, x: usize, y: usize, codePoint: u16) void {
const width = std.math.sqrt(self.total) * 8;
const src = c.SDL_Rect {
.x = (codePoint*8) % width, .y = ((codePoint * 8) / width) * 16,
.w = 8, .h = 16
};
const dst = c.SDL_Rect {
.x = @intCast(c_int, x), .y = @intCast(c_int, y),
.w = 8, .h = 16
};
_ = c.SDL_RenderCopy(self.renderer, self.texture, &src, &dst);
}
};
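// loadHexFont parses the GNU Unifont .hex format: each line is "<hex code point>:<hex bitmap>",
// where the bitmap is 32 hex digits, i.e. 16 bytes, one byte per row of an 8x16 glyph with
// the most significant bit as the leftmost pixel (matching the bit extraction below).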
fn loadHexFont(allocator: *Allocator, file: std.fs.File) !Font {
var reader = file.reader();
var total: c_int = 0;
var totalBitmap: []u8 = try allocator.alloc(u8, 0);
while (true) {
const line = (try reader.readUntilDelimiterOrEofAlloc(allocator, '\n', std.math.maxInt(usize))) orelse break;
defer allocator.free(line);
if (line.len == 0) break;
total += 1;
var split = std.mem.split(line, ":");
const codePoint = try std.fmt.parseUnsigned(u16, split.next().?, 16);
const bitmap = split.next().?;
var charPixels: [16]u8 = undefined;
for (charPixels) |*pixel, i| {
const digits = bitmap[(i*2)..(i*2+2)];
pixel.* = try std.fmt.parseUnsigned(u8, digits, 16);
}
totalBitmap = try allocator.realloc(totalBitmap, totalBitmap.len + 16);
@memcpy(totalBitmap[totalBitmap.len-16..].ptr, &charPixels, charPixels.len);
}
const width: usize = std.math.sqrt(total) * 8;
const height: usize = std.math.sqrt(total) * 16;
var pixels: []u8 = try allocator.alloc(u8, width * height * 3);
var i: usize = 0;
var ix: usize = 0;
var iy: usize = 0;
while (i < total) : (i += 1) {
var y: usize = 0;
while (y < 16) : (y += 1) {
const row = totalBitmap[i*16+y];
var bitShift: usize = 0;
while (bitShift < 8) : (bitShift += 1) {
const bit = ((row << @intCast(u3, bitShift)) & 0x80) >> 7;
const pos = (ix+bitShift)*3 + (iy+y)*width*3;
pixels[pos] = bit * 0xFF;
pixels[pos+1] = bit * 0xFF;
pixels[pos+2] = bit * 0xFF;
}
}
ix += 8;
if (ix >= width) {
ix = 0;
iy += 16;
}
}
allocator.free(totalBitmap);
var surface = c.SDL_CreateRGBSurfaceWithFormatFrom(pixels.ptr, @intCast(c_int, width), @intCast(c_int, height), 24,
@intCast(c_int, width*3), c.SDL_PIXELFORMAT_RGB24).?;
var texture = c.SDL_CreateTextureFromSurface(renderer, surface).?;
c.SDL_FreeSurface(surface);
// Note: `pixels` is deliberately not freed here; the returned Font keeps a
// reference to it in `.data`, and freeing it would leave that slice dangling.
return Font {
.texture = texture,
.renderer = renderer,
.data = pixels,
.total = total
};
} | src/main.zig |
const std = @import("std");
const Allocator = mem.Allocator;
const ArrayList = std.ArrayList;
const Token = std.zig.Token;
const ast = std.zig.ast;
const json = std.json;
const mem = std.mem;
const parse = std.zig.parse;
const testing = std.testing;
const warn = std.debug.warn;
pub const Span = struct {
start: Token,
end: Token,
pub fn encode(self: Span, a: *Allocator) !json.Value {
var m = json.ObjectMap.init(a);
_ = try m.put("startToken", try encodeToken(self.start, a));
_ = try m.put("endToken", try encodeToken(self.end, a));
return json.Value{ .Object = m };
}
pub fn encodeToken(self: Token, a: *Allocator) !json.Value {
var m = json.ObjectMap.init(a);
_ = try m.put("start", json.Value{
.Integer = @intCast(i64, self.start),
});
_ = try m.put("end", json.Value{
.Integer = @intCast(i64, self.end),
});
return json.Value{ .Object = m };
}
};
/// Stores information about a symbol in a zig source file. This covers
/// declarations at the top level scope of the source file. Since a source file
/// is just a struct container, this can therefore represent top level members of
/// the main struct container(or file in this case).
pub const Declaration = struct {
/// The string representation of the declared symbol.This is the identifier
/// name. For instance
/// ```
/// const name="gernest";
/// ```
/// Here label will be "name"
label: []const u8,
/// Describes the kind of the declaration.
typ: Type,
/// The position of the first token of this declaration.
start: usize,
/// The position of the last token of this declaration.
end: usize,
/// True when the declaration is exported. This means the declaration begins
/// with the keyword pub.
is_public: bool,
/// True if the declaration starts with var and false when the declaration
/// starts with const.
is_mutable: bool,
/// The actual node in the ast for this declaration.
node: *ast.Node,
/// The symbol's documentation.
zig_doc: ?Span,
/// For container nodes this is the collection of symbols declared within
/// the container. Containers can be struct,enum or union.
children: ArrayList(*Declaration),
pub const List = ArrayList(*Declaration);
pub const Iterator = struct {
at: usize,
ls: []*Declaration,
pub fn init(ls: *List) Iterator {
return Iterator{ .at = 0, .ls = ls.toSlice() };
}
pub fn next(self: *Iterator) ?*Declaration {
if (self.at >= self.ls.len) return null;
var d = self.ls[self.at];
self.at += 1;
return d;
}
pub fn peek(self: *Iterator) ?*Declaration {
if (self.at >= self.ls.len) return null;
var d = self.ls[self.at];
return d;
}
};
pub const Type = enum {
Field,
Import,
TopAssign, //like import but just struct assignment
Const,
Var,
Struct,
Method,
Enum,
Union,
Fn,
Test,
fn encode(self: Type, a: *Allocator) !json.Value {
return json.Value{
.String = switch (self) {
.Import => "import",
.TopAssign => "topAssign",
.Const => "const",
.Var => "var",
.Struct => "struct",
.Field => "field",
.Method => "method",
.Enum => "enum",
.Union => "union",
.Fn => "function",
.Test => "test",
else => return error.UnknownType,
},
};
}
fn fromString(container_kind: []const u8) ?Type {
if (mem.eql(u8, container_kind, "struct")) {
return Declaration.Type.Struct;
} else if (mem.eql(u8, container_kind, "enum")) {
return Declaration.Type.Enum;
} else if (mem.eql(u8, container_kind, "union")) {
return Declaration.Type.Union;
} else {
return null;
}
}
fn mutable(mut: []const u8) Type {
if (mem.eql(u8, mut, "const")) {
return Declaration.Type.Const;
}
if (mem.eql(u8, mut, "var")) {
return Declaration.Type.Var;
}
unreachable;
}
};
pub fn less(self: *Declaration, b: *Declaration) bool {
if (@enumToInt(self.typ) <= @enumToInt(Type.TopAssign)) {
if (self.typ == b.typ) {
return mem.compare(u8, self.label, b.label) == .LessThan;
}
return @enumToInt(self.typ) < @enumToInt(b.typ);
}
return false;
}
pub fn lessStruct(self: *Declaration, b: *Declaration) bool {
if (self.typ == b.typ) {
return false;
}
return @enumToInt(self.typ) < @enumToInt(b.typ);
}
pub fn sortList(ls: *const List) void {
std.sort.sort(*Declaration, ls.toSlice(), less);
}
pub fn sortListStruct(ls: *const List) void {
std.sort.sort(*Declaration, ls.toSlice(), lessStruct);
}
fn encode(self: *Declaration, a: *Allocator) anyerror!json.Value {
var m = json.ObjectMap.init(a);
_ = try m.put("label", json.Value{
.String = if (self.label.len == 0) "-" else self.label,
});
if (self.typ == .TopAssign) {
if (self.is_mutable) {
_ = try m.put("type", json.Value{ .String = "var" });
} else {
_ = try m.put("type", json.Value{ .String = "const" });
}
} else {
_ = try m.put("type", try self.typ.encode(a));
}
_ = try m.put("start", json.Value{
.Integer = @intCast(i64, self.start),
});
_ = try m.put("end", json.Value{
.Integer = @intCast(i64, self.end),
});
_ = try m.put("isPublic", json.Value{
.Bool = self.is_public,
});
if (self.zig_doc) |doc| {
_ = try m.put("zigDoc", try doc.encode(a));
}
if (self.children.len > 0) {
var children_list = std.ArrayList(json.Value).init(a);
for (self.children.toSlice()) |child| {
try children_list.append(try child.encode(a));
}
_ = try m.put("children", json.Value{ .Array = children_list });
}
return json.Value{ .Object = m };
}
};
/// outlineDecls collects top level declarations and returns them as a list. The
/// order is as they appear in the source file.
pub fn outlineDecls(a: *Allocator, tree: *ast.Tree) anyerror!Declaration.List {
var ls = Declaration.List.init(a);
var it = tree.root_node.decls.iterator(0);
while (true) {
var decl = (it.next() orelse break).*;
try collect(tree, &ls, decl);
}
return ls;
}
pub fn outlineFromDeclList(
a: *Allocator,
tree: *ast.Tree,
list: *ast.Node.Root.DeclList,
) anyerror!Declaration.List {
var ls = Declaration.List.init(a);
var it = list.iterator(0);
while (true) {
var decl = (it.next() orelse break).*;
try collect(tree, &ls, decl);
}
return ls;
}
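// A minimal usage sketch, not part of the original module: outline an already-parsed tree
// and count the exported declarations. It only uses APIs declared in this file; the
// Declaration pointers are deliberately not freed here to keep the example short.
fn countPublicDecls(a: *Allocator, tree: *ast.Tree) !usize {
var decls = try outlineDecls(a, tree);
defer decls.deinit();
var it = Declaration.Iterator.init(&decls);
var n: usize = 0;
while (it.next()) |d| {
if (d.is_public) n += 1;
}
return n;
}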
/// Returns the span of a symbol's zig documentation, if any.
fn getDoc(tree: *ast.Tree, doc: ?*ast.Node.DocComment) ?Span {
if (doc == null) {
return null;
}
const first = tree.tokens.at(doc.?.firstToken());
const last = tree.tokens.at(doc.?.lastToken());
return Span{ .start = first.*, .end = last.* };
}
fn collect(
tree: *ast.Tree,
ls: *Declaration.List,
decl: *ast.Node,
) !void {
const first_token_ndex = decl.firstToken();
const last_token_index = decl.lastToken();
const first_token = tree.tokens.at(first_token_ndex);
const last_token = tree.tokens.at(last_token_index);
// decl.dump(0);
switch (decl.id) {
.VarDecl => {
try collectVarDecl(tree, ls, decl);
},
.ContainerField => {
try collectContainerDecl(tree, ls, decl);
},
.TestDecl => {
const test_decl = @fieldParentPtr(ast.Node.TestDecl, "base", decl);
const name_decl = @fieldParentPtr(ast.Node.StringLiteral, "base", test_decl.name);
const test_name = tree.tokenSlice(name_decl.token);
var decl_ptr = try ls.allocator.create(Declaration);
decl_ptr.* = Declaration{
.start = first_token.start,
.end = last_token.end,
.typ = Declaration.Type.Test,
.label = unquote(test_name),
.node = decl,
.zig_doc = getDoc(tree, test_decl.doc_comments),
.is_public = false,
.is_mutable = false,
.children = Declaration.List.init(ls.allocator),
};
try ls.append(decl_ptr);
},
.FnProto => {
try collectFnProto(tree, ls, decl);
},
else => {},
}
}
fn collectFnProto(
tree: *ast.Tree,
ls: *Declaration.List,
decl: *ast.Node,
) !void {
const first_token_ndex = decl.firstToken();
const last_token_index = decl.lastToken();
const first_token = tree.tokens.at(first_token_ndex);
const last_token = tree.tokens.at(last_token_index);
const fn_decl = @fieldParentPtr(ast.Node.FnProto, "base", decl);
if (fn_decl.name_token) |idx| {
const fn_name = tree.tokenSlice(idx);
switch (fn_decl.return_type) {
.Explicit => |n| {
switch (n.id) {
.Identifier => {
// Functions that return a type.
//
// Lots of generic functions are defined this way, and
// the function name is used to represent the returned
// type.
//
// We outline the body of the returned type like any
// other container: enums, structs or unions.
const ident = @fieldParentPtr(ast.Node.Identifier, "base", n);
const txt = tree.tokenSlice(ident.token);
if (mem.eql(u8, txt, "type")) {
if (fn_decl.body_node) |body| {
const block = @fieldParentPtr(ast.Node.Block, "base", body);
if (block.statements.count() > 0) {
const last = block.statements.count() - 1;
var ln = block.statements.at(last).*;
const cf = @fieldParentPtr(ast.Node.ControlFlowExpression, "base", ln);
switch (cf.kind) {
.Return => {
if (cf.rhs) |r| {
if (r.id == .ContainerDecl) {
const container_decl = @fieldParentPtr(ast.Node.ContainerDecl, "base", r);
const container_kind = tree.tokenSlice(container_decl.kind_token);
const typ = Declaration.Type.fromString(container_kind);
if (typ) |kind| {
var decl_ptr = try ls.allocator.create(Declaration);
decl_ptr.* = Declaration{
.start = first_token.start,
.end = last_token.end,
.typ = kind,
.label = fn_name,
.node = decl,
.zig_doc = getDoc(tree, fn_decl.doc_comments),
.is_public = fn_decl.visib_token != null,
.is_mutable = false,
.children = Declaration.List.init(ls.allocator),
};
var it = container_decl.fields_and_decls.iterator(0);
while (true) {
var field = (it.next() orelse break).*;
const field_first_token_ndex = field.firstToken();
const field_last_token_index = field.lastToken();
const field_first_token = tree.tokens.at(field_first_token_ndex);
const field_last_token = tree.tokens.at(field_last_token_index);
switch (field.id) {
.ContainerField => {
const field_decl = @fieldParentPtr(ast.Node.ContainerField, "base", field);
const field_name = tree.tokenSlice(field_decl.name_token);
var field_ptr = try ls.allocator.create(Declaration);
field_ptr.* = Declaration{
.start = field_first_token.start,
.end = field_last_token.end,
.typ = Declaration.Type.Field,
.label = field_name,
.node = field,
.zig_doc = getDoc(tree, field_decl.doc_comments),
.is_public = field_decl.visib_token != null,
.is_mutable = false,
.children = Declaration.List.init(ls.allocator),
};
try decl_ptr.children.append(field_ptr);
},
.FnProto => {
const ret_fn_decl = @fieldParentPtr(ast.Node.FnProto, "base", field);
if (ret_fn_decl.name_token) |ret_idx| {
const ret_fn_name = tree.tokenSlice(ret_idx);
var fn_decl_ptr = try ls.allocator.create(Declaration);
fn_decl_ptr.* = Declaration{
.start = field_first_token.start,
.end = field_last_token.end,
.typ = Declaration.Type.Fn,
.label = ret_fn_name,
.node = field,
.zig_doc = getDoc(tree, ret_fn_decl.doc_comments),
.is_public = ret_fn_decl.visib_token != null,
.is_mutable = false,
.children = Declaration.List.init(ls.allocator),
};
try decl_ptr.children.append(fn_decl_ptr);
}
},
.VarDecl => {
const field_decl = @fieldParentPtr(ast.Node.VarDecl, "base", field);
const field_name = tree.tokenSlice(field_decl.name_token);
const f_mut = tree.tokenSlice(field_decl.mut_token);
const f_is_mutable = mem.eql(u8, f_mut, "var");
var field_ptr = try ls.allocator.create(Declaration);
field_ptr.* = Declaration{
.start = field_first_token.start,
.end = field_last_token.end,
.typ = Declaration.Type.mutable(
tree.tokenSlice(field_decl.mut_token),
),
.label = field_name,
.node = field,
.zig_doc = getDoc(tree, field_decl.doc_comments),
.is_public = field_decl.visib_token != null,
.is_mutable = f_is_mutable,
.children = Declaration.List.init(ls.allocator),
};
try decl_ptr.children.append(field_ptr);
},
else => {
field.dump(0);
},
}
}
try ls.append(decl_ptr);
}
return;
}
}
},
else => {},
}
}
}
}
},
else => {},
}
},
else => {},
}
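        // Fall through: outline the function itself as a plain `fn` declaration.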
var decl_ptr = try ls.allocator.create(Declaration);
decl_ptr.* = Declaration{
.start = first_token.start,
.end = last_token.end,
.typ = .Fn,
.label = fn_name,
.node = decl,
.zig_doc = getDoc(tree, fn_decl.doc_comments),
.is_public = fn_decl.visib_token != null,
.is_mutable = false,
.children = Declaration.List.init(ls.allocator),
};
try ls.append(decl_ptr);
}
}
fn collectVarDecl(
tree: *ast.Tree,
ls: *Declaration.List,
decl: *ast.Node,
) !void {
const first_token_ndex = decl.firstToken();
const last_token_index = decl.lastToken();
const first_token = tree.tokens.at(first_token_ndex);
const last_token = tree.tokens.at(last_token_index);
const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", decl);
const decl_name = tree.tokenSlice(var_decl.name_token);
const mut = tree.tokenSlice(var_decl.mut_token);
    const is_mutable = mem.eql(u8, mut, "var");
if (var_decl.init_node) |init_node| {
switch (init_node.id) {
.BuiltinCall => {
var builtn_call = @fieldParentPtr(ast.Node.BuiltinCall, "base", init_node);
const fn_name = tree.tokenSlice(builtn_call.builtin_token);
if (mem.eql(u8, fn_name, "@import")) {
var decl_ptr = try ls.allocator.create(Declaration);
decl_ptr.* = Declaration{
.start = first_token.start,
.end = last_token.end,
.typ = Declaration.Type.Import,
.label = decl_name,
.node = decl,
.zig_doc = getDoc(tree, var_decl.doc_comments),
.is_public = var_decl.visib_token != null,
.is_mutable = is_mutable,
.children = Declaration.List.init(ls.allocator),
};
try ls.append(decl_ptr);
} else {
var decl_ptr = try ls.allocator.create(Declaration);
decl_ptr.* = Declaration{
.start = first_token.start,
.end = last_token.end,
.typ = Declaration.Type.mutable(mut),
.label = decl_name,
.node = decl,
.zig_doc = getDoc(tree, var_decl.doc_comments),
.is_public = var_decl.visib_token != null,
.is_mutable = is_mutable,
.children = Declaration.List.init(ls.allocator),
};
try ls.append(decl_ptr);
}
},
.ContainerDecl => {
const container_decl = @fieldParentPtr(ast.Node.ContainerDecl, "base", init_node);
const container_kind = tree.tokenSlice(container_decl.kind_token);
const typ = Declaration.Type.fromString(container_kind);
if (typ) |kind| {
var decl_ptr = try ls.allocator.create(Declaration);
decl_ptr.* = Declaration{
.start = first_token.start,
.end = last_token.end,
.typ = kind,
.label = decl_name,
.node = decl,
.zig_doc = getDoc(tree, var_decl.doc_comments),
.is_public = var_decl.visib_token != null,
.is_mutable = is_mutable,
.children = Declaration.List.init(ls.allocator),
};
var it = container_decl.fields_and_decls.iterator(0);
while (true) {
var field = (it.next() orelse break).*;
const field_first_token_ndex = field.firstToken();
const field_last_token_index = field.lastToken();
const field_first_token = tree.tokens.at(field_first_token_ndex);
const field_last_token = tree.tokens.at(field_last_token_index);
switch (field.id) {
.ContainerField => {
const field_decl = @fieldParentPtr(ast.Node.ContainerField, "base", field);
const field_name = tree.tokenSlice(field_decl.name_token);
var field_ptr = try ls.allocator.create(Declaration);
field_ptr.* = Declaration{
.start = field_first_token.start,
.end = field_last_token.end,
.typ = Declaration.Type.Field,
.label = field_name,
.node = field,
.zig_doc = getDoc(tree, field_decl.doc_comments),
.is_public = field_decl.visib_token != null,
.is_mutable = false,
.children = Declaration.List.init(ls.allocator),
};
try decl_ptr.children.append(field_ptr);
},
.FnProto => {
const fn_decl = @fieldParentPtr(ast.Node.FnProto, "base", field);
if (fn_decl.name_token) |idx| {
const fn_name = tree.tokenSlice(idx);
var fn_decl_ptr = try ls.allocator.create(Declaration);
fn_decl_ptr.* = Declaration{
.start = field_first_token.start,
.end = field_last_token.end,
.typ = Declaration.Type.Fn,
.label = fn_name,
.node = field,
.zig_doc = getDoc(tree, fn_decl.doc_comments),
.is_public = fn_decl.visib_token != null,
.is_mutable = false,
.children = Declaration.List.init(ls.allocator),
};
try decl_ptr.children.append(fn_decl_ptr);
}
},
.VarDecl => {
const field_decl = @fieldParentPtr(ast.Node.VarDecl, "base", field);
const field_name = tree.tokenSlice(field_decl.name_token);
const f_mut = tree.tokenSlice(field_decl.mut_token);
const f_is_mutable = mem.eql(u8, f_mut, "var");
var field_ptr = try ls.allocator.create(Declaration);
field_ptr.* = Declaration{
.start = field_first_token.start,
.end = field_last_token.end,
.typ = Declaration.Type.mutable(
tree.tokenSlice(field_decl.mut_token),
),
.label = field_name,
.node = field,
.zig_doc = getDoc(tree, field_decl.doc_comments),
.is_public = field_decl.visib_token != null,
.is_mutable = f_is_mutable,
.children = Declaration.List.init(ls.allocator),
};
try decl_ptr.children.append(field_ptr);
},
else => {
field.dump(0);
},
}
}
try ls.append(decl_ptr);
}
},
.InfixOp => {
const infix_decl = @fieldParentPtr(ast.Node.InfixOp, "base", init_node);
switch (infix_decl.op) {
.Period => {
var inner = innerMostInfix(infix_decl.lhs);
switch (inner.id) {
.BuiltinCall => {
var builtn_call = @fieldParentPtr(ast.Node.BuiltinCall, "base", inner);
const fn_name = tree.tokenSlice(builtn_call.builtin_token);
if (mem.eql(u8, fn_name, "@import")) {
var decl_ptr = try ls.allocator.create(Declaration);
decl_ptr.* = Declaration{
.start = first_token.start,
.end = last_token.end,
.typ = Declaration.Type.Import,
.label = decl_name,
.node = decl,
.zig_doc = getDoc(tree, var_decl.doc_comments),
.is_public = var_decl.visib_token != null,
.is_mutable = is_mutable,
.children = Declaration.List.init(ls.allocator),
};
try ls.append(decl_ptr);
} else {
var decl_ptr = try ls.allocator.create(Declaration);
decl_ptr.* = Declaration{
.start = first_token.start,
.end = last_token.end,
.typ = Declaration.Type.mutable(mut),
.label = decl_name,
.node = decl,
.zig_doc = getDoc(tree, var_decl.doc_comments),
.is_public = var_decl.visib_token != null,
.is_mutable = is_mutable,
.children = Declaration.List.init(ls.allocator),
};
try ls.append(decl_ptr);
}
},
.Identifier => {
var decl_ptr = try ls.allocator.create(Declaration);
decl_ptr.* = Declaration{
.start = first_token.start,
.end = last_token.end,
.typ = .TopAssign,
.label = decl_name,
.node = decl,
.zig_doc = getDoc(tree, var_decl.doc_comments),
.is_public = var_decl.visib_token != null,
.is_mutable = is_mutable,
.children = Declaration.List.init(ls.allocator),
};
try ls.append(decl_ptr);
},
else => {
var decl_ptr = try ls.allocator.create(Declaration);
decl_ptr.* = Declaration{
.start = first_token.start,
.end = last_token.end,
.typ = Declaration.Type.mutable(mut),
.label = decl_name,
.node = decl,
.zig_doc = getDoc(tree, var_decl.doc_comments),
.is_public = var_decl.visib_token != null,
.is_mutable = is_mutable,
.children = Declaration.List.init(ls.allocator),
};
try ls.append(decl_ptr);
},
}
},
else => {
var decl_ptr = try ls.allocator.create(Declaration);
decl_ptr.* = Declaration{
.start = first_token.start,
.end = last_token.end,
.typ = Declaration.Type.mutable(mut),
.label = decl_name,
.node = decl,
.zig_doc = getDoc(tree, var_decl.doc_comments),
.is_public = var_decl.visib_token != null,
.is_mutable = is_mutable,
.children = Declaration.List.init(ls.allocator),
};
try ls.append(decl_ptr);
},
}
},
else => {
var decl_ptr = try ls.allocator.create(Declaration);
decl_ptr.* = Declaration{
.start = first_token.start,
.end = last_token.end,
.typ = Declaration.Type.mutable(mut),
.label = decl_name,
.node = decl,
.zig_doc = getDoc(tree, var_decl.doc_comments),
.is_public = var_decl.visib_token != null,
.is_mutable = is_mutable,
.children = Declaration.List.init(ls.allocator),
};
try ls.append(decl_ptr);
},
}
}
}
fn collectContainerDecl(
tree: *ast.Tree,
ls: *Declaration.List,
decl: *ast.Node,
) !void {
const first_token_ndex = decl.firstToken();
const last_token_index = decl.lastToken();
const first_token = tree.tokens.at(first_token_ndex);
const last_token = tree.tokens.at(last_token_index);
const var_decl = @fieldParentPtr(ast.Node.ContainerField, "base", decl);
const decl_name = tree.tokenSlice(var_decl.name_token);
var decl_ptr = try ls.allocator.create(Declaration);
decl_ptr.* = Declaration{
.start = first_token.start,
.end = last_token.end,
.typ = .Field,
.label = decl_name,
.node = decl,
.zig_doc = getDoc(tree, var_decl.doc_comments),
.is_public = var_decl.visib_token != null,
.is_mutable = false,
.children = Declaration.List.init(ls.allocator),
};
try ls.append(decl_ptr);
}
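/// Walks down the left-hand side of chained infix expressions
/// (e.g. `@import("a").b.c`) and returns the innermost node.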
fn innerMostInfix(node: *ast.Node) *ast.Node {
switch (node.id) {
.InfixOp => {
const infix_decl = @fieldParentPtr(ast.Node.InfixOp, "base", node);
return innerMostInfix(infix_decl.lhs);
},
else => {
return node;
},
}
}
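/// Strips the surrounding double quotes from a string literal slice
/// (e.g. `"name"` becomes `name`); returns the input unchanged if it does not start with a quote.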
fn unquote(s: []const u8) []const u8 {
if (s.len == 0 or s[0] != '"') {
return s;
}
return s[1 .. s.len - 1];
}
fn dump(a: *Allocator, ls: *Declaration.List) !void {
var values = std.ArrayList(json.Value).init(a);
defer values.deinit();
for (ls.toSlice()) |decl| {
var v = try decl.encode(a);
try values.append(v);
}
var v = json.Value{ .Array = values };
v.dump();
}
fn exec(a: *std.mem.Allocator, src: []const u8) anyerror!void {
var tree = try parse(a, src);
defer tree.deinit();
var arena = std.heap.ArenaAllocator.init(a);
defer arena.deinit();
var ls = &try outlineDecls(&arena.allocator, tree);
defer ls.deinit();
try dump(a, ls);
}
fn testOutline(
a: *Allocator,
buf: *std.Buffer,
src: []const u8,
expected: []const u8,
) !void {
try exec(a, src);
}
test "outline" {
var a = std.debug.global_allocator;
var buf = &try std.Buffer.init(a, "");
defer buf.deinit();
// try testOutline(a, buf,
// \\ const c=@import("c");
// \\
// \\ const a=@import("a");
// ,
// \\[{"end":22,"label":"c","type":"import","start":1},{"end":46,"label":"a","type":"import","start":25}]
// );
// try testOutline(a, buf,
// \\ const c=@import("c").d;
// \\
// \\ const a=@import("a").b.c;
// ,
// \\[{"end":24,"label":"c","type":"const","start":1},{"end":52,"label":"a","type":"const","start":27}]
// );
// try testOutline(a, buf,
// \\test "outline" {}
// \\test "outline2" {}
// ,
// \\[{"end":17,"label":"outline","type":"test","start":0},{"end":36,"label":"outline2","type":"test","start":18}]
// );
// try testOutline(a, buf,
// \\fn outline()void{}
// \\fn outline2()void{}
// ,
// \\[{"end":18,"label":"outline","type":"function","start":0},{"end":38,"label":"outline2","type":"function","start":19}]
// );
// try testOutline(a, buf,
// \\var StructContainer=struct{
// \\ name: []const u8,
// \\ const max=12;
// \\var min=20;
// \\ pub fn handle(self: StructContainer)void{}
// \\ };
// \\ pub const major=0.1;
// \\ const minor=0.2;
// ,
// \\[{"children":[{"end":53,"label":"name","type":"field","start":0}],"end":53,"label":"StructContainer","type":"struct","start":0}]
// );
// try testOutline(a, buf,
// \\const EnumContainer=enum{
// \\ One,
// \\ };
// ,
// \\[{"children":[{"end":36,"label":"One","type":"field","start":0}],"end":36,"label":"EnumContainer","type":"enum","start":0}]
// );
// try testOutline(a, buf,
// \\const UnionContainer=union{};
// ,
// \\[{"end":29,"label":"UnionContainer","type":"union","start":0}]
// );
try testOutline(a, buf,
\\fn Generic(comptime T:type)type{
\\ return struct{
\\ const Self=@This();
\\ pub fn say()void{}
\\ };
\\}
,
\\[{"children":[{"end":55,"label":"A","type":"field","start":0},{"end":55,"label":"B","type":"field","start":0}],"end":55,"label":"UnionContainer","type":"union","start":0}]
);
} | src/outline/outline.zig |
const std = @import("std");
usingnamespace @import("imgui");
const colors = @import("../colors.zig");
const ts = @import("../tilescript.zig");
const thickness: f32 = 2;
pub fn drawWindow(state: *ts.AppState) void {
if (igBegin("Brushes", null, ImGuiWindowFlags_NoCollapse | ImGuiWindowFlags_NoResize | ImGuiWindowFlags_AlwaysAutoResize)) {
draw(state, 32, false);
}
igEnd();
}
pub fn drawPopup(state: *ts.AppState, popup_id: [*c]const u8) void {
ogSetNextWindowPos(igGetIO().MousePos, ImGuiCond_Appearing, .{ .x = 0.5 });
if (igBeginPopup(popup_id, ImGuiWindowFlags_NoTitleBar)) {
draw(state, state.map_rect_size, false);
igEndPopup();
}
}
/// draws the palette. If skip_input_processing is true, no input processing will occur and the selected brush will not be highlighted.
pub fn draw(state: *ts.AppState, rect_size: f32, skip_input_processing: bool) void {
const canvas_size = 6 * rect_size;
const draw_list = igGetWindowDrawList();
var pos = ogGetCursorScreenPos();
_ = ogInvisibleButton("##but", .{ .x = canvas_size, .y = canvas_size }, ImGuiButtonFlags_None);
const mouse_pos = igGetIO().MousePos;
const hovered = igIsItemHovered(ImGuiHoveredFlags_None);
var y: usize = 0;
while (y < 6) : (y += 1) {
var x: usize = 0;
while (x < 6) : (x += 1) {
const index = x + y * 6;
const offset_x = @intToFloat(f32, x) * rect_size;
const offset_y = @intToFloat(f32, y) * rect_size;
var tl = ImVec2{ .x = pos.x + offset_x, .y = pos.y + offset_y };
drawBrush(rect_size, index, tl);
if (index == state.selected_brush_index and !skip_input_processing) {
const size = rect_size - thickness;
tl.x += thickness / 2;
tl.y += thickness / 2;
ogImDrawList_AddQuad(draw_list, &ImVec2{ .x = tl.x, .y = tl.y }, &ImVec2{ .x = tl.x + size, .y = tl.y }, &ImVec2{ .x = tl.x + size, .y = tl.y + size }, &ImVec2{ .x = tl.x, .y = tl.y + size }, colors.brush_selected, 2);
}
if (hovered and !skip_input_processing) {
if (tl.x <= mouse_pos.x and mouse_pos.x < tl.x + rect_size and tl.y <= mouse_pos.y and mouse_pos.y < tl.y + rect_size) {
if (igIsMouseClicked(ImGuiMouseButton_Left, false)) {
state.selected_brush_index = index;
igCloseCurrentPopup();
}
}
}
}
}
}
pub fn drawBrush(rect_size: f32, index: usize, tl: ImVec2) void {
// we have 14 unique colors so collapse our index
const color_index = @mod(index, 14);
const set = @divTrunc(index, 14);
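    // set 0 draws the plain color; set 1 adds a translucent dark inner square; set 2 adds a dark inner outline.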
ogImDrawList_AddQuadFilled(igGetWindowDrawList(), &ImVec2{ .x = tl.x, .y = tl.y }, &ImVec2{ .x = tl.x + rect_size, .y = tl.y }, &ImVec2{ .x = tl.x + rect_size, .y = tl.y + rect_size }, &ImVec2{ .x = tl.x, .y = tl.y + rect_size }, colors.brushes[color_index]);
const mini_size = rect_size / 2;
var pt = tl;
pt.x += (rect_size - mini_size) / 2;
pt.y += (rect_size - mini_size) / 2;
if (set == 1) {
ogImDrawList_AddQuadFilled(igGetWindowDrawList(), &ImVec2{ .x = pt.x, .y = pt.y }, &ImVec2{ .x = pt.x + mini_size, .y = pt.y }, &ImVec2{ .x = pt.x + mini_size, .y = pt.y + mini_size }, &ImVec2{ .x = pt.x, .y = pt.y + mini_size }, colors.colorRgba(0, 0, 0, 100));
} else if (set == 2) {
ogImDrawList_AddQuad(igGetWindowDrawList(), &ImVec2{ .x = pt.x, .y = pt.y }, &ImVec2{ .x = pt.x + mini_size, .y = pt.y }, &ImVec2{ .x = pt.x + mini_size, .y = pt.y + mini_size }, &ImVec2{ .x = pt.x, .y = pt.y + mini_size }, colors.colorRgba(0, 0, 0, 150), 1);
}
} | tilescript/windows/brushes.zig |
const std = @import("std");
const mem = @import("mem.zig");
const vtable = @import("vtable.zig");
const testing = std.testing;
const read = @This();
pub fn Reader(comptime E: type) type {
return struct {
pub const Error = E;
const VTable = struct {
pub const Impl = @OpaqueType();
read: fn (reader: *Impl, buf: []u8) Error![]u8,
};
vtable: *const VTable,
impl: *VTable.Impl,
pub fn init(reader: var) @This() {
const T = @TypeOf(reader).Child;
return @This(){
.vtable = comptime vtable.populate(VTable, T, T),
.impl = @ptrCast(*VTable.Impl, reader),
};
}
pub fn read(reader: @This(), buf: []u8) Error![]u8 {
return reader.vtable.read(reader.impl, buf);
}
};
}
test "read.Reader" {
var buf: [2]u8 = undefined;
var mr = MemReader{ .buffer = "abc" };
const reader = Reader(MemReader.Error).init(&mr);
testing.expectEqualSlices(u8, "ab", try reader.read(buf[0..]));
testing.expectEqual(@as(usize, 2), mr.i);
testing.expectEqualSlices(u8, "c", try reader.read(buf[0..]));
testing.expectEqual(@as(usize, 3), mr.i);
testing.expectEqualSlices(u8, "", try reader.read(buf[0..]));
testing.expectEqual(@as(usize, 3), mr.i);
}
pub fn Writer(comptime E: type) type {
return struct {
pub const Error = E;
const VTable = struct {
pub const Impl = @OpaqueType();
write: fn (writer: *Impl, buf: []const u8) Error!usize,
};
vtable: *const VTable,
impl: *VTable.Impl,
pub fn init(writer: var) @This() {
const T = @TypeOf(writer).Child;
return @This(){
.vtable = comptime vtable.populate(VTable, T, T),
.impl = @ptrCast(*VTable.Impl, writer),
};
}
pub fn write(writer: @This(), buf: []const u8) Error!usize {
return writer.vtable.write(writer.impl, buf);
}
};
}
test "read.Writer" {
var buf: [2]u8 = undefined;
const mw = &MemWriter{ .buffer = buf[0..] };
const writer = Writer(MemWriter.Error).init(mw);
testing.expectEqual(@as(usize, 1), try writer.write("a"));
testing.expectEqualSlices(u8, "a", mw.buffer[0..mw.i]);
testing.expectEqual(@as(usize, 1), try writer.write("b"));
testing.expectEqualSlices(u8, "ab", mw.buffer[0..mw.i]);
testing.expectEqual(@as(usize, 0), try writer.write("c"));
testing.expectEqualSlices(u8, "ab", mw.buffer[0..mw.i]);
}
pub fn ReadWriter(comptime ReadErr: type, comptime WriteErr: type) type {
return struct {
pub const ReadError = ReadErr;
pub const WriteError = WriteErr;
const VTable = struct {
pub const Impl = @OpaqueType();
read: fn (reader: *Impl, buf: []u8) ReadError![]u8,
write: fn (writer: *Impl, buf: []const u8) WriteError!usize,
};
vtable: *const VTable,
impl: *VTable.Impl,
pub fn init(rw: var) @This() {
const T = @TypeOf(rw).Child;
return @This(){
.vtable = comptime vtable.populate(VTable, T, T),
.impl = @ptrCast(*VTable.Impl, rw),
};
}
pub fn read(rw: @This(), buf: []u8) ReadError![]u8 {
return rw.vtable.read(rw.impl, buf);
}
pub fn write(rw: @This(), buf: []const u8) WriteError!usize {
return rw.vtable.write(rw.impl, buf);
}
};
}
test "read.ReadWriter" {
var buf: [2]u8 = undefined;
var buf2: [1]u8 = undefined;
var mrw = MemReadWriter{ .buffer = buf[0..] };
const rw = ReadWriter(MemReadWriter.Error, MemReadWriter.Error).init(&mrw);
testing.expectEqual(@as(usize, 1), try rw.write("a"));
testing.expectEqualSlices(u8, "a", mrw.notRead());
testing.expectEqual(@as(usize, 1), try rw.write("b"));
testing.expectEqualSlices(u8, "ab", mrw.notRead());
testing.expectEqual(@as(usize, 0), try rw.write("c"));
testing.expectEqualSlices(u8, "ab", mrw.notRead());
testing.expectEqualSlices(u8, "a", try rw.read(buf2[0..]));
testing.expectEqualSlices(u8, "b", mrw.notRead());
testing.expectEqualSlices(u8, "b", try rw.read(buf2[0..]));
testing.expectEqualSlices(u8, "", mrw.notRead());
testing.expectEqualSlices(u8, "", try rw.read(buf2[0..]));
}
pub const MemReader = struct {
pub const Error = error{};
buffer: []const u8,
i: usize = 0,
pub fn read(reader: *MemReader, buf: []u8) Error![]u8 {
const buffer = reader.rest();
const len = std.math.min(buf.len, buffer.len);
std.mem.copy(u8, buf, buffer[0..len]);
reader.i += len;
return buf[0..len];
}
pub fn rest(reader: MemReader) []const u8 {
return reader.buffer[reader.i..];
}
};
test "read.MemReader" {
var buf: [2]u8 = undefined;
var mr = MemReader{ .buffer = "abc" };
testing.expectEqualSlices(u8, "ab", try mr.read(buf[0..]));
testing.expectEqual(@as(usize, 2), mr.i);
testing.expectEqualSlices(u8, "c", try mr.read(buf[0..]));
testing.expectEqual(@as(usize, 3), mr.i);
testing.expectEqualSlices(u8, "", try mr.read(buf[0..]));
testing.expectEqual(@as(usize, 3), mr.i);
}
pub const MemWriter = struct {
pub const Error = error{};
buffer: []u8,
i: usize = 0,
pub fn write(writer: *MemWriter, buf: []const u8) Error!usize {
const buffer = writer.rest();
const len = std.math.min(buf.len, buffer.len);
std.mem.copy(u8, buffer, buf[0..len]);
writer.i += len;
return len;
}
pub fn rest(writer: MemWriter) []u8 {
return writer.buffer[writer.i..];
}
};
test "read.MemWriter" {
var buf: [2]u8 = undefined;
var mw = MemWriter{ .buffer = buf[0..] };
testing.expectEqual(@as(usize, 1), try mw.write("a"));
testing.expectEqualSlices(u8, "a", mw.buffer[0..mw.i]);
testing.expectEqual(@as(usize, 1), try mw.write("b"));
testing.expectEqualSlices(u8, "ab", mw.buffer[0..mw.i]);
testing.expectEqual(@as(usize, 0), try mw.write("c"));
testing.expectEqualSlices(u8, "ab", mw.buffer[0..mw.i]);
}
pub const MemReadWriter = struct {
pub const Error = error{};
buffer: []u8,
start: usize = 0,
end: usize = 0,
pub fn write(mrw: *MemReadWriter, buf: []const u8) Error!usize {
const buffer = mrw.notWritten();
const len = std.math.min(buf.len, buffer.len);
std.mem.copy(u8, buffer, buf[0..len]);
mrw.end += len;
return len;
}
pub fn read(mrw: *MemReadWriter, buf: []u8) Error![]u8 {
const buffer = mrw.notRead();
const len = std.math.min(buf.len, buffer.len);
std.mem.copy(u8, buf, buffer[0..len]);
mrw.start += len;
return buf[0..len];
}
pub fn notWritten(mrw: MemReadWriter) []u8 {
return mrw.buffer[mrw.end..];
}
pub fn notRead(mrw: MemReadWriter) []const u8 {
return mrw.buffer[mrw.start..mrw.end];
}
};
test "read.MemReadWriter" {
var buf: [2]u8 = undefined;
var buf2: [1]u8 = undefined;
var mrw = MemReadWriter{ .buffer = buf[0..] };
testing.expectEqual(@as(usize, 1), try mrw.write("a"));
testing.expectEqualSlices(u8, "a", mrw.notRead());
testing.expectEqual(@as(usize, 1), try mrw.write("b"));
testing.expectEqualSlices(u8, "ab", mrw.notRead());
testing.expectEqual(@as(usize, 0), try mrw.write("c"));
testing.expectEqualSlices(u8, "ab", mrw.notRead());
testing.expectEqualSlices(u8, "a", try mrw.read(buf2[0..]));
testing.expectEqualSlices(u8, "b", mrw.notRead());
testing.expectEqualSlices(u8, "b", try mrw.read(buf2[0..]));
testing.expectEqualSlices(u8, "", mrw.notRead());
testing.expectEqualSlices(u8, "", try mrw.read(buf2[0..]));
}
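/// Reads a single byte from `reader`, returning `error.EndOfStream` when it is exhausted.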
pub fn byte(reader: var) !u8 {
var buf: [1]u8 = undefined;
const bytes = try reader.read(buf[0..]);
if (bytes.len == 0)
return error.EndOfStream;
return bytes[0];
}
test "read.byte" {
var mr = MemReader{ .buffer = "abcd" };
testing.expectEqual(@as(u8, 'a'), try read.byte(&mr));
testing.expectEqual(@as(u8, 'b'), try read.byte(&mr));
testing.expectEqual(@as(u8, 'c'), try read.byte(&mr));
testing.expectEqual(@as(u8, 'd'), try read.byte(&mr));
testing.expectError(error.EndOfStream, read.byte(&mr));
}
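/// Reads bytes from `reader` into a newly allocated buffer until `delim` is found.
/// The returned slice includes the delimiter; fails with `error.EndOfStream` if it is never seen.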
pub fn until(reader: var, allocator: var, delim: u8) ![]u8 {
var res = try mem.alloc(allocator, u8, 4);
errdefer mem.free(allocator, res);
var i: usize = 0;
while (true) : (i += 1) {
if (res.len <= i)
res = try mem.realloc(allocator, u8, res, res.len * 2);
res[i] = try read.byte(reader);
if (res[i] == delim)
return mem.shrink(allocator, u8, res, i + 1);
}
}
test "read.until" {
var buf: [32]u8 = undefined;
var fba = mem.FixedBufferAllocator{ .buffer = buf[0..] };
var mr = MemReader{ .buffer = "ab\ncd" };
const line = try read.until(&mr, &fba, '\n');
testing.expectEqualSlices(u8, "ab\n", line);
testing.expectError(error.EndOfStream, read.until(&mr, &fba, '\n'));
} | src/read.zig |
const std = @import("std");
const expect = std.testing.expect;
pub const Identifier = []const u8;
pub const Valid = struct {
identifier: Identifier,
fn clone(self: @This()) @This() {
        return Valid{
.identifier = self.identifier,
};
}
};
pub const Invalid = struct {
identifier: Identifier,
/// contracts that caused this contract to be invalid
/// (primarily concerns composite contracts)
causes: []const Contract = &[0]Contract{},
/// reason for violation
reason: []const u8 = "none given",
/// Returns the first contract that causes this contract's failure
pub fn cause(comptime self: @This()) Contract {
if (self.causes.len > 0)
return self.causes[0].Invalid.cause();
return Contract.init(false, self.clone());
}
fn clone(self: @This()) @This() {
return Invalid{
.identifier = self.identifier,
.reason = self.reason,
.causes = self.causes,
};
}
};
/// Contract
pub const Contract = union(enum) {
Valid: Valid,
Invalid: Invalid,
pub fn init(validity: bool, invalid: Invalid) @This() {
return if (validity) .{
.Valid = .{ .identifier = invalid.identifier },
} else .{
.Invalid = invalid,
};
}
fn clone(comptime self: @This()) @This() {
return @This().init(
self == .Valid,
(if (self == .Valid) Invalid{
.identifier = self.identifier(),
} else self.Invalid.clone()),
);
}
/// Returns the identifier of a contract
pub fn identifier(comptime self: @This()) Identifier {
return switch (self) {
.Valid => |v| v.identifier,
.Invalid => |i| i.identifier,
};
}
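    /// Collects whichever of `t1` and `t2` are invalid into a slice, for use as `Invalid.causes`.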
fn collectFailures(comptime t1: @This(), comptime t2: @This()) []const @This() {
var n: usize = 0;
if (t1 == .Invalid) n += 1;
if (t2 == .Invalid) n += 1;
var causes: [n]@This() = [_]@This(){undefined} ** n;
var i: usize = 0;
if (t1 == .Invalid) {
causes[i] = t1;
i += 1;
}
if (t2 == .Invalid) {
causes[i] = t2;
}
return &causes;
}
/// A contract that requires both `self` and `t` to be valid
pub fn andAlso(comptime self: @This(), comptime t: @This()) @This() {
return @This().init(self == .Valid and t == .Valid, Invalid{
.identifier = std.fmt.comptimePrint("{s}.andAlso({s})", .{ self.identifier(), t.identifier() }),
.causes = self.collectFailures(t),
});
}
/// A contract that requires `self` to be valid and if it is, the
/// contract returned by Then.then() has to be valid as well
pub fn andThen(comptime self: @This(), comptime Then: type) @This() {
if (self == .Invalid)
return self.clone();
return Then.then().named(std.fmt.comptimePrint("andThen({s})", .{@typeName(Then)}));
}
    /// A contract that is satisfied when `self` is valid; if it is not, the
    /// contract returned by Then.then() has to be valid instead
pub fn orThen(comptime self: @This(), comptime Then: type) @This() {
if (self == .Valid)
return self.clone();
return Then.then().named(std.fmt.comptimePrint("orThen({s})", .{@typeName(Then)}));
}
/// A contract that requires either `self` or `t` to be valid
pub fn orElse(comptime self: @This(), comptime t: @This()) @This() {
return @This().init(self == .Valid or t == .Valid, Invalid{
.identifier = std.fmt.comptimePrint("{s}.orElse({s})", .{ self.identifier(), t.identifier() }),
.causes = t.collectFailures(self),
});
}
/// Gives a contract a new identifier, wrapping the original contract as a cause
///
/// Useful for creating named contracts that compose other contracts together
///
/// ```
    /// pub fn isMyThing(comptime T: type) contracts.Contract {
/// return contracts.isType(T, .Struct).named("isMyThing");
/// }
/// ```
pub fn named(comptime self: @This(), identifier_: []const u8) @This() {
return @This().init(self == .Valid, Invalid{ .identifier = identifier_, .causes = &[1]@This(){self} });
}
};
test "andAlso" {
comptime {
const T = u8;
const valid_contract = is(T, u8).andAlso(is(T, u8));
try expect(valid_contract == .Valid);
const contract = is(T, u8).andAlso(is(T, u16));
try expect(contract == .Invalid);
try expect(std.mem.eql(u8, "is(u8, u8).andAlso(is(u8, u16))", contract.identifier()));
try expect(contract.Invalid.causes.len == 1);
try expect(std.mem.eql(u8, contract.Invalid.causes[0].identifier(), is(T, u16).identifier()));
}
}
test "andThen" {
comptime {
var a = 1;
const contract = is(u8, u8).andThen(struct {
pub fn then() Contract {
a = 2;
return is(u8, u16);
}
});
try expect(contract == .Invalid);
        // andThen got to execute
try expect(a == 2);
const contract1 = is(u8, u16).andThen(struct {
pub fn then() Contract {
a = 3;
return is(u8, u16);
}
});
try expect(contract1 == .Invalid);
// andThen didn't get to execute
try expect(a == 2);
}
}
test "orElse" {
comptime {
const T = u8;
const valid_contract = is(T, u8).orElse(is(T, u16));
try expect(valid_contract == .Valid);
const contract = is(T, u1).orElse(is(T, u17));
try expect(contract == .Invalid);
try expect(std.mem.eql(u8, "is(u8, u1).orElse(is(u8, u17))", contract.identifier()));
try expect(contract.Invalid.causes.len == 2);
try expect(std.mem.eql(u8, contract.Invalid.causes[0].identifier(), is(T, u17).identifier()));
try expect(std.mem.eql(u8, contract.Invalid.causes[1].identifier(), is(T, u1).identifier()));
}
}
test "orThen" {
comptime {
var a = 1;
const contract = is(u8, u8).orThen(struct {
pub fn then() Contract {
a = 2;
return is(u8, u16);
}
});
try expect(contract == .Valid);
// orThen didn't get executed
try expect(a == 1);
const contract1 = is(u8, u16).orThen(struct {
pub fn then() Contract {
a = 2;
return is(u8, u16);
}
});
try expect(contract1 == .Invalid);
        // orThen got to execute
try expect(a == 2);
}
}
test "invalid contract cause" {
comptime {
const T = u8;
try expect(std.mem.eql(
u8,
is(T, u16).identifier(),
is(T, u16).Invalid.cause().identifier(),
));
const contract = is(T, u8).andAlso(is(T, u16));
try expect(contract == .Invalid);
try expect(std.mem.eql(
u8,
is(T, u16).identifier(),
contract.Invalid.cause().identifier(),
));
try expect(std.mem.eql(
u8,
contract.Invalid.reason,
contract.Invalid.cause().Invalid.reason,
));
const custom_reason = Contract{ .Invalid = .{ .identifier = "custom", .reason = "custom" } };
try expect(std.mem.eql(u8, custom_reason.Invalid.reason, custom_reason.Invalid.cause().Invalid.reason));
const nested = (Contract{ .Valid = .{ .identifier = "1" } })
.andAlso(
(Contract{ .Valid = .{ .identifier = "2" } })
.andAlso(Contract{ .Invalid = .{ .identifier = "3", .reason = "special" } }),
);
try expect(std.mem.eql(u8, "3", nested.Invalid.cause().identifier()));
try expect(std.mem.eql(u8, "special", nested.Invalid.cause().Invalid.reason));
}
}
test "Contract.named" {
comptime {
const contract = is(u8, u16).named("contract");
try expect(contract == .Invalid);
try expect(std.mem.eql(u8, "contract", contract.identifier()));
try expect(std.mem.eql(u8, "is(u8, u16)", contract.Invalid.cause().identifier()));
}
}
fn fnArgsEql(comptime a: []const std.builtin.TypeInfo.FnArg, comptime b: []const std.builtin.TypeInfo.FnArg) bool {
if (a.len != b.len) return false;
if (a.ptr == b.ptr) return true;
for (a) |item, index| {
if (b[index].is_generic != item.is_generic) return false;
if (b[index].is_noalias != item.is_noalias) return false;
if (b[index].arg_type == null and item.arg_type != null) return false;
if (b[index].arg_type != null and item.arg_type != null and b[index].arg_type.? != item.arg_type.?) return false;
}
return true;
}
fn isGenericFnEqual(comptime T: type, comptime T1: type) bool {
comptime {
if (@typeInfo(T) != .Fn or @typeInfo(T) != .Fn)
return false;
const ti = @typeInfo(T).Fn;
const ti1 = @typeInfo(T1).Fn;
return ti.calling_convention == ti1.calling_convention and ti.alignment == ti1.alignment and
((ti.return_type == null and ti1.return_type == null) or (ti.return_type.? == ti1.return_type.?)) and
fnArgsEql(ti.args, ti1.args);
}
}
/// A contract that requires type `T` to be the same type as `T1`
pub fn is(comptime T: type, comptime T1: type) Contract {
const valid = if (isGenericFn(T) == .Valid and isGenericFn(T1) == .Valid) isGenericFnEqual(T, T1) else T == T1;
return Contract.init(valid, Invalid{
.identifier = std.fmt.comptimePrint("is({}, {})", .{ T, T1 }),
});
}
test "is" {
comptime {
try expect(is(u8, u8) == .Valid);
try expect(is(u8, u16) == .Invalid);
try expect(std.mem.eql(u8, "is(u8, u16)", is(u8, u16).identifier()));
try expect(is(u8, u16).Invalid.causes.len == 0);
try expect(is(fn (u8) u8, fn (u8) u8) == .Valid);
try expect(is(fn (type) u8, fn (type) u8) == .Valid);
// FIXME: figure out what to do with the return type, it's not really validating it
// try expect(is(fn (type) u8, fn (type) bool) == .Invalid);
}
}
/// A contract that requires the function type T to be generic
pub fn isGenericFn(comptime T: type) Contract {
comptime {
const identifier = std.fmt.comptimePrint("isGenericFn({})", .{T});
return isType(T, .Fn).andThen(struct {
pub fn then() Contract {
return Contract.init(@typeInfo(T).Fn.is_generic, Invalid{ .identifier = identifier });
}
}).named(identifier);
}
}
test "isGenericFn" {
comptime {
try expect(isGenericFn(fn (type) bool) == .Valid);
try expect(isGenericFn(fn (bool) bool) == .Invalid);
try expect(isGenericFn(u8) == .Invalid);
}
}
/// A contract that requires type `T` to be of a certain
/// kind (as in .Struct, .Int, etc.)
///
/// Includes violation reason into `Invalid.reason`
pub fn isType(comptime T: type, comptime type_id: std.builtin.TypeId) Contract {
return Contract.init(@typeInfo(T) == type_id, Invalid{
.identifier = std.fmt.comptimePrint("isType({}, .{s})", .{ T, @tagName(type_id) }),
.reason = std.fmt.comptimePrint("got .{s}", .{@tagName(@typeInfo(T))}),
});
}
test "isType" {
comptime {
try expect(isType(struct {}, .Struct) == .Valid);
try expect(isType(u8, .Struct) == .Invalid);
try expect(std.mem.eql(u8, "got .Int", isType(u8, .Struct).Invalid.reason));
try expect(std.mem.eql(u8, "isType(u8, .Struct)", isType(u8, .Struct).identifier()));
}
}
/// A contract that requires that a given type is a struct,
/// enum, union or an opaque type that has a declaration by the given name.
pub fn hasDecl(comptime T: type, comptime name: []const u8) Contract {
const ti = @typeInfo(T);
const validType = ti == .Struct or ti == .Enum or ti == .Union or ti == .Opaque;
const valid = if (validType) @hasDecl(T, name) else false;
const reason = if (validType) "declaration not found" else std.fmt.comptimePrint("{s} is not a struct, enum, union or an opaque type", .{T});
return Contract.init(valid, Invalid{
.identifier = std.fmt.comptimePrint("hasDecl({}, {s})", .{ T, name }),
.reason = reason,
});
}
test "hasDecl" {
comptime {
try expect(hasDecl(u8, "a") == .Invalid);
try expect(std.mem.eql(u8, hasDecl(u8, "a").Invalid.reason, "u8 is not a struct, enum, union or an opaque type"));
try expect(hasDecl(struct {}, "a") == .Invalid);
try expect(hasDecl(struct {
const a = 1;
}, "a") == .Valid);
try expect(hasDecl(enum {
a,
}, "a") == .Invalid);
try expect(hasDecl(enum {
a,
const b = 1;
}, "b") == .Valid);
try expect(hasDecl(union(enum) {
a: void,
}, "a") == .Invalid);
try expect(hasDecl(union(enum) {
a: void,
const b = 1;
}, "b") == .Valid);
}
}
/// A contract that requires that a given type is a struct,
/// enum, union or an opaque type that has a struct declaration by the given name.
pub fn hasStruct(comptime T: type, comptime name: []const u8) Contract {
return hasDecl(T, name).andThen(struct {
pub fn then() Contract {
return isType(@field(T, name), .Struct);
}
})
.named(std.fmt.comptimePrint("hasStruct({}, {s})", .{ T, name }));
}
test "hasStruct" {
comptime {
try expect(hasStruct(u8, "a") == .Invalid);
try expect(std.mem.eql(
u8,
hasStruct(u8, "a").Invalid.cause().Invalid.reason,
"u8 is not a struct, enum, union or an opaque type",
));
try expect(hasStruct(struct {}, "a") == .Invalid);
try expect(hasStruct(struct {
const a = struct {};
}, "a") == .Valid);
try expect(hasStruct(enum {
a,
}, "a") == .Invalid);
try expect(hasStruct(enum {
a,
const b = struct {};
}, "b") == .Valid);
try expect(hasStruct(union(enum) {
a: void,
}, "a") == .Invalid);
try expect(hasStruct(union(enum) {
a: void,
const b = struct {};
}, "b") == .Valid);
}
}
/// A contract that requires that a given type is a struct,
/// enum, union or an opaque type that has a function declaration by the given name.
pub fn hasFn(comptime T: type, comptime name: []const u8) Contract {
return hasDecl(T, name).andThen(struct {
pub fn then() Contract {
return isType(@TypeOf(@field(T, name)), .Fn);
}
})
.named(std.fmt.comptimePrint("hasFn({}, {s})", .{ T, name }));
}
test "hasFn" {
comptime {
try expect(hasFn(u8, "a") == .Invalid);
try expect(std.mem.eql(
u8,
hasFn(u8, "a").Invalid.cause().Invalid.reason,
"u8 is not a struct, enum, union or an opaque type",
));
try expect(hasFn(struct {}, "a") == .Invalid);
try expect(hasFn(struct {
fn a() void {}
}, "a") == .Valid);
try expect(hasFn(enum {
a,
}, "a") == .Invalid);
try expect(hasFn(enum {
a,
fn b() void {}
}, "b") == .Valid);
try expect(hasFn(union(enum) {
a: void,
}, "a") == .Invalid);
try expect(hasFn(union(enum) {
a: void,
fn b() void {}
}, "b") == .Valid);
}
}
fn isEquivalent_(comptime A: type, comptime B: type) Contract {
return hasStruct(A, "contracts")
.andThen(struct {
pub fn then() Contract {
comptime {
return hasFn(@field(A, "contracts"), "isEquivalent");
}
}
})
.andThen(struct {
pub fn then() Contract {
return is(@TypeOf(A.contracts.isEquivalent), fn (type) bool);
}
})
.andThen(struct {
pub fn then() Contract {
return Contract.init(A.contracts.isEquivalent(B), Invalid{
.identifier = std.fmt.comptimePrint("{s}.contracts.isEquivalent({s})", .{ A, B }),
});
}
});
}
/// A contract that requires that the given types A and B are structs, enums, unions or
/// opaque types, that either A, or B, or both define a `contracts` struct with an
/// `isEquivalent(type) bool` function, and that this function of either A or B returns true.
///
/// This is used to establish equivalency of otherwise unequal types:
///
/// ```
/// const B = struct {};
///
/// const A = struct {
///     pub const contracts = struct {
///         pub fn isEquivalent(comptime T: type) bool {
///             return T == B;
///         }
///     };
/// };
///
/// assert(isEquivalent(A, B) == .Valid);
/// ```
pub fn isEquivalent(comptime A: type, comptime B: type) Contract {
return isEquivalent_(A, B).orElse(isEquivalent_(B, A));
}
test "isEquivalent" {
comptime {
const Tequiv = struct {
pub const contracts = struct {
pub fn isEquivalent(comptime _: type) bool {
return true;
}
};
};
const Tnonequiv = struct {
pub const contracts = struct {
pub fn isEquivalent(comptime _: type) bool {
return false;
}
};
};
_ = Tequiv;
_ = Tnonequiv;
try expect(isEquivalent(Tequiv, Tnonequiv) == .Valid);
try expect(isEquivalent(Tnonequiv, Tequiv) == .Valid);
try expect(isEquivalent(Tnonequiv, Tnonequiv) == .Invalid);
}
}
/// Requires a contract to be valid, throws a compile-time error otherwise
pub fn require(comptime contract: Contract) void {
if (contract == .Invalid) {
const err = if (std.mem.eql(u8, contract.identifier(), contract.Invalid.cause().identifier()) and
std.mem.eql(u8, contract.Invalid.reason, contract.Invalid.cause().Invalid.reason))
std.fmt.comptimePrint(
"requirement failure in {s} (reason: {s})",
.{
contract.identifier(),
contract.Invalid.reason,
},
)
else
std.fmt.comptimePrint(
"requirement failure in {s} (reason: {s}), cause: {s} (reason: {s})",
.{
contract.identifier(),
contract.Invalid.reason,
contract.Invalid.cause().identifier(),
contract.Invalid.cause().Invalid.reason,
},
);
@compileError(err);
}
}
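// Usage sketch (illustrative only; `takesByte` is a hypothetical function):
// `require` can be invoked at comptime inside a generic function body to enforce a contract:
//
//     fn takesByte(x: anytype) void {
//         comptime require(is(@TypeOf(x), u8));
//         _ = x;
//     }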
/// Requires a contract to be valid, throws a compile-time error otherwise
///
/// Used in function signatures:
///
/// ```
/// fn signature_contract(t: anytype) contracts.RequiresAndReturns(
/// contracts.is(@TypeOf(t), u8),
/// void,
/// ) {}
/// ```
pub fn RequiresAndReturns(contract: Contract, comptime T: type) type {
require(contract);
return T;
} | src/lib.zig |
const std = @import("std");
const math = std.math;
const zp = @import("../../zplay.zig");
const alg = zp.deps.alg;
const Vec3 = alg.Vec3;
const Mat4 = alg.Mat4;
const Self = @This();
const MoveDirection = enum {
forward,
backward,
left,
right,
up,
down,
};
/// up vector of the world
world_up: Vec3 = undefined,
/// position of camera
position: Vec3 = undefined,
/// direction of camera
dir: Vec3 = undefined,
/// up of camera
up: Vec3 = undefined,
/// right of camera
right: Vec3 = undefined,
/// euler angle of camera
euler: Vec3 = undefined,
temp_angle: f32 = undefined,
/// i/o state
move_speed: f32 = 2.5,
mouse_sensitivity: f32 = 0.25,
zoom: f32 = 45.0,
/// create a 3d camera using position and target
pub fn fromPositionAndTarget(pos: Vec3, target: Vec3, world_up: ?Vec3) Self {
var camera: Self = .{};
camera.world_up = world_up orelse Vec3.up();
camera.position = pos;
camera.dir = target.sub(pos).norm();
camera.right = camera.dir.cross(camera.world_up).norm();
camera.up = camera.right.cross(camera.dir).norm();
// calculate euler angles
var crossdir = Vec3.cross(camera.world_up, camera.up);
if (Vec3.dot(crossdir, camera.right) < 0) {
camera.euler.x = -Vec3.getAngle(camera.world_up, camera.up);
} else {
camera.euler.x = Vec3.getAngle(camera.world_up, camera.up);
}
crossdir = Vec3.cross(camera.right, Vec3.right());
if (Vec3.dot(crossdir, camera.world_up) < 0) {
camera.euler.y = -Vec3.getAngle(camera.right, Vec3.right()) - 90;
} else {
camera.euler.y = Vec3.getAngle(camera.right, Vec3.right()) - 90;
}
camera.euler.z = 0;
return camera;
}
/// create a 3d camera using position and euler angle (in degrees)
pub fn fromPositionAndEulerAngles(pos: Vec3, pitch: f32, yaw: f32, world_up: ?Vec3) Self {
var camera: Self = .{};
camera.world_up = world_up orelse Vec3.up();
camera.position = pos;
camera.euler = Vec3.new(pitch, yaw - 90, 0);
camera.updateVectors();
return camera;
}
/// get view matrix
pub fn getViewMatrix(self: Self) Mat4 {
return Mat4.lookAt(self.position, self.position.add(self.dir), self.world_up);
}
/// move camera
pub fn move(self: *Self, direction: MoveDirection, distance: f32) void {
var movement = switch (direction) {
.forward => self.dir.scale(distance),
.backward => self.dir.scale(-distance),
.left => self.right.scale(-distance),
.right => self.right.scale(distance),
.up => self.up.scale(distance),
.down => self.up.scale(-distance),
};
self.position = self.position.add(movement);
}
/// rotate camera (in degrees)
pub fn rotate(self: *Self, pitch: f32, yaw: f32) void {
self.euler.x += pitch;
self.euler.y += yaw;
self.updateVectors();
}
/// update vectors: direction/right/up
fn updateVectors(self: *Self) void {
self.euler.x = math.clamp(self.euler.x, -89, 89);
const sin_pitch = math.sin(alg.toRadians(self.euler.x));
const cos_pitch = math.cos(alg.toRadians(self.euler.x));
const sin_yaw = math.sin(alg.toRadians(self.euler.y));
const cos_yaw = math.cos(alg.toRadians(self.euler.y));
self.dir.x = cos_yaw * cos_pitch;
self.dir.y = sin_pitch;
self.dir.z = sin_yaw * cos_pitch;
self.dir = self.dir.norm();
self.right = self.dir.cross(self.world_up).norm();
self.up = self.right.cross(self.dir).norm();
}
/// get position of ray test target
/// NOTE: assuming mouse's coordinate is relative to top-left corner of viewport
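/// The returned point lies on the camera's far plane; a picking ray can be formed
/// from `position` through this point.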
pub fn getRayTestTarget(
self: Self,
viewport_w: u32,
viewport_h: u32,
mouse_x: u32,
mouse_y: u32,
) Vec3 {
const far_plane: f32 = 10000.0;
const tanfov = math.tan(0.5 * alg.toRadians(self.zoom));
const width = @intToFloat(f32, viewport_w);
const height = @intToFloat(f32, viewport_h);
const aspect = width / height;
const ray_forward = self.dir.scale(far_plane);
const hor = self.right.scale(2.0 * far_plane * tanfov * aspect);
const vertical = self.up.scale(2.0 * far_plane * tanfov);
const ray_to_center = self.position.add(ray_forward);
const dhor = hor.scale(1.0 / width);
const dvert = vertical.scale(1.0 / height);
var ray_to = ray_to_center.sub(hor.scale(0.5)).sub(vertical.scale(0.5));
ray_to = ray_to.add(dhor.scale(@intToFloat(f32, mouse_x)));
ray_to = ray_to.add(dvert.scale(@intToFloat(f32, viewport_h - mouse_y)));
return ray_to;
} | src/graphics/3d/Camera.zig |
const std = @import("std");
pub const MethodDescriptor = struct {
const Self = @This();
parameters: []const Descriptor,
return_type: *const Descriptor,
pub fn stringify(self: Self, writer: anytype) anyerror!void {
try writer.writeByte('(');
for (self.parameters) |param| try param.stringify(writer);
try writer.writeByte(')');
try self.return_type.stringify(writer);
}
pub fn deinit(self: *Self, allocator: *std.mem.Allocator) void {
for (self.parameters) |*param| param.*.deinit(allocator);
allocator.free(self.parameters);
self.return_type.deinit(allocator);
}
pub fn toStringArrayList(self: Self, buf: *std.ArrayList(u8)) !void {
try buf.ensureCapacity(0);
try self.stringify(buf.writer());
}
};
pub const Descriptor = union(enum) {
const Self = @This();
byte,
char,
int,
long,
short,
float,
double,
boolean: void,
object: []const u8,
array: *const Descriptor,
method: MethodDescriptor,
/// Only valid for method `return_type`s
@"void": void,
pub fn stringify(self: Self, writer: anytype) anyerror!void {
switch (self) {
.byte => try writer.writeByte('B'),
.char => try writer.writeByte('C'),
.int => try writer.writeByte('I'),
.long => try writer.writeByte('J'),
.short => try writer.writeByte('S'),
.float => try writer.writeByte('F'),
.double => try writer.writeByte('D'),
.boolean => try writer.writeByte('Z'),
.void => try writer.writeByte('V'),
.object => |o| try writer.print("L{s};", .{o}),
.array => |a| {
try writer.writeByte('[');
try a.stringify(writer);
},
.method => |m| try m.stringify(writer),
}
}
pub fn toStringArrayList(self: Self, buf: *std.ArrayList(u8)) !void {
try buf.ensureCapacity(0);
try self.stringify(buf.writer());
}
pub fn humanStringify(self: Self, writer: anytype) anyerror!void {
try switch (self) {
.byte => _ = try writer.writeAll("byte"),
.char => _ = try writer.writeAll("char"),
.int => _ = try writer.writeAll("int"),
.long => _ = try writer.writeAll("long"),
.short => _ = try writer.writeAll("short"),
.float => _ = try writer.writeAll("float"),
.double => _ = try writer.writeAll("double"),
.boolean => _ = try writer.writeAll("boolean"),
.void => _ = try writer.writeAll("void"),
.object => |o| {
var i: usize = 0;
var tc = std.mem.count(u8, o, "/");
var t = std.mem.tokenize(o, "/");
while (t.next()) |z| : (i += 1) {
_ = try writer.writeAll(z);
if (i != tc) try writer.writeByte('.');
}
},
.array => |a| {
try a.humanStringify(writer);
_ = try writer.writeAll("[]");
},
.method => error.NotImplemented,
};
}
pub fn toHumanStringArrayList(self: Self, buf: *std.ArrayList(u8)) !void {
try buf.ensureCapacity(0);
try self.humanStringify(buf.writer());
}
pub fn deinit(self: *Self, allocator: *std.mem.Allocator) void {
switch (self.*) {
.object => |*o| allocator.free(o.*),
.array => |*a| a.*.deinit(allocator),
.method => |*m| m.*.deinit(allocator),
else => {},
}
allocator.destroy(self);
}
};
fn c(allocator: *std.mem.Allocator, d: Descriptor) !*Descriptor {
var x = try allocator.create(Descriptor);
x.* = d;
return x;
}
fn parse_(allocator: *std.mem.Allocator, reader: anytype) anyerror!?*Descriptor {
var kind = try reader.readByte();
switch (kind) {
'B' => return try c(allocator, .byte),
'C' => return try c(allocator, .char),
'I' => return try c(allocator, .int),
'J' => return try c(allocator, .long),
'S' => return try c(allocator, .short),
'F' => return try c(allocator, .float),
'D' => return try c(allocator, .double),
'Z' => return try c(allocator, .boolean),
'V' => return try c(allocator, .void),
'L' => {
var x = try allocator.create(Descriptor);
x.* = .{ .object = try reader.readUntilDelimiterAlloc(allocator, ';', 256) };
return x;
},
'[' => {
var x = try allocator.create(Descriptor);
x.* = .{ .array = try parse(allocator, reader) };
return x;
},
'(' => {
var params = std.ArrayList(*Descriptor).init(allocator);
defer params.deinit();
while (try parse_(allocator, reader)) |k| {
try params.append(k);
}
var returnd = (try parse_(allocator, reader)).?;
var x = try allocator.create(Descriptor);
x.* = .{ .method = .{ .parameters = params.toOwnedSlice(), .return_type = returnd } };
return x;
},
')' => return null,
else => unreachable,
}
}
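/// Parses a single descriptor from `reader`, e.g. `I`, `Ljava/lang/Object;`, `[[D`,
/// or a full method descriptor such as `(IDLjava/lang/Thread;)Ljava/lang/Object;`.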
pub fn parse(allocator: *std.mem.Allocator, reader: anytype) anyerror!*Descriptor {
return (try parse_(allocator, reader)).?;
}
pub fn parseString(allocator: *std.mem.Allocator, string: []const u8) !*Descriptor {
var fbs = std.io.fixedBufferStream(string);
return parse(allocator, fbs.reader());
}
test "Descriptors: Write/parse 3D array of objects" {
var test_string = "[[[Ljava/lang/Object;";
var object = Descriptor{ .object = "java/lang/Object" };
var array1 = Descriptor{ .array = &object };
var array2 = Descriptor{ .array = &array1 };
var desc = Descriptor{ .array = &array2 };
var out_buf = std.ArrayList(u8).init(std.testing.allocator);
defer out_buf.deinit();
try desc.stringify(out_buf.writer());
try std.testing.expectEqualStrings(test_string, out_buf.items);
var fbs = std.io.fixedBufferStream(test_string);
var parsed_desc = try parse(std.testing.allocator, fbs.reader());
defer parsed_desc.deinit(std.testing.allocator);
out_buf.shrinkRetainingCapacity(0);
try parsed_desc.stringify(out_buf.writer());
try std.testing.expectEqualStrings(test_string, out_buf.items);
}
test "Descriptors: Write/parse method that returns an object and accepts an integer, double, and Thread" {
var test_string = "(IDLjava/lang/Thread;)Ljava/lang/Object;";
var int = Descriptor{ .int = {} };
var double = Descriptor{ .double = {} };
var thread = Descriptor{ .object = "java/lang/Thread" };
var object = Descriptor{ .object = "java/lang/Object" };
var desc = Descriptor{ .method = .{ .parameters = &.{ &int, &double, &thread }, .return_type = &object } };
var out_buf = std.ArrayList(u8).init(std.testing.allocator);
defer out_buf.deinit();
try desc.stringify(out_buf.writer());
try std.testing.expectEqualStrings(test_string, out_buf.items);
var fbs = std.io.fixedBufferStream(test_string);
var parsed_desc = try parse(std.testing.allocator, fbs.reader());
defer parsed_desc.deinit(std.testing.allocator);
out_buf.shrinkRetainingCapacity(0);
try parsed_desc.stringify(out_buf.writer());
try std.testing.expectEqualStrings(test_string, out_buf.items);
} | src/descriptors.zig |
const std = @import("std");
const gl = @import("zgl");
const glfw = @import("glfw");
const nanovg = @import("nanovg");
const v = @import("v.zig");
const World = @import("World.zig");
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
const allocator = gpa.allocator();
try glfw.init(.{});
defer glfw.terminate();
const win = try glfw.Window.create(1280, 1024, "Physics", null, null, .{});
defer win.destroy();
try glfw.makeContextCurrent(win);
const ctx = nanovg.Context.createGl3(.{});
defer ctx.deleteGl3();
gl.clearColor(0, 0, 0, 1);
var world = World{
.allocator = allocator,
.gravity = .{ 0, 1000 },
};
defer world.deinit();
try world.addStatic(.{ .verts = &[_]v.Vec2{
.{ 100, 500 },
.{ 400, 500 },
.{ 500, 700 },
.{ 200, 700 },
} });
try world.addStatic(.{ .verts = &[_]v.Vec2{
.{ 1000, 800 },
.{ 1000, 900 },
.{ 400, 900 },
.{ 400, 800 },
} });
try world.addObject(.{ 400, 100 }, .{}, .{ .verts = &[_]v.Vec2{
.{ 0, 0 },
.{ 100, 0 },
.{ 50, 90 },
} });
try world.addObject(.{ 500, 100 }, .{}, .{ .radius = 50, .verts = &[_]v.Vec2{
.{ 0, 90 },
.{ 50, 0 },
.{ 100, 90 },
} });
try world.addObject(.{ 730, 30 }, .{}, .{ .radius = 30, .verts = &[_]v.Vec2{.{ 0, 0 }} });
while (!win.shouldClose()) {
const size = try win.getSize();
const fbsize = try win.getFramebufferSize();
gl.viewport(0, 0, size.width, size.height);
gl.clear(.{ .color = true });
ctx.beginFrame(
@intToFloat(f32, size.width),
@intToFloat(f32, size.height),
@intToFloat(f32, fbsize.width) /
@intToFloat(f32, size.width),
);
const colors = [_]u32{
0x00ffffff,
0xff00ffff,
0x00ff00ff,
0xff0000ff,
};
var it = world.colliders();
var i: usize = 0;
while (it.next()) |c| : (i = (i + 1) % colors.len) {
drawCollider(ctx, c.pos, c.collider, switch (c.kind) {
.static => 0xeeff0066,
.active => colors[i],
});
}
try world.tick(1 / 60.0);
ctx.endFrame();
try win.swapBuffers();
try glfw.pollEvents();
}
}
fn drawCollider(ctx: *nanovg.Context, pos: v.Vec2, c: World.Collider, color: u32) void {
var clr = nanovg.Color.hex(color);
ctx.strokeColor(clr);
clr.a *= 0.5;
ctx.fillColor(clr);
ctx.beginPath();
for (c.verts) |raw_vert, i| {
const vprev = c.verts[if (i == 0) c.verts.len - 1 else i - 1];
const vnext = c.verts[if (i + 1 == c.verts.len) 0 else i + 1];
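        // Compute the start/end angles of the rounded-corner arc at this vertex
        // from the directions of the two adjacent edges.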
const out = v.conj(vprev - raw_vert);
const out_next = v.conj(raw_vert - vnext);
const a0 = std.math.atan2(
f32,
@floatCast(f32, out[1]),
@floatCast(f32, out[0]),
);
var a1 = std.math.atan2(
f32,
@floatCast(f32, out_next[1]),
@floatCast(f32, out_next[0]),
);
if (a1 == a0) a1 += std.math.tau;
const vert = raw_vert + pos;
ctx.arc(
@floatCast(f32, vert[0]),
@floatCast(f32, vert[1]),
@floatCast(f32, c.radius),
a0,
a1,
.cw,
);
}
ctx.closePath();
ctx.stroke();
ctx.fill();
ctx.beginPath();
for (c.verts) |raw_vert| {
const vert = raw_vert + pos;
ctx.circle(
@floatCast(f32, vert[0]),
@floatCast(f32, vert[1]),
3,
);
}
ctx.fill();
}
fn drawPoint(ctx: *nanovg.Context, p: v.Vec2, color: u32) void {
ctx.beginPath();
ctx.circle(
@floatCast(f32, p[0]),
@floatCast(f32, p[1]),
5,
);
ctx.fillColor(nanovg.Color.hex(color));
ctx.fill();
}
fn drawVector(ctx: *nanovg.Context, start: v.Vec2, dir: v.Vec2, color: u32) void {
const end = start + dir;
const d = (end - start) * v.v(0.1);
const arrow0 = end + v.rotate(.{ -1, 0.5 }, d);
const arrow1 = end + v.rotate(.{ -1, -0.5 }, d);
ctx.beginPath();
ctx.moveTo(
@floatCast(f32, start[0]),
@floatCast(f32, start[1]),
);
ctx.lineTo(
@floatCast(f32, end[0]),
@floatCast(f32, end[1]),
);
ctx.lineTo(
@floatCast(f32, arrow0[0]),
@floatCast(f32, arrow0[1]),
);
ctx.moveTo(
@floatCast(f32, end[0]),
@floatCast(f32, end[1]),
);
ctx.lineTo(
@floatCast(f32, arrow1[0]),
@floatCast(f32, arrow1[1]),
);
ctx.strokeColor(nanovg.Color.hex(color));
ctx.stroke();
} | main.zig |
const std = @import("std");
const ascii = std.ascii;
const mem = std.mem;
const fs = std.fs;
const io = std.io;
pub fn main() !void {
const allocator = std.heap.page_allocator;
const out = try fs.cwd().createFile("status_codes.zig", .{});
defer out.close();
const stdout = out.writer();
const file = try fs.cwd().openFile("status_codes.csv", .{});
defer file.close();
const content = try file.readToEndAlloc(allocator, 4 * 1024);
defer allocator.free(content);
try stdout.writeAll("// zig fmt: off\n");
try stdout.writeAll("pub const StatusCode = enum(u10) {\n");
try stdout.writeAll(" // https://www.iana.org/assignments/http-status-codes/http-status-codes.txt (2018-09-21)\n");
var line_it = mem.split(mem.trim(u8, content, &ascii.spaces), "\r\n");
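    // Skip the CSV header row; each remaining row is: status code, description, RFC reference.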
_ = line_it.next();
while (line_it.next()) |line| {
var value_it = mem.split(line, ",");
const code = value_it.next().?;
const name = value_it.next().?;
const rfc = value_it.rest();
if (code[1] == '0' and code[2] == '0') try stdout.writeAll("\n");
try stdout.writeAll(" ");
if (mem.eql(u8, name, "Unassigned") or mem.eql(u8, name, "(Unused)")) {
try stdout.print("// {s} {s}", .{ code, name });
} else {
var len = name.len;
switch (code[0]) {
'1' => {
try stdout.writeAll("info_");
len += 5;
},
'2' => {
try stdout.writeAll("success_");
len += 8;
},
'3' => {
try stdout.writeAll("redirect_");
len += 9;
},
'4' => {
try stdout.writeAll("client_");
len += 7;
},
'5' => {
try stdout.writeAll("server_");
len += 7;
},
else => unreachable,
}
for (name) |c| {
if (ascii.isPunct(c) or ascii.isSpace(c)) {
try stdout.writeAll("_");
} else {
try stdout.writeByte(ascii.toLower(c));
}
}
try stdout.print(" = {s},", .{code});
try stdout.writeByteNTimes(' ', 40 - len);
try stdout.print("// {s}", .{mem.trim(u8, rfc, "[\"]")});
}
try stdout.writeAll("\n");
}
try stdout.writeAll(
\\
\\ _,
\\
\\ pub fn code(self: StatusCode) std.meta.Tag(StatusCode) {
\\ return @enumToInt(self);
\\ }
\\
\\ pub fn isValid(self: StatusCode) bool {
\\ return @enumToInt(self) >= 100 and @enumToInt(self) < 600;
\\ }
\\
\\ pub const Group = enum { info, success, redirect, client_error, server_error, invalid };
\\ pub fn group(self: StatusCode) Group {
\\ return switch (self.code()) {
\\ 100...199 => .info,
\\ 200...299 => .success,
\\ 300...399 => .redirect,
\\ 400...499 => .client_error,
\\ 500...599 => .server_error,
\\ else => .invalid,
\\ };
\\ }
\\
);
try stdout.writeAll("};\n");
try stdout.writeAll("// zig fmt: on");
} | .gyro/hzzp-truemedian-github.com-91ab8e74/pkg/scripts/generate_status_codes.zig |
const std = @import("std");
/// Globally unique entity identifier, usable as a UUIDv4
pub const EntityId = u128;
pub const EidHash = struct {
// We can use this incredibly fast hash function because our bits are already randomly distributed
pub fn hash(_: EidHash, eid: EntityId) u64 {
return @truncate(u64, eid) ^ @truncate(u64, eid >> 64);
}
pub fn eql(_: EidHash, a: EntityId, b: EntityId) bool {
return a == b;
}
};
pub const SceneOptions = struct {
RandomSource: type = std.rand.Xoroshiro128,
};
pub fn Scene(comptime EntityType: type, comptime opts: SceneOptions) type {
return struct {
allocator: *std.mem.Allocator,
rng: opts.RandomSource,
id_map: IdMap = .{},
entities: DataStore(EntityI) = .{},
components: ComponentStores = .{},
pub const Entity = EntityType;
pub const Component = std.meta.FieldEnum(Entity);
const IdMap = std.HashMapUnmanaged(EntityId, Addr, EidHash, 80);
const component_count = std.meta.fields(Entity).len;
const EntityI = struct {
eid: EntityId,
indices: [component_count]Addr,
};
const ComponentStores = blk: {
var fields = std.meta.fields(Entity)[0..component_count].*;
for (fields) |*field, i| {
field.name = std.fmt.comptimePrint("{d}", .{i});
field.field_type = DataStore(EntityComponent(field.field_type));
const default_value: ?field.field_type = field.field_type{};
field.default_value = default_value;
}
break :blk @Type(.{ .Struct = .{
.is_tuple = true,
.layout = .Auto,
.decls = &.{},
.fields = &fields,
} });
};
fn EntityComponent(comptime T: type) type {
return struct {
entity: Addr,
component: T,
};
}
pub const OptionalEntity = blk: {
var fields = [_]std.builtin.TypeInfo.StructField{undefined} ** (component_count + 1);
const default_id: ?EntityId = null;
fields[0] = .{
.name = "id",
.field_type = ?EntityId,
.default_value = default_id,
.is_comptime = false,
.alignment = @alignOf(?EntityId),
};
for (std.meta.fields(Entity)) |field, i| {
fields[i + 1] = .{
.name = field.name,
.field_type = ?field.field_type,
.default_value = @as(??field.field_type, @as(?field.field_type, null)),
.is_comptime = false,
.alignment = @alignOf(?field.field_type),
};
}
break :blk @Type(.{ .Struct = .{
.is_tuple = false,
.layout = .Auto,
.decls = &.{},
.fields = &fields,
} });
};
pub fn PartialEntity(comptime components: []const Component) type {
var fields = [_]std.builtin.TypeInfo.StructField{undefined} ** (components.len + 1);
fields[0] = .{
.name = "id",
.field_type = EntityId,
.default_value = null,
.is_comptime = false,
.alignment = @alignOf(EntityId),
};
for (components) |comp, i| {
var field = std.meta.fieldInfo(Entity, comp);
fields[i + 1] = .{
.name = field.name,
.field_type = *field.field_type,
.default_value = null,
.is_comptime = false,
.alignment = @alignOf(*field.field_type),
};
}
return @Type(.{ .Struct = .{
.is_tuple = false,
.layout = .Auto,
.decls = &.{},
.fields = &fields,
} });
}
const Self = @This();
pub fn init(allocator: *std.mem.Allocator) Self {
return .{
.allocator = allocator,
.rng = opts.RandomSource.init(std.crypto.random.int(u64)),
};
}
pub fn deinit(self: *Self) void {
self.id_map.deinit(self.allocator);
self.entities.deinit(self.allocator);
comptime var i = 0;
inline while (i < self.components.len) : (i += 1) {
self.components[i].deinit(self.allocator);
}
}
// FIXME: cleanup properly on error
pub fn add(self: *Self, entity: OptionalEntity) !EntityId {
const addr = try self.entities.add(self.allocator, .{
.eid = entity.id orelse self.genEid(),
.indices = [_]Addr{invalid_addr} ** component_count,
});
var ei = self.entities.get(addr).?;
inline for (comptime std.meta.fieldNames(OptionalEntity)[1..]) |name, i| {
if (@field(entity, name)) |value| {
ei.indices[i] = try self.components[i].add(self.allocator, .{
.entity = addr,
.component = value,
});
}
}
const eid = ei.eid; // HACK: Workaround for stage1 bug
try self.id_map.put(self.allocator, eid, addr);
return eid;
}
pub fn del(self: *Self, eid: EntityId) void {
const addr = self.id_map.fetchRemove(eid).?.value;
const ei = self.entities.get(addr).?;
inline for (comptime std.meta.fieldNames(OptionalEntity)[1..]) |_, i| {
if (ei.indices[i] != invalid_addr) {
self.components[i].del(ei.indices[i]);
}
}
self.entities.del(addr);
}
fn genEid(self: *Self) EntityId {
var id = self.rng.random.int(EntityId);
// Set UUID variant and version
id &= ~(@as(EntityId, 0xc0_00_f0) << (6 * 8));
id |= @as(EntityId, 0x80_00_40) << (6 * 8);
return id;
}
/// If the entity exists, it must have all of the specified components
pub fn get(self: Self, comptime components: []const Component, eid: EntityId) ?PartialEntity(components) {
const addr = self.id_map.get(eid) orelse return null;
// Retrieve all components
var entity: PartialEntity(components) = undefined;
const indices = self.entities.get(addr).?.indices;
inline for (components) |comp| {
const field = std.meta.fieldInfo(Entity, comp);
@field(entity, field.name) = self.getComponent(comp, indices).?;
}
entity.id = eid;
return entity;
}
pub fn getOpt(self: Self, comptime components: []const Component, eid: EntityId) ?PartialEntity(components) {
const addr = self.id_map.get(eid) orelse return null;
// Retrieve all components
var entity: PartialEntity(components) = undefined;
const indices = self.entities.get(addr).?.indices;
inline for (components) |comp| {
const field = std.meta.fieldInfo(Entity, comp);
@field(entity, field.name) = self.getComponent(comp, indices) orelse return null;
}
entity.id = eid;
return entity;
}
/// If the entity exists, it must have the specified component
pub fn getOne(self: Self, comptime comp: Component, eid: EntityId) ?*std.meta.fieldInfo(Entity, comp).field_type {
const addr = self.id_map.get(eid) orelse return null;
const indices = self.entities.get(addr).?.indices;
return self.getComponent(comp, indices).?;
}
pub fn getOneOpt(self: Self, comptime comp: Component, eid: EntityId) ?*std.meta.fieldInfo(Entity, comp).field_type {
const addr = self.id_map.get(eid) orelse return null;
const indices = self.entities.get(addr).?.indices;
return self.getComponent(comp, indices);
}
pub fn componentByType(comptime T: type) Component {
var comp_opt: ?Component = null;
inline for (std.meta.fields(Entity)) |field| {
if (field.field_type == T) {
if (comp_opt == null) {
comp_opt = @field(Component, field.name);
} else {
@compileError("More than one component with type " ++ @typeName(T));
}
}
}
if (comp_opt) |comp| {
return comp;
} else {
@compileError("No component with type " ++ @typeName(T));
}
}
fn getComponent(self: Self, comptime comp: Component, indices: [component_count]Addr) ?*std.meta.fieldInfo(Entity, comp).field_type {
if (self.components[@enumToInt(comp)].get(indices[@enumToInt(comp)])) |res| {
return &res.component;
} else {
return null;
}
}
/// Returns the number of entities with the specified component
pub fn count(self: Self, comptime comp: Component) u32 {
return self.components[@enumToInt(comp)].count;
}
/// The component specified first will be the one iterated through. If you know
/// which component is likely to have the least entries, specify that one first.
pub fn iter(self: *const Self, comptime components: []const Component) //
if (components.len == 0) Iterator else ComponentIterator(components) {
if (components.len == 0) {
return .{ .it = self.entities.iter() };
} else {
return .{
.scene = self,
.it = self.components[@enumToInt(components[0])].iter(),
};
}
}
pub const Iterator = struct {
it: DataStore(EntityI).Iterator,
pub fn next(self: *Iterator) ?PartialEntity(&.{}) {
const ei = self.it.next() orelse return null;
return PartialEntity(&.{}){ .id = ei.eid };
}
};
pub fn ComponentIterator(comptime required_components: []const Component) type {
return struct {
scene: *const Self,
it: std.meta.fields(ComponentStores)[@enumToInt(required_components[0])].field_type.Iterator,
const Iter = @This();
pub fn next(self: *Iter) ?PartialEntity(required_components) {
var entity: PartialEntity(required_components) = undefined;
search: while (true) {
const res = self.it.next() orelse return null;
const ei = self.scene.entities.get(res.entity).?;
// Retrieve all components
inline for (required_components) |comp_id, i| {
const comp = if (i == 0)
&res.component
else
self.scene.getComponent(comp_id, ei.indices) orelse
// If we're missing a component, skip this entity and keep looking
continue :search;
const field = std.meta.fieldInfo(Entity, comp_id);
@field(entity, field.name) = comp;
}
// Set the entity ID and return
entity.id = ei.eid;
return entity;
}
}
};
}
};
}
// An index into a DataStore. Unique only within that DataStore, and may be reused after the corresponding entry is deleted
// The maximum address is invalid, and can be used to represent a missing entry
const Addr = u32;
const invalid_addr = std.math.maxInt(Addr);
// This data structure stores entries of a given type, addressed by Addr
// Insertion order is not maintained, and addresses may be reused
fn DataStore(comptime T: type) type {
return struct {
entries: std.ArrayListUnmanaged(Entry) = .{},
count: Addr = 0, // Number of allocated entries
free: Addr = invalid_addr, // Address of the first free entry, if any
const Self = @This();
const Entry = union(enum) {
alloced: T, // The value, if this entry is allocated
free: Addr, // The next free entry, if this entry is unallocated
};
pub fn deinit(self: *Self, allocator: *std.mem.Allocator) void {
self.entries.deinit(allocator);
}
// HACK: noinline is a workaround for a compiler bug - see comment on "6 components" test
pub noinline fn add(self: *Self, allocator: *std.mem.Allocator, value: T) std.mem.Allocator.Error!Addr {
if (self.free != invalid_addr) {
const addr = self.free;
self.free = self.entries.items[addr].free;
self.entries.items[addr] = .{ .alloced = value };
self.count += 1;
return addr;
}
const addr = self.entries.items.len;
if (addr >= invalid_addr) return error.OutOfMemory;
try self.entries.append(allocator, .{ .alloced = value });
self.count += 1;
return @intCast(Addr, addr);
}
pub fn get(self: Self, addr: Addr) ?*T {
if (addr >= self.entries.items.len) return null;
return switch (self.entries.items[addr]) {
.alloced => |*value| value,
.free => null,
};
}
/// addr must be valid
pub fn del(self: *Self, addr: Addr) void {
std.debug.assert(self.entries.items[addr] == .alloced);
self.entries.items[addr] = .{ .free = self.free };
self.free = addr;
self.count -= 1;
}
pub fn iter(self: *const Self) Iterator {
return .{ .store = self };
}
pub const Iterator = struct {
store: *const Self,
idx: Addr = 0,
pub fn next(self: *Iterator) ?*T {
while (self.idx < self.store.entries.items.len) {
self.idx += 1;
if (self.store.get(self.idx - 1)) |value| {
return value;
}
}
return null;
}
};
};
}
test "create scene" {
var scene = TestScene.init(std.testing.allocator);
defer scene.deinit();
}
test "add/get entities" {
var scene = TestScene.init(std.testing.allocator);
defer scene.deinit();
const e1 = try scene.add(.{});
const e2 = try scene.add(.{ .x = .{} });
const e3 = try scene.add(.{ .y = .{ .a = 7 } });
const e4 = try scene.add(.{ .z = 10 });
try std.testing.expect(scene.get(&.{}, e1) != null);
try std.testing.expect(scene.get(&.{.x}, e2) != null);
try std.testing.expectEqual(@as(i32, 7), scene.get(&.{.y}, e3).?.y.a);
try std.testing.expectEqual(@as(i32, 10), scene.get(&.{.z}, e4).?.z.*);
}
test "get multiple components" {
var scene = TestScene.init(std.testing.allocator);
defer scene.deinit();
const id = try scene.add(.{ .x = .{}, .y = .{ .a = 7 }, .z = 10 });
const ent = scene.get(&.{ .x, .y, .z }, id) orelse return error.UnexpectedResult;
try std.testing.expectEqual(@as(i32, 7), ent.y.a);
try std.testing.expectEqual(@as(i32, 10), ent.z.*);
}
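// Hedged example (illustrative sketch, not part of the original test suite):
// deleting an entity removes it from lookups and frees its component slots;
// count() reports how many entities currently have a given component.
test "delete entities" {
var scene = TestScene.init(std.testing.allocator);
defer scene.deinit();
const e1 = try scene.add(.{ .z = 1 });
const e2 = try scene.add(.{ .z = 2 });
try std.testing.expectEqual(@as(u32, 2), scene.count(.z));
scene.del(e1);
try std.testing.expectEqual(@as(u32, 1), scene.count(.z));
try std.testing.expect(scene.getOne(.z, e1) == null);
try std.testing.expectEqual(@as(i32, 2), scene.getOne(.z, e2).?.*);
}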
test "iterate entities" {
var scene = TestScene.init(std.testing.allocator);
defer scene.deinit();
_ = try scene.add(.{});
_ = try scene.add(.{ .x = .{} });
_ = try scene.add(.{ .y = .{ .a = 7 } });
_ = try scene.add(.{ .z = 10 });
_ = try scene.add(.{ .y = .{ .a = 3 } });
_ = try scene.add(.{ .z = 11, .x = .{} });
_ = try scene.add(.{ .z = 12, .y = .{ .a = 3 } });
_ = try scene.add(.{ .z = 13 });
var n: i32 = 10;
var it = scene.iter(&.{.z});
while (it.next()) |ent| {
try std.testing.expectEqual(n, ent.z.*);
n += 1;
}
}
test "iterate multiple components" {
var scene = TestScene.init(std.testing.allocator);
defer scene.deinit();
_ = try scene.add(.{});
_ = try scene.add(.{ .x = .{} });
_ = try scene.add(.{ .y = .{ .a = 7 } });
_ = try scene.add(.{ .z = 10 });
_ = try scene.add(.{ .y = .{ .a = 3 } });
_ = try scene.add(.{ .z = 11, .x = .{}, .y = .{ .a = 3 } });
_ = try scene.add(.{ .z = 12, .y = .{ .a = 3 } });
_ = try scene.add(.{ .z = 13 });
_ = try scene.add(.{ .z = 12, .x = .{}, .y = .{ .a = 4 } });
_ = try scene.add(.{ .z = 13 });
_ = try scene.add(.{ .z = 13, .x = .{}, .y = .{ .a = 5 } });
_ = try scene.add(.{ .z = 12, .y = .{ .a = 3 } });
_ = try scene.add(.{ .z = 14, .x = .{}, .y = .{ .a = 6 } });
var y: i32 = 3;
var z: i32 = 11;
var it = scene.iter(&.{ .z, .y, .x });
while (it.next()) |ent| {
try std.testing.expectEqual(y, ent.y.a);
try std.testing.expectEqual(z, ent.z.*);
y += 1;
z += 1;
}
}
const TestScene = Scene(struct {
x: struct {},
y: struct { a: i32 },
z: i32,
}, .{});
// This tests a bug where between 4 and 7 components (inclusive) causes a
// segfault after the first add, iff compiling in a release mode
test "6 components" {
const BigScene = Scene(struct {
a: void,
b: void,
c: void,
d: void,
e: void,
f: void,
}, .{});
var scene = BigScene.init(std.testing.allocator);
defer scene.deinit();
_ = try scene.add(.{});
_ = try scene.add(.{});
_ = try scene.add(.{});
_ = try scene.add(.{});
} | znt.zig |
const std = @import("std");
const c = @import("../c.zig");
const zupnp = @import("../lib.zig");
/// Make an HTTP request and get a response.
pub fn request(method: zupnp.web.Method, url: [:0]const u8, client_request: zupnp.web.ClientRequest) !zupnp.web.ClientResponse {
const logger = std.log.scoped(.@"zupnp.web.request");
logger.debug("Establishing a {s} request to {s}", .{method, url});
const timeout = client_request.timeout orelse -1;
var handle: ?*anyopaque = undefined;
if (c.is_error(c.UpnpOpenHttpConnection(url, &handle, timeout))) |err| {
logger.err("Failed opening HTTP connection: {s}", .{err});
return zupnp.Error;
}
errdefer logger.debug("Close err {d}", .{c.UpnpCloseHttpConnection(handle)});
var headers_buf = if (client_request.headers) |*headers| try headers.toString(url) else null;
defer { if (headers_buf) |*hb| hb.deinit(); }
const headers = if (headers_buf) |*hb| blk: {
var upnp_headers = c.UpnpString_new();
_ = c.UpnpString_set_StringN(upnp_headers, hb.items.ptr, hb.items.len);
break :blk upnp_headers;
} else null;
if (c.is_error(c.UpnpMakeHttpRequest(
method.toUpnpMethod(),
url,
handle,
headers,
client_request.content_type orelse null,
@intCast(c_int, client_request.contents.len),
timeout,
))) |err| {
logger.err("Failed making request to HTTP endpoint: {s}", .{err});
return zupnp.Error;
}
if (client_request.contents.len > 0) {
const contents = c.mutate([*c]u8, client_request.contents.ptr);
var len = client_request.contents.len;
if (c.is_error(c.UpnpWriteHttpRequest(handle, contents, &len, timeout))) |err| {
logger.err("Failed writing HTTP contents to endpoint: {s}", .{err});
return zupnp.Error;
}
}
if (c.is_error(c.UpnpEndHttpRequest(handle, timeout))) |err| {
logger.err("Failed finalizing HTTP contents to endpoint: {s}", .{err});
return zupnp.Error;
}
var http_status: c_int = undefined;
var content_type: [*c]u8 = undefined;
var content_length: c_int = undefined;
// TODO be aware that content type is only valid for the duration of the connection
if (c.is_error(c.UpnpGetHttpResponse(handle, null, &content_type, &content_length, &http_status, timeout))) |err| {
logger.err("Failed getting HTTP response: {s}", .{err});
return zupnp.Error;
}
var content_type_slice = if (content_type != null) std.mem.sliceTo(content_type, 0) else null;
return zupnp.web.ClientResponse {
.http_status = http_status,
.content_type = content_type_slice,
.content_length = if (content_length < 0) null else @intCast(u32, content_length),
.timeout = timeout,
.handle = handle,
};
} | src/web/client.zig |
const Tree = @import("Tree.zig");
const TokenIndex = Tree.TokenIndex;
const NodeIndex = Tree.NodeIndex;
const Parser = @import("Parser.zig");
const Compilation = @import("Compilation.zig");
const Type = @This();
pub const Qualifiers = packed struct {
@"const": bool = false,
atomic: bool = false,
@"volatile": bool = false,
restrict: bool = false,
pub fn any(quals: Qualifiers) bool {
return quals.@"const" or quals.restrict or quals.@"volatile" or quals.atomic;
}
pub fn dump(quals: Qualifiers, w: anytype) !void {
if (quals.@"const") try w.writeAll("const ");
if (quals.atomic) try w.writeAll("_Atomic ");
if (quals.@"volatile") try w.writeAll("volatile ");
if (quals.restrict) try w.writeAll("restrict ");
}
};
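// Hedged spot check (illustrative sketch, not part of the original file):
// dump() writes qualifiers in const/_Atomic/volatile/restrict order, each
// followed by a trailing space.
test "Qualifiers.dump" {
const std = @import("std");
var buf: [32]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);
const quals = Qualifiers{ .@"const" = true, .@"volatile" = true };
try quals.dump(fbs.writer());
std.debug.assert(std.mem.eql(u8, "const volatile ", fbs.getWritten()));
}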
pub const Func = struct {
return_type: Type,
param_types: []NodeIndex,
};
pub const Array = struct {
len: u64,
elem: Type,
};
pub const VLA = struct {
expr: NodeIndex,
elem: Type,
};
pub const Specifier = enum {
void,
bool,
// integers
char,
schar,
uchar,
short,
ushort,
int,
uint,
long,
ulong,
long_long,
ulong_long,
// floating point numbers
float,
double,
long_double,
complex_float,
complex_double,
complex_long_double,
// data.sub_type
pointer,
atomic,
unspecified_variable_len_array,
// data.func
/// int foo(int bar, char baz) and int (void)
func,
/// int foo(int bar, char baz, ...)
var_args_func,
/// int foo(bar, baz) and int foo()
/// is also var args, but we can give warnings about an incorrect number of parameters
old_style_func,
// data.array
array,
static_array,
incomplete_array,
// data.vla
variable_len_array,
// data.node
@"struct",
@"union",
@"enum",
};
data: union {
sub_type: *Type,
func: *Func,
array: *Array,
vla: *VLA,
node: NodeIndex,
none: void,
} = .{ .none = {} },
alignment: u32 = 0,
specifier: Specifier,
qual: Qualifiers = .{},
pub fn isCallable(ty: Type) ?Type {
return switch (ty.specifier) {
.func, .var_args_func, .old_style_func => ty,
.pointer => ty.data.sub_type.isCallable(),
else => null,
};
}
pub fn isFunc(ty: Type) bool {
return switch (ty.specifier) {
.func, .var_args_func, .old_style_func => true,
else => false,
};
}
pub fn isArray(ty: Type) bool {
return switch (ty.specifier) {
.array, .static_array, .incomplete_array, .variable_len_array, .unspecified_variable_len_array => true,
else => false,
};
}
pub fn isUnsignedInt(ty: Type, comp: *Compilation) bool {
_ = comp;
return switch (ty.specifier) {
.char => return false, // TODO check comp for char signedness
.uchar, .ushort, .uint, .ulong, .ulong_long => return true,
else => false,
};
}
pub fn wideChar(p: *Parser) Type {
_ = p;
// TODO get target from compilation
return .{ .specifier = .int };
}
pub fn hasIncompleteSize(ty: Type) bool {
return switch (ty.specifier) {
.void, .incomplete_array => true,
else => false,
};
}
/// Size of type as reported by sizeof
pub fn sizeof(ty: Type, comp: *Compilation) u64 {
// TODO get target from compilation
return switch (ty.specifier) {
.variable_len_array, .unspecified_variable_len_array, .incomplete_array => unreachable, // TODO special case
.func, .var_args_func, .old_style_func, .void, .bool => 1,
.char, .schar, .uchar => 1,
.short, .ushort => 2,
.int, .uint => 4,
.long, .ulong => switch (comp.target.os.tag) {
.linux,
.macos,
.freebsd,
.netbsd,
.dragonfly,
.openbsd,
.wasi,
.emscripten,
=> comp.target.cpu.arch.ptrBitWidth() >> 3,
.windows, .uefi => 4,
else => 4,
},
.long_long, .ulong_long => 8,
.float => 4,
.double => 8,
.long_double => 16,
.complex_float => 8,
.complex_double => 16,
.complex_long_double => 32,
.pointer, .static_array => comp.target.cpu.arch.ptrBitWidth() >> 3,
.atomic => return ty.data.sub_type.sizeof(comp),
.array => return ty.data.array.elem.sizeof(comp) * ty.data.array.len,
.@"struct" => @panic("TODO"),
.@"union" => @panic("TODO"),
.@"enum" => @panic("TODO"),
};
}
pub fn combine(inner: *Type, outer: Type, p: *Parser, source_tok: TokenIndex) Parser.Error!void {
switch (inner.specifier) {
.pointer => return inner.data.sub_type.combine(outer, p, source_tok),
.variable_len_array, .unspecified_variable_len_array => return p.todo("combine array"),
.array, .static_array, .incomplete_array => {
try inner.data.array.elem.combine(outer, p, source_tok);
if (inner.data.array.elem.hasIncompleteSize()) return p.errTok(.array_incomplete_elem, source_tok);
if (inner.data.array.elem.isFunc()) return p.errTok(.array_func_elem, source_tok);
if (inner.data.array.elem.specifier == .static_array and inner.isArray()) return p.errTok(.static_non_outernmost_array, source_tok);
if (inner.data.array.elem.qual.any() and inner.isArray()) return p.errTok(.qualifier_non_outernmost_array, source_tok);
},
.func, .var_args_func, .old_style_func => {
try inner.data.func.return_type.combine(outer, p, source_tok);
if (inner.data.func.return_type.isArray()) return p.errTok(.func_cannot_return_array, source_tok);
if (inner.data.func.return_type.isFunc()) return p.errTok(.func_cannot_return_func, source_tok);
},
else => inner.* = outer,
}
}
/// An unfinished Type
pub const Builder = struct {
typedef: ?struct {
tok: TokenIndex,
spec: []const u8,
} = null,
kind: Kind = .none,
pub const Kind = union(enum) {
none,
void,
bool,
char,
schar,
uchar,
unsigned,
signed,
short,
sshort,
ushort,
short_int,
sshort_int,
ushort_int,
int,
sint,
uint,
long,
slong,
ulong,
long_int,
slong_int,
ulong_int,
long_long,
slong_long,
ulong_long,
long_long_int,
slong_long_int,
ulong_long_int,
float,
double,
long_double,
complex,
complex_long,
complex_float,
complex_double,
complex_long_double,
pointer: *Type,
atomic: *Type,
unspecified_variable_len_array: *Type,
func: *Func,
var_args_func: *Func,
old_style_func: *Func,
array: *Array,
static_array: *Array,
incomplete_array: *Array,
variable_len_array: *VLA,
@"struct": NodeIndex,
@"union": NodeIndex,
@"enum": NodeIndex,
pub fn str(spec: Kind) []const u8 {
return switch (spec) {
.none => unreachable,
.void => "void",
.bool => "_Bool",
.char => "char",
.schar => "signed char",
.uchar => "unsigned char",
.unsigned => "unsigned",
.signed => "signed",
.short => "short",
.ushort => "unsigned short",
.sshort => "signed short",
.short_int => "short int",
.sshort_int => "signed short int",
.ushort_int => "unsigned short int",
.int => "int",
.sint => "signed int",
.uint => "unsigned int",
.long => "long",
.slong => "signed long",
.ulong => "unsigned long",
.long_int => "long int",
.slong_int => "signed long int",
.ulong_int => "unsigned long int",
.long_long => "long long",
.slong_long => "signed long long",
.ulong_long => "unsigned long long",
.long_long_int => "long long int",
.slong_long_int => "signed long long int",
.ulong_long_int => "unsigned long long int",
.float => "float",
.double => "double",
.long_double => "long double",
.complex => "_Complex",
.complex_long => "_Complex long",
.complex_float => "_Complex float",
.complex_double => "_Complex double",
.complex_long_double => "_Complex long double",
// TODO make these more specific?
.pointer => "pointer",
.atomic => "atomic",
.func, .var_args_func, .old_style_func => "function",
.array, .static_array, .unspecified_variable_len_array, .variable_len_array, .incomplete_array => "array",
.@"struct" => "struct",
.@"union" => "union",
.@"enum" => "enum",
};
}
};
pub fn finish(spec: Builder, p: *Parser, ty: *Type) Parser.Error!void {
ty.specifier = switch (spec.kind) {
.none => {
ty.specifier = .int;
return p.err(.missing_type_specifier);
},
.void => .void,
.bool => .bool,
.char => .char,
.schar => .schar,
.uchar => .uchar,
.unsigned => .uint,
.signed => .int,
.short_int, .sshort_int, .short, .sshort => .short,
.ushort, .ushort_int => .ushort,
.int, .sint => .int,
.uint => .uint,
.long, .slong, .long_int, .slong_int => .long,
.ulong, .ulong_int => .ulong,
.long_long, .slong_long, .long_long_int, .slong_long_int => .long_long,
.ulong_long, .ulong_long_int => .ulong_long,
.float => .float,
.double => .double,
.long_double => .long_double,
.complex_float => .complex_float,
.complex_double => .complex_double,
.complex_long_double => .complex_long_double,
.complex, .complex_long => {
try p.errExtra(.type_is_invalid, p.tok_i, .{ .str = spec.kind.str() });
return error.ParsingFailed;
},
.atomic => |data| {
ty.specifier = .atomic;
ty.data = .{ .sub_type = data };
return;
},
.pointer => |data| {
ty.specifier = .pointer;
ty.data = .{ .sub_type = data };
return;
},
.unspecified_variable_len_array => |data| {
ty.specifier = .unspecified_variable_len_array;
ty.data = .{ .sub_type = data };
return;
},
.func => |data| {
ty.specifier = .func;
ty.data = .{ .func = data };
return;
},
.var_args_func => |data| {
ty.specifier = .var_args_func;
ty.data = .{ .func = data };
return;
},
.old_style_func => |data| {
ty.specifier = .old_style_func;
ty.data = .{ .func = data };
return;
},
.array => |data| {
ty.specifier = .array;
ty.data = .{ .array = data };
return;
},
.static_array => |data| {
ty.specifier = .static_array;
ty.data = .{ .array = data };
return;
},
.incomplete_array => |data| {
ty.specifier = .incomplete_array;
ty.data = .{ .array = data };
return;
},
.variable_len_array => |data| {
ty.specifier = .variable_len_array;
ty.data = .{ .vla = data };
return;
},
.@"struct" => |data| {
ty.specifier = .@"struct";
ty.data = .{ .node = data };
return;
},
.@"union" => |data| {
ty.specifier = .@"union";
ty.data = .{ .node = data };
return;
},
.@"enum" => |data| {
ty.specifier = .@"enum";
ty.data = .{ .node = data };
return;
},
};
}
pub fn cannotCombine(spec: Builder, p: *Parser) Parser.Error {
try p.errExtra(.cannot_combine_spec, p.tok_i, .{ .str = spec.kind.str() });
if (spec.typedef) |some| try p.errStr(.sepc_from_typedef, some.tok, some.spec);
return error.ParsingFailed;
}
pub fn combine(spec: *Builder, p: *Parser, new: Kind) Parser.Error!void {
switch (new) {
else => switch (spec.kind) {
.none => spec.kind = new,
else => return spec.cannotCombine(p),
},
.atomic => return p.todo("atomic types"),
.signed => spec.kind = switch (spec.kind) {
.none => .signed,
.char => .schar,
.short => .sshort,
.short_int => .sshort_int,
.int => .sint,
.long => .slong,
.long_int => .slong_int,
.long_long => .slong_long,
.long_long_int => .slong_long_int,
.sshort,
.sshort_int,
.sint,
.slong,
.slong_int,
.slong_long,
.slong_long_int,
=> return p.errStr(.duplicate_decl_spec, p.tok_i, "signed"),
else => return spec.cannotCombine(p),
},
.unsigned => spec.kind = switch (spec.kind) {
.none => .unsigned,
.char => .uchar,
.short => .ushort,
.short_int => .ushort_int,
.int => .uint,
.long => .ulong,
.long_int => .ulong_int,
.long_long => .ulong_long,
.long_long_int => .ulong_long_int,
.ushort,
.ushort_int,
.uint,
.ulong,
.ulong_int,
.ulong_long,
.ulong_long_int,
=> return p.errStr(.duplicate_decl_spec, p.tok_i, "unsigned"),
else => return spec.cannotCombine(p),
},
.char => spec.kind = switch (spec.kind) {
.none => .char,
.unsigned => .uchar,
.signed => .schar,
.char, .schar, .uchar => return p.errStr(.duplicate_decl_spec, p.tok_i, "char"),
else => return spec.cannotCombine(p),
},
.short => spec.kind = switch (spec.kind) {
.none => .short,
.unsigned => .ushort,
.signed => .sshort,
else => return spec.cannotCombine(p),
},
.int => spec.kind = switch (spec.kind) {
.none => .int,
.signed => .sint,
.unsigned => .uint,
.short => .short_int,
.sshort => .sshort_int,
.ushort => .ushort_int,
.long => .long_int,
.slong => .slong_int,
.ulong => .ulong_int,
.long_long => .long_long_int,
.slong_long => .slong_long_int,
.ulong_long => .ulong_long_int,
.int,
.sint,
.uint,
.short_int,
.sshort_int,
.ushort_int,
.long_int,
.slong_int,
.ulong_int,
.long_long_int,
.slong_long_int,
.ulong_long_int,
=> return p.errStr(.duplicate_decl_spec, p.tok_i, "int"),
else => return spec.cannotCombine(p),
},
.long => spec.kind = switch (spec.kind) {
.none => .long,
.long => .long_long,
.unsigned => .ulong,
.signed => .slong,
.int => .long_int,
.sint => .slong_int,
.ulong => .ulong_long,
.long_long, .ulong_long => return p.errStr(.duplicate_decl_spec, p.tok_i, "long"),
else => return spec.cannotCombine(p),
},
.float => spec.kind = switch (spec.kind) {
.none => .float,
.complex => .complex_float,
.complex_float, .float => return p.errStr(.duplicate_decl_spec, p.tok_i, "float"),
else => return spec.cannotCombine(p),
},
.double => spec.kind = switch (spec.kind) {
.none => .double,
.long => .long_double,
.complex_long => .complex_long_double,
.complex => .complex_double,
.long_double,
.complex_long_double,
.complex_double,
.double,
=> return p.errStr(.duplicate_decl_spec, p.tok_i, "double"),
else => return spec.cannotCombine(p),
},
.complex => spec.kind = switch (spec.kind) {
.none => .complex,
.long => .complex_long,
.float => .complex_float,
.double => .complex_double,
.long_double => .complex_long_double,
.complex,
.complex_long,
.complex_float,
.complex_double,
.complex_long_double,
=> return p.errStr(.duplicate_decl_spec, p.tok_i, "_Complex"),
else => return spec.cannotCombine(p),
},
}
}
pub fn fromType(ty: Type) Kind {
return switch (ty.specifier) {
.void => .void,
.bool => .bool,
.char => .char,
.schar => .schar,
.uchar => .uchar,
.short => .short,
.ushort => .ushort,
.int => .int,
.uint => .uint,
.long => .long,
.ulong => .ulong,
.long_long => .long_long,
.ulong_long => .ulong_long,
.float => .float,
.double => .double,
.long_double => .long_double,
.complex_float => .complex_float,
.complex_double => .complex_double,
.complex_long_double => .complex_long_double,
.pointer => .{ .pointer = ty.data.sub_type },
.atomic => .{ .atomic = ty.data.sub_type },
.unspecified_variable_len_array => .{ .unspecified_variable_len_array = ty.data.sub_type },
.func => .{ .func = ty.data.func },
.var_args_func => .{ .var_args_func = ty.data.func },
.old_style_func => .{ .old_style_func = ty.data.func },
.array => .{ .array = ty.data.array },
.static_array => .{ .static_array = ty.data.array },
.incomplete_array => .{ .incomplete_array = ty.data.array },
.variable_len_array => .{ .variable_len_array = ty.data.vla },
.@"struct" => .{ .@"struct" = ty.data.node },
.@"union" => .{ .@"union" = ty.data.node },
.@"enum" => .{ .@"enum" = ty.data.node },
};
}
};
// Print as Zig types since those are actually readable
pub fn dump(ty: Type, tree: Tree, w: anytype) @TypeOf(w).Error!void {
try ty.qual.dump(w);
switch (ty.specifier) {
.pointer => {
try w.writeAll("*");
try ty.data.sub_type.dump(tree, w);
},
.atomic => {
try w.writeAll("_Atomic");
try ty.data.sub_type.dump(tree, w);
try w.writeAll(")");
},
.func, .var_args_func, .old_style_func => {
try w.writeAll("fn (");
for (ty.data.func.param_types) |param, i| {
if (i != 0) try w.writeAll(", ");
const name_tok = tree.nodes.items(.data)[param].first;
if (tree.tokens.items(.id)[name_tok] == .identifier) {
try w.print("{s}: ", .{tree.tokSlice(name_tok)});
}
try tree.nodes.items(.ty)[param].dump(tree, w);
}
if (ty.specifier != .func) {
if (ty.data.func.param_types.len != 0) try w.writeAll(", ");
try w.writeAll("...");
}
try w.writeAll(") ");
try ty.data.func.return_type.dump(tree, w);
},
.array, .static_array => {
try w.writeByte('[');
if (ty.specifier == .static_array) try w.writeAll("static ");
try w.print("{d}]", .{ty.data.array.len});
try ty.data.array.elem.dump(tree, w);
},
.incomplete_array => {
try w.writeAll("[]");
try ty.data.array.elem.dump(tree, w);
},
else => try w.writeAll(Builder.fromType(ty).str()),
}
} | src/Type.zig |
usingnamespace @import("raylib");
const std = @import("std");
const math = std.math;
const warn = std.debug.warn;
const panic = std.debug.panic;
const grid_width: i32 = 10;
const grid_height: i32 = 20;
const grid_cell_size: i32 = 32;
const margin: i32 = 20;
const piece_preview_width = grid_cell_size * 5;
const screen_width: i32 = (grid_width * grid_cell_size) + (margin * 2) + piece_preview_width + margin;
const screen_height: i32 = (grid_height * grid_cell_size) + margin;
fn rgb(r: u8, g: u8, b: u8) Color {
return Color{ .r = r, .g = g, .b = b, .a = 255 };
}
fn rgba(r: u8, g: u8, b: u8, a: u8) Color {
return Color{ .r = r, .g = g, .b = b, .a = a };
}
const BackgroundColor = rgb(29, 38, 57);
const BackgroundHiLightColor = rgb(39, 48, 67);
const BorderColor = rgb(3, 2, 1);
const State = enum {
StartScreen,
Play,
Pause,
GameOver,
};
const Pos = struct {
x: i32,
y: i32,
};
fn p(x: i32, y: i32) Pos {
return Pos{ .x = x, .y = y };
}
const Type = enum {
Cube,
Long,
Z,
S,
T,
L,
J,
};
fn piece_color(t: Type) Color {
return switch (t) {
Type.Cube => rgb(241, 211, 90),
Type.Long => rgb(83, 179, 219),
Type.L => rgb(92, 205, 162),
Type.J => rgb(231, 111, 124),
Type.T => rgb(195, 58, 47),
Type.S => rgb(96, 150, 71),
Type.Z => rgb(233, 154, 56),
};
}
fn random_type(rng: *std.rand.DefaultPrng) Type {
const index = rng.random.uintLessThanBiased(@typeInfo(Type).Enum.tag_type,
@typeInfo(Type).Enum.fields.len);
return @intToEnum(Type, index);
}
const Rotation = enum {
A, B, C, D
};
const Square = struct {
color: Color,
active: bool,
};
const Level = struct {
tick_rate: i32,
value: usize,
pub fn get_level(piece_count: usize) Level {
return switch (piece_count) {
0...10 => Level{ .value = 1, .tick_rate = 30 },
11...25 => Level{ .value = 2, .tick_rate = 30 },
26...50 => Level{ .value = 3, .tick_rate = 25 },
51...100 => Level{ .value = 4, .tick_rate = 25 },
101...150 => Level{ .value = 5, .tick_rate = 20 },
151...200 => Level{ .value = 6, .tick_rate = 20 },
201...250 => Level{ .value = 7, .tick_rate = 15 },
251...300 => Level{ .value = 8, .tick_rate = 15 }, // score ~100
301...350 => Level{ .value = 9, .tick_rate = 12 },
351...400 => Level{ .value = 10, .tick_rate = 12 },
401...450 => Level{ .value = 11, .tick_rate = 10 },
451...500 => Level{ .value = 12, .tick_rate = 10 },
501...600 => Level{ .value = 13, .tick_rate = 8 },
601...700 => Level{ .value = 14, .tick_rate = 8 },
701...800 => Level{ .value = 15, .tick_rate = 6 },
else => Level{ .value = 16, .tick_rate = 5 },
};
}
};
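// Hedged spot checks (illustrative, not part of the original file): the table
// above shortens the tick rate as more pieces are placed.
test "level table" {
std.debug.assert(Level.get_level(5).value == 1);
std.debug.assert(Level.get_level(260).tick_rate == 15);
std.debug.assert(Level.get_level(10_000).value == 16);
}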
const Game = struct {
grid: [grid_width * grid_height]Square,
squares: [4]Pos,
rng: std.rand.DefaultPrng,
state: State,
t: Type,
next_type: Type,
r: Rotation,
tick: i32,
freeze_down: i32,
freeze_input: i32,
freeze_space: i32,
x: i32,
y: i32,
score: usize,
piece_count: usize,
rows_this_tick: usize,
level: Level,
pub fn init() Game {
// grid
var grid: [grid_width * grid_height]Square = undefined;
for (grid) |*item| {
item.* = Square{ .color = WHITE, .active = false };
}
// rng
var buf: [8]u8 = undefined;
std.crypto.random.bytes(buf[0..]);
const seed = std.mem.readIntLittle(u64, buf[0..8]);
var r = std.rand.DefaultPrng.init(seed);
// squares
const t = random_type(&r);
const next_type = random_type(&r);
var squares = Game.get_squares(t, Rotation.A);
return Game{
.grid = grid,
.squares = squares,
.rng = r,
.state = State.StartScreen,
.t = t,
.next_type = next_type,
.r = Rotation.A,
.tick = 0,
.freeze_down = 0,
.freeze_input = 0,
.freeze_space = 0,
.x = 4,
.y = 0,
.score = 0,
.piece_count = 1,
.rows_this_tick = 0,
.level = Level.get_level(1),
};
}
fn anykey(self: *Game) bool {
const k = GetKeyPressed();
if (k != @enumToInt(KeyboardKey.KEY_NULL)) {
return true;
} else {
// Seems like some keys don't register with GetKeyPressed, so
// checking for them manually here.
if (IsKeyReleased(KeyboardKey.KEY_DOWN) or
IsKeyReleased(KeyboardKey.KEY_LEFT) or
IsKeyReleased(KeyboardKey.KEY_RIGHT) or
IsKeyReleased(KeyboardKey.KEY_UP) or
IsKeyReleased(KeyboardKey.KEY_ENTER))
{
return true;
}
}
return false;
}
pub fn update(self: *Game) void {
switch (self.state) {
State.StartScreen => {
if (self.anykey()) {
self.freeze_space = 30;
self.state = State.Play;
}
},
State.GameOver => {
if (self.anykey() and self.freeze_input == 0) {
self.reset();
self.piece_reset();
self.tick = 0;
// TODO: Add High Score screen.
self.score = 0;
self.rows_this_tick = 0;
self.state = State.Play;
}
},
State.Play => {
if (IsKeyReleased(KeyboardKey.KEY_ESCAPE)) {
self.state = State.Pause;
return;
}
if (IsKeyPressed(KeyboardKey.KEY_RIGHT)) {
self.move_right();
}
if (IsKeyPressed(KeyboardKey.KEY_LEFT)) {
self.move_left();
}
if (IsKeyDown(KeyboardKey.KEY_DOWN)) {
if (self.freeze_down <= 0) {
const moved = self.move_down();
if (!moved) {
self.freeze_down = 60;
}
}
}
if (IsKeyReleased(KeyboardKey.KEY_DOWN)) {
self.freeze_down = 0;
}
if (IsKeyPressed(KeyboardKey.KEY_RIGHT_CONTROL) or IsKeyPressed(KeyboardKey.KEY_SPACE)) {
const moved = self.drop();
if (!moved) {
self.freeze_down = 60;
}
}
if (IsKeyPressed(KeyboardKey.KEY_UP)) {
self.rotate();
}
if (self.tick >= self.level.tick_rate) {
_ = self.move_down();
self.remove_full_rows();
self.tick = 0;
self.update_score();
self.update_level();
}
self.tick += 1;
},
State.Pause => {
if (IsKeyReleased(KeyboardKey.KEY_ESCAPE)) {
self.state = State.Play;
}
},
}
if (self.freeze_down > 0) {
self.freeze_down -= 1;
}
if (self.freeze_space > 0) {
self.freeze_space -= 1;
}
if (self.freeze_input > 0) {
self.freeze_input -= 1;
}
}
fn update_score(self: *Game) void {
const bonus: usize = switch (self.rows_this_tick) {
0 => 0,
1 => 1,
2 => 3,
3 => 5,
4 => 8,
else => 100, // shouldn't happen
};
self.score += bonus;
self.rows_this_tick = 0;
}
fn update_level(self: *Game) void {
self.level = Level.get_level(self.piece_count);
warn("level: {}, speed: {}\n", .{self.level.value, self.level.tick_rate});
}
fn row_is_full(self: Game, y: i32) bool {
if (y >= grid_height or y < 0) {
warn("Row index out of bounds {}", .{y});
return false;
}
var x: i32 = 0;
return while (x < grid_width) : (x += 1) {
if (!self.get_active(x, y)) {
break false;
}
} else true;
}
fn copy_row(self: *Game, y1: i32, y2: i32) void {
if (y1 == y2) {
warn("Invalid copy, {} must not equal {}\n", .{ y1, y2 });
return;
}
if (y2 < 0 or y1 >= grid_height or y2 >= grid_height) {
warn("Invalid copy, {} or {} is out of bounds\n", .{ y1, y2 });
return;
}
var x: i32 = 0;
while (x < grid_width) : (x += 1) {
if (y1 < 0) {
self.set_active_state(x, y2, false);
self.set_grid_color(x, y2, WHITE);
} else {
self.set_active_state(x, y2, self.get_active(x, y1));
self.set_grid_color(x, y2, self.get_grid_color(x, y1));
}
}
}
fn copy_rows(self: *Game, src_y: i32, dst_y: i32) void {
// Starting at dest row, copy everything above, but starting at dest
if (src_y >= dst_y) {
warn("{} must be less than {}\n", .{ src_y, dst_y });
return;
}
var y1: i32 = src_y;
var y2: i32 = dst_y;
while (y2 > -1) {
self.copy_row(y1, y2);
y1 -= 1;
y2 -= 1;
}
}
pub fn remove_full_rows(self: *Game) void {
// Remove full rows
var y: i32 = grid_height - 1;
var cp_y: i32 = y;
while (y > -1) {
if (self.row_is_full(y)) {
while (self.row_is_full(cp_y)) {
self.rows_this_tick += 1;
cp_y -= 1;
}
self.copy_rows(cp_y, y);
cp_y = y;
}
y -= 1;
cp_y -= 1;
}
}
pub fn get_active(self: Game, x: i32, y: i32) bool {
if (x < 0) {
return true;
}
if (y < 0) {
return false;
}
const index: usize = @intCast(usize, y) * @intCast(usize, grid_width) + @intCast(usize, x);
if (index >= self.grid.len) {
return true;
}
return self.grid[index].active;
}
pub fn get_grid_color(self: Game, x: i32, y: i32) Color {
if (x < 0) {
return LIGHTGRAY;
}
if (y < 0) {
return WHITE;
}
const index: usize = @intCast(usize, y) * @intCast(usize, grid_width) + @intCast(usize, x);
if (index >= self.grid.len) {
return LIGHTGRAY;
}
return self.grid[index].color;
}
pub fn set_active_state(self: *Game, x: i32, y: i32, state: bool) void {
if (x < 0 or y < 0) {
return;
}
const index: usize = @intCast(usize, y) * @intCast(usize, grid_width) + @intCast(usize, x);
if (index >= self.grid.len) {
return;
}
self.grid[index].active = state;
}
fn set_grid_color(self: *Game, x: i32, y: i32, color: Color) void {
if (x < 0 or y < 0) {
return;
}
const index: usize = @intCast(usize, y) * @intCast(usize, grid_width) + @intCast(usize, x);
if (index >= self.grid.len) {
return;
}
self.grid[index].color = color;
}
pub fn reset(self: *Game) void {
for (self.grid) |*item| {
item.* = Square{ .color = WHITE, .active = false };
}
}
pub fn piece_reset(self: *Game) void {
self.piece_count += 1;
self.y = 0;
self.x = 4;
self.t = self.next_type;
self.next_type = random_type(&self.rng);
self.r = Rotation.A;
self.squares = Game.get_squares(self.t, self.r);
if (self.check_collision(self.squares)) {
self.state = State.GameOver;
self.freeze_input = 60; // Keep player from mashing keys at end and skipping the game over screen.
}
}
fn piece_shade(self: *Game) Color {
return switch (self.t) {
Type.Cube => rgb(241, 211, 90),
Type.Long => rgb(83, 179, 219),
Type.L => rgb(92, 205, 162),
Type.J => rgb(231, 111, 124),
Type.T => rgb(195, 58, 47),
Type.S => rgb(96, 150, 71),
Type.Z => rgb(233, 154, 56),
};
}
fn piece_ghost(self: *Game) Color {
return switch (self.t) {
Type.Cube => rgba(241, 211, 90, 175),
Type.Long => rgba(83, 179, 219, 175),
Type.L => rgba(92, 205, 162, 175),
Type.J => rgba(231, 111, 124, 175),
Type.T => rgba(195, 58, 47, 175),
Type.S => rgba(96, 150, 71, 175),
Type.Z => rgba(233, 154, 56, 175),
};
}
pub fn draw(self: *Game) void {
ClearBackground(BorderColor);
var y: i32 = 0;
var upper_left_y: i32 = 0;
while (y < grid_height) {
var x: i32 = 0;
var upper_left_x: i32 = margin;
while (x < grid_width) {
if (self.get_active(x, y)) {
DrawRectangle(upper_left_x, upper_left_y, grid_cell_size, grid_cell_size, self.get_grid_color(x, y));
} else {
DrawRectangle(upper_left_x, upper_left_y, grid_cell_size, grid_cell_size, BackgroundHiLightColor);
DrawRectangle(upper_left_x + 1, upper_left_y + 1, grid_cell_size - 2, grid_cell_size - 2, BackgroundColor);
}
upper_left_x += grid_cell_size;
x += 1;
}
upper_left_y += grid_cell_size;
y += 1;
}
if (self.state != State.StartScreen) {
// Draw falling piece and ghost
const ghost_square_offset = self.get_ghost_square_offset();
for (self.squares) |pos| {
// Draw ghost
DrawRectangle((self.x + pos.x) * grid_cell_size + margin, (self.y + ghost_square_offset + pos.y) * grid_cell_size, grid_cell_size, grid_cell_size, self.piece_ghost());
// Draw shape
DrawRectangle((self.x + pos.x) * grid_cell_size + margin, (self.y + pos.y) * grid_cell_size, grid_cell_size, grid_cell_size, piece_color(self.t));
}
}
const right_bar = margin + (10 * grid_cell_size) + margin;
var draw_height = margin; // Track where to start drawing the next item
// Draw score
DrawText("Score:", right_bar, draw_height, 20, LIGHTGRAY);
draw_height += 20;
var score_text_buf = [_]u8{0} ** 20;
const score_text = std.fmt.bufPrint(score_text_buf[0..], "{}", .{self.score}) catch unreachable;
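// score_text_buf is zero-filled and the printed score never uses all 20 bytes,
// so the buffer is already NUL-terminated and safe to pass to DrawText as a C string.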
DrawText(@ptrCast([*c]const u8, score_text[0..1]), right_bar, draw_height, 20, LIGHTGRAY);
draw_height += 20;
// Draw next piece
draw_height += margin;
DrawRectangle(right_bar, draw_height, piece_preview_width, piece_preview_width, BackgroundColor);
if (self.state != State.StartScreen) {
const next_squares = switch (self.next_type) {
Type.Long => Game.get_squares(self.next_type, Rotation.B),
else => Game.get_squares(self.next_type, Rotation.A),
};
var max_x: i32 = 0;
var min_x: i32 = 0;
var max_y: i32 = 0;
var min_y: i32 = 0;
for (next_squares) |pos| {
min_x = math.min(min_x, pos.x);
max_x = math.max(max_x, pos.x);
min_y = math.min(min_y, pos.y);
max_y = math.max(max_y, pos.y);
}
const height = (max_y - min_y + 1) * grid_cell_size;
const width = (max_x - min_x + 1) * grid_cell_size;
// offset to add to each local pos so that 0,0 is in upper left.
const x_offset = min_x * -1;
const y_offset = min_y * -1;
const x_pixel_offset = @divFloor(piece_preview_width - width, 2);
const y_pixel_offset = @divFloor(piece_preview_width - height, 2);
for (next_squares) |pos| {
DrawRectangle(right_bar + x_pixel_offset + ((pos.x + x_offset) * grid_cell_size), draw_height + y_pixel_offset + ((pos.y + y_offset) * grid_cell_size), grid_cell_size, grid_cell_size, piece_color(self.next_type));
}
}
draw_height += piece_preview_width;
if (self.state == State.Pause or self.state == State.GameOver or self.state == State.StartScreen) {
// Partially transparent background to give text better contrast if drawn over the grid
DrawRectangle(0, (screen_height / 2) - 70, screen_width, 110, rgba(3, 2, 1, 100));
}
if (self.state == State.Pause) {
DrawText("PAUSED", 75, screen_height / 2 - 50, 50, WHITE);
DrawText("Press ESCAPE to unpause", 45, screen_height / 2, 20, LIGHTGRAY);
}
if (self.state == State.GameOver) {
DrawText("GAME OVER", 45, screen_height / 2 - 50, 42, WHITE);
DrawText("Press any key to continue", 41, screen_height / 2, 20, LIGHTGRAY);
}
if (self.state == State.StartScreen) {
DrawText("TETRIS", 75, screen_height / 2 - 50, 50, WHITE);
DrawText("Press any key to continue", 41, screen_height / 2, 20, LIGHTGRAY);
}
}
pub fn get_squares(t: Type, r: Rotation) [4]Pos {
return switch (t) {
Type.Cube => [_]Pos{
p(0, 0), p(1, 0), p(0, 1), p(1, 1),
},
Type.Long => switch (r) {
Rotation.A, Rotation.C => [_]Pos{
p(-1, 0), p(0, 0), p(1, 0), p(2, 0),
},
Rotation.B, Rotation.D => [_]Pos{
p(0, -1), p(0, 0), p(0, 1), p(0, 2),
},
},
Type.Z => switch (r) {
Rotation.A, Rotation.C => [_]Pos{
p(-1, 0), p(0, 0), p(0, 1), p(1, 1),
},
Rotation.B, Rotation.D => [_]Pos{
p(0, -1), p(-1, 0), p(0, 0), p(-1, 1),
},
},
Type.S => switch (r) {
Rotation.A, Rotation.C => [_]Pos{
p(0, 0), p(1, 0), p(-1, 1), p(0, 1),
},
Rotation.B, Rotation.D => [_]Pos{
p(0, -1), p(0, 0), p(1, 0), p(1, 1),
},
},
Type.T => switch (r) {
Rotation.A => [_]Pos{
p(0, -1), p(-1, 0), p(0, 0), p(1, 0),
},
Rotation.B => [_]Pos{
p(0, -1), p(0, 0), p(1, 0), p(0, 1),
},
Rotation.C => [_]Pos{
p(-1, 0), p(0, 0), p(1, 0), p(0, 1),
},
Rotation.D => [_]Pos{
p(0, -1), p(-1, 0), p(0, 0), p(0, 1),
},
},
Type.L => switch (r) {
Rotation.A => [_]Pos{
p(0, -1), p(0, 0), p(0, 1), p(1, 1),
},
Rotation.B => [_]Pos{
p(-1, 0), p(0, 0), p(1, 0), p(-1, 1),
},
Rotation.C => [_]Pos{
p(-1, -1), p(0, -1), p(0, 0), p(0, 1),
},
Rotation.D => [_]Pos{
p(1, -1), p(-1, 0), p(0, 0), p(1, 0),
},
},
Type.J => switch (r) {
Rotation.A => [_]Pos{
p(0, -1), p(0, 0), p(-1, 1), p(0, 1),
},
Rotation.B => [_]Pos{
p(-1, -1), p(-1, 0), p(0, 0), p(1, 0),
},
Rotation.C => [_]Pos{
p(0, -1), p(1, -1), p(0, 0), p(0, 1),
},
Rotation.D => [_]Pos{
p(-1, 0), p(0, 0), p(1, 0), p(1, 1),
},
},
};
}
pub fn get_ghost_square_offset(self: *Game) i32 {
var offset: i32 = 0;
while (true) {
if (self.check_collision_offset(0, offset, self.squares)) {
break;
}
offset += 1;
}
return offset - 1;
}
pub fn rotate(self: *Game) void {
const r = switch (self.r) {
Rotation.A => Rotation.B,
Rotation.B => Rotation.C,
Rotation.C => Rotation.D,
Rotation.D => Rotation.A,
};
const squares = Game.get_squares(self.t, r);
if (self.check_collision(squares)) {
// Try moving left or right by one or two squares. This helps when trying
// to rotate when right next to the wall or another block. Especially noticeable
// on the 4x1 (Long) type.
const x_offsets = [_]i32{ 1, -1, 2, -2 };
for (x_offsets) |x_offset| {
if (!self.check_collision_offset(x_offset, 0, squares)) {
self.x += x_offset;
self.squares = squares;
self.r = r;
return;
}
}
} else {
self.squares = squares;
self.r = r;
}
}
pub fn check_collision(self: *Game, squares: [4]Pos) bool {
for (squares) |pos| {
const x = self.x + pos.x;
const y = self.y + pos.y;
if ((x >= grid_width) or (x < 0) or (y >= grid_height) or self.get_active(x, y)) {
return true;
}
}
return false;
}
fn check_collision_offset(self: *Game, offset_x: i32, offset_y: i32, squares: [4]Pos) bool {
for (squares) |pos| {
const x = self.x + pos.x + offset_x;
const y = self.y + pos.y + offset_y;
if ((x >= grid_width) or (x < 0) or (y >= grid_height) or self.get_active(x, y)) {
return true;
}
}
return false;
}
pub fn move_right(self: *Game) void {
const can_move = blk: {
for (self.squares) |pos| {
const x = self.x + pos.x + 1;
const y = self.y + pos.y;
if ((x >= grid_width) or self.get_active(x, y)) {
break :blk false;
}
}
break :blk true;
};
if (can_move) {
self.x += 1;
}
}
pub fn move_left(self: *Game) void {
const can_move = blk: {
for (self.squares) |pos| {
const x = self.x + pos.x - 1;
const y = self.y + pos.y;
if ((x < 0) or self.get_active(x, y)) {
break :blk false;
}
}
break :blk true;
};
if (can_move) {
self.x -= 1;
}
}
fn can_move_down(self: *Game) bool {
for (self.squares) |pos| {
const x = self.x + pos.x;
const y = self.y + pos.y + 1;
if ((y >= grid_height) or self.get_active(x, y)) {
return false;
}
}
return true;
}
pub fn drop(self: *Game) bool {
// Drop all the way down
var moved = false;
while (self.can_move_down()) {
self.y += 1;
moved = true;
}
if (moved) {
return true;
} else {
for (self.squares) |pos| {
self.set_active_state(self.x + pos.x, self.y + pos.y, true);
self.set_grid_color(self.x + pos.x, self.y + pos.y, self.piece_shade());
}
self.piece_reset();
return false;
}
}
pub fn move_down(self: *Game) bool {
if (self.can_move_down()) {
self.y += 1;
return true;
} else {
for (self.squares) |pos| {
self.set_active_state(self.x + pos.x, self.y + pos.y, true);
self.set_grid_color(self.x + pos.x, self.y + pos.y, self.piece_shade());
}
self.piece_reset();
return false;
}
}
};
pub fn main() anyerror!void {
// Initialization
var game = Game.init();
InitWindow(screen_width, screen_height, "Tetris");
defer CloseWindow();
// Default is Escape, but we want to use that for pause instead
SetExitKey(KeyboardKey.KEY_F4);
// Set the game to run at 60 frames-per-second
SetTargetFPS(60);
// Solves blurry font on high resolution displays
SetTextureFilter(GetFontDefault().texture, @enumToInt(TextureFilterMode.FILTER_POINT));
// Main game loop
while (!WindowShouldClose()) // Detect window close button or ESC key
{
game.update();
BeginDrawing();
game.draw();
EndDrawing();
}
} | src/main.zig |
const std = @import("std.zig");
const StringHashMap = std.StringHashMap;
const mem = std.mem;
const Allocator = mem.Allocator;
const testing = std.testing;
/// BufMap copies keys and values before they go into the map, and
/// frees them when they get removed.
pub const BufMap = struct {
hash_map: BufMapHashMap,
const BufMapHashMap = StringHashMap([]const u8);
/// Create a BufMap backed by a specific allocator.
/// That allocator will be used for both backing allocations
/// and string deduplication.
pub fn init(allocator: Allocator) BufMap {
var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) };
return self;
}
/// Free the backing storage of the map, as well as all
/// of the stored keys and values.
pub fn deinit(self: *BufMap) void {
var it = self.hash_map.iterator();
while (it.next()) |entry| {
self.free(entry.key_ptr.*);
self.free(entry.value_ptr.*);
}
self.hash_map.deinit();
}
/// Same as `put` but the key and value become owned by the BufMap rather
/// than being copied.
/// If `putMove` fails, the ownership of key and value does not transfer.
pub fn putMove(self: *BufMap, key: []u8, value: []u8) !void {
const get_or_put = try self.hash_map.getOrPut(key);
if (get_or_put.found_existing) {
self.free(get_or_put.key_ptr.*);
self.free(get_or_put.value_ptr.*);
get_or_put.key_ptr.* = key;
}
get_or_put.value_ptr.* = value;
}
/// `key` and `value` are copied into the BufMap.
pub fn put(self: *BufMap, key: []const u8, value: []const u8) !void {
const value_copy = try self.copy(value);
errdefer self.free(value_copy);
const get_or_put = try self.hash_map.getOrPut(key);
if (get_or_put.found_existing) {
self.free(get_or_put.value_ptr.*);
} else {
get_or_put.key_ptr.* = self.copy(key) catch |err| {
_ = self.hash_map.remove(key);
return err;
};
}
get_or_put.value_ptr.* = value_copy;
}
/// Find the address of the value associated with a key.
/// The returned pointer is invalidated if the map resizes.
pub fn getPtr(self: BufMap, key: []const u8) ?*[]const u8 {
return self.hash_map.getPtr(key);
}
/// Return the map's copy of the value associated with
/// a key. The returned string is invalidated if this
/// key is removed from the map.
pub fn get(self: BufMap, key: []const u8) ?[]const u8 {
return self.hash_map.get(key);
}
/// Removes the item from the map and frees its value.
/// This invalidates the value returned by get() for this key.
pub fn remove(self: *BufMap, key: []const u8) void {
const kv = self.hash_map.fetchRemove(key) orelse return;
self.free(kv.key);
self.free(kv.value);
}
/// Returns the number of KV pairs stored in the map.
pub fn count(self: BufMap) BufMapHashMap.Size {
return self.hash_map.count();
}
/// Returns an iterator over entries in the map.
pub fn iterator(self: *const BufMap) BufMapHashMap.Iterator {
return self.hash_map.iterator();
}
fn free(self: BufMap, value: []const u8) void {
self.hash_map.allocator.free(value);
}
fn copy(self: BufMap, value: []const u8) ![]u8 {
return self.hash_map.allocator.dupe(u8, value);
}
};
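// Hedged example (illustrative sketch, not part of the original test suite):
// entries can be visited with iterator() and addressed in place with getPtr();
// the returned pointers refer to the map's own copies of the strings.
test "BufMap iterator and getPtr" {
var bufmap = BufMap.init(testing.allocator);
defer bufmap.deinit();
try bufmap.put("a", "1");
try bufmap.put("b", "2");
var entries: usize = 0;
var it = bufmap.iterator();
while (it.next()) |entry| {
try testing.expect(entry.value_ptr.*.len == 1);
entries += 1;
}
try testing.expect(entries == 2);
try testing.expect(bufmap.getPtr("a") != null);
try testing.expect(bufmap.getPtr("missing") == null);
}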
test "BufMap" {
const allocator = std.testing.allocator;
var bufmap = BufMap.init(allocator);
defer bufmap.deinit();
try bufmap.put("x", "1");
try testing.expect(mem.eql(u8, bufmap.get("x").?, "1"));
try testing.expect(1 == bufmap.count());
try bufmap.put("x", "2");
try testing.expect(mem.eql(u8, bufmap.get("x").?, "2"));
try testing.expect(1 == bufmap.count());
try bufmap.put("x", "3");
try testing.expect(mem.eql(u8, bufmap.get("x").?, "3"));
try testing.expect(1 == bufmap.count());
bufmap.remove("x");
try testing.expect(0 == bufmap.count());
try bufmap.putMove(try allocator.dupe(u8, "k"), try allocator.dupe(u8, "v1"));
try bufmap.putMove(try allocator.dupe(u8, "k"), try allocator.dupe(u8, "v2"));
} | lib/std/buf_map.zig |
const libpoke = @import("index.zig");
const std = @import("std");
const fun = @import("../../lib/fun-with-zig/src/index.zig");
const mem = std.mem;
const debug = std.debug;
const generic = fun.generic;
const lu16 = fun.platform.lu16;
const lu32 = fun.platform.lu32;
const lu64 = fun.platform.lu64;
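/// Describes where a run of `Item`s lives inside a ROM image, as a byte offset
/// plus an item count, so the same constant tables below can be applied to any
/// loaded copy of the ROM data.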
pub fn Section(comptime Item: type) type {
return struct {
const Self = @This();
start: usize,
len: usize,
pub fn init(data_slice: []const u8, items: []const Item) Self {
const data_ptr = @ptrToInt(data_slice.ptr);
const item_ptr = @ptrToInt(items.ptr);
debug.assert(data_ptr <= item_ptr);
debug.assert(item_ptr + items.len * @sizeOf(Item) <= data_ptr + data_slice.len);
return Self{
.start = item_ptr - data_ptr,
.len = items.len,
};
}
pub fn end(offset: Self) usize {
return offset.start + @sizeOf(Item) * offset.len;
}
pub fn slice(offset: Self, data: []u8) []Item {
return @bytesToSlice(Item, data[offset.start..offset.end()]);
}
};
}
pub const TrainerSection = Section(libpoke.gen3.Trainer);
pub const MoveSection = Section(libpoke.gen3.Move);
pub const MachineLearnsetSection = Section(lu64);
pub const BaseStatsSection = Section(libpoke.gen3.BasePokemon);
pub const EvolutionSection = Section([5]libpoke.common.Evolution);
pub const LevelUpLearnsetPointerSection = Section(libpoke.gen3.Ref(libpoke.gen3.LevelUpMove));
pub const HmSection = Section(lu16);
pub const TmSection = Section(lu16);
pub const ItemSection = Section(libpoke.gen3.Item);
pub const WildPokemonHeaderSection = Section(libpoke.gen3.WildPokemonHeader);
pub const Info = struct {
game_title: [12]u8,
gamecode: [4]u8,
version: libpoke.Version,
trainers: TrainerSection,
moves: MoveSection,
machine_learnsets: MachineLearnsetSection,
base_stats: BaseStatsSection,
evolutions: EvolutionSection,
level_up_learnset_pointers: LevelUpLearnsetPointerSection,
hms: HmSection,
tms: TmSection,
items: ItemSection,
wild_pokemon_headers: WildPokemonHeaderSection,
};
pub const infos = []Info{
emerald_us_info,
ruby_us_info,
sapphire_us_info,
fire_us_info,
leaf_us_info,
};
const emerald_us_info = Info{
.game_title = "POKEMON EMER",
.gamecode = "BPEE",
.version = libpoke.Version.Emerald,
.trainers = TrainerSection{
.start = 0x00310030,
.len = 855,
},
.moves = MoveSection{
.start = 0x0031C898,
.len = 355,
},
.machine_learnsets = MachineLearnsetSection{
.start = 0x0031E898,
.len = 412,
},
.base_stats = BaseStatsSection{
.start = 0x003203CC,
.len = 412,
},
.evolutions = EvolutionSection{
.start = 0x0032531C,
.len = 412,
},
.level_up_learnset_pointers = LevelUpLearnsetPointerSection{
.start = 0x0032937C,
.len = 412,
},
.hms = HmSection{
.start = 0x00329EEA,
.len = 8,
},
.tms = TmSection{
.start = 0x00615B94,
.len = 50,
},
.items = ItemSection{
.start = 0x005839A0,
.len = 377,
},
.wild_pokemon_headers = WildPokemonHeaderSection{
.start = 0x00552D48,
.len = 124,
},
};
pub const ruby_us_info = Info{
.game_title = "POKEMON RUBY",
.gamecode = "AXVE",
.version = libpoke.Version.Ruby,
.trainers = TrainerSection{
.start = 0x001F0514,
.len = 337,
},
.moves = MoveSection{
.start = 0x001FB144,
.len = 355,
},
.machine_learnsets = MachineLearnsetSection{
.start = 0x001FD108,
.len = 412,
},
.base_stats = BaseStatsSection{
.start = 0x001FEC30,
.len = 412,
},
.evolutions = EvolutionSection{
.start = 0x00203B80,
.len = 412,
},
.level_up_learnset_pointers = LevelUpLearnsetPointerSection{
.start = 0x00207BE0,
.len = 412,
},
.hms = HmSection{
.start = 0x00208332,
.len = 8,
},
.tms = TmSection{
.start = 0x0037651C,
.len = 50,
},
.items = ItemSection{
.start = 0x003C5580,
.len = 349,
},
.wild_pokemon_headers = WildPokemonHeaderSection{
.start = 0x0039D46C,
.len = 97,
},
};
pub const sapphire_us_info = Info{
.game_title = "POKEMON SAPP",
.gamecode = "AXPE",
.version = libpoke.Version.Sapphire,
.trainers = TrainerSection{
.start = 0x001F04A4,
.len = 337,
},
.moves = MoveSection{
.start = 0x001FB0D4,
.len = 355,
},
.machine_learnsets = MachineLearnsetSection{
.start = 0x001FD098,
.len = 412,
},
.base_stats = BaseStatsSection{
.start = 0x001FEBC0,
.len = 412,
},
.evolutions = EvolutionSection{
.start = 0x00203B10,
.len = 412,
},
.level_up_learnset_pointers = LevelUpLearnsetPointerSection{
.start = 0x00207B70,
.len = 412,
},
.hms = HmSection{
.start = 0x002082C2,
.len = 8,
},
.tms = TmSection{
.start = 0x003764AC,
.len = 50,
},
.items = ItemSection{
.start = 0x003C55DC,
.len = 349,
},
.wild_pokemon_headers = WildPokemonHeaderSection{
.start = 0x0039D2B4,
.len = 97,
},
};
pub const fire_us_info = Info{
.game_title = "POKEMON FIRE",
.gamecode = "BPRE",
.version = libpoke.Version.FireRed,
.trainers = TrainerSection{
.start = 0x0023EB38,
.len = 743,
},
.moves = MoveSection{
.start = 0x00250C74,
.len = 355,
},
.machine_learnsets = MachineLearnsetSection{
.start = 0x00252C38,
.len = 412,
},
.base_stats = BaseStatsSection{
.start = 0x002547F4,
.len = 412,
},
.evolutions = EvolutionSection{
.start = 0x002597C4,
.len = 412,
},
.level_up_learnset_pointers = LevelUpLearnsetPointerSection{
.start = 0x0025D824,
.len = 412,
},
.hms = HmSection{
.start = 0x0025E084,
.len = 8,
},
.tms = TmSection{
.start = 0x0045A604,
.len = 50,
},
.items = ItemSection{
.start = 0x003DB098,
.len = 374,
},
.wild_pokemon_headers = WildPokemonHeaderSection{
.start = 0x003C9D28,
.len = 132,
},
};
pub const leaf_us_info = Info{
.game_title = "POKEMON LEAF",
.gamecode = "BPGE",
.version = libpoke.Version.LeafGreen,
.trainers = TrainerSection{
.start = 0x0023EB14,
.len = 743,
},
.moves = MoveSection{
.start = 0x00250C50,
.len = 355,
},
.machine_learnsets = MachineLearnsetSection{
.start = 0x00252C14,
.len = 412,
},
.base_stats = BaseStatsSection{
.start = 0x002547D0,
.len = 412,
},
.evolutions = EvolutionSection{
.start = 0x002597A4,
.len = 412,
},
.level_up_learnset_pointers = LevelUpLearnsetPointerSection{
.start = 0x0025D804,
.len = 412,
},
.hms = HmSection{
.start = 0x0025E064,
.len = 8,
},
.tms = TmSection{
.start = 0x0045A034,
.len = 50,
},
.items = ItemSection{
.start = 0x003DAED4,
.len = 374,
},
.wild_pokemon_headers = WildPokemonHeaderSection{
.start = 0x003C9B64,
.len = 132,
},
}; | src/pokemon/gen3-constants.zig |
const std = @import("std");
const BitInStream = std.io.BitInStream;
const Endian = std.builtin.Endian;
const min = std.math.min;
const warn = std.debug.warn;
const Block = @import("./block.zig").Block;
const RawBlock = @import("./block.zig").RawBlock;
const HuffmanBlock = @import("./block.zig").HuffmanBlock;
const BlockTree = @import("./block_tree.zig").BlockTree;
const SlidingWindow = @import("./sliding_window.zig").SlidingWindow;
pub const DeflateSlidingWindow = SlidingWindow(u8, 32 * 1024);
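// Length/distance decoding tables from DEFLATE (RFC 1951, section 3.2.5):
// for each length code 257..285 (index 0..28) the number of extra bits and
// the base length, and likewise for the 30 distance codes.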
const len_extra_bits_table = result: {
var table = [_]u3{0} ** 29;
var i: usize = 4;
while (i < table.len - 1) : (i += 1) {
const bits = ((i - 4) >> 2);
table[i] = @intCast(u3, bits);
}
break :result table;
};
const len_base_table = result: {
var table = [_]u9{0} ** 29;
var i: usize = 0;
var v: u9 = 3;
while (i < table.len) : (i += 1) {
const bits = len_extra_bits_table[i];
table[i] = @intCast(u9, v);
v += (1 << bits);
// The second-to-last case is kinda weird.
// It omits the last theoretically-valid value.
if (i == table.len - 2) {
v -= 1;
}
}
break :result table;
};
const dist_extra_bits_table = result: {
var table = [_]u4{0} ** 30;
var i: usize = 2;
while (i < table.len) : (i += 1) {
const bits = ((i - 2) >> 1);
table[i] = @intCast(u4, bits);
}
break :result table;
};
const dist_base_table = result: {
var table = [_]u16{0} ** 30;
var i: usize = 0;
var v: u16 = 1;
while (i < table.len) : (i += 1) {
const bits = dist_extra_bits_table[i];
table[i] = @intCast(u16, v);
v += (1 << bits);
}
break :result table;
};
pub fn RawDeflateReader(comptime InStreamType: type) type {
return struct {
const Self = @This();
const BitInStreamType = BitInStream(Endian.Little, InStreamType);
const ThisBlock = Block(BitInStreamType);
const ThisRawBlock = RawBlock(BitInStreamType);
const ThisHuffmanBlock = HuffmanBlock(BitInStreamType);
const ThisBlockTree = BlockTree(BitInStreamType);
read_stream: BitInStreamType,
window: DeflateSlidingWindow = DeflateSlidingWindow{},
bytes_to_read_from_window: usize = 0,
is_last_block: bool = false,
current_block: ThisBlock = ThisBlock.Empty,
pub fn init(read_stream: InStreamType) Self {
return Self{
.read_stream = BitInStreamType.init(read_stream),
};
}
pub fn read(self: *Self, buffer: []u8) !usize {
var i: usize = 0;
while (i < buffer.len) {
var bytes_read = self.readBytes(buffer[i..]) catch |err| {
if (err == error.EndOfStream) {
return i;
} else {
return err;
}
};
i += bytes_read;
}
return i;
}
fn fetchNextBlock(self: *Self) !void {
if (self.is_last_block) {
return error.EndOfStream;
} else {
self.fetchNextBlockUnconditionally() catch |err| {
if (err == error.EndOfStream) {
return error.Failed;
} else {
return err;
}
};
}
}
fn fetchNextBlockUnconditionally(self: *Self) !void {
// Not EOF, so grab a new block.
var bfinal: u1 = try self.read_stream.readBitsNoEof(u1, 1);
var btype: u2 = try self.read_stream.readBitsNoEof(u2, 2);
//warn("New block: bfinal={}, btype={}\n", .{ bfinal, btype });
self.is_last_block = switch (bfinal) {
0 => false,
1 => true,
};
self.current_block = try switch (btype) {
0 => ThisRawBlock.fromBitStream(&self.read_stream),
1 => ThisBlock{
.Huffman = ThisHuffmanBlock{
.tree = ThisBlockTree.makeStatic(),
},
},
2 => ThisBlock{
.Huffman = ThisHuffmanBlock{
.tree = try ThisBlockTree.fromBitStream(&self.read_stream),
},
},
else => error.Failed,
};
}
fn readBytesFromWindow(self: *Self, buffer: []u8) !usize {
var num_bytes_read = try self.window.readElementsFromEnd(buffer, self.bytes_to_read_from_window);
self.bytes_to_read_from_window -= num_bytes_read;
//warn("{c}", .{byte});
return num_bytes_read;
}
fn readElementFromBlockDirectly(self: *Self) !u9 {
return try switch (self.current_block) {
.Empty => error.EndOfBlock,
.Raw => self.current_block.Raw.readElementFrom(&self.read_stream),
.Huffman => self.current_block.Huffman.readElementFrom(&self.read_stream),
else => error.Failed,
};
}
fn readElementFromBlock(self: *Self) !u9 {
return self.readElementFromBlockDirectly() catch |err| {
if (err == error.EndOfBlock) {
self.current_block = ThisBlock.Empty;
try self.fetchNextBlock();
return try self.readElementFromBlockDirectly();
} else {
return err;
}
};
}
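/// Interpret one decoded symbol: 0..255 are literal bytes appended to the
/// window; 257..285 start a <length, distance> back-reference copy.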
fn processBlockElement(self: *Self, v: u9) !void {
switch (v) {
0...255 => {
try self.window.appendElement(@intCast(u8, v));
self.bytes_to_read_from_window += 1;
},
256 => unreachable, // 256 (end of block) does NOT appear in this layer!
257...285 => {
const extra_bits_for_len = len_extra_bits_table[v - 257];
const copy_len = len_base_table[v - 257] + try self.read_stream.readBitsNoEof(u5, extra_bits_for_len);
const dist_offset: u9 = try switch (self.current_block) {
.Huffman => self.current_block.Huffman.readDistFrom(&self.read_stream),
else => error.Failed,
};
const extra_bits_for_dist = dist_extra_bits_table[dist_offset];
const copy_dist = dist_base_table[dist_offset] + try self.read_stream.readBitsNoEof(u13, extra_bits_for_dist);
//warn("copy {} offset {}\n", copy_len, copy_dist);
//warn("len def v={} base={} len={}\n", v, len_base_table[v-257], extra_bits_for_len);
try self.window.copyElementsFromEnd(copy_dist, copy_len);
self.bytes_to_read_from_window += copy_len;
},
else => return error.Failed,
}
}
fn readBytes(self: *Self, buffer: []u8) !usize {
// Do we have bytes to read from the window?
if (self.bytes_to_read_from_window >= 1) {
// Yes - read from there first.
return try self.readBytesFromWindow(buffer);
}
const v: u9 = try self.readElementFromBlock();
try self.processBlockElement(v);
// At this point we should have something in the window.
// If not, well, enjoy your runtime error.
return try self.readBytesFromWindow(buffer);
}
};
} | src/raw_deflate_reader.zig |
const builtin = @import("builtin");
const TypeId = builtin.TypeId;
const std = @import("std");
const math = std.math;
const assert = std.debug.assert;
const warn = std.debug.warn;
const misc = @import("modules/zig-misc/index.zig");
const saturateCast = misc.saturateCast;
const DBG = false;
pub const ColorU8 = Color(u8, u8, u8, u8);
pub fn Color(comptime A: type, comptime R: type, comptime G: type, comptime B: type) type {
return struct {
const Self = @This();
pub const Black = Self{ .a = misc.maxValue(A), .r = misc.minValue(R), .g = misc.minValue(G), .b = misc.minValue(B) };
pub const White = Self{ .a = misc.maxValue(A), .r = misc.maxValue(R), .g = misc.maxValue(G), .b = misc.maxValue(B) };
pub const Red = Self{ .a = misc.maxValue(A), .r = misc.maxValue(R), .g = misc.minValue(G), .b = misc.minValue(B) };
pub const Green = Self{ .a = misc.maxValue(A), .r = misc.minValue(R), .g = misc.maxValue(G), .b = misc.minValue(B) };
pub const Blue = Self{ .a = misc.maxValue(A), .r = misc.minValue(R), .g = misc.minValue(G), .b = misc.maxValue(B) };
a: A,
r: R,
g: G,
b: B,
pub fn init(a: A, r: R, g: G, b: B) Self {
return Self{
.a = saturateCast(A, a),
.r = saturateCast(R, r),
.g = saturateCast(G, g),
.b = saturateCast(B, b),
};
}
/// Return the color as an a:r:g:b u32
pub fn asU32Argb(pSelf: *const Self) u32 {
var a = @intCast(u32, saturateCast(u8, pSelf.a));
var r = @intCast(u32, saturateCast(u8, pSelf.r));
var g = @intCast(u32, saturateCast(u8, pSelf.g));
var b = @intCast(u32, saturateCast(u8, pSelf.b));
return (a << 24) | (r << 16) | (g << 8) | (b << 0);
}
/// Scale each of the rgb components by other
pub fn colorScale(color: Self, other: f32) Self {
var a: A = color.a;
var r: R = saturateCast(R, math.round(saturateCast(f32, (color.r)) * other));
var g: G = saturateCast(G, math.round(saturateCast(f32, (color.g)) * other));
var b: B = saturateCast(B, math.round(saturateCast(f32, (color.b)) * other));
var result = Self{
.a = a,
.r = r,
.g = g,
.b = b,
};
return result;
}
/// Custom format routine
pub fn format(
pSelf: *const Self,
comptime fmt: []const u8,
context: var,
comptime FmtError: type,
output: fn (@typeOf(context), []const u8) FmtError!void,
) FmtError!void {
try std.fmt.format(context, FmtError, output, "{{ ");
try formatOneColor(A, pSelf.a, fmt, context, FmtError, output, false);
try formatOneColor(R, pSelf.r, fmt, context, FmtError, output, false);
try formatOneColor(G, pSelf.g, fmt, context, FmtError, output, false);
try formatOneColor(B, pSelf.b, fmt, context, FmtError, output, true);
try std.fmt.format(context, FmtError, output, "}}");
}
};
}
fn formatOneColor(
comptime T: type,
color: T,
comptime fmt: []const u8,
context: var,
comptime FmtError: type,
output: fn (@typeOf(context), []const u8) FmtError!void,
last: bool,
) FmtError!void {
switch (@typeId(T)) {
TypeId.Float => try std.fmt.format(context, FmtError, output, "{}{.3}{}", if (math.signbit(color)) "-" else " ", if (math.signbit(color)) -color else color, if (!last) ", " else " "),
TypeId.Int => try std.fmt.format(context, FmtError, output, "{d6}{}", color, if (!last) ", " else " "),
else => @compileError("Expected Float or Int type"),
}
}
test "Color" {
warn("\n");
var cu8 = ColorU8.White;
assert(cu8.a == 0xFF);
assert(cu8.r == 0xFF);
assert(cu8.g == 0xFF);
assert(cu8.b == 0xFF);
assert(cu8.asU32Argb() == 0xFFFFFFFF);
cu8 = ColorU8.Black;
assert(cu8.a == 0xFF);
assert(cu8.r == 0x00);
assert(cu8.g == 0x00);
assert(cu8.b == 0x00);
assert(cu8.asU32Argb() == 0xFF000000);
cu8 = ColorU8.Red;
assert(cu8.a == 0xFF);
assert(cu8.r == 0xFF);
assert(cu8.g == 0x00);
assert(cu8.b == 0x00);
assert(cu8.asU32Argb() == 0xFFFF0000);
cu8 = ColorU8.Green;
assert(cu8.a == 0xFF);
assert(cu8.r == 0x00);
assert(cu8.g == 0xFF);
assert(cu8.b == 0x00);
assert(cu8.asU32Argb() == 0xFF00FF00);
cu8 = ColorU8.Blue;
assert(cu8.a == 0xFF);
assert(cu8.r == 0x00);
assert(cu8.g == 0x00);
assert(cu8.b == 0xFF);
assert(cu8.asU32Argb() == 0xFF0000FF);
var c = Color(f32, f32, f32, f32).init(1, 2, 3, 4);
assert(c.a == f32(1));
assert(c.r == f32(2));
assert(c.g == f32(3));
assert(c.b == f32(4));
assert(c.asU32Argb() == 0x01020304);
warn("c={}:{x8}\n", &c, c.asU32Argb());
var d = Color(u2, i10, i10, i10).init(3, -3, -2, 2);
assert(d.a == u2(3));
assert(d.r == i10(-3));
assert(d.g == i10(-2));
assert(d.b == i10(2));
// This is probably wrong, we should unbias the result!
assert(d.asU32Argb() == 0x03000002);
warn("d={}:{x8}\n", &d, d.asU32Argb());
var u = Color(u2, u10, u10, u10).init(0, 3, 2, 1);
assert(u.a == u2(0));
assert(u.r == u10(3));
assert(u.g == u10(2));
assert(u.b == u10(1));
assert(u.asU32Argb() == 0x00030201);
warn("u={}:{x8}\n", &u, u.asU32Argb());
} | src/color.zig |
const std = @import("std");
const os = @import("root").os;
/// Allocator used to allocate memory for new tasks
const task_alloc = os.memory.vmm.backed(.Ephemeral);
/// Load balancer lock. Locked when scheduler finds the best CPU for the task
/// or when task terminates
var balancer_lock = os.thread.Spinlock{};
/// Move to the next task.
/// NOTE: Should be called with interrupts disabled unless this is
/// the last time the task runs
pub fn wait() void {
os.platform.thread.yield();
}
/// Terminate current task to never run it again
pub fn leave() noreturn {
wait();
unreachable;
}
/// Preempt to the next task
pub fn yield() void {
const state = os.platform.get_and_disable_interrupts();
os.platform.thread.get_current_cpu().executable_tasks.enqueue(os.platform.get_current_task());
os.platform.thread.yield();
os.platform.set_interrupts(state);
}
/// Wake a task that has called `wait`
pub fn wake(task: *os.thread.Task) void {
os.platform.smp.cpus[task.allocated_core_id].executable_tasks.enqueue(task);
}
/// Create a new task that calls a function with given arguments.
/// Uses heap, so don't create tasks in interrupt context
pub fn make_task(func: anytype, args: anytype) !*os.thread.Task {
const task = try task_alloc.create(os.thread.Task);
errdefer task_alloc.destroy(task);
try task.allocate_stack();
errdefer task.free_stack();
task.paging_context = os.platform.get_current_task().paging_context;
// Find the best CPU for the task
var best_cpu_idx: usize = 0;
{
const state = balancer_lock.lock();
// TODO: maybe something more sophisticated?
for (os.platform.smp.cpus) |*cpu, i| {
if (cpu.tasks_count < os.platform.smp.cpus[best_cpu_idx].tasks_count) {
best_cpu_idx = i;
}
}
task.allocated_core_id = best_cpu_idx;
os.platform.smp.cpus[best_cpu_idx].tasks_count += 1;
balancer_lock.unlock(state);
}
os.log("Task allocated to core {}\n", .{best_cpu_idx});
errdefer {
const state = balancer_lock.lock();
os.platform.smp.cpus[best_cpu_idx].tasks_count -= 1;
balancer_lock.unlock(state);
}
// Initialize task in a way that it will execute func with args on the startup
const entry = os.thread.NewTaskEntry.alloc(task, func, args);
try os.platform.thread.init_task_call(task, entry);
return task;
}
/// Create and start a new task that calls a function with given arguments.
pub fn spawn_task(func: anytype, args: anytype) !void {
const task = try make_task(func, args);
os.platform.smp.cpus[task.allocated_core_id].executable_tasks.enqueue(task);
}
/// Exit current task
/// TODO: Should be reimplemented with URM
pub fn exit_task() noreturn {
const task = os.platform.thread.self_exited();
const id = if (task) |t| t.allocated_core_id else 0;
const state = balancer_lock.lock();
os.platform.smp.cpus[id].tasks_count -= 1;
balancer_lock.unlock(state);
if (task) |t| {
task_alloc.destroy(t);
}
leave();
}
/// Initialize scheduler
pub fn init(task: *os.thread.Task) void {
os.platform.smp.cpus[0].bootstrap_stacks();
os.platform.bsp_pre_scheduler_init();
os.platform.set_current_task(task);
os.platform.thread.get_current_cpu().executable_tasks.init();
} | src/thread/scheduler.zig |
const std = @import("std");
const builtin = @import("builtin");
const time = std.os.time;
const winmm = @import("audio/backend/winmm.zig");
// Assumption: the backend is hard-wired to winmm here; the original code
// presumably selected a backend per platform (see `Backend` below).
const sys = winmm;
pub const Backend = enum {
Wasapi,
Winmm,
Null,
};
pub const PlayerError = error{};
pub const AudioMode = union(enum) {
const Self = @This();
Mono: usize,
Stereo: usize,
pub fn channelCount(self: Self) usize {
return switch (self) {
AudioMode.Mono => usize(1),
AudioMode.Stereo => usize(2),
};
}
};
pub const Player = struct {
const Self = @This();
player: sys.Player,
pub sample_rate: usize,
mode: AudioMode,
buf_size: usize,
pub fn new(allocator: *std.mem.Allocator, sample_rate: usize, mode: AudioMode, buf_size: usize) !Self {
return Self{
.player = try sys.Player.new(allocator, sample_rate, mode, buf_size),
.sample_rate = sample_rate,
.buf_size = buf_size,
.mode = mode,
};
}
fn bytes_per_sec(self: Self) usize {
return self.sample_rate * switch (self.mode) {
AudioMode.Mono => |bps| bps * self.mode.channelCount(),
AudioMode.Stereo => |bps| bps * self.mode.channelCount(),
};
}
pub fn write(self: *Self, bytes: []const u8) !void {
var written: usize = 0;
var data = bytes;
while (data.len > 0) {
const n = try self.player.write(data);
written += n;
data = data[n..];
if (data.len > 0) {
time.sleep(time.ns_per_s * self.buf_size / self.bytes_per_sec() / 8);
}
}
}
pub fn close(self: *Self) !void {
time.sleep(time.ns_per_s * self.buf_size / self.bytes_per_sec());
try self.player.close();
}
};
// test "Player -- raw audio" {
// var direct_allocator = std.heap.DirectAllocator.init();
// const alloc = &direct_allocator.allocator;
// defer direct_allocator.deinit();
// const mode = AudioMode { .Stereo = 2 };
// var player = try Player.new(alloc, 44100, mode, 2048);
// var stream = player.outStream().stream;
// var timer = try time.Timer.start();
// const duration = time.ns_per_s * 5;
// const dt = 1.0 / @intToFloat(f32, player.sample_rate);
// while (timer.read() < duration) {
// try player.write([]u8{127}**2048);
// // var i: usize = 0;
// // while (i < player.buf_size) : (i += 1) {
// // const p = @intToFloat(f32, i) / @intToFloat(f32, player.buf_size);
// // const out = std.math.sin(p * 2.0 * std.math.pi);
// // try stream.writeByte(@floatToInt(u8, out));
// // try stream.writeByte(@floatToInt(u8, out));
// // }
// }
// } | src/audio.zig |
const std = @import("std");
const assert = std.debug.assert;
const warn = std.debug.warn;
const Allocator = std.mem.Allocator;
const mem = std.mem;
const HashMap = std.HashMap;
const AutoHashMap = std.AutoHashMap;
test "TaggedUnion.HashMap" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
var pAllocator = &direct_allocator.allocator;
const Tagged8 = union(enum) {
int1_8: i8,
int2_8: i8,
uint2_8: u8,
uint1_8: u8,
};
var t8 = Tagged8 { .uint2_8=28, };
warn("sizeof(Tagged8)={} t8={}\n", usize(@sizeOf(Tagged8)), t8);
// Compile error: packed tagged unions are not allowed
//const PackedTagged8 = packed union(enum) {
// int8: i8,
// uint8: u8,
//};
//warn("sizeof(PackedTagged8)={}\n", usize(@sizeOf(PackedTagged8)));
const TaggedUnionValue = union(enum) {
int64: i64,
uint64: u64,
};
const NamedTaggedUnionValueMap = HashMap([]const u8, TaggedUnionValue, mem.hash_slice_u8, mem.eql_slice_u8);
var map = NamedTaggedUnionValueMap.init(pAllocator);
// Add a new entry to the empty map.
// - returns an error on allocation failure
// - r != null: r holds the previous value
// - r == null: the entry was newly added to the map
var r = try map.put("value1", TaggedUnionValue { .int64=-123, });
if (r != null) {
warn("value1, already inserted but this is impossible, failing\n");
return error.value1AlreadyInserted;
}
// Get the value we just inserted
var pR: ?*NamedTaggedUnionValueMap.KV = map.get("value1");
assert(pR != null);
assert(pR.?.value.int64 == -123);
r = try map.put("value2", TaggedUnionValue { .uint64=123, });
if (r != null) {
warn("value2, already inserted but this is impossible, failing\n");
return error.value2AlreadyInserted;
}
// Get the value we just inserted
pR = map.get("value2");
assert(pR != null);
assert(pR.?.value.uint64 == 123);
} | tagged_union/tagged_union.zig |
const std = @import("std");
const assert = std.debug.assert;
const warn = std.debug.warn;
const Allocator = std.mem.Allocator;
const mem = std.mem;
const HashMap = std.HashMap;
const AutoHashMap = std.AutoHashMap;
test "HashMap.struct" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
var pAllocator = &direct_allocator.allocator;
const NameValue = struct {
name: []const u8,
value: []const u8,
};
const NameValueMap = HashMap([]const u8, NameValue, mem.hash_slice_u8, mem.eql_slice_u8);
var map = NameValueMap.init(pAllocator);
// Add a new NameValue to the empty map.
// - returns an error on allocation failure
// - r != null: r holds the previous value
// - r == null: the NameValue was newly added to the map
var r = try map.put("wink saville", NameValue { .name="wink saville", .value="hello from wink", });
if (r != null) {
warn("wink saville, already inserted but this is impossible, failing\n");
return error.NameAlreadyInserted;
}
// Get the value we just inserted
var pR: ?*NameValueMap.KV = map.get("wink saville");
assert(pR != null);
assert(mem.eql(u8, pR.?.value.name, "wink saville"));
assert(mem.eql(u8, pR.?.value.value, "hello from wink"));
// Replace previous NameValue with a new value
r = try map.put("wink saville", NameValue { .name="wink saville", .value="bye from wink", });
if (r == null) {
warn("wink saville, was already inserted and this is impossible, failing\n");
return error.NameWasntInserted;
}
// Old value is returned
assert(mem.eql(u8, r.?.value.name, "wink saville"));
assert(mem.eql(u8, r.?.value.value, "hello from wink"));
// Get the "new" NameValue we inserted
pR = map.get("wink saville"); // orelse return error.ExpectingSuccess;
assert(pR != null);
assert(mem.eql(u8, pR.?.value.name, "wink saville"));
assert(mem.eql(u8, pR.?.value.value, "bye from wink"));
// Expecting failure, got == null
pR = map.get("wink");
assert(pR == null);
}
// From std/hash_map.zig
test "HashMap.basic" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
var map = AutoHashMap(i32, i32).init(&direct_allocator.allocator);
defer map.deinit();
assert((try map.put(1, 11)) == null);
assert((try map.put(2, 22)) == null);
assert((try map.put(3, 33)) == null);
assert((try map.put(4, 44)) == null);
assert((try map.put(5, 55)) == null);
assert((try map.put(5, 66)).?.value == 55);
assert((try map.put(5, 55)).?.value == 66);
const gop1 = try map.getOrPut(5);
assert(gop1.found_existing == true);
assert(gop1.kv.value == 55);
gop1.kv.value = 77;
assert(map.get(5).?.value == 77);
const gop2 = try map.getOrPut(99);
assert(gop2.found_existing == false);
gop2.kv.value = 42;
assert(map.get(99).?.value == 42);
assert(map.contains(2));
assert(map.get(2).?.value == 22);
_ = map.remove(2);
assert(map.remove(2) == null);
assert(map.get(2) == null);
}
// From std/hash_map.zig
test "HashMap.iterator" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
var reset_map = AutoHashMap(i32, i32).init(&direct_allocator.allocator);
defer reset_map.deinit();
assert((try reset_map.put(1, 11)) == null);
assert((try reset_map.put(2, 22)) == null);
assert((try reset_map.put(3, 33)) == null);
var keys = []i32{
3,
2,
1,
};
var values = []i32{
33,
22,
11,
};
var it = reset_map.iterator();
var count: usize = 0;
while (it.next()) |next| {
assert(next.key == keys[count]);
assert(next.value == values[count]);
count += 1;
}
assert(count == 3);
assert(it.next() == null);
it.reset();
count = 0;
while (it.next()) |next| {
assert(next.key == keys[count]);
assert(next.value == values[count]);
count += 1;
if (count == 2) break;
}
it.reset();
var entry = it.next().?;
assert(entry.key == keys[0]);
assert(entry.value == values[0]);
} | hash_map/hash_map_x.zig |
const std = @import("std");
const ConstantPool = @This();
allocator: std.mem.Allocator,
entries: std.ArrayListUnmanaged(Entry),
utf8_entries_map: std.StringHashMapUnmanaged(u16),
pub fn init(allocator: std.mem.Allocator, entry_count: u16) !*ConstantPool {
var c = try allocator.create(ConstantPool);
c.* = ConstantPool{
.allocator = allocator,
.entries = try std.ArrayListUnmanaged(Entry).initCapacity(allocator, entry_count),
.utf8_entries_map = std.StringHashMapUnmanaged(u16){},
};
c.entries.items.len = entry_count;
return c;
}
pub fn get(self: ConstantPool, index: u16) Entry {
return self.entries.items[index - 1];
}
pub fn deinit(self: *ConstantPool) void {
for (self.entries.items) |entry| {
switch (entry) {
.utf8 => |info| self.allocator.free(info.bytes),
else => {},
}
}
self.entries.deinit(self.allocator);
self.utf8_entries_map.deinit(self.allocator);
self.allocator.destroy(self);
}
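/// Reflection-based decoder for the simple constant pool entries: every field
/// after `constant_pool` is read big-endian from the stream (only integer and
/// enum fields are supported).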
pub fn Serialize(comptime T: type) type {
return struct {
pub fn decode(constant_pool: *const ConstantPool, reader: anytype) !T {
var value: T = undefined;
value.constant_pool = constant_pool;
inline for (std.meta.fields(T)[1..]) |field| {
@field(value, field.name) = switch (@typeInfo(field.field_type)) {
.Int => try reader.readIntBig(field.field_type),
.Enum => |info| @intToEnum(field.field_type, try reader.readIntBig(info.tag_type)),
else => @compileError("Decode not implemented: " ++ @typeName(field.field_type)),
};
}
return value;
}
};
}
/// Locate a Utf8Info entry that has the value `bytes`
/// Useful for attributes that need an entry describing their name
pub fn locateUtf8Entry(self: *ConstantPool, bytes: []const u8) !u16 {
var get_or_put_output = try self.utf8_entries_map.getOrPut(self.allocator, bytes);
if (get_or_put_output.found_existing) {
return get_or_put_output.value_ptr.*;
} else {
var entry = try self.entries.addOne(self.allocator);
get_or_put_output.value_ptr.* = @intCast(u16, self.entries.items.len);
entry.* = Entry{ .utf8 = .{ .constant_pool = self, .bytes = try self.allocator.dupe(u8, bytes) } };
return get_or_put_output.value_ptr.*;
}
}
pub fn decodeEntries(self: *ConstantPool, reader: anytype) !void {
var constant_pool_index: u16 = 0;
while (constant_pool_index < self.entries.items.len) : (constant_pool_index += 1) {
var cp = try self.decodeEntry(reader);
if (cp == .utf8) {
var get_or_put_output = try self.utf8_entries_map.getOrPut(self.allocator, cp.utf8.bytes);
if (!get_or_put_output.found_existing) {
get_or_put_output.value_ptr.* = constant_pool_index + 1;
}
}
self.entries.items[constant_pool_index] = cp;
// Doubles and longs take up two slots because Java is bad (https://docs.oracle.com/javase/specs/jvms/se16/html/jvms-4.html#jvms-4.10.2.3)
if (cp == .double or cp == .long) {
constant_pool_index += 1;
}
}
}
pub fn decodeEntry(self: ConstantPool, reader: anytype) !Entry {
var tag = try reader.readIntBig(u8);
inline for (@typeInfo(Tag).Enum.fields) |f, i| {
const this_tag_value = @field(Tag, f.name);
if (tag == @enumToInt(this_tag_value)) {
const T = std.meta.fields(Entry)[i].field_type;
var value = if (@hasDecl(T, "decode")) try @field(T, "decode")(&self, reader) else try Serialize(T).decode(&self, reader);
return @unionInit(Entry, f.name, value);
}
}
unreachable;
}
pub const Tag = enum(u8) {
class = 7,
fieldref = 9,
methodref = 10,
interface_methodref = 11,
string = 8,
integer = 3,
float = 4,
long = 5,
double = 6,
name_and_type = 12,
utf8 = 1,
method_handle = 15,
method_type = 16,
dynamic = 17,
invoke_dynamic = 18,
module = 19,
package = 20,
};
pub const ClassInfo = struct {
constant_pool: *const ConstantPool,
/// Points to a `Utf8Info`
name_index: u16,
pub fn getName(self: ClassInfo) Utf8Info {
return self.constant_pool.get(self.name_index).utf8;
}
};
pub const RefInfo = struct {
constant_pool: *const ConstantPool,
/// Points to class or interface
class_index: u16,
/// Points to a `NameAndTypeInfo`
name_and_type_index: u16,
pub fn getClassInfo(self: RefInfo) ClassInfo {
return self.constant_pool.get(self.class_index).class;
}
pub fn getNameAndTypeInfo(self: RefInfo) NameAndTypeInfo {
return self.constant_pool.get(self.name_and_type_index).name_and_type;
}
};
/// Points to a `Utf8Info`
pub const StringInfo = struct {
constant_pool: *const ConstantPool,
string_index: u16,
};
/// Represents 4-byte (32 bit) integer
pub const IntegerInfo = struct {
constant_pool: *const ConstantPool,
bytes: u32,
};
/// Represents 4-byte (32 bit) float
pub const FloatInfo = struct {
constant_pool: *const ConstantPool,
value: u32,
};
pub const LongInfo = struct {
constant_pool: *const ConstantPool,
value: u64,
};
pub const DoubleInfo = struct {
constant_pool: *const ConstantPool,
value: u64,
};
pub const NameAndTypeInfo = struct {
const Self = @This();
constant_pool: *const ConstantPool,
/// Points to a `Utf8Info` describing a unique field or method name or <init>
name_index: u16,
/// Points to a `Utf8Info` representing a field or method descriptor
descriptor_index: u16,
pub fn getName(self: Self) Utf8Info {
return self.constant_pool.get(self.name_index).utf8;
}
pub fn getDescriptor(self: Self) Utf8Info {
return self.constant_pool.get(self.descriptor_index).utf8;
}
};
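/// CONSTANT_Utf8_info. Note that the class file format stores strings in
/// "modified UTF-8"; the bytes are kept verbatim here.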
pub const Utf8Info = struct {
const Self = @This();
constant_pool: *const ConstantPool,
bytes: []u8,
pub fn decode(constant_pool: *const ConstantPool, reader: anytype) !Self {
var length = try reader.readIntBig(u16);
var bytes = try constant_pool.allocator.alloc(u8, length);
_ = try reader.readAll(bytes);
return Self{
.constant_pool = constant_pool,
.bytes = bytes,
};
}
pub fn encode(self: Utf8Info, writer: anytype) !void {
try writer.writeIntBig(u16, @intCast(u16, self.bytes.len));
try writer.writeAll(self.bytes);
}
pub fn format(value: Utf8Info, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
try writer.print("Utf8Info(\"{s}\")", .{value.bytes});
}
};
pub const ReferenceKind = enum(u8) {
get_field = 1,
get_static = 2,
put_field = 3,
put_static = 4,
invoke_virtual = 5,
invoke_static = 6,
invoke_special = 7,
new_invoke_special = 8,
invoke_interface = 9,
};
pub const MethodHandleInfo = struct {
const Self = @This();
constant_pool: *const ConstantPool,
reference_kind: ReferenceKind,
/// Based on ref kind:
/// 1, 2, 3, 4 - points to fieldref
/// 5, 8 - points to methodref
/// 6, 7 - points to methodref or interfacemethodref
/// 9 - Must point to interfacemethodref
reference_index: u16,
// fn parse(allocator: std.mem.Allocator, reader: anytype) !Self {
// return Self{
// .reference_kind = @intToEnum(ReferenceKind, try reader.readIntBig(u8)),
// .reference_index = try reader.readIntBig(u16),
// };
// }
pub fn getReference(self: Self, constant_pool: []Entry) Entry {
var ref = constant_pool[self.reference_index - 1];
switch (self.reference_kind) {
.get_field, .get_static, .put_field, .put_static => std.debug.assert(std.meta.activeTag(ref) == .fieldref),
.invoke_virtual, .new_invoke_special => std.debug.assert(std.meta.activeTag(ref) == .methodref),
.invoke_static, .invoke_special => std.debug.assert(std.meta.activeTag(ref) == .methodref or std.meta.activeTag(ref) == .interface_methodref),
.invoke_interface => std.debug.assert(std.meta.activeTag(ref) == .interface_methodref),
}
return ref;
}
};
pub const MethodTypeInfo = struct {
const Self = @This();
constant_pool: *const ConstantPool,
descriptor_index: u16,
pub fn getDescriptor(self: Self, constant_pool: []Entry) Utf8Info {
return constant_pool[self.descriptor_index - 1].utf8;
}
};
pub const DynamicInfo = struct {
constant_pool: *const ConstantPool,
bootstrap_method_attr_index: u16,
name_and_type_index: u16,
pub fn getNameAndTypeInfo(self: DynamicInfo, constant_pool: []Entry) NameAndTypeInfo {
return constant_pool[self.name_and_type_index - 1].name_and_type;
}
};
pub const InvokeDynamicInfo = struct {
constant_pool: *const ConstantPool,
bootstrap_method_attr_index: u16,
name_and_type_index: u16,
pub fn getNameAndTypeInfo(self: InvokeDynamicInfo, constant_pool: []Entry) NameAndTypeInfo {
return constant_pool[self.name_and_type_index - 1].name_and_type;
}
};
pub const ModuleInfo = struct {
constant_pool: *const ConstantPool,
name_index: u16,
pub fn getName(self: ModuleInfo, constant_pool: []Entry) Utf8Info {
return constant_pool[self.name_index - 1].utf8;
}
};
pub const PackageInfo = struct {
constant_pool: *const ConstantPool,
name_index: u16,
pub fn getName(self: PackageInfo, constant_pool: []Entry) Utf8Info {
return constant_pool[self.name_index - 1].utf8;
}
};
pub const Entry = union(Tag) {
const Self = @This();
class: ClassInfo,
fieldref: RefInfo,
methodref: RefInfo,
interface_methodref: RefInfo,
string: StringInfo,
integer: IntegerInfo,
float: FloatInfo,
long: LongInfo,
double: DoubleInfo,
name_and_type: NameAndTypeInfo,
utf8: Utf8Info,
method_handle: MethodHandleInfo,
method_type: MethodTypeInfo,
dynamic: DynamicInfo,
invoke_dynamic: InvokeDynamicInfo,
module: ModuleInfo,
package: PackageInfo,
pub fn encode(self: Entry, writer: anytype) !void {
inline for (@typeInfo(Tag).Enum.fields) |f, i| {
const this_tag_value = @field(Tag, f.name);
if (@enumToInt(self) == @enumToInt(this_tag_value)) {
try writer.writeByte(@enumToInt(self));
const T = std.meta.fields(Entry)[i].field_type;
var value = @field(self, f.name);
if (@hasDecl(T, "encode"))
return try @field(value, "encode")(writer);
inline for (std.meta.fields(T)[1..]) |field| {
switch (@typeInfo(field.field_type)) {
.Int => try writer.writeIntBig(field.field_type, @field(value, field.name)),
.Enum => |info| try writer.writeIntBig(info.tag_type, @enumToInt(@field(value, field.name))),
else => @compileError("Encode not implemented: " ++ @typeName(field.field_type)),
}
}
}
}
}
}; | src/ConstantPool.zig |
const std = @import("std");
const cu = @import("cuda_cimports.zig").cu;
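/// Device attributes queried through cuDeviceGetAttribute; the values mirror
/// the CUdevice_attribute enum of the CUDA driver API.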
pub const Attribute = enum(c_uint) {
/// Maximum number of threads per block
MaxThreadsPerBlock = 1,
/// Maximum block dimension X
MaxBlockDimX = 2,
/// Maximum block dimension Y
MaxBlockDimY = 3,
/// Maximum block dimension Z
MaxBlockDimZ = 4,
/// Maximum grid dimension X
MaxGridDimX = 5,
/// Maximum grid dimension Y
MaxGridDimY = 6,
/// Maximum grid dimension Z
MaxGridDimZ = 7,
/// Maximum shared memory available per block in bytes
MaxSharedMemoryPerBlock = 8,
/// Memory available on device for __constant__ variables in a CUDA C kernel in bytes
TotalConstantMemory = 9,
/// Warp size in threads
WarpSize = 10,
/// Maximum pitch in bytes allowed by memory copies
MaxPitch = 11,
/// Maximum number of 32-bit registers available per block
MaxRegistersPerBlock = 12,
/// Typical clock frequency in kilohertz
ClockRate = 13,
/// Alignment requirement for textures
TextureAlignment = 14,
/// Device can possibly copy memory and execute a kernel concurrently. Deprecated: use AsyncEngineCount instead.
GpuOverlap = 15,
/// Number of multiprocessors on device
MultiprocessorCount = 16,
/// Specifies whether there is a run time limit on kernels
KernelExecTimeout = 17,
/// Device is integrated with host memory
Integrated = 18,
/// Device can map host memory into CUDA address space
CanMapHostMemory = 19,
/// Compute mode (See ::CUcomputemode for details)
ComputeMode = 20,
/// Maximum 1D texture width
MaximumTexture1dWidth = 21,
/// Maximum 2D texture width
MaximumTexture2dWidth = 22,
/// Maximum 2D texture height
MaximumTexture2dHeight = 23,
/// Maximum 3D texture width
MaximumTexture3dWidth = 24,
/// Maximum 3D texture height
MaximumTexture3dHeight = 25,
/// Maximum 3D texture depth
MaximumTexture3dDepth = 26,
/// Maximum 2D layered texture width
MaximumTexture2dLayeredWidth = 27,
/// Maximum 2D layered texture height
MaximumTexture2dLayeredHeight = 28,
/// Maximum layers in a 2D layered texture
MaximumTexture2dLayeredLayers = 29,
/// Alignment requirement for surfaces
SurfaceAlignment = 30,
/// Device can possibly execute multiple kernels concurrently
ConcurrentKernels = 31,
/// Device has ECC support enabled
EccEnabled = 32,
/// PCI bus ID of the device
PciBusID = 33,
/// PCI device ID of the device
PciDeviceID = 34,
/// Device is using TCC driver model
TccDriver = 35,
/// Peak memory clock frequency in kilohertz
MemoryClockRate = 36,
/// Global memory bus width in bits
GlobalMemoryBusWidth = 37,
/// Size of L2 cache in bytes
L2CacheSize = 38,
/// Maximum resident threads per multiprocessor
MaxThreadsPerMultiprocessor = 39,
/// Number of asynchronous engines
AsyncEngineCount = 40,
/// Device shares a unified address space with the host
UnifiedAddressing = 41,
/// Maximum 1D layered texture width
MaximumTexture1dLayeredWidth = 42,
/// Maximum layers in a 1D layered texture
MaximumTexture1dLayeredLayers = 43,
/// Deprecated, do not use.
CanTex2dGather = 44,
/// Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set
MaximumTexture2dGatherWidth = 45,
/// Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set
MaximumTexture2dGatherHeight = 46,
/// Alternate maximum 3D texture width
MaximumTexture3dWidthAlternate = 47,
/// Alternate maximum 3D texture height
MaximumTexture3dHeightAlternate = 48,
/// Alternate maximum 3D texture depth
MaximumTexture3dDepthAlternate = 49,
/// PCI domain ID of the device
PciDomainID = 50,
/// Pitch alignment requirement for textures
TexturePitchAlignment = 51,
/// Maximum cubemap texture width/height
MaximumTexturecubemapWidth = 52,
/// Maximum cubemap layered texture width/height
MaximumTexturecubemapLayeredWidth = 53,
/// Maximum layers in a cubemap layered texture
MaximumTexturecubemapLayeredLayers = 54,
/// Maximum 1D surface width
MaximumSurface1dWidth = 55,
/// Maximum 2D surface width
MaximumSurface2dWidth = 56,
/// Maximum 2D surface height
MaximumSurface2dHeight = 57,
/// Maximum 3D surface width
MaximumSurface3dWidth = 58,
/// Maximum 3D surface height
MaximumSurface3dHeight = 59,
/// Maximum 3D surface depth
MaximumSurface3dDepth = 60,
/// Maximum 1D layered surface width
MaximumSurface1dLayeredWidth = 61,
/// Maximum layers in a 1D layered surface
MaximumSurface1dLayeredLayers = 62,
/// Maximum 2D layered surface width
MaximumSurface2dLayeredWidth = 63,
/// Maximum 2D layered surface height
MaximumSurface2dLayeredHeight = 64,
/// Maximum layers in a 2D layered surface
MaximumSurface2dLayeredLayers = 65,
/// Maximum cubemap surface width
MaximumSurfacecubemapWidth = 66,
/// Maximum cubemap layered surface width
MaximumSurfacecubemapLayeredWidth = 67,
/// Maximum layers in a cubemap layered surface
MaximumSurfacecubemapLayeredLayers = 68,
/// Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead.
MaximumTexture1dLinearWidth = 69,
/// Maximum 2D linear texture width
MaximumTexture2dLinearWidth = 70,
/// Maximum 2D linear texture height
MaximumTexture2dLinearHeight = 71,
/// Maximum 2D linear texture pitch in bytes
MaximumTexture2dLinearPitch = 72,
/// Maximum mipmapped 2D texture width
MaximumTexture2dMipmappedWidth = 73,
/// Maximum mipmapped 2D texture height
MaximumTexture2dMipmappedHeight = 74,
/// Major compute capability version number
ComputeCapabilityMajor = 75,
/// Minor compute capability version number
ComputeCapabilityMinor = 76,
/// Maximum mipmapped 1D texture width
MaximumTexture1dMipmappedWidth = 77,
/// Device supports stream priorities
StreamPrioritiesSupported = 78,
/// Device supports caching globals in L1
GlobalL1CacheSupported = 79,
/// Device supports caching locals in L1
LocalL1CacheSupported = 80,
/// Maximum shared memory available per multiprocessor in bytes
MaxSharedMemoryPerMultiprocessor = 81,
/// Maximum number of 32-bit registers available per multiprocessor
MaxRegistersPerMultiprocessor = 82,
/// Device can allocate managed memory on this system
ManagedMemory = 83,
/// Device is on a multi-GPU board
MultiGpuBoard = 84,
/// Unique id for a group of devices on the same multi-GPU board
MultiGpuBoardGroupID = 85,
/// Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware)
HostNativeAtomicSupported = 86,
/// Ratio of single precision performance (in floating-point operations per second) to double precision performance
SingleToDoublePrecisionPerfRatio = 87,
/// Device supports coherently accessing pageable memory without calling cudaHostRegister on it
PageableMemoryAccess = 88,
/// Device can coherently access managed memory concurrently with the CPU
ConcurrentManagedAccess = 89,
/// Device supports compute preemption.
ComputePreemptionSupported = 90,
/// Device can access host registered memory at the same virtual address as the CPU
CanUseHostPointerForRegisteredMem = 91,
/// ::cuStreamBatchMemOp and related APIs are supported.
CanUseStreamMemOps = 92,
/// 64-bit operations are supported in ::cuStreamBatchMemOp and related APIs.
CanUse64BitStreamMemOps = 93,
/// ::CU_STREAM_WAIT_VALUE_NOR is supported.
CanUseStreamWaitValueNor = 94,
/// Device supports launching cooperative kernels via ::cuLaunchCooperativeKernel
CooperativeLaunch = 95,
/// Deprecated, ::cuLaunchCooperativeKernelMultiDevice is deprecated.
CooperativeMultiDeviceLaunch = 96,
/// Maximum optin shared memory per block
MaxSharedMemoryPerBlockOptin = 97,
/// The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. See \ref CUDA_MEMOP for additional details.
CanFlushRemoteWrites = 98,
/// Device supports host memory registration via ::cudaHostRegister.
HostRegisterSupported = 99,
/// Device accesses pageable memory via the host's page tables.
PageableMemoryAccessUsesHostPageTables = 100,
/// The host can directly access managed memory on the device without migration.
DirectManagedMemAccessFromHost = 101,
/// Device supports virtual memory management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs
VirtualMemoryManagementSupported = 102,
/// Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
HandleTypePosixFileDescriptorSupported = 103,
/// Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
HandleTypeWin32HandleSupported = 104,
/// Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested ::cuMemCreate
HandleTypeWin32KmtHandleSupported = 105,
/// Maximum number of blocks per multiprocessor
MaxBlocksPerMultiprocessor = 106,
/// Device supports compression of memory
GenericCompressionSupported = 107,
/// Maximum L2 persisting lines capacity setting in bytes.
MaxPersistingL2CacheSize = 108,
/// Maximum value of CUaccessPolicyWindow::num_bytes.
MaxAccessPolicyWindowSize = 109,
/// Device supports specifying the GPUDirect RDMA flag with ::cuMemCreate
GpuDirectRdmaWithCudaVmmSupported = 110,
/// Shared memory reserved by CUDA driver per block in bytes
ReservedSharedMemoryPerBlock = 111,
/// Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays
SparseCudaArraySupported = 112,
/// Device supports using the ::cuMemHostRegister flag CU_MEMHOSTREGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU
ReadOnlyHostRegisterSupported = 113,
/// External timeline semaphore interop is supported on the device
TimelineSemaphoreInteropSupported = 114,
/// Device supports using the ::cuMemAllocAsync and ::cuMemPool family of APIs
MemoryPoolsSupported = 115,
/// Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information)
GpuDirectRdmaSupported = 116,
/// The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the ::CUflushGPUDirectRDMAWritesOptions enum
GpuDirectRdmaFlushWritesOptions = 117,
/// GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See ::CUGPUDirectRDMAWritesOrdering for the numerical values returned here.
GpuDirectRdmaWritesOrdering = 118,
/// Handle types supported with mempool based IPC
MempoolSupportedHandleTypes = 119,
};
// TODO: take a CUdevice here, and expose device in the Stream object
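/// Query one attribute of `device`. Driver errors are ignored; the sentinel
/// minInt(i32) is returned if the lookup never succeeds.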
pub fn getAttr(device: u8, attr: Attribute) i32 {
var d: cu.CUdevice = undefined;
_ = cu.cuDeviceGet(&d, device);
var value: i32 = std.math.minInt(i32);
_ = cu.cuDeviceGetAttribute(&value, @enumToInt(attr), d);
return value;
} | cudaz/src/attributes.zig |
const std = @import("std");
const zp = @import("../../zplay.zig");
const gl = zp.deps.gl;
pub const PrimitiveType = enum(c_uint) {
points = gl.GL_POINTS,
line_strip = gl.GL_LINE_STRIP,
line_loop = gl.GL_LINE_LOOP,
lines = gl.GL_LINES,
line_strip_adjacency = gl.GL_LINE_STRIP_ADJACENCY,
lines_adjacency = gl.GL_LINES_ADJACENCY,
triangle_strip = gl.GL_TRIANGLE_STRIP,
triangle_fan = gl.GL_TRIANGLE_FAN,
triangles = gl.GL_TRIANGLES,
triangle_strip_adjacency = gl.GL_TRIANGLE_STRIP_ADJACENCY,
triangles_adjacency = gl.GL_TRIANGLES_ADJACENCY,
};
/// issue draw call
pub fn drawBuffer(
primitive: PrimitiveType,
offset: u32,
vertex_count: u32,
instance_count: ?u32,
) void {
if (instance_count) |count| {
gl.drawArraysInstanced(
@enumToInt(primitive),
@intCast(gl.GLint, offset),
@intCast(gl.GLsizei, vertex_count),
@intCast(gl.GLsizei, count),
);
} else {
gl.drawArrays(
@enumToInt(primitive),
@intCast(gl.GLint, offset),
@intCast(gl.GLsizei, vertex_count),
);
}
gl.util.checkError();
}
/// issue draw call (only accept unsigned-integer indices!)
pub fn drawElements(
primitive: PrimitiveType,
offset: u32,
element_count: u32,
comptime ElementType: type,
instance_count: ?u32,
) void {
if (ElementType != u16 and ElementType != u32) {
std.debug.panic("unsupported element type!", .{});
}
if (instance_count) |count| {
gl.drawElementsInstanced(
@enumToInt(primitive),
@intCast(gl.GLsizei, element_count),
gl.util.dataType(ElementType),
@intToPtr(*allowzero anyopaque, offset),
@intCast(gl.GLsizei, count),
);
} else {
gl.drawElements(
@enumToInt(primitive),
@intCast(gl.GLsizei, element_count),
gl.util.dataType(ElementType),
@intToPtr(*allowzero anyopaque, offset),
);
}
gl.util.checkError();
} | src/graphics/common/drawcall.zig |
const Allocator = std.mem.Allocator;
const FormatInterface = @import("../format_interface.zig").FormatInterface;
const ImageFormat = image.ImageFormat;
const ImageReader = image.ImageReader;
const ImageInfo = image.ImageInfo;
const ImageSeekStream = image.ImageSeekStream;
const PixelFormat = @import("../pixel_format.zig").PixelFormat;
const color = @import("../color.zig");
const errors = @import("../errors.zig");
const image = @import("../image.zig");
const std = @import("std");
const utils = @import("../utils.zig");
// this file implements the Portable Anymap specification provided by
// http://netpbm.sourceforge.net/doc/pbm.html // P1, P4 => Bitmap
// http://netpbm.sourceforge.net/doc/pgm.html // P2, P5 => Graymap
// http://netpbm.sourceforge.net/doc/ppm.html // P3, P6 => Pixmap
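//
// For example, an ASCII graymap ("P2") file begins with a header like
//   P2
//   # a comment
//   4 4
//   255
// followed by width*height sample values separated by whitespace.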
/// One of the three types a netpbm graphic can be stored in.
pub const Format = enum {
/// the image contains black-and-white pixels.
Bitmap,
/// the image contains grayscale pixels.
Grayscale,
/// the image contains RGB pixels.
Rgb,
};
pub const Header = struct {
format: Format,
binary: bool,
width: usize,
height: usize,
max_value: usize,
};
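/// Parse the magic number, image dimensions and, except for bitmaps,
/// the maximum sample value.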
fn parseHeader(stream: ImageReader) !Header {
var header: Header = undefined;
var magic: [2]u8 = undefined;
_ = try stream.read(magic[0..]);
if (std.mem.eql(u8, &magic, "P1")) {
header.binary = false;
header.format = .Bitmap;
header.max_value = 1;
} else if (std.mem.eql(u8, &magic, "P2")) {
header.binary = false;
header.format = .Grayscale;
} else if (std.mem.eql(u8, &magic, "P3")) {
header.binary = false;
header.format = .Rgb;
} else if (std.mem.eql(u8, &magic, "P4")) {
header.binary = true;
header.format = .Bitmap;
header.max_value = 1;
} else if (std.mem.eql(u8, &magic, "P5")) {
header.binary = true;
header.format = .Grayscale;
} else if (std.mem.eql(u8, &magic, "P6")) {
header.binary = true;
header.format = .Rgb;
} else {
return errors.ImageError.InvalidMagicHeader;
}
var read_buffer: [16]u8 = undefined;
header.width = try parseNumber(stream, read_buffer[0..]);
header.height = try parseNumber(stream, read_buffer[0..]);
if (header.format != .Bitmap) {
header.max_value = try parseNumber(stream, read_buffer[0..]);
}
return header;
}
fn isWhitespace(b: u8) bool {
return switch (b) {
// Whitespace (blanks, TABs, CRs, LFs).
'\n', '\r', ' ', '\t' => true,
else => false,
};
}
fn readNextByte(stream: ImageReader) !u8 {
while (true) {
var b = try stream.readByte();
switch (b) {
// Before the whitespace character that delimits the raster, any characters
// from a "#" through the next carriage return or newline character, is a
// comment and is ignored. Note that this is rather unconventional, because
// a comment can actually be in the middle of what you might consider a token.
// Note also that this means if you have a comment right before the raster,
// the newline at the end of the comment is not sufficient to delimit the raster.
'#' => {
// eat up comment
while (true) {
var c = try stream.readByte();
switch (c) {
'\r', '\n' => break,
else => {},
}
}
},
else => return b,
}
}
}
/// Skips whitespace and comments, then reads a number from the stream.
/// The whitespace character following the number is consumed as its terminator.
fn parseNumber(stream: ImageReader, buffer: []u8) !usize {
var input_length: usize = 0;
while (true) {
var b = try readNextByte(stream);
if (isWhitespace(b)) {
if (input_length > 0) {
return try std.fmt.parseInt(usize, buffer[0..input_length], 10);
} else {
continue;
}
} else {
if (input_length >= buffer.len)
return error.OutOfMemory;
buffer[input_length] = b;
input_length += 1;
}
}
}
fn loadBinaryBitmap(header: Header, data: []color.Grayscale1, stream: ImageReader) !void {
var data_index: usize = 0;
const data_end = header.width * header.height;
var bit_reader = std.io.bitReader(.Big, stream);
while (data_index < data_end) : (data_index += 1) {
// set bit is black, cleared bit is white
// bits are "left to right" (so msb to lsb)
const read_bit = try bit_reader.readBitsNoEof(u1, 1);
data[data_index] = color.Grayscale1{ .value = ~read_bit };
}
}
fn loadAsciiBitmap(header: Header, data: []color.Grayscale1, stream: ImageReader) !void {
var data_index: usize = 0;
const data_end = header.width * header.height;
while (data_index < data_end) {
var b = try stream.readByte();
if (isWhitespace(b)) {
continue;
}
// 1 is black, 0 is white in PBM spec.
// we use 1=white, 0=black in u1 format
const pixel = if (b == '0') @as(u1, 1) else @as(u1, 0);
data[data_index] = color.Grayscale1{ .value = pixel };
data_index += 1;
}
}
fn readLinearizedValue(stream: ImageReader, max_value: usize) !u8 {
return if (max_value > 255)
@truncate(u8, 255 * @as(usize, try stream.readIntBig(u16)) / max_value)
else
@truncate(u8, 255 * @as(usize, try stream.readByte()) / max_value);
}
fn loadBinaryGraymap(header: Header, pixels: *color.ColorStorage, stream: ImageReader) !void {
var data_index: usize = 0;
const data_end = header.width * header.height;
if (header.max_value <= 255) {
while (data_index < data_end) : (data_index += 1) {
pixels.Grayscale8[data_index] = color.Grayscale8{ .value = try readLinearizedValue(stream, header.max_value) };
}
} else {
while (data_index < data_end) : (data_index += 1) {
pixels.Grayscale16[data_index] = color.Grayscale16{ .value = try stream.readIntBig(u16) };
}
}
}
fn loadAsciiGraymap(header: Header, pixels: *color.ColorStorage, stream: ImageReader) !void {
var read_buffer: [16]u8 = undefined;
var data_index: usize = 0;
const data_end = header.width * header.height;
if (header.max_value <= 255) {
while (data_index < data_end) : (data_index += 1) {
pixels.Grayscale8[data_index] = color.Grayscale8{ .value = @truncate(u8, try parseNumber(stream, read_buffer[0..])) };
}
} else {
while (data_index < data_end) : (data_index += 1) {
pixels.Grayscale16[data_index] = color.Grayscale16{ .value = @truncate(u16, try parseNumber(stream, read_buffer[0..])) };
}
}
}
fn loadBinaryRgbmap(header: Header, data: []color.Rgb24, stream: ImageReader) !void {
var data_index: usize = 0;
const data_end = header.width * header.height;
while (data_index < data_end) : (data_index += 1) {
data[data_index] = color.Rgb24{
.R = try readLinearizedValue(stream, header.max_value),
.G = try readLinearizedValue(stream, header.max_value),
.B = try readLinearizedValue(stream, header.max_value),
};
}
}
fn loadAsciiRgbmap(header: Header, data: []color.Rgb24, stream: ImageReader) !void {
var read_buffer: [16]u8 = undefined;
var data_index: usize = 0;
const data_end = header.width * header.height;
while (data_index < data_end) : (data_index += 1) {
var r = try parseNumber(stream, read_buffer[0..]);
var g = try parseNumber(stream, read_buffer[0..]);
var b = try parseNumber(stream, read_buffer[0..]);
data[data_index] = color.Rgb24{
.R = @truncate(u8, 255 * r / header.max_value),
.G = @truncate(u8, 255 * g / header.max_value),
.B = @truncate(u8, 255 * b / header.max_value),
};
}
}
fn Netpbm(comptime image_format: ImageFormat, comptime header_numbers: []const u8) type {
return struct {
header: Header = undefined,
const Self = @This();
pub const EncoderOptions = struct {
binary: bool,
};
pub fn formatInterface() FormatInterface {
return FormatInterface{
.format = @ptrCast(FormatInterface.FormatFn, format),
.formatDetect = @ptrCast(FormatInterface.FormatDetectFn, formatDetect),
.readForImage = @ptrCast(FormatInterface.ReadForImageFn, readForImage),
.writeForImage = @ptrCast(FormatInterface.WriteForImageFn, writeForImage),
};
}
pub fn format() ImageFormat {
return image_format;
}
pub fn formatDetect(reader: ImageReader, seek_stream: ImageSeekStream) !bool {
_ = seek_stream;
var magic_number_buffer: [2]u8 = undefined;
_ = try reader.read(magic_number_buffer[0..]);
if (magic_number_buffer[0] != 'P') {
return false;
}
var found = false;
for (header_numbers) |number| {
if (magic_number_buffer[1] == number) {
found = true;
break;
}
}
return found;
}
pub fn readForImage(allocator: Allocator, reader: ImageReader, seek_stream: ImageSeekStream, pixels: *?color.ColorStorage) !ImageInfo {
var netpbm_file = Self{};
try netpbm_file.read(allocator, reader, seek_stream, pixels);
var image_info = ImageInfo{};
image_info.width = netpbm_file.header.width;
image_info.height = netpbm_file.header.height;
return image_info;
}
pub fn writeForImage(allocator: Allocator, write_stream: image.ImageWriterStream, seek_stream: ImageSeekStream, pixels: color.ColorStorage, save_info: image.ImageSaveInfo) !void {
_ = allocator;
var netpbm_file = Self{};
netpbm_file.header.binary = switch (save_info.encoder_options) {
.pbm => |options| options.binary,
.pgm => |options| options.binary,
.ppm => |options| options.binary,
else => false,
};
netpbm_file.header.width = save_info.width;
netpbm_file.header.height = save_info.height;
netpbm_file.header.format = switch (pixels) {
.Grayscale1 => Format.Bitmap,
.Grayscale8, .Grayscale16 => Format.Grayscale,
.Rgb24 => Format.Rgb,
else => return errors.ImageError.UnsupportedPixelFormat,
};
netpbm_file.header.max_value = switch (pixels) {
.Grayscale16 => std.math.maxInt(u16),
.Grayscale1 => 1,
else => std.math.maxInt(u8),
};
try netpbm_file.write(write_stream, seek_stream, pixels);
}
pub fn pixelFormat(self: Self) !PixelFormat {
return switch (self.header.format) {
.Bitmap => PixelFormat.Grayscale1,
.Grayscale => switch (self.header.max_value) {
0...255 => PixelFormat.Grayscale8,
else => PixelFormat.Grayscale16,
},
.Rgb => PixelFormat.Rgb24,
};
}
pub fn read(self: *Self, allocator: Allocator, reader: ImageReader, seek_stream: ImageSeekStream, pixels_opt: *?color.ColorStorage) !void {
_ = seek_stream;
self.header = try parseHeader(reader);
const pixel_format = try self.pixelFormat();
pixels_opt.* = try color.ColorStorage.init(allocator, pixel_format, self.header.width * self.header.height);
if (pixels_opt.*) |*pixels| {
switch (self.header.format) {
.Bitmap => {
if (self.header.binary) {
try loadBinaryBitmap(self.header, pixels.Grayscale1, reader);
} else {
try loadAsciiBitmap(self.header, pixels.Grayscale1, reader);
}
},
.Grayscale => {
if (self.header.binary) {
try loadBinaryGraymap(self.header, pixels, reader);
} else {
try loadAsciiGraymap(self.header, pixels, reader);
}
},
.Rgb => {
if (self.header.binary) {
try loadBinaryRgbmap(self.header, pixels.Rgb24, reader);
} else {
try loadAsciiRgbmap(self.header, pixels.Rgb24, reader);
}
},
}
}
}
pub fn write(self: *Self, write_stream: image.ImageWriterStream, seek_stream: image.ImageSeekStream, pixels: color.ColorStorage) !void {
_ = seek_stream;
const image_type = if (self.header.binary) header_numbers[1] else header_numbers[0];
try write_stream.print("P{c}\n", .{image_type});
_ = try write_stream.write("# Created by zigimg\n");
try write_stream.print("{} {}\n", .{ self.header.width, self.header.height });
if (self.header.format != .Bitmap) {
try write_stream.print("{}\n", .{self.header.max_value});
}
if (self.header.binary) {
switch (self.header.format) {
.Bitmap => {
switch (pixels) {
.Grayscale1 => {
var bit_writer = std.io.bitWriter(.Big, write_stream);
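// PBM convention: 1 = black, 0 = white, the inverse of the Grayscale1
// convention, hence the bit flip on each sample below.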
for (pixels.Grayscale1) |entry| {
try bit_writer.writeBits(~entry.value, 1);
}
try bit_writer.flushBits();
},
else => {
return errors.ImageError.UnsupportedPixelFormat;
},
}
},
.Grayscale => {
switch (pixels) {
.Grayscale16 => {
for (pixels.Grayscale16) |entry| {
// Big due to 16-bit PGM being semi standardized as big-endian
try write_stream.writeIntBig(u16, entry.value);
}
},
.Grayscale8 => {
for (pixels.Grayscale8) |entry| {
try write_stream.writeIntLittle(u8, entry.value);
}
},
else => {
return errors.ImageError.UnsupportedPixelFormat;
},
}
},
.Rgb => {
switch (pixels) {
.Rgb24 => {
for (pixels.Rgb24) |entry| {
try write_stream.writeByte(entry.R);
try write_stream.writeByte(entry.G);
try write_stream.writeByte(entry.B);
}
},
else => {
return errors.ImageError.UnsupportedPixelFormat;
},
}
},
}
} else {
switch (self.header.format) {
.Bitmap => {
switch (pixels) {
.Grayscale1 => {
for (pixels.Grayscale1) |entry| {
try write_stream.print("{}", .{~entry.value});
}
_ = try write_stream.write("\n");
},
else => {
return errors.ImageError.UnsupportedPixelFormat;
},
}
},
.Grayscale => {
switch (pixels) {
.Grayscale16 => {
const pixels_len = pixels.len();
for (pixels.Grayscale16) |entry, index| {
try write_stream.print("{}", .{entry.value});
if (index != (pixels_len - 1)) {
_ = try write_stream.write(" ");
}
}
_ = try write_stream.write("\n");
},
.Grayscale8 => {
const pixels_len = pixels.len();
for (pixels.Grayscale8) |entry, index| {
try write_stream.print("{}", .{entry.value});
if (index != (pixels_len - 1)) {
_ = try write_stream.write(" ");
}
}
_ = try write_stream.write("\n");
},
else => {
return errors.ImageError.UnsupportedPixelFormat;
},
}
},
.Rgb => {
switch (pixels) {
.Rgb24 => {
for (pixels.Rgb24) |entry| {
try write_stream.print("{} {} {}\n", .{ entry.R, entry.G, entry.B });
}
},
else => {
return errors.ImageError.UnsupportedPixelFormat;
},
}
},
}
}
}
};
}
pub const PBM = Netpbm(ImageFormat.Pbm, &[_]u8{ '1', '4' });
pub const PGM = Netpbm(ImageFormat.Pgm, &[_]u8{ '2', '5' });
pub const PPM = Netpbm(ImageFormat.Ppm, &[_]u8{ '3', '6' }); | src/formats/netpbm.zig |
const std = @import("std");
const rpc = @import("rpc.zig");
const Server = @import("main.zig").Server;
pub const keys = @import("keys.zig");
pub const Key = keys.Key;
/// Data sent to Client which represents the data to draw on the screen.
pub const DrawData = struct {
lines: []const Line,
pub const Line = struct {
number: u32,
contents: []const u8,
face: Face = Face.default,
};
pub const Face = struct {
fg: []const u8,
bg: []const u8,
attributes: []const Attribute = &[0]Attribute{},
pub const default = Face{ .fg = "default", .bg = "default" };
};
pub const Attribute = enum {
underline,
reverse,
bold,
blink,
dim,
italic,
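/// Serializes the attribute as its tag name (e.g. bold) instead of its integer value.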
pub fn jsonStringify(
value: Attribute,
options: std.json.StringifyOptions,
out_stream: anytype,
) @TypeOf(out_stream).Error!void {
_ = options;
try out_stream.writeAll(std.meta.tagName(value));
}
};
};
pub const CommandKind = enum {
nop,
/// Params has information about the key.
keypress,
/// Sent by client when it quits.
quitted,
/// First value in params is the command kind, others are arguments to this command.
initialize,
quit,
save,
redraw,
insert_character,
cursor_move_down,
cursor_move_left,
cursor_move_up,
cursor_move_right,
delete_word,
delete_line,
open_file,
};
/// Command is an action issued by the client to be executed on the server.
pub const Command = union(CommandKind) {
nop,
/// Sent by client when it quits.
quitted,
quit,
save,
redraw,
/// Provide initial parameters to initialize a client.
initialize: ClientInitParams,
/// Value is inserted character. TODO: should not be u8.
insert_character: u8,
keypress: Keypress,
cursor_move_down,
cursor_move_left,
cursor_move_up,
cursor_move_right,
delete_word,
delete_line,
/// Value is absolute file path.
open_file: struct { path: []const u8 },
/// Parameters necessary to create new state in workspace and get `active_display_state`.
pub const ClientInitParams = struct {
path: []const u8,
readonly: bool,
text_area_rows: u32,
text_area_cols: u32,
};
// pub const Multiplier = struct { multiplier: u32 = 1 };
/// Different editing operations accept a numeric multiplier which specifies the number of
/// times the operation should be executed.
pub const Keypress = struct { key: Key, multiplier: u32 };
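// Example: `Command{ .keypress = .{ .key = some_key, .multiplier = 3 } }` repeats
// the bound operation three times (`some_key` stands in for a real keys.Key value).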
};
pub const TextBufferMetrics = struct {
max_line_number: u32 = 0,
};
pub const TextBufferLineEnding = enum {
/// \n
unix,
/// \r\n
dos,
};
pub const Selection = struct {
/// Value is offset.
cursor: usize,
/// Value is offset.
anchor: usize,
anchored: bool = false,
primary: bool = true,
// TODO: think about design.
// last_character_is_newline: bool,
};
pub const Selections = std.ArrayList(Selection); | src/kisa.zig |
const std = @import("std");
const assert = std.debug.assert;
// ----------------------- Public API -------------------------
// ---------------------------- Implementation -----------------------------
const ImageDosHeader = extern struct {
signature: u16,
cblp: u16,
cp: u16,
crlc: u16,
cparhdr: u16,
minalloc: u16,
maxalloc: u16,
ss: u16,
sp: u16,
csum: u16,
ip: u16,
cs: u16,
lfarlc: u16,
ovno: u16,
res: [4]u16,
oemid: u16,
oeminfo: u16,
res2: [10]u16,
lfanew: i32,
};
const ImageFileHeader = extern struct {
machine: u16,
num_sections: u16,
datetime: u32,
symbol_table_offset: u32,
num_symbols: u32,
optional_header_size: u16,
characteristics: u16,
};
const ImageDataDirectory = extern struct {
virtual_address: u32,
size: u32,
};
const ImageOptionalHeader32 = extern struct {
magic: u16,
major_linked_version: u8,
minor_linked_version: u8,
code_size: u32,
initialized_data_size: u32,
uninitialized_data_size: u32,
entry_point_address: u32,
start_of_code: u32,
start_of_data: u32,
start_of_image: u32,
section_alignment: u32,
file_alignment: u32,
os_major_version: u16,
os_minor_version: u16,
image_major_version: u16,
image_minor_version: u16,
subsystem_major_version: u16,
subsystem_minor_version: u16,
win32_version: u32,
image_size: u32,
headers_size: u32,
checksum: u32,
subsystem: u16,
dll_characteristics: u16,
stack_reserve_size: u32,
stack_commit_size: u32,
heap_reserve_size: u32,
heap_commit_size: u32,
loader_flags: u32,
number_of_rva_and_sizes: u32,
data_directories: [16]ImageDataDirectory,
};
const ImageOptionalHeader32Plus = extern struct {
magic: u16,
major_linked_version: u8,
minor_linked_version: u8,
code_size: u32,
initialized_data_size: u32,
uninitialized_data_size: u32,
entry_point_address: u32,
start_of_code: u32,
start_of_image: u64,
section_alignment: u32,
file_alignment: u32,
os_major_version: u16,
os_minor_version: u16,
image_major_version: u16,
image_minor_version: u16,
subsystem_major_version: u16,
subsystem_minor_version: u16,
win32_version: u32,
image_size: u32,
headers_size: u32,
checksum: u32,
subsystem: u16,
dll_characteristics: u16,
stack_reserve_size: u64,
stack_commit_size: u64,
heap_reserve_size: u64,
heap_commit_size: u64,
loader_flags: u32,
number_of_rva_and_sizes: u32,
data_directories: [16]ImageDataDirectory,
};
const ImageNtHeaders32 = extern struct {
signature: u32,
header: ImageFileHeader,
optional_header: ImageOptionalHeader32,
};
const ImageNtHeaders32Plus = extern struct {
signature: u32,
header: ImageFileHeader,
optional_header: ImageOptionalHeader32Plus,
};
const IMAGE_SIZEOF_SHORT_NAME = 8;
const ImageSectionHeader = extern struct {
name: [IMAGE_SIZEOF_SHORT_NAME]u8,
misc: extern union {
physical_address: u32,
virtual_size: u32,
},
virtual_address: u32,
raw_data_size: u32,
raw_data_offset: u32,
relocations_offset: u32,
linenumbers_offset: u32,
num_relocations: u16,
num_linenumbers: u16,
characteristics: u32,
};
const ImageCor20Header = extern struct {
cb: u32,
runtime_major_version: u16,
runtime_minor_version: u16,
metadata: ImageDataDirectory,
flags: u32,
// TODO fix this
anon0: extern union {
entry_point_token: u32,
entry_point_rva: u32,
},
resources: ImageDataDirectory,
strong_name_signature: ImageDataDirectory,
code_manager_table: ImageDataDirectory,
vtable_fixups: ImageDataDirectory,
export_address_table_jumps: ImageDataDirectory,
managed_native_header: ImageDataDirectory,
};
const TableKind = enum (u8) {
module = 0x00,
type_ref = 0x01,
type_def = 0x02,
field = 0x04,
method_def = 0x06,
param = 0x08,
interface_impl = 0x09,
member_ref = 0x0A,
constant = 0x0B,
custom_attribute = 0x0C,
field_marshal = 0x0D,
decl_security = 0x0E,
class_layout = 0x0F,
field_layout = 0x10,
standalone_sig = 0x11,
event_map = 0x12,
event = 0x14,
property_map = 0x15,
property = 0x17,
method_semantics = 0x18,
method_impl = 0x19,
module_ref = 0x1A,
type_spec = 0x1B,
impl_map = 0x1C,
field_rva = 0x1D,
assembly = 0x20,
assembly_processor = 0x21,
assembly_os = 0x22,
assembly_ref = 0x23,
assembly_ref_processor = 0x24,
assembly_ref_os = 0x25,
file = 0x26,
exported_type = 0x27,
manifest_resource = 0x28,
nested_class = 0x29,
generic_param = 0x2A,
method_spec = 0x2B,
generic_param_constraint = 0x2C,
invalid = 0xFF,
_,
/// List of valid table kinds
const values = comptime blk: {
var t1 = [_]TableKind{};
const Any = struct { v: anytype };
var any = Any{ .v = t1 };
for (@typeInfo(TableKind).Enum.fields) |field| {
const tag = @field(@This(), field.name);
if (tag != .invalid) {
const tmp = any.v ++ [_]TableKind{ tag };
any.v = tmp;
}
}
break :blk any.v;
};
/// Use this value as the length of an array
/// where kinds are keys. Does not include
/// invalid values, always check bounds.
const num_valid_kinds = comptime blk: {
var max_kind = 0;
for (@typeInfo(TableKind).Enum.fields) |field| {
if (@field(@This(), field.name) == .invalid) {
continue;
}
if (field.value > max_kind) {
max_kind = field.value;
}
}
assert(max_kind == 0x2C);
break :blk max_kind + 1;
};
// Array of row types, mapping from TableKind index to type
const row_types = comptime blk: {
var types = [_]type{ InvalidRow } ** num_valid_kinds;
for (all_row_types) |Row| {
if (Row.tag != .invalid) {
const kind: TableKind = Row.tag;
const index = @enumToInt(kind);
if (types[index] != InvalidRow) {
@compileError("Both "++@typeName(types[index])++
" and "++@typeName(Row)++
" have tag "++@tagName(kind));
}
types[index] = Row;
}
}
break :blk types;
};
/// Get the type of a row of the table corresponding to the given kind.
pub fn RowType(comptime kind: TableKind) type {
const index = @enumToInt(kind);
if (index < row_types.len) {
return row_types[index];
}
return InvalidRow;
}
};
// Sentinel row type: used for table kinds without a row definition, and as the
// placeholder that lets `row_types` detect two rows claiming the same tag.
const InvalidRow = packed struct {
pub usingnamespace RowMixin(.invalid, @This());
__reserved0: u16 = 0,
};
// The following enums are used for "coded indexes".
// A coded index is represented in the file as an index
// bit-packed with one of these restricted enums.
// The names in these enums must exactly match the names
// in the TableKind enum, so that metaprogramming can
// match them when generating conversion code.
// Use `decodeIndex` to convert from a coded index
// to a table and index.
const GenericIndex = struct {
table: TableKind,
index: u32,
};
fn CodedIndexMixin(comptime Enum: type) type {
const enum_info = @typeInfo(Enum).Enum;
const num_bits = @bitSizeOf(Enum);
const num_values = 1 << num_bits;
const mask = num_values - 1;
if (enum_info.fields.len != num_values and enum_info.is_exhaustive) {
@compileError("Coded enum "++@typeName(Enum)++" does not fill all slots, but is not exhaustive.");
}
var kind_lookup = [_]TableKind{ .invalid } ** num_values;
for (enum_info.fields) |field| {
kind_lookup[field.value] = @field(TableKind, field.name);
}
return struct {
pub fn Type(comptime self: Enum) type {
return self.toGeneric().Type();
}
pub inline fn toGeneric(self: Enum) TableKind {
return kind_lookup[@enumToInt(self)];
}
pub fn fromGeneric(target: TableKind) error{WinmdInvalidTableReference}!Enum {
// For now don't generate a reverse lookup table.
// since these enums are small, iterate the forward table.
for (kind_lookup) |kind, i| {
if (kind != .invalid and kind == target) {
const tag = @intCast(enum_info.tag_type, i);
return @intToEnum(Enum, tag);
}
}
return error.WinmdInvalidTableReference;
}
pub inline fn decodeTable(value: u32) Enum {
return @intToEnum(Enum, @truncate(enum_info.tag_type, value));
}
pub inline fn decodeIndex(value: u32) u32 {
return value >> num_bits;
}
pub fn decode(value: u32) GenericIndex {
return .{
.table = decodeTable(value).toGeneric(),
.index = decodeIndex(value),
};
}
pub fn encode(index: GenericIndex) error{WinmdInvalidTableReference}!u32 {
const encoded_value = try fromGeneric(index.table);
return (index.index << num_bits) | @as(u32, @enumToInt(encoded_value));
}
};
}
pub const TypeDefOrRef = enum (u2) {
pub usingnamespace CodedIndexMixin(@This());
type_def = 0,
type_ref = 1,
type_spec = 2,
_,
};
/// A table which can be a declaring scope
/// for a constant. The .constant table
/// has a back reference to its parent.
pub const HasConstant = enum (u2) {
pub usingnamespace CodedIndexMixin(@This());
field = 0,
param = 1,
property = 2,
_,
};
pub const HasCustomAttribute = enum (u5) {
pub usingnamespace CodedIndexMixin(@This());
method_def = 0,
field = 1,
type_ref = 2,
type_def = 3,
param = 4,
interface_impl = 5,
member_ref = 6,
module = 7,
// This is called out in the spec,
// but there is no permission table?
// permission = 8,
property = 9,
event = 10,
standalone_sig = 11,
module_ref = 12,
type_spec = 13,
assembly = 14,
assembly_ref = 15,
file = 16,
exported_type = 17,
manifest_resource = 18,
generic_param = 19,
generic_param_constraint = 20,
method_spec = 21,
_,
};
pub const HasFieldMarshal = enum (u1) {
pub usingnamespace CodedIndexMixin(@This());
field = 0,
param = 1,
};
pub const HasDeclSecurity = enum (u2) {
pub usingnamespace CodedIndexMixin(@This());
type_def = 0,
method_def = 1,
assembly = 2,
_,
};
pub const MemberRefParent = enum (u3) {
pub usingnamespace CodedIndexMixin(@This());
type_def = 0,
type_ref = 1,
module_ref = 2,
method_def = 3,
type_spec = 4,
_,
};
pub const HasSemantics = enum (u1) {
pub usingnamespace CodedIndexMixin(@This());
event = 0,
property = 1,
};
pub const MethodDefOrRef = enum (u1) {
pub usingnamespace CodedIndexMixin(@This());
method_def = 0,
member_ref = 1,
};
const MemberForwarded = enum (u1) {
pub usingnamespace CodedIndexMixin(@This());
field = 0,
method_def = 1,
};
const Implementation = enum (u2) {
pub usingnamespace CodedIndexMixin(@This());
file = 0,
assembly_ref = 1,
exported_type = 2,
_,
};
const CustomAttributeType = enum (u3) {
pub usingnamespace CodedIndexMixin(@This());
method_def = 2,
member_ref = 3,
_,
};
pub const ResolutionScope = enum (u2) {
pub usingnamespace CodedIndexMixin(@This());
module = 0,
module_ref = 1,
assembly_ref = 2,
type_ref = 3,
};
pub const TypeOrMethodDef = enum (u1) {
pub usingnamespace CodedIndexMixin(@This());
type_def = 0,
method_def = 1,
};
const all_coded_enums = [_]type{
TypeDefOrRef,
HasConstant,
HasCustomAttribute,
HasFieldMarshal,
HasDeclSecurity,
MemberRefParent,
HasSemantics,
MethodDefOrRef,
MemberForwarded,
Implementation,
CustomAttributeType,
ResolutionScope,
TypeOrMethodDef,
};
pub const MemberAccess = enum (u3) {
/// cannot be referenced
compiler_controlled = 0,
/// only in own type
private = 1,
/// only in subtypes in assembly
family_and_assembly = 2,
/// only in assembly
assembly = 3,
/// only in subtypes (protected)
family = 4,
/// in subtypes or anywhere in this assembly
family_or_assembly = 5,
/// anywhere
public = 6,
_,
};
pub const CodeType = enum (u2) {
cil = 0,
native = 1,
optil = 2,
runtime = 3,
};
pub const Managed = enum (u1) {
managed = 0,
unmanaged = 1,
};
pub const VtableLayout = enum (u1) {
reuse_slot = 0,
new_slot = 1,
};
/// Note: These values exactly match the equivalent
/// values in ElementType, so they can be cross-cast.
pub const ConstantType = extern enum (u16) {
bool = 2,
char = 3,
i8 = 4,
u8 = 5,
i16 = 6,
u16 = 7,
i32 = 8,
u32 = 9,
i64 = 10,
u64 = 11,
f32 = 12,
f64 = 13,
string = 14,
class = 18,
};
// TODO: This might be a secret packed struct
pub const ElementType = extern enum (u8) {
end_sentinel = 0,
void = 1,
bool = 2,
char = 3,
i8 = 4,
u8 = 5,
i16 = 6,
u16 = 7,
i32 = 8,
u32 = 9,
i64 = 10,
u64 = 11,
f32 = 12,
f64 = 13,
string = 14,
/// followed by TypeSig
ptr = 15,
/// followed by TypeSig
by_ref = 16,
/// followed by TypeDef or TypeRef
value_type = 17,
/// followed by TypeDef or TypeRef
class = 18,
/// generic parameter in a type definition
generic_type_parameter = 19,
array = 20,
generic_instance = 21,
typed_by_ref = 22,
// 23 is unused?
isize = 24,
usize = 25,
// 26 is unused?
/// followed by full method system
fn_ptr = 27,
/// C# System.Object
object = 28,
sized_array = 29,
/// generic parameter in function definition
generic_fn_parameter = 30,
/// followed by TypeDef or TypeRef
required_modifier = 31,
/// followed by TypeDef or TypeRef
optional_modifier = 32,
internal = 33,
/// Or'd with element types that follow
modifier = 64,
vararg_sentinel = 65,
pinned = 69, // nice
type = 80,
tagged_object = 81,
attr_field = 83,
attr_property = 84,
attr_enum = 85,
};
pub const CallingConvention = packed struct {
kind: Kind,
is_generic: bool,
has_this: bool,
has_explicit_this: bool,
__reserved: bool = false,
pub const Kind = enum (u4) {
default = 0,
var_args = 5,
field = 6,
local_signature = 7,
property = 8,
_,
};
};
pub const AssemblyHashAlgorithm = extern enum (u32) {
none = 0,
reserved_md5 = 0x8003,
sha1 = 0x8004,
};
/// Note: This struct relies on the current packed struct layout rules.
/// Those may change in the future, which would break this.
pub const AssemblyFlags = packed struct {
has_full_public_key: bool,
__reserved0: u7 = 0,
is_retargetable: bool,
needs_windows_runtime: bool,
__reserved1: u4 = 0,
disable_jit_optimizer: bool,
enable_jit_tracking: bool,
__reserved2: u8 = 0,
__reserved3: u8 = 0,
};
/// Note: This struct relies on the current packed struct layout rules.
/// Those may change in the future, which would break this.
pub const AssemblyAttributes = packed struct {
__reserved0: u9 = 0,
has_special_name: bool,
has_rt_special_name: bool,
__reserved1: u5 = 0,
};
/// Note: This struct relies on the current packed struct layout rules.
/// Those may change in the future, which would break this.
pub const EventAttributes = packed struct {
__reserved0: u8 = 0,
__reserved1: u1 = 0,
has_special_name: bool,
has_rt_special_name: bool,
__reserved2: u5 = 0,
};
/// Note: This struct relies on the current packed struct layout rules.
/// Those may change in the future, which would break this.
pub const FieldAttributes = packed struct {
comptime { if (@bitSizeOf(MemberAccess) != 3) @compileError("MemberAccess must be 3 bits"); }
access: MemberAccess,
__reserved0: u1 = 0,
is_static: bool,
is_init_only: bool,
is_literal: bool,
is_not_serialized: bool,
has_field_rva: bool,
has_special_name: bool,
has_rt_special_name: bool,
__reserved1: u1 = 0,
has_field_marshal: bool,
is_pinvoke_impl: bool,
__reserved2: u1 = 0,
has_default: bool,
};
/// Note: This struct relies on the current packed struct layout rules.
/// Those may change in the future, which would break this.
pub const FileAttributes = packed struct {
has_no_metadata: bool,
__reserved0: u7 = 0,
__reserved1: u8 = 0,
};
/// Note: This struct relies on the current packed struct layout rules.
/// Those may change in the future, which would break this.
pub const GenericParameterAttributes = packed struct {
variance: Variance, // 2 bits
special_constraint: SpecialConstraint, // 3 bits
__reserved0: u11 = 0,
pub const Variance = enum (u2) {
none = 0,
covariant = 1,
contravariant = 2,
_,
};
// Note: Even though these are bits, this is not a mask.
pub const SpecialConstraint = enum (u3) {
reference_type_constraint = 1,
not_nullable_value_type_constraint = 2,
default_constructor_constraint = 4,
_,
};
};
pub const ManifestResourceAttributes = packed struct {
visibility: enum (u3) {
public = 1,
private = 2,
_,
},
__reserved0: u5 = 0,
__reserved1: u8 = 0,
__reserved2: u8 = 0,
__reserved3: u8 = 0,
};
/// Note: This struct relies on the current packed struct layout rules.
/// Those may change in the future, which would break this.
pub const MethodAttributes = packed struct {
comptime {
if (@bitSizeOf(MemberAccess) != 3)
@compileError("MemberAccess must be 3 bits");
if (@bitSizeOf(VtableLayout) != 1)
@compileError("VtableLayout must be 1 bit");
}
access: MemberAccess,
is_unmanaged_export: bool,
is_static: bool,
is_final: bool,
is_virtual: bool,
is_hide_by_signature: bool,
vtable_layout: VtableLayout,
is_strict: bool,
is_abstract: bool,
is_special_name: bool,
is_rt_special_name: bool,
is_pinvoke_impl: bool,
has_security: bool,
is_require_sec_object: bool,
};
/// Note: This struct relies on the current packed struct layout rules.
/// Those may change in the future, which would break this.
pub const MethodImplAttributes = packed struct {
comptime {
if (@bitSizeOf(CodeType) != 2)
@compileError("CodeType must be 2 bits");
if (@bitSizeOf(Managed) != 1)
@compileError("Managed must be 1 bit");
}
code_type: CodeType, // 2 bits
managed: Managed, // 1 bit
is_no_inline: bool,
is_forward_ref: bool,
is_synchronized: bool,
is_no_optimization: bool,
is_preserve_sig: bool,
__reserved0: u4 = 0,
is_internal_call: bool,
__reserved1: u3 = 0,
};
/// Note: This struct relies on the current packed struct layout rules.
/// Those may change in the future, which would break this.
pub const MethodSemanticsAttributes = packed struct {
is_setter: bool,
is_getter: bool,
is_other: bool,
is_add_on: bool,
is_remove_on: bool,
is_fire: bool,
__reserved0: u10 = 0,
};
/// Note: This struct relies on the current packed struct layout rules.
/// Those may change in the future, which would break this.
pub const ParamAttributes = packed struct {
is_in: bool,
is_out: bool,
__reserved0: u2 = 0,
is_optional: bool,
__reserved1: u3 = 0,
__reserved2: u4 = 0,
has_default: bool,
has_field_marshal: bool,
__reserved3: u2 = 0,
};
pub const PInvokeAttributes = packed struct {
is_no_mangle: bool,
char_set: enum (u2) {
not_specified = 0,
ansi = 1,
unicode = 2,
auto = 3,
},
__reserved0: u2 = 0,
supports_last_error: bool,
__reserved1: u2 = 0,
calling_convention: enum (u3) {
platform_api = 1,
cdecl = 2,
stdcall = 3,
thiscall = 4,
fastcall = 5,
_,
},
__reserved2: u5 = 0,
};
/// Note: This struct relies on the current packed struct layout rules.
/// Those may change in the future, which would break this.
pub const PropertyAttributes = packed struct {
__reserved0: u8 = 0,
__reserved1: u1 = 0,
is_special_name: bool,
is_rt_special_name: bool,
__reserved2: u1 = 0,
has_default: bool,
__reserved3: u3 = 0,
};
/// Note: This struct relies on the current packed struct layout rules.
/// Those may change in the future, which would break this.
pub const TypeAttributes = packed struct {
visibility: Visibility, // 3 bits
layout: Layout, // 2 bits
semantics: Semantics, // 1 bit
__reserved0: u1 = 0,
is_abstract: bool,
is_sealed: bool,
__reserved1: u1 = 0,
is_special_name: bool,
is_rt_special_name: bool,
is_import: bool,
is_serializable: bool,
is_windows_runtime: bool,
__reserved2: u1 = 0,
string_format: StringFormat, // 4 bits
is_before_field_init: bool,
is_type_forwarder: bool,
__reserved3: u2 = 0,
__reserved4: u8 = 0,
pub const Visibility = enum (u3) {
not_public = 0,
public = 1,
nested_public = 2,
nested_private = 3,
nested_family = 4,
nested_assembly = 5,
nested_family_and_assembly = 6,
nested_family_or_assembly = 7,
};
pub const Layout = enum (u2) {
auto = 0,
sequential = 1,
explicit = 2,
_,
};
pub const Semantics = enum (u1) {
class = 0,
interface = 1,
};
pub const StringFormat = packed struct {
class: Class,
custom_format: u2 = 0,
pub const Class = enum (u2) {
ansi = 0,
unicode = 1,
auto = 2,
custom_format = 3,
};
};
comptime {
if (@bitSizeOf(StringFormat) != 4)
@compileError("StringFormat must be 4 bits");
}
};
const MAX_INDEX_COLUMNS = comptime blk: {
var max_indices = 0;
for (TableKind.values) |kind| {
const Row = kind.RowType();
var next_index = 0;
for (@typeInfo(Row).Struct.fields) |field| {
if (@typeInfo(field.field_type) == .Struct and @hasDecl(field.field_type, "index_column")) {
const index = field.field_type.index_column;
if (index != next_index) {
@compileError("In Row struct "++@typeName(Row)++" for kind "++@tagName(kind)++
", index field "++field.name++" is out of order.");
}
next_index += 1;
} else {
if (next_index != 0) {
@compileError("In Row struct "++@typeName(Row)++" for kind "++@tagName(kind)++
", field "++field.name++" comes after index fields, but is not an index.");
}
}
}
if (next_index > max_indices) {
max_indices = next_index;
}
}
assert(max_indices == 5);
break :blk max_indices;
};
const IndexPosition = struct {
offset: u8,
size: u8,
};
pub const GenericTable = struct {
data: ?[*]const u8,
rows: u32,
row_size: u32,
indexes: [MAX_INDEX_COLUMNS]IndexPosition,
fn at(self: GenericTable, comptime T: type, index: u32) *const T {
assert(index < self.rows);
return @ptrCast(*const T, @alignCast(@alignOf(T), self.data.? + index * self.row_size));
}
fn iterator(self: GenericTable) Iterator {
return .{
.next_row = self.data,
.remaining_items = self.rows,
.row_size = self.row_size,
};
}
fn range(self: GenericTable, start: u32, end: u32) Iterator {
return .{
.next_row = if (start == 0) self.data else (self.data.? + start),
.remaining_items = end - start,
.row_size = self.row_size,
};
}
pub const Iterator = struct {
next_row: ?[*]const u8,
remaining_items: usize,
row_size: usize,
pub fn next(self: *Iterator, comptime T: type) ?*const T {
if (self.remaining_items == 0) return null;
self.remaining_items -= 1;
const next_row = self.next_row.?;
self.next_row = next_row + self.row_size;
return @ptrCast(*const T, next_row);
}
};
pub fn as(self: GenericTable, comptime kind: TableKind) Table(kind) {
return .{ .generic = self };
}
};
pub fn Table(comptime kind: TableKind) type {
return struct {
pub const tag = kind;
pub const Row = kind.RowType();
generic: GenericTable,
pub fn at(self: @This(), index: u32) *const Row {
return self.generic.at(Row, index);
}
pub fn iterator(self: @This()) Iterator {
return .{ .generic = self.generic.iterator() };
}
pub fn range(self: @This(), start: u32, end: u32) Iterator {
return .{ .generic = self.generic.range(start, end) };
}
const Iterator = struct {
generic: GenericTable.Iterator,
pub fn next(self: *Iterator) ?*const Row {
return self.generic.next(Row);
}
};
};
}
const SizeInfo = struct {
has_large_string_indexes: bool,
has_large_guid_indexes: bool,
has_large_blob_indexes: bool,
database: *const Database,
};
pub const Database = struct {
tables: [TableKind.num_valid_kinds]GenericTable,
strings: []const u8,
blobs: []const u8,
guids: []const [16]u8,
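/// Returns a typed view over one metadata table; for example,
/// `database.getTable(.type_def).iterator()` walks every TypeDef row
/// (illustrative call, assuming `database` has been fully initialized).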
pub fn getTable(self: Database, comptime kind: TableKind) Table(kind) {
return self.tables[@enumToInt(kind)].as(kind);
}
pub fn getString(self: Database, index: u32) [:0]const u8 {
const rest = self.strings[index..];
if (std.mem.indexOfScalar(u8, rest, 0)) |null_index| {
return rest[0..null_index :0];
} else {
// TODO: Invalid files may hit this case.
unreachable;
}
}
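/// Returns the blob at byte offset `index` in the #Blob heap. Each entry starts
/// with a compressed length prefix of 1, 2, or 4 bytes (ECMA-335 II.24.2.4).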
pub fn getBlob(self: Database, index: u32) []const u8 {
var rest = self.blobs[index..];
const encoding = rest[0] >> 5;
switch (encoding) {
// top bit 0, one byte size
0, 1, 2, 3 => {
const len = rest[0] & 0x7f;
return rest[1..][0..len];
},
// top bits 10, two bytes size
4, 5 => {
const len = std.mem.readIntLittle(u16, rest[0..2]) & 0x3FFF;
return rest[2..][0..len];
},
// top bits 110, four bytes size
6 => {
const len = std.mem.readIntLittle(u32, rest[0..4]) & 0x1FFF_FFFF;
return rest[4..][0..len];
},
// no other valid combinations
else => {
// TODO: Invalid files may hit this case
unreachable;
},
}
}
pub fn getGuid(self: Database, index: u32) *const [16]u8 {
return &self.guids[index];
}
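/// Computes the byte offset and size of every index column in every table.
/// Must run after the row counts and heap sizes are known, and before any
/// index accessor (readIndex and friends) is used.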
pub fn initIndices(self: *Database) void {
const sizing = SizeInfo{
// TODO: Pull this out of the file header
.has_large_string_indexes = true,
.has_large_guid_indexes = true,
.has_large_blob_indexes = true,
.database = self,
};
inline for (TableKind.values) |kind| {
const Row = kind.RowType();
var byte_offset: u8 = 0;
const gen_table = &self.tables[@enumToInt(kind)];
inline for (@typeInfo(Row).Struct.fields) |field| {
if (@typeInfo(field.field_type) == .Struct and
@hasDecl(field.field_type, "index_column"))
{
const index: u8 = field.field_type.index_column;
const size: u8 = field.field_type.calculateSize(sizing);
// Every index is one byte because pointers to zero sized
// types don't work, so we need to subtract that here.
// This is the offset within a row from the pointer to
// the index struct to the actual data in the row.
gen_table.indexes[index] = .{
.offset = byte_offset - index,
.size = size,
};
byte_offset += size;
}
}
}
}
};
pub fn IndexMixin(comptime column: usize) type {
return packed struct {
pub const index_column = column;
pub fn readIndex(self: *const @This(), source_table: GenericTable) u32 {
const index_data = source_table.indexes[column];
const byte_ptr = @ptrCast([*]const u8, self) + index_data.offset;
return switch (index_data.size) {
2 => std.mem.readIntLittle(u16, byte_ptr[0..2]),
4 => std.mem.readIntLittle(u32, byte_ptr[0..4]),
else => unreachable,
};
}
self: u8,
};
}
pub fn StringIndex(comptime column: usize) type {
return packed struct {
const Mixin = IndexMixin(column);
pub usingnamespace Mixin;
raw: Mixin,
pub fn getString(self: *@This(), source_table: GenericTable, database: Database) [:0]const u8 {
// TODO SAFETY: These strings may not be null terminated if the file is invalid :(
const index = self.raw.readIndex(source_table);
return database.getString(index);
}
fn calculateSize(sizing_info: SizeInfo) u8 {
return if (sizing_info.has_large_string_indexes) 4 else 2;
}
};
}
pub fn BlobIndex(comptime column: usize) type {
return packed struct {
const Mixin = IndexMixin(column);
pub usingnamespace Mixin;
raw: Mixin,
pub fn getBlob(self: *@This(), source_table: GenericTable, database: Database) []const u8 {
const index = self.raw.readIndex(source_table);
return database.getBlob(index);
}
fn calculateSize(sizing_info: SizeInfo) u8 {
return if (sizing_info.has_large_blob_indexes) 4 else 2;
}
};
}
pub fn GuidIndex(comptime column: usize) type {
return packed struct {
const Mixin = IndexMixin(column);
pub usingnamespace Mixin;
raw: Mixin,
pub fn getGuid(self: *@This(), source_table: GenericTable, database: Database) *const [16]u8 {
const index = self.raw.readIndex(source_table);
return database.getGuid(index);
}
fn calculateSize(sizing_info: SizeInfo) u8 {
return if (sizing_info.has_large_guid_indexes) 4 else 2;
}
};
}
pub fn TableIndex(comptime kind: TableKind, comptime column: usize) type {
return packed struct {
const Mixin = IndexMixin(column);
pub usingnamespace Mixin;
raw: Mixin,
// TODO SAFETY: Might need to return error or optional here if index is invalid.
pub fn getRow(self: *@This(), source_table: GenericTable, database: Database) *const kind.RowType() {
const index = self.raw.readIndex(source_table);
const table = database.getTable(kind);
return table.at(index);
}
fn calculateSize(sizing_info: SizeInfo) u8 {
const rows = sizing_info.database.tables[@enumToInt(kind)].rows;
return if (rows < 1<<16) 2 else 4;
}
};
}
pub fn ListIndex(comptime kind: TableKind, comptime column: usize) type {
return packed struct {
const Mixin = IndexMixin(column);
pub usingnamespace Mixin;
raw: Mixin,
pub fn iterator(self: *const @This(), source_table: GenericTable, database: Database) Table(kind).Iterator {
const start_index = self.raw.readIndex(source_table);
// Check if we are the last entry in the source table
const source_table_end = @ptrToInt(source_table.data.?) + source_table.rows * source_table.row_size;
const next_entry = @ptrToInt(self) + source_table.row_size;
if (next_entry < source_table_end) {
// we are not the last entry
const next_index = @intToPtr(*const @This(), next_entry);
const end_index = next_index.raw.readIndex(source_table);
return database.getTable(kind).range(start_index, end_index);
} else {
// we are the last entry
const table = database.getTable(kind);
return table.range(start_index, table.generic.rows);
}
}
// TODO SAFETY: Might need to return error or optional here if index is invalid.
pub fn firstRow(self: *@This(), source_table: GenericTable, database: Database) *const kind.RowType() {
const index = self.raw.readIndex(source_table);
const table = database.getTable(kind);
return table.at(index);
}
fn calculateSize(sizing_info: SizeInfo) u8 {
const rows = sizing_info.database.tables[@enumToInt(kind)].rows;
return if (rows < 1<<16) 2 else 4;
}
};
}
pub fn CodedIndex(comptime Enum: type, comptime column: usize) type {
return packed struct {
const Mixin = IndexMixin(column);
pub usingnamespace Mixin;
raw: Mixin,
pub fn getKind(self: *const @This(), source_table: GenericTable) Enum {
const index = self.raw.readIndex(source_table);
return Enum.decodeTable(index);
}
// TODO SAFETY: Might need to return error or optional here if index is invalid.
pub fn getRow(self: *const @This(), comptime kind: Enum, source_table: GenericTable, database: Database) *const kind.toGeneric().RowType() {
const value = self.raw.readIndex(source_table);
assert(Enum.decodeTable(value) == kind);
return database.getTable(kind.toGeneric()).at(Enum.decodeIndex(value));
}
fn calculateSize(sizing_info: SizeInfo) u8 {
const enum_info = @typeInfo(Enum).Enum;
const large_index_size = 1<<(16 - @bitSizeOf(Enum));
var needs_large_indices = false;
inline for (enum_info.fields) |field| {
const generic_tag = @field(TableKind, field.name);
const rows = sizing_info.database.tables[@enumToInt(generic_tag)].rows;
if (rows >= large_index_size) {
needs_large_indices = true;
}
}
return if (needs_large_indices) 4 else 2;
}
};
}
// ---------------------- Tables ---------------------
fn RowMixin(comptime kind: @Type(.EnumLiteral), comptime Row: type) type {
const typed_kind: TableKind = kind; // If this fails, you have misspelled the kind.
if (@typeInfo(Row).Struct.layout != .Packed) {
@compileError("Row structs must have packed layout, "++@typeName(Row)++" does not.");
}
if (@alignOf(Row) > 2) {
@compileError("Row structs must be two byte aligned, "++@typeName(Row)++" is not.");
}
return struct {
pub const tag = kind;
};
}
const AssemblyRow = packed struct {
pub usingnamespace RowMixin(.assembly, @This());
hash_algorithm: AssemblyHashAlgorithm,
major_version: u16,
minor_version: u16,
build_number: u16,
revision_number: u16,
flags: AssemblyFlags,
public_key_index: BlobIndex(0),
name_index: StringIndex(1),
culture_index: StringIndex(2),
};
const AssemblyOsRow = packed struct {
pub usingnamespace RowMixin(.assembly_os, @This());
os_platform_id: u32,
os_major_version: u32,
os_minor_version: u32,
};
const AssemblyProcessorRow = packed struct {
pub usingnamespace RowMixin(.assembly_processor, @This());
processor: u32,
};
const AssemblyRefRow = packed struct {
pub usingnamespace RowMixin(.assembly_ref, @This());
major_version: u16,
minor_version: u16,
build_number: u16,
revision_number: u16,
flags: AssemblyFlags,
public_key_or_token_index: BlobIndex(0),
name: StringIndex(1),
culture: StringIndex(2),
hash_value: BlobIndex(3),
};
const AssemblyRefOsRow = packed struct {
pub usingnamespace RowMixin(.assembly_ref_os, @This());
os_platform_id: u32,
os_major_version: u32,
os_minor_version: u32,
assembly_ref: TableIndex(.assembly_ref, 0),
};
const AssemblyRefProcessorRow = packed struct {
pub usingnamespace RowMixin(.assembly_ref_processor, @This());
processor: u32,
assembly_ref: TableIndex(.assembly_ref, 0),
};
const ClassLayoutRow = packed struct {
pub usingnamespace RowMixin(.class_layout, @This());
// maximum field alignment.
// If 0, use ABI alignment.
packing_size: u16,
// total size, 0 means it must be calculated
// based on packing size and ABI alignment
class_size: u32,
parent: TableIndex(.type_def, 0),
};
const ConstantRow = packed struct {
pub usingnamespace RowMixin(.constant, @This());
type: ConstantType,
__reserved0: u8 = 0,
/// The scope in which the constant is declared
parent: CodedIndex(HasConstant, 0),
/// The constant value
value: BlobIndex(1),
};
const CustomAttributeRow = packed struct {
pub usingnamespace RowMixin(.custom_attribute, @This());
parent: CodedIndex(HasCustomAttribute, 0),
type: CodedIndex(CustomAttributeType, 1),
value: BlobIndex(2),
};
const DeclSecurityRow = packed struct {
pub usingnamespace RowMixin(.decl_security, @This());
action: u16,
parent: CodedIndex(HasDeclSecurity, 0),
permission_set: BlobIndex(1),
};
const EventMapRow = packed struct {
pub usingnamespace RowMixin(.event_map, @This());
parent: TableIndex(.type_def, 0),
events: ListIndex(.event, 1),
};
const EventRow = packed struct {
pub usingnamespace RowMixin(.event, @This());
flags: EventAttributes,
name: StringIndex(0),
/// This is the type of the event, not the
/// type containing the event declaration.
event_type: CodedIndex(TypeDefOrRef, 1),
};
const ExportedTypeRow = packed struct {
pub usingnamespace RowMixin(.exported_type, @This());
flags: TypeAttributes,
/// Hint for the index into the type def table.
/// This may be incorrect, and should be zero
/// if flags.is_type_forwarder is true.
type_def_hint: u32,
type_name: StringIndex(0),
type_namespace: StringIndex(1),
/// Points to the location of the implementation.
/// .file => the module containing the implementation,
/// .exported_type => the type containing this type,
/// .assembly_ref => the assembly containing the implementation,
/// flags.is_type_forwarder must be set if this is .assembly_ref.
implementation: CodedIndex(Implementation, 2),
};
const FieldRow = packed struct {
pub usingnamespace RowMixin(.field, @This());
flags: FieldAttributes,
name: StringIndex(0),
signature: BlobIndex(1),
};
const FieldLayoutRow = packed struct {
pub usingnamespace RowMixin(.field_layout, @This());
/// Offset into the struct
offset: u32,
field: TableIndex(.field, 0),
};
const FieldMarshalRow = packed struct {
pub usingnamespace RowMixin(.field_marshal, @This());
parent: CodedIndex(HasFieldMarshal, 0),
native_type: BlobIndex(1),
};
const FieldRvaRow = packed struct {
pub usingnamespace RowMixin(.field_rva, @This());
rva: u32,
field: TableIndex(.field, 0),
};
const FileRow = packed struct {
pub usingnamespace RowMixin(.file, @This());
flags: FileAttributes,
name: StringIndex(0),
hash: BlobIndex(1),
};
const GenericParamRow = packed struct {
pub usingnamespace RowMixin(.generic_param, @This());
/// Index of this parameter, starting at 0
number: u16,
flags: GenericParameterAttributes,
owner: CodedIndex(TypeOrMethodDef, 0),
name: StringIndex(1),
};
const GenericParamConstraintRow = packed struct {
pub usingnamespace RowMixin(.generic_param_constraint, @This());
owner: TableIndex(.generic_param, 0),
constraint: CodedIndex(TypeDefOrRef, 1),
};
const ImplMapRow = packed struct {
pub usingnamespace RowMixin(.impl_map, @This());
flags: PInvokeAttributes,
/// Can only actually index .method_def, because
/// field export is not supported :(
member_forwarded: CodedIndex(MemberForwarded, 0),
import_name: StringIndex(1),
import_scope: TableIndex(.module_ref, 2),
};
const InterfaceImplRow = packed struct {
pub usingnamespace RowMixin(.interface_impl, @This());
class: TableIndex(.type_def, 0),
interface: CodedIndex(TypeDefOrRef, 1),
};
const ManifestResourceRow = packed struct {
pub usingnamespace RowMixin(.manifest_resource, @This());
offset: u32,
flags: ManifestResourceAttributes,
name: StringIndex(0),
implementation: CodedIndex(Implementation, 1),
};
const MemberRefRow = packed struct {
pub usingnamespace RowMixin(.member_ref, @This());
class: CodedIndex(MemberRefParent, 0),
name: StringIndex(1),
signature: BlobIndex(2),
};
const MethodDefRow = packed struct {
pub usingnamespace RowMixin(.method_def, @This());
rva: u32,
impl_flags: MethodImplAttributes,
flags: MethodAttributes,
name: StringIndex(0),
signature: BlobIndex(1),
param_list: ListIndex(.param, 2),
};
const MethodImplRow = packed struct {
pub usingnamespace RowMixin(.method_impl, @This());
class: TableIndex(.type_def, 0),
method_body: CodedIndex(MethodDefOrRef, 1),
method_declaration: CodedIndex(MethodDefOrRef, 2),
};
const MethodSemanticsRow = packed struct {
pub usingnamespace RowMixin(.method_semantics, @This());
semantics: MethodSemanticsAttributes,
method: TableIndex(.method_def, 0),
association: CodedIndex(HasSemantics, 1),
};
const MethodSpecRow = packed struct {
pub usingnamespace RowMixin(.method_spec, @This());
method: CodedIndex(MethodDefOrRef, 0),
instantiation: BlobIndex(1),
};
const ModuleRow = packed struct {
pub usingnamespace RowMixin(.module, @This());
generation: u16 = 0,
name: StringIndex(0),
mvid: GuidIndex(1),
enc_id: GuidIndex(2),
enc_base_id: GuidIndex(3),
};
const ModuleRefRow = packed struct {
pub usingnamespace RowMixin(.module_ref, @This());
name: StringIndex(0),
};
const NestedClassRow = packed struct {
pub usingnamespace RowMixin(.nested_class, @This());
nested_class: TableIndex(.type_def, 0),
enclosing_class: TableIndex(.type_def, 1),
};
const ParamRow = packed struct {
pub usingnamespace RowMixin(.param, @This());
flags: ParamAttributes,
sequence: u16,
name: StringIndex(0),
};
const PropertyRow = packed struct {
pub usingnamespace RowMixin(.property, @This());
flags: PropertyAttributes,
name: StringIndex(0),
type_signature: BlobIndex(1),
};
const PropertyMapRow = packed struct {
pub usingnamespace RowMixin(.property_map, @This());
parent: TableIndex(.type_def, 0),
property_list: ListIndex(.property, 1),
};
const StandaloneSigRow = packed struct {
pub usingnamespace RowMixin(.standalone_sig, @This());
signature: BlobIndex(0),
};
const TypeDefRow = packed struct {
pub usingnamespace RowMixin(.type_def, @This());
flags: TypeAttributes,
type_name: StringIndex(0),
type_namespace: StringIndex(1),
extends: CodedIndex(TypeDefOrRef, 2),
field_list: ListIndex(.field, 3),
method_list: ListIndex(.method_def, 4),
};
const TypeRefRow = packed struct {
pub usingnamespace RowMixin(.type_ref, @This());
resolution_scope: CodedIndex(ResolutionScope, 0),
type_name: StringIndex(1),
type_namespace: StringIndex(2),
};
const TypeSpecRow = packed struct {
pub usingnamespace RowMixin(.type_spec, @This());
signature: BlobIndex(0),
};
const all_row_types = [_]type{
AssemblyRow,
AssemblyOsRow,
AssemblyProcessorRow,
AssemblyRefRow,
AssemblyRefOsRow,
AssemblyRefProcessorRow,
ClassLayoutRow,
ConstantRow,
CustomAttributeRow,
DeclSecurityRow,
EventMapRow,
EventRow,
ExportedTypeRow,
FieldRow,
FieldLayoutRow,
FieldMarshalRow,
FieldRvaRow,
FileRow,
GenericParamRow,
ImplMapRow,
InterfaceImplRow,
ManifestResourceRow,
MemberRefRow,
MethodDefRow,
MethodImplRow,
MethodSemanticsRow,
MethodSpecRow,
ModuleRow,
ModuleRefRow,
NestedClassRow,
ParamRow,
PropertyRow,
PropertyMapRow,
StandaloneSigRow,
TypeDefRow,
TypeRefRow,
TypeSpecRow,
};
// --------------------------------- Tests ---------------------------------
// Mark Tests as referenced so its tests get compiled.
comptime { _ = Tests; }
pub const runAllTests = Tests.runAll;
const Module = @This();
const Tests = struct {
const testing = std.testing;
const print = std.debug.print;
fn runAll() void {
comptime {
@setEvalBranchQuota(100000);
refAllDeclsRecursive(Module);
for (TableKind.values) |kind| {
refAllDeclsRecursive(Table(kind));
}
var i = 0;
while (i < MAX_INDEX_COLUMNS) : (i += 1) {
refAllDeclsRecursive(StringIndex(i));
refAllDeclsRecursive(BlobIndex(i));
refAllDeclsRecursive(GuidIndex(i));
for (TableKind.values) |kind| {
refAllDeclsRecursive(TableIndex(kind, i));
refAllDeclsRecursive(ListIndex(kind, i));
}
for (all_coded_enums) |Code| {
refAllDeclsRecursive(CodedIndex(Code, i));
}
}
}
const tests = .{
"initIndices"
};
print("Running tests...\n", .{});
inline for (tests) |fn_name| {
print("{}...\n", .{fn_name});
@field(@This(), "test_"++fn_name)();
}
print("All {} tests passed.\n", .{tests.len});
}
test "initIndices" { test_initIndices(); }
fn test_initIndices() void {
var d: Database = undefined;
d.initIndices();
}
fn refDeclsList(comptime T: type, comptime decls: []const std.builtin.TypeInfo.Declaration) void {
for (decls) |decl| {
if (decl.is_pub) {
_ = @field(T, decl.name);
switch (decl.data) {
.Type => |SubType| refAllDeclsRecursive(SubType),
.Var, .Fn => {},
}
}
}
}
fn refAllDeclsRecursive(comptime T: type) void {
comptime {
switch (@typeInfo(T)) {
.Struct => |info| refDeclsList(T, info.decls),
.Union => |info| refDeclsList(T, info.decls),
.Enum => |info| refDeclsList(T, info.decls),
.Opaque => |info| refDeclsList(T, info.decls),
else => {},
}
}
}
}; | src/winmd.zig |
const std = @import("std");
const Self = @This();
const BIOS_FILE = @embedFile("../gba.bin");
const BIOS_START = 0x00000000;
const BIOS_END = 0x00003FFF;
const BIOS_SIZE = BIOS_END - BIOS_START + 1;
const WRAM_OB_START = 0x02000000;
const WRAM_OB_END = 0x0203FFFF;
const WRAM_OB_SIZE = WRAM_OB_END - WRAM_OB_START + 1;
const WRAM_OC_START = 0x03000000;
const WRAM_OC_END = 0x03007FFF;
const WRAM_OC_SIZE = WRAM_OC_END - WRAM_OC_START + 1;
const IO_START = 0x04000000;
const IO_END = 0x040003FE;
const IO_SIZE = IO_END - IO_START + 1;
const PAL_START = 0x05000000;
const PAL_END = 0x050003FF;
const PAL_SIZE = PAL_END - PAL_START + 1;
const VRAM_START = 0x06000000;
const VRAM_END = 0x06017FFF;
const VRAM_SIZE = VRAM_END - VRAM_START + 1;
const OAM_START = 0x07000000;
const OAM_END = 0x070003FF;
const OAM_SIZE = OAM_END - OAM_START + 1;
// TODO allocate to avoid ptr invalidation
bios: [BIOS_SIZE]u8,
wram_ob: [WRAM_OB_SIZE]u8,
wram_oc: [WRAM_OC_SIZE]u8,
io: [IO_SIZE]u8,
pal: [PAL_SIZE]u8,
vram: [VRAM_SIZE]u8,
oam: [OAM_SIZE]u8,
pub fn init() Self {
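// Assumes the embedded BIOS image is at least BIOS_SIZE (16 KiB) bytes;
// @memcpy below copies exactly that many bytes out of it.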
var bios = [_]u8{undefined} ** BIOS_SIZE;
@memcpy(&bios, BIOS_FILE, BIOS_SIZE);
const wram_ob = [_]u8{undefined} ** WRAM_OB_SIZE;
const wram_oc = [_]u8{undefined} ** WRAM_OC_SIZE;
const io = [_]u8{undefined} ** IO_SIZE;
const pal = [_]u8{undefined} ** PAL_SIZE;
const vram = [_]u8{undefined} ** VRAM_SIZE;
const oam = [_]u8{undefined} ** OAM_SIZE;
return .{
.bios = bios,
.wram_ob = wram_ob,
.wram_oc = wram_oc,
.io = io,
.pal = pal,
.vram = vram,
.oam = oam,
};
}
pub fn deinit(self: Self) void {
_ = self;
}
pub fn readByte(self: *Self, addr: u32) u8 {
return self.getAddr(addr).*;
}
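/// Assembles a 16-bit value from two consecutive bytes in little-endian order
/// (readWord does the same over four bytes).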
pub fn readHalfWord(self: *Self, addr: u32) u16 {
return @intCast(u16, self.getAddr(addr + 1).*) << 8 |
@intCast(u16, self.getAddr(addr).*);
}
pub fn readWord(self: *Self, addr: u32) u32 {
return @intCast(u32, self.getAddr(addr + 3).*) << 24 |
@intCast(u32, self.getAddr(addr + 2).*) << 16 |
@intCast(u32, self.getAddr(addr + 1).*) << 8 |
@intCast(u32, self.getAddr(addr).*);
}
pub fn writeByte(self: *Self, addr: u32, n: u8) void {
self.getAddr(addr).* = n;
}
pub fn writeHalfWord(self: *Self, addr: u32, n: u16) void {
// Store little-endian byte by byte, mirroring readHalfWord/readWord and
// avoiding unaligned pointer casts across region boundaries.
self.getAddr(addr).* = @truncate(u8, n);
self.getAddr(addr + 1).* = @truncate(u8, n >> 8);
}
pub fn writeWord(self: *Self, addr: u32, n: u32) void {
self.getAddr(addr).* = @truncate(u8, n);
self.getAddr(addr + 1).* = @truncate(u8, n >> 8);
self.getAddr(addr + 2).* = @truncate(u8, n >> 16);
self.getAddr(addr + 3).* = @truncate(u8, n >> 24);
}
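/// Translates a bus address into a pointer into the backing region's storage.
/// Addresses outside the listed windows (including mirrored regions) currently
/// fall through to the `todo` branch.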
pub fn getAddr(self: *Self, addr: u32) *u8 {
return switch (addr) {
0x00000000...0x00003FFF => &self.bios[addr],
0x02000000...0x0203FFFF => &self.wram_ob[@mod(addr, WRAM_OB_START)],
0x03000000...0x03007FFF => &self.wram_oc[@mod(addr, WRAM_OC_START)],
0x04000000...0x040003FE => &self.io[@mod(addr, IO_START)],
0x05000000...0x050003FF => &self.pal[@mod(addr, PAL_START)],
0x06000000...0x06017FFF => &self.vram[@mod(addr, VRAM_START)],
0x07000000...0x070003FF => &self.oam[@mod(addr, OAM_START)],
else => std.debug.todo("Attempted to access unused memory region."),
};
}
test "static analysis" {
std.testing.refAllDecls(@This());
}
test "memory sizes" {
try std.testing.expect(BIOS_SIZE == 0x00004000);
try std.testing.expect(WRAM_OB_SIZE == 0x000040000);
try std.testing.expect(WRAM_OC_SIZE == 0x000008000);
try std.testing.expect(IO_SIZE == 0x0000003FF);
try std.testing.expect(PAL_SIZE == 0x000000400);
try std.testing.expect(VRAM_SIZE == 0x000018000);
try std.testing.expect(OAM_SIZE == 0x000000400);
} | src/Bus.zig |
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const config = @import("../config.zig");
const Cluster = @import("cluster.zig").Cluster;
const Network = @import("network.zig").Network;
const StateMachine = @import("state_machine.zig").StateMachine;
const MessagePool = @import("../message_pool.zig").MessagePool;
const Message = MessagePool.Message;
const RingBuffer = @import("../ring_buffer.zig").RingBuffer;
const RequestQueue = RingBuffer(u128, config.message_bus_messages_max - 1);
const StateTransitions = std.AutoHashMap(u128, u64);
const log = std.log.scoped(.state_checker);
pub const StateChecker = struct {
/// Indexed by client index as used by Cluster.
client_requests: [config.clients_max]RequestQueue =
[_]RequestQueue{.{}} ** config.clients_max,
/// Indexed by replica index.
state_machine_states: [config.replicas_max]u128,
history: StateTransitions,
/// The highest canonical state reached by the cluster.
state: u128,
/// The number of times the canonical state has been advanced.
transitions: u64 = 0,
pub fn init(allocator: *mem.Allocator, cluster: *Cluster) !StateChecker {
const state = cluster.state_machines[0].state;
var state_machine_states: [config.replicas_max]u128 = undefined;
for (cluster.state_machines) |state_machine, i| {
assert(state_machine.state == state);
state_machine_states[i] = state_machine.state;
}
var history = StateTransitions.init(allocator);
errdefer history.deinit();
var state_checker = StateChecker{
.state_machine_states = state_machine_states,
.history = history,
.state = state,
};
try state_checker.history.putNoClobber(state, state_checker.transitions);
return state_checker;
}
pub fn deinit(state_checker: *StateChecker) void {
state_checker.history.deinit();
}
pub fn check_state(state_checker: *StateChecker, replica: u8) void {
const cluster = @fieldParentPtr(Cluster, "state_checker", state_checker);
const a = state_checker.state_machine_states[replica];
const b = cluster.state_machines[replica].state;
if (b == a) return;
state_checker.state_machine_states[replica] = b;
// If some other replica has already reached this state, then it will be in the history:
if (state_checker.history.get(b)) |transition| {
// A replica may transition more than once to the same state, for example, when
// restarting after a crash and replaying the log. The more important invariant is that
// the cluster as a whole may not transition to the same state more than once, and once
// transitioned may not regress.
log.info(
"{d:0>4}/{d:0>4} {x:0>32} > {x:0>32} {}",
.{ transition, state_checker.transitions, a, b, replica },
);
return;
}
// The replica has transitioned to state `b` that is not yet in the history.
// Check if this is a valid new state based on all currently inflight client requests.
for (state_checker.client_requests) |*queue| {
if (queue.head_ptr()) |input| {
if (b == StateMachine.hash(state_checker.state, std.mem.asBytes(input))) {
const transitions_executed = state_checker.history.get(a).?;
if (transitions_executed < state_checker.transitions) {
@panic("replica skipped interim transitions");
} else {
assert(transitions_executed == state_checker.transitions);
}
state_checker.state = b;
state_checker.transitions += 1;
log.info(" {d:0>4} {x:0>32} > {x:0>32} {}", .{
state_checker.transitions,
a,
b,
replica,
});
state_checker.history.putNoClobber(b, state_checker.transitions) catch {
@panic("state checker unable to allocate memory for history.put()");
};
// As soon as we reach a valid state we must pop the inflight request.
// We cannot wait until the client receives the reply because that would allow
// the inflight request to be used to reach other states in the interim.
// We must therefore use our own queue rather than the clients' queues.
_ = queue.pop();
return;
}
}
}
@panic("replica transitioned to an invalid state");
}
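/// Returns true once every replica's state machine has reached the same state,
/// and panics if that shared state lags behind the canonical transition count.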
pub fn convergence(state_checker: *StateChecker) bool {
const cluster = @fieldParentPtr(Cluster, "state_checker", state_checker);
const a = state_checker.state_machine_states[0];
for (state_checker.state_machine_states[1..cluster.options.replica_count]) |b| {
if (b != a) return false;
}
const transitions_executed = state_checker.history.get(a).?;
if (transitions_executed < state_checker.transitions) {
@panic("cluster reached convergence but on a regressed state");
} else {
assert(transitions_executed == state_checker.transitions);
}
return true;
}
}; | src/test/state_checker.zig |
const std = @import("std");
const ArrayList = std.ArrayList;
const SemanticVersion = std.SemanticVersion;
const Target = std.Target;
const debug = std.debug;
const fmt = std.fmt;
const fs = std.fs;
const heap = std.heap;
const io = std.io;
const math = std.math;
const mem = std.mem;
const process = std.process;
const unicode = std.unicode;
const clap = @import("thirdparty/zig-clap");
const algorithm = @import("algorithm.zig");
pub fn main() !void {
const allocator = heap.page_allocator;
const stderr = io.getStdErr();
const stdout = io.getStdOut();
const param = parseParam(allocator) catch |err| switch (err) {
error.Help => return printHelp(stdout.writer()),
error.Version => return printVersion(),
else => {
if (err == error.NotEnoughParams) printHelp(stderr.writer());
return err;
},
};
defer allocator.free(param.user_name);
defer allocator.free(param.site_name);
const password = try readPasswordFromStdin(allocator);
defer allocator.free(password);
const result = try algorithm.generateDiceware(
allocator,
param.user_name,
param.site_name,
param.counter,
param.word_count,
password,
);
defer allocator.free(result);
stdout.writer().print("{}", .{result}) catch return;
if (stdout.isTty()) stdout.writer().print("\n", .{}) catch return;
}
const Param = struct {
allocator: *mem.Allocator,
user_name: []u8,
site_name: []u8,
counter: u8 = 1,
word_count: u4 = 6,
};
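/// Reads LAPPLAND_COUNTER and LAPPLAND_WORD_COUNT into `param` as defaults.
/// Command-line flags are parsed afterwards in parseParam, so they take precedence.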
fn parseEnvVar(allocator: *mem.Allocator, param: *Param) !void {
const is_windows = Target.current.os.tag == .windows;
const counter = process.getEnvVarOwned(allocator, "LAPPLAND_COUNTER") catch |err| switch (err) {
error.OutOfMemory, error.EnvironmentVariableNotFound => null,
error.InvalidUtf8 => if (is_windows) null else unreachable,
};
const words = process.getEnvVarOwned(allocator, "LAPPLAND_WORD_COUNT") catch |err| switch (err) {
error.OutOfMemory, error.EnvironmentVariableNotFound => null,
error.InvalidUtf8 => if (is_windows) null else unreachable,
};
defer if (counter) |c| allocator.free(c);
defer if (words) |w| allocator.free(w);
if (counter) |c| param.*.counter = try fmt.parseUnsigned(@TypeOf(param.*.counter), c, 10);
if (words) |w| param.*.word_count = try fmt.parseUnsigned(@TypeOf(param.*.word_count), w, 10);
}
fn parseParam(allocator: *mem.Allocator) !Param {
var param = Param{
.allocator = allocator,
.user_name = undefined,
.site_name = undefined,
};
try parseEnvVar(allocator, ¶m);
var array_list = try ArrayList([]const u8).initCapacity(allocator, 2);
defer array_list.deinit();
const flags = comptime [_]clap.Param(u8){
clap.Param(u8){
.id = 'c',
.names = .{ .short = 'c', .long = "counter" },
.takes_value = .One,
},
clap.Param(u8){
.id = 'w',
.names = .{ .short = 'w', .long = "words" },
.takes_value = .One,
},
clap.Param(u8){
.id = 'h',
.names = .{ .short = 'h', .long = "help" },
},
clap.Param(u8){
.id = 255,
.names = .{ .long = "version" },
},
clap.Param(u8){
.id = 0,
.takes_value = .One,
},
};
const is_windows = Target.current.os.tag == .windows;
var iter = clap.args.OsIterator.init(allocator) catch |err| if (is_windows) return err else unreachable;
defer iter.deinit();
var parser = clap.StreamingClap(@TypeOf(flags[0].id), clap.args.OsIterator){
.params = &flags,
.iter = &iter,
};
while (parser.next(null) catch |err| switch (err) {
error.OutOfMemory => if (is_windows) return err else unreachable,
else => return err,
}) |arg| {
switch (arg.param.id) {
'c' => param.counter = try fmt.parseUnsigned(@TypeOf(param.counter), arg.value.?, 10),
'w' => param.word_count = try fmt.parseUnsigned(@TypeOf(param.word_count), arg.value.?, 10),
'h' => return error.Help,
255 => return error.Version,
0 => {
parser.state = .rest_are_positional;
if (array_list.items.len < 2) array_list.appendAssumeCapacity(arg.value.?);
},
else => unreachable,
}
}
if (array_list.items.len < 2) return error.NotEnoughParams;
if (param.counter == 0) return error.InvalidValue;
if (param.word_count < 5)
return error.WordsTooFew
else if (param.word_count > 10)
return error.WordsTooMany;
for (array_list.items) |item|
if (!unicode.utf8ValidateSlice(item))
return error.InvalidUtf8;
param.user_name = try param.allocator.dupe(u8, array_list.items[0]);
errdefer param.allocator.free(param.user_name);
param.site_name = try param.allocator.dupe(u8, array_list.items[1]);
return param;
}
fn printHelp(writer: fs.File.Writer) void {
const usage =
\\Usage: lapp [options] <username> <sitename>
\\
\\Options:
\\  -c, --counter <value>  Set the counter value (default: 1)
\\ -w, --words <value> Set how many words will be generated
\\ (default: 6, min: 5, max: 10)
\\ -h, --help Print this help and exit
\\ --version Print version number and exit
\\
\\Password is read from the standard input.
;
writer.print("{}\n", .{usage}) catch return;
}
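// An illustrative invocation matching the usage text above (the user, site, and
// option values here are hypothetical):
//
//   echo "master password" | lapp -c 2 -w 8 alice example.com
//
// which reads the master password from standard input and prints a result built
// from eight generated words for user "alice" on site "example.com".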
fn printVersion() void {
@setCold(true);
const stdout = io.getStdOut().writer();
const version = SemanticVersion{
.major = 0,
.minor = 1,
.patch = 0,
};
stdout.print("lappland version ", .{}) catch return;
version.format("", .{}, stdout) catch return;
stdout.print("\n", .{}) catch return;
}
fn readPasswordFromStdin(allocator: *mem.Allocator) ![]u8 {
    // Compile-time sanity check: the second field of fs.File.Stat is the u64 `size`,
    // whose maximum value is used below as the upper bound for readAllAlloc.
    const file_size = @typeInfo(fs.File.Stat).Struct.fields[1];
    comptime debug.assert(mem.eql(u8, file_size.name, "size"));
    comptime debug.assert(file_size.field_type == u64);
const stdin = io.getStdIn().reader();
return stdin.readAllAlloc(allocator, math.maxInt(file_size.field_type));
} | src/main.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const testing = std.testing;
const expectEqual = testing.expectEqual;
const ArrayList = std.ArrayList;
// https://encoding.spec.whatwg.org/#concept-stream
pub fn IoQueue(comptime T: type) type {
return struct {
allocator: *Allocator,
items: []const T,
pushed: ArrayList(T),
pos: usize,
pub const Item = union(enum) {
Regular: T,
EndOfQueue,
};
pub fn init(allocator: *Allocator, items: []const T) IoQueue(T) {
return IoQueue(T){
.allocator = allocator,
.items = items,
.pushed = ArrayList(T).init(allocator),
.pos = 0,
};
}
pub fn deinit(self: *IoQueue(T)) void {
self.pushed.deinit();
}
pub fn peek(self: *IoQueue(T), n: usize) ![]Item {
var result = ArrayList(Item).init(self.allocator);
defer result.deinit();
var i: usize = 0;
while (i < n) : (i += 1) {
if (self.pos + i < self.items.len) {
try result.append(Item{ .Regular = self.items[self.pos + i] });
} else if (self.pos + i - self.items.len < self.pushed.items.len) {
try result.append(Item{ .Regular = self.pushed.items[self.pos + i - self.items.len] });
} else {
break;
}
}
return std.mem.dupe(self.allocator, Item, result.items);
}
pub fn peekOne(self: *IoQueue(T)) ?T {
if (self.pos < self.items.len) {
return self.items[self.pos];
} else if (self.pos - self.items.len < self.pushed.items.len) {
return self.pushed.items[self.pos - self.items.len];
}
return null;
}
        pub fn size(self: *IoQueue(T)) usize {
            // Items remaining to be read, plus one for the end-of-queue item
            // that conceptually terminates an immediate queue.
            return self.items.len + self.pushed.items.len - self.pos + 1;
        }
pub fn read(self: *IoQueue(T)) Item {
if (self.pos >= self.items.len + self.pushed.items.len) {
return .EndOfQueue;
}
var item: Item = undefined;
if (self.pos < self.items.len) {
item = Item{ .Regular = self.items[self.pos] };
} else {
item = Item{ .Regular = self.pushed.items[self.pos - self.items.len] };
}
self.pos += 1;
return item;
}
pub fn push(self: *IoQueue(T), item: Item) !void {
switch (item) {
.Regular => |unwrapped| {
try self.pushed.append(unwrapped);
},
.EndOfQueue => {},
}
}
        pub fn serialize(self: *IoQueue(T)) ![]T {
            // Copy only the items that have not been read yet (from `pos` onward),
            // first from the initial items, then from anything that was pushed.
            var buffer = try self.allocator.alloc(T, self.items.len + self.pushed.items.len - self.pos);
            if (self.pos < self.items.len) {
                std.mem.copy(T, buffer, self.items[self.pos..]);
                std.mem.copy(T, buffer[self.items.len - self.pos ..], self.pushed.items);
            } else {
                std.mem.copy(T, buffer, self.pushed.items[self.pos - self.items.len ..]);
            }
            return buffer;
        }
};
}
test "Immediate I/O Queue of bytes" {
var bytes = [_]u8{ 'A', 'B', 'C' };
var byte_queue = IoQueue(u8).init(testing.allocator, &bytes);
defer byte_queue.deinit();
var peeked: []IoQueue(u8).Item = try byte_queue.peek(1);
try expectEqual(@intCast(usize, 1), peeked.len);
try expectEqual(IoQueue(u8).Item{ .Regular = 'A' }, peeked[0]);
testing.allocator.free(peeked);
peeked = try byte_queue.peek(2);
try expectEqual(@intCast(usize, 2), peeked.len);
try expectEqual(IoQueue(u8).Item{ .Regular = 'A' }, peeked[0]);
try expectEqual(IoQueue(u8).Item{ .Regular = 'B' }, peeked[1]);
testing.allocator.free(peeked);
try expectEqual(IoQueue(u8).Item{ .Regular = 'A' }, byte_queue.read());
try expectEqual(IoQueue(u8).Item{ .Regular = 'B' }, byte_queue.read());
peeked = try byte_queue.peek(2);
try expectEqual(@intCast(usize, 1), peeked.len);
try expectEqual(IoQueue(u8).Item{ .Regular = 'C' }, peeked[0]);
testing.allocator.free(peeked);
try byte_queue.push(IoQueue(u8).Item{ .Regular = 'D' });
try expectEqual(@intCast(u8, 'C'), byte_queue.peekOne().?);
peeked = try byte_queue.peek(3);
try expectEqual(@intCast(usize, 2), peeked.len);
try expectEqual(IoQueue(u8).Item{ .Regular = 'C' }, peeked[0]);
try expectEqual(IoQueue(u8).Item{ .Regular = 'D' }, peeked[1]);
testing.allocator.free(peeked);
try expectEqual(IoQueue(u8).Item{ .Regular = 'C' }, byte_queue.read());
try expectEqual(@intCast(u8, 'D'), byte_queue.peekOne().?);
try expectEqual(IoQueue(u8).Item{ .Regular = 'D' }, byte_queue.read());
try expectEqual(IoQueue(u8).Item.EndOfQueue, byte_queue.read());
try expectEqual(IoQueue(u8).Item.EndOfQueue, byte_queue.read());
try expectEqual(@as(?u8, null), byte_queue.peekOne());
peeked = try byte_queue.peek(3);
try expectEqual(@intCast(usize, 0), peeked.len);
testing.allocator.free(peeked);
}
test "Immediate I/O Queue of code points" {
var code_points = [_]u21{ 'A', 'B', 0x26A1 }; // AB⚡
var code_point_queue = IoQueue(u21).init(testing.allocator, &code_points);
defer code_point_queue.deinit();
var peeked: []IoQueue(u21).Item = try code_point_queue.peek(1);
try expectEqual(@intCast(usize, 1), peeked.len);
try expectEqual(IoQueue(u21).Item{ .Regular = 'A' }, peeked[0]);
testing.allocator.free(peeked);
peeked = try code_point_queue.peek(2);
try expectEqual(@intCast(usize, 2), peeked.len);
try expectEqual(IoQueue(u21).Item{ .Regular = 'A' }, peeked[0]);
try expectEqual(IoQueue(u21).Item{ .Regular = 'B' }, peeked[1]);
testing.allocator.free(peeked);
try expectEqual(IoQueue(u21).Item{ .Regular = 'A' }, code_point_queue.read());
try expectEqual(IoQueue(u21).Item{ .Regular = 'B' }, code_point_queue.read());
peeked = try code_point_queue.peek(2);
try expectEqual(@intCast(usize, 1), peeked.len);
try expectEqual(IoQueue(u21).Item{ .Regular = 0x26A1 }, peeked[0]); // ⚡
testing.allocator.free(peeked);
try code_point_queue.push(IoQueue(u21).Item{ .Regular = 0xFFFD }); // �
try expectEqual(@intCast(u21, 0x26A1), code_point_queue.peekOne().?);
peeked = try code_point_queue.peek(3);
try expectEqual(@intCast(usize, 2), peeked.len);
try expectEqual(IoQueue(u21).Item{ .Regular = 0x26A1 }, peeked[0]); // ⚡
try expectEqual(IoQueue(u21).Item{ .Regular = 0xFFFD }, peeked[1]); // �
testing.allocator.free(peeked);
try expectEqual(IoQueue(u21).Item{ .Regular = 0x26A1 }, code_point_queue.read()); // ⚡
try expectEqual(@intCast(u21, 0xFFFD), code_point_queue.peekOne().?); // �
try expectEqual(IoQueue(u21).Item{ .Regular = 0xFFFD }, code_point_queue.read()); // �
try expectEqual(IoQueue(u21).Item.EndOfQueue, code_point_queue.read());
try expectEqual(IoQueue(u21).Item.EndOfQueue, code_point_queue.read());
try expectEqual(@as(?u21, null), code_point_queue.peekOne());
peeked = try code_point_queue.peek(3);
try expectEqual(@intCast(usize, 0), peeked.len);
testing.allocator.free(peeked);
}
// TODO: Handle this use-case of streaming queue where read/peek operations wait until enough "items" are
// available. If possible, handle this with options to the IoQueue instead of adding a new type, but only if
// the syntax and calling of IoQueue remains convenient for the Immediate mode (meaning I don't want to have
// to suddenly add "async" or "await" to any of my calling code, but more "try"'s would be fine).
//
// pub fn StreamingIoQueue(comptime T: type) type {
// return struct {
// allocator: *Allocator,
// items: []T,
// pushed: ArrayList(T),
// pos: usize,
// pub const Item = union(enum) {
// Regular: T,
// EndOfQueue,
// };
// pub fn init(allocator: *Allocator, items: []T) IoQueue(T) {
// }
// pub fn deinit(self: *IoQueue(T)) void {
// }
// pub fn peek(self: *IoQueue(T), n: usize) ![]Item {
// }
// pub fn peekOne(self: *IoQueue(T)) ?T {
// }
// pub fn size(self: *IoQueue(T)) usize {
// }
// pub fn read(self: *IoQueue(T)) Item {
// }
// pub fn push(self: *IoQueue(T), item: Item) !void {
// }
// };
// }
// test "Streaming I/O Queue of bytes" {
// var bytes = [_]u8{ 'A', 'B', 'C' };
// var byte_queue = IoQueue(u8).init(testing.allocator, &bytes, .{ .type = .Streaming });
// defer byte_queue.deinit();
// var peeked: []IoQueue(u8).Item = try byte_queue.peek(1);
// try expectEqual(@intCast(usize, 1), peeked.len);
// try expectEqual(IoQueue(u8).Item{ .Regular = 'A' }, peeked[0]);
// testing.allocator.free(peeked);
// peeked = try byte_queue.peek(2);
// try expectEqual(@intCast(usize, 2), peeked.len);
// try expectEqual(IoQueue(u8).Item{ .Regular = 'A' }, peeked[0]);
// try expectEqual(IoQueue(u8).Item{ .Regular = 'B' }, peeked[1]);
// testing.allocator.free(peeked);
// try expectEqual(IoQueue(u8).Item{ .Regular = 'A' }, byte_queue.read());
// try expectEqual(IoQueue(u8).Item{ .Regular = 'B' }, byte_queue.read());
// peeked = try byte_queue.peek(2);
// try expectEqual(@intCast(usize, 1), peeked.len);
// try expectEqual(IoQueue(u8).Item{ .Regular = 'C' }, peeked[0]);
// testing.allocator.free(peeked);
// try byte_queue.push(IoQueue(u8).Item{ .Regular = 'D' });
// try expectEqual(@intCast(u8, 'C'), byte_queue.peekOne().?);
// peeked = try byte_queue.peek(3);
// try expectEqual(@intCast(usize, 2), peeked.len);
// try expectEqual(IoQueue(u8).Item{ .Regular = 'C' }, peeked[0]);
// try expectEqual(IoQueue(u8).Item{ .Regular = 'D' }, peeked[1]);
// testing.allocator.free(peeked);
// try expectEqual(IoQueue(u8).Item{ .Regular = 'C' }, byte_queue.read());
// try expectEqual(@intCast(u8, 'D'), byte_queue.peekOne().?);
// try expectEqual(IoQueue(u8).Item{ .Regular = 'D' }, byte_queue.read());
// try expectEqual(IoQueue(u8).Item.EndOfQueue, byte_queue.read());
// try expectEqual(IoQueue(u8).Item.EndOfQueue, byte_queue.read());
// try expectEqual(@as(?u8, null), byte_queue.peekOne());
// peeked = try byte_queue.peek(3);
// try expectEqual(@intCast(usize, 0), peeked.len);
// testing.allocator.free(peeked);
// } | src/io_queue.zig |
const std = @import("std.zig");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const math = std.math;
const builtin = @import("builtin");
/// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. O(1) memory (no allocator required).
pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool) void {
{
var i: usize = 1;
while (i < items.len) : (i += 1) {
const x = items[i];
var j: usize = i;
while (j > 0 and lessThan(x, items[j - 1])) : (j -= 1) {
items[j] = items[j - 1];
}
items[j] = x;
}
}
}
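// A minimal usage sketch for the insertionSort above, using the asc comparator
// defined further below; it simply orders a small slice in place.
test "insertionSort usage sketch" {
    var items = [_]i32{ 3, 1, 2 };
    insertionSort(i32, items[0..], asc(i32));
    testing.expect(mem.eql(i32, items[0..], [_]i32{ 1, 2, 3 }));
}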
const Range = struct {
start: usize,
end: usize,
fn init(start: usize, end: usize) Range {
return Range{
.start = start,
.end = end,
};
}
fn length(self: Range) usize {
return self.end - self.start;
}
};
const Iterator = struct {
size: usize,
power_of_two: usize,
numerator: usize,
decimal: usize,
denominator: usize,
decimal_step: usize,
numerator_step: usize,
fn init(size2: usize, min_level: usize) Iterator {
const power_of_two = math.floorPowerOfTwo(usize, size2);
const denominator = power_of_two / min_level;
return Iterator{
.numerator = 0,
.decimal = 0,
.size = size2,
.power_of_two = power_of_two,
.denominator = denominator,
.decimal_step = size2 / denominator,
.numerator_step = size2 % denominator,
};
}
fn begin(self: *Iterator) void {
self.numerator = 0;
self.decimal = 0;
}
fn nextRange(self: *Iterator) Range {
const start = self.decimal;
self.decimal += self.decimal_step;
self.numerator += self.numerator_step;
if (self.numerator >= self.denominator) {
self.numerator -= self.denominator;
self.decimal += 1;
}
return Range{
.start = start,
.end = self.decimal,
};
}
fn finished(self: *Iterator) bool {
return self.decimal >= self.size;
}
fn nextLevel(self: *Iterator) bool {
self.decimal_step += self.decimal_step;
self.numerator_step += self.numerator_step;
if (self.numerator_step >= self.denominator) {
self.numerator_step -= self.denominator;
self.decimal_step += 1;
}
return (self.decimal_step < self.size);
}
fn length(self: *Iterator) usize {
return self.decimal_step;
}
};
const Pull = struct {
from: usize,
to: usize,
count: usize,
range: Range,
};
/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. O(1) memory (no allocator required).
/// Currently implemented as block sort.
pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool) void {
// Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
var cache: [512]T = undefined;
if (items.len < 4) {
if (items.len == 3) {
// hard coded insertion sort
if (lessThan(items[1], items[0])) mem.swap(T, &items[0], &items[1]);
if (lessThan(items[2], items[1])) {
mem.swap(T, &items[1], &items[2]);
if (lessThan(items[1], items[0])) mem.swap(T, &items[0], &items[1]);
}
} else if (items.len == 2) {
if (lessThan(items[1], items[0])) mem.swap(T, &items[0], &items[1]);
}
return;
}
// sort groups of 4-8 items at a time using an unstable sorting network,
// but keep track of the original item orders to force it to be stable
// http://pages.ripco.net/~jgamble/nw.html
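    // For example, given two equal items at positions x and y (x < y) whose order
    // entries are still 0 and 1, swap() below leaves them alone: lessThan(items[y], items[x])
    // is false and order[x] > order[y] is false. Only when an earlier exchange has left the
    // order entries inverted does swap() put equal items back into their original order,
    // which is what makes the otherwise unstable network stable.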
var iterator = Iterator.init(items.len, 4);
while (!iterator.finished()) {
var order = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7 };
const range = iterator.nextRange();
const sliced_items = items[range.start..];
switch (range.length()) {
8 => {
swap(T, sliced_items, lessThan, &order, 0, 1);
swap(T, sliced_items, lessThan, &order, 2, 3);
swap(T, sliced_items, lessThan, &order, 4, 5);
swap(T, sliced_items, lessThan, &order, 6, 7);
swap(T, sliced_items, lessThan, &order, 0, 2);
swap(T, sliced_items, lessThan, &order, 1, 3);
swap(T, sliced_items, lessThan, &order, 4, 6);
swap(T, sliced_items, lessThan, &order, 5, 7);
swap(T, sliced_items, lessThan, &order, 1, 2);
swap(T, sliced_items, lessThan, &order, 5, 6);
swap(T, sliced_items, lessThan, &order, 0, 4);
swap(T, sliced_items, lessThan, &order, 3, 7);
swap(T, sliced_items, lessThan, &order, 1, 5);
swap(T, sliced_items, lessThan, &order, 2, 6);
swap(T, sliced_items, lessThan, &order, 1, 4);
swap(T, sliced_items, lessThan, &order, 3, 6);
swap(T, sliced_items, lessThan, &order, 2, 4);
swap(T, sliced_items, lessThan, &order, 3, 5);
swap(T, sliced_items, lessThan, &order, 3, 4);
},
7 => {
swap(T, sliced_items, lessThan, &order, 1, 2);
swap(T, sliced_items, lessThan, &order, 3, 4);
swap(T, sliced_items, lessThan, &order, 5, 6);
swap(T, sliced_items, lessThan, &order, 0, 2);
swap(T, sliced_items, lessThan, &order, 3, 5);
swap(T, sliced_items, lessThan, &order, 4, 6);
swap(T, sliced_items, lessThan, &order, 0, 1);
swap(T, sliced_items, lessThan, &order, 4, 5);
swap(T, sliced_items, lessThan, &order, 2, 6);
swap(T, sliced_items, lessThan, &order, 0, 4);
swap(T, sliced_items, lessThan, &order, 1, 5);
swap(T, sliced_items, lessThan, &order, 0, 3);
swap(T, sliced_items, lessThan, &order, 2, 5);
swap(T, sliced_items, lessThan, &order, 1, 3);
swap(T, sliced_items, lessThan, &order, 2, 4);
swap(T, sliced_items, lessThan, &order, 2, 3);
},
6 => {
swap(T, sliced_items, lessThan, &order, 1, 2);
swap(T, sliced_items, lessThan, &order, 4, 5);
swap(T, sliced_items, lessThan, &order, 0, 2);
swap(T, sliced_items, lessThan, &order, 3, 5);
swap(T, sliced_items, lessThan, &order, 0, 1);
swap(T, sliced_items, lessThan, &order, 3, 4);
swap(T, sliced_items, lessThan, &order, 2, 5);
swap(T, sliced_items, lessThan, &order, 0, 3);
swap(T, sliced_items, lessThan, &order, 1, 4);
swap(T, sliced_items, lessThan, &order, 2, 4);
swap(T, sliced_items, lessThan, &order, 1, 3);
swap(T, sliced_items, lessThan, &order, 2, 3);
},
5 => {
swap(T, sliced_items, lessThan, &order, 0, 1);
swap(T, sliced_items, lessThan, &order, 3, 4);
swap(T, sliced_items, lessThan, &order, 2, 4);
swap(T, sliced_items, lessThan, &order, 2, 3);
swap(T, sliced_items, lessThan, &order, 1, 4);
swap(T, sliced_items, lessThan, &order, 0, 3);
swap(T, sliced_items, lessThan, &order, 0, 2);
swap(T, sliced_items, lessThan, &order, 1, 3);
swap(T, sliced_items, lessThan, &order, 1, 2);
},
4 => {
swap(T, sliced_items, lessThan, &order, 0, 1);
swap(T, sliced_items, lessThan, &order, 2, 3);
swap(T, sliced_items, lessThan, &order, 0, 2);
swap(T, sliced_items, lessThan, &order, 1, 3);
swap(T, sliced_items, lessThan, &order, 1, 2);
},
else => {},
}
}
if (items.len < 8) return;
// then merge sort the higher levels, which can be 8-15, 16-31, 32-63, 64-127, etc.
while (true) {
// if every A and B block will fit into the cache, use a special branch specifically for merging with the cache
// (we use < rather than <= since the block size might be one more than iterator.length())
if (iterator.length() < cache.len) {
// if four subarrays fit into the cache, it's faster to merge both pairs of subarrays into the cache,
// then merge the two merged subarrays from the cache back into the original array
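            // With the 512-item cache declared above, this fast path applies while
            // iterator.length() <= 127 (since (127 + 1) * 4 == 512) and the items slice
            // holds at least four such subarrays.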
if ((iterator.length() + 1) * 4 <= cache.len and iterator.length() * 4 <= items.len) {
iterator.begin();
while (!iterator.finished()) {
// merge A1 and B1 into the cache
var A1 = iterator.nextRange();
var B1 = iterator.nextRange();
var A2 = iterator.nextRange();
var B2 = iterator.nextRange();
if (lessThan(items[B1.end - 1], items[A1.start])) {
// the two ranges are in reverse order, so copy them in reverse order into the cache
mem.copy(T, cache[B1.length()..], items[A1.start..A1.end]);
mem.copy(T, cache[0..], items[B1.start..B1.end]);
} else if (lessThan(items[B1.start], items[A1.end - 1])) {
// these two ranges weren't already in order, so merge them into the cache
mergeInto(T, items, A1, B1, lessThan, cache[0..]);
} else {
// if A1, B1, A2, and B2 are all in order, skip doing anything else
if (!lessThan(items[B2.start], items[A2.end - 1]) and !lessThan(items[A2.start], items[B1.end - 1])) continue;
// copy A1 and B1 into the cache in the same order
mem.copy(T, cache[0..], items[A1.start..A1.end]);
mem.copy(T, cache[A1.length()..], items[B1.start..B1.end]);
}
A1 = Range.init(A1.start, B1.end);
// merge A2 and B2 into the cache
if (lessThan(items[B2.end - 1], items[A2.start])) {
// the two ranges are in reverse order, so copy them in reverse order into the cache
mem.copy(T, cache[A1.length() + B2.length() ..], items[A2.start..A2.end]);
mem.copy(T, cache[A1.length()..], items[B2.start..B2.end]);
} else if (lessThan(items[B2.start], items[A2.end - 1])) {
// these two ranges weren't already in order, so merge them into the cache
mergeInto(T, items, A2, B2, lessThan, cache[A1.length()..]);
} else {
// copy A2 and B2 into the cache in the same order
mem.copy(T, cache[A1.length()..], items[A2.start..A2.end]);
mem.copy(T, cache[A1.length() + A2.length() ..], items[B2.start..B2.end]);
}
A2 = Range.init(A2.start, B2.end);
// merge A1 and A2 from the cache into the items
const A3 = Range.init(0, A1.length());
const B3 = Range.init(A1.length(), A1.length() + A2.length());
if (lessThan(cache[B3.end - 1], cache[A3.start])) {
// the two ranges are in reverse order, so copy them in reverse order into the items
mem.copy(T, items[A1.start + A2.length() ..], cache[A3.start..A3.end]);
mem.copy(T, items[A1.start..], cache[B3.start..B3.end]);
} else if (lessThan(cache[B3.start], cache[A3.end - 1])) {
// these two ranges weren't already in order, so merge them back into the items
mergeInto(T, cache[0..], A3, B3, lessThan, items[A1.start..]);
} else {
// copy A3 and B3 into the items in the same order
mem.copy(T, items[A1.start..], cache[A3.start..A3.end]);
mem.copy(T, items[A1.start + A1.length() ..], cache[B3.start..B3.end]);
}
}
// we merged two levels at the same time, so we're done with this level already
// (iterator.nextLevel() is called again at the bottom of this outer merge loop)
_ = iterator.nextLevel();
} else {
iterator.begin();
while (!iterator.finished()) {
var A = iterator.nextRange();
var B = iterator.nextRange();
if (lessThan(items[B.end - 1], items[A.start])) {
// the two ranges are in reverse order, so a simple rotation should fix it
mem.rotate(T, items[A.start..B.end], A.length());
} else if (lessThan(items[B.start], items[A.end - 1])) {
// these two ranges weren't already in order, so we'll need to merge them!
mem.copy(T, cache[0..], items[A.start..A.end]);
mergeExternal(T, items, A, B, lessThan, cache[0..]);
}
}
}
} else {
// this is where the in-place merge logic starts!
// 1. pull out two internal buffers each containing √A unique values
// 1a. adjust block_size and buffer_size if we couldn't find enough unique values
// 2. loop over the A and B subarrays within this level of the merge sort
// 3. break A and B into blocks of size 'block_size'
// 4. "tag" each of the A blocks with values from the first internal buffer
// 5. roll the A blocks through the B blocks and drop/rotate them where they belong
// 6. merge each A block with any B values that follow, using the cache or the second internal buffer
// 7. sort the second internal buffer if it exists
// 8. redistribute the two internal buffers back into the items
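            // As a concrete illustration: for a merge level where iterator.length() is 1024,
            // block_size below is sqrt(1024) = 32 and buffer_size is 1024 / 32 + 1 = 33, so the
            // search further down looks for up to 2 * 33 = 66 unique values to build both buffers.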
var block_size: usize = math.sqrt(iterator.length());
var buffer_size = iterator.length() / block_size + 1;
// as an optimization, we really only need to pull out the internal buffers once for each level of merges
// after that we can reuse the same buffers over and over, then redistribute it when we're finished with this level
var A: Range = undefined;
var B: Range = undefined;
var index: usize = 0;
var last: usize = 0;
var count: usize = 0;
var find: usize = 0;
var start: usize = 0;
var pull_index: usize = 0;
var pull = [_]Pull{
Pull{
.from = 0,
.to = 0,
.count = 0,
.range = Range.init(0, 0),
},
Pull{
.from = 0,
.to = 0,
.count = 0,
.range = Range.init(0, 0),
},
};
var buffer1 = Range.init(0, 0);
var buffer2 = Range.init(0, 0);
// find two internal buffers of size 'buffer_size' each
find = buffer_size + buffer_size;
var find_separately = false;
if (block_size <= cache.len) {
// if every A block fits into the cache then we won't need the second internal buffer,
// so we really only need to find 'buffer_size' unique values
find = buffer_size;
} else if (find > iterator.length()) {
// we can't fit both buffers into the same A or B subarray, so find two buffers separately
find = buffer_size;
find_separately = true;
}
// we need to find either a single contiguous space containing 2√A unique values (which will be split up into two buffers of size √A each),
// or we need to find one buffer of < 2√A unique values, and a second buffer of √A unique values,
// OR if we couldn't find that many unique values, we need the largest possible buffer we can get
// in the case where it couldn't find a single buffer of at least √A unique values,
// all of the Merge steps must be replaced by a different merge algorithm (MergeInPlace)
iterator.begin();
while (!iterator.finished()) {
A = iterator.nextRange();
B = iterator.nextRange();
// just store information about where the values will be pulled from and to,
// as well as how many values there are, to create the two internal buffers
// check A for the number of unique values we need to fill an internal buffer
// these values will be pulled out to the start of A
last = A.start;
count = 1;
while (count < find) : ({
last = index;
count += 1;
}) {
index = findLastForward(T, items, items[last], Range.init(last + 1, A.end), lessThan, find - count);
if (index == A.end) break;
}
index = last;
if (count >= buffer_size) {
// keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer
pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
.to = A.start,
};
pull_index = 1;
if (count == buffer_size + buffer_size) {
// we were able to find a single contiguous section containing 2√A unique values,
// so this section can be used to contain both of the internal buffers we'll need
buffer1 = Range.init(A.start, A.start + buffer_size);
buffer2 = Range.init(A.start + buffer_size, A.start + count);
break;
} else if (find == buffer_size + buffer_size) {
// we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values,
// so we still need to find a second separate buffer of at least √A unique values
buffer1 = Range.init(A.start, A.start + count);
find = buffer_size;
} else if (block_size <= cache.len) {
// we found the first and only internal buffer that we need, so we're done!
buffer1 = Range.init(A.start, A.start + count);
break;
} else if (find_separately) {
// found one buffer, but now find the other one
buffer1 = Range.init(A.start, A.start + count);
find_separately = false;
} else {
// we found a second buffer in an 'A' subarray containing √A unique values, so we're done!
buffer2 = Range.init(A.start, A.start + count);
break;
}
} else if (pull_index == 0 and count > buffer1.length()) {
// keep track of the largest buffer we were able to find
buffer1 = Range.init(A.start, A.start + count);
pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
.to = A.start,
};
}
// check B for the number of unique values we need to fill an internal buffer
// these values will be pulled out to the end of B
last = B.end - 1;
count = 1;
while (count < find) : ({
last = index - 1;
count += 1;
}) {
index = findFirstBackward(T, items, items[last], Range.init(B.start, last), lessThan, find - count);
if (index == B.start) break;
}
index = last;
if (count >= buffer_size) {
                    // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer
pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
.to = B.end,
};
pull_index = 1;
if (count == buffer_size + buffer_size) {
// we were able to find a single contiguous section containing 2√A unique values,
// so this section can be used to contain both of the internal buffers we'll need
buffer1 = Range.init(B.end - count, B.end - buffer_size);
buffer2 = Range.init(B.end - buffer_size, B.end);
break;
} else if (find == buffer_size + buffer_size) {
// we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values,
// so we still need to find a second separate buffer of at least √A unique values
buffer1 = Range.init(B.end - count, B.end);
find = buffer_size;
} else if (block_size <= cache.len) {
// we found the first and only internal buffer that we need, so we're done!
buffer1 = Range.init(B.end - count, B.end);
break;
} else if (find_separately) {
// found one buffer, but now find the other one
buffer1 = Range.init(B.end - count, B.end);
find_separately = false;
} else {
// buffer2 will be pulled out from a 'B' subarray, so if the first buffer was pulled out from the corresponding 'A' subarray,
// we need to adjust the end point for that A subarray so it knows to stop redistributing its values before reaching buffer2
if (pull[0].range.start == A.start) pull[0].range.end -= pull[1].count;
// we found a second buffer in an 'B' subarray containing √A unique values, so we're done!
buffer2 = Range.init(B.end - count, B.end);
break;
}
} else if (pull_index == 0 and count > buffer1.length()) {
// keep track of the largest buffer we were able to find
buffer1 = Range.init(B.end - count, B.end);
pull[pull_index] = Pull{
.range = Range.init(A.start, B.end),
.count = count,
.from = index,
.to = B.end,
};
}
}
// pull out the two ranges so we can use them as internal buffers
pull_index = 0;
while (pull_index < 2) : (pull_index += 1) {
const length = pull[pull_index].count;
if (pull[pull_index].to < pull[pull_index].from) {
// we're pulling the values out to the left, which means the start of an A subarray
index = pull[pull_index].from;
count = 1;
while (count < length) : (count += 1) {
index = findFirstBackward(T, items, items[index - 1], Range.init(pull[pull_index].to, pull[pull_index].from - (count - 1)), lessThan, length - count);
const range = Range.init(index + 1, pull[pull_index].from + 1);
mem.rotate(T, items[range.start..range.end], range.length() - count);
pull[pull_index].from = index + count;
}
} else if (pull[pull_index].to > pull[pull_index].from) {
// we're pulling values out to the right, which means the end of a B subarray
index = pull[pull_index].from + 1;
count = 1;
while (count < length) : (count += 1) {
index = findLastForward(T, items, items[index], Range.init(index, pull[pull_index].to), lessThan, length - count);
const range = Range.init(pull[pull_index].from, index - 1);
mem.rotate(T, items[range.start..range.end], count);
pull[pull_index].from = index - 1 - count;
}
}
}
// adjust block_size and buffer_size based on the values we were able to pull out
buffer_size = buffer1.length();
block_size = iterator.length() / buffer_size + 1;
// the first buffer NEEDS to be large enough to tag each of the evenly sized A blocks,
// so this was originally here to test the math for adjusting block_size above
// assert((iterator.length() + 1)/block_size <= buffer_size);
// now that the two internal buffers have been created, it's time to merge each A+B combination at this level of the merge sort!
iterator.begin();
while (!iterator.finished()) {
A = iterator.nextRange();
B = iterator.nextRange();
// remove any parts of A or B that are being used by the internal buffers
start = A.start;
if (start == pull[0].range.start) {
if (pull[0].from > pull[0].to) {
A.start += pull[0].count;
// if the internal buffer takes up the entire A or B subarray, then there's nothing to merge
// this only happens for very small subarrays, like √4 = 2, 2 * (2 internal buffers) = 4,
// which also only happens when cache.len is small or 0 since it'd otherwise use MergeExternal
if (A.length() == 0) continue;
} else if (pull[0].from < pull[0].to) {
B.end -= pull[0].count;
if (B.length() == 0) continue;
}
}
if (start == pull[1].range.start) {
if (pull[1].from > pull[1].to) {
A.start += pull[1].count;
if (A.length() == 0) continue;
} else if (pull[1].from < pull[1].to) {
B.end -= pull[1].count;
if (B.length() == 0) continue;
}
}
if (lessThan(items[B.end - 1], items[A.start])) {
// the two ranges are in reverse order, so a simple rotation should fix it
mem.rotate(T, items[A.start..B.end], A.length());
} else if (lessThan(items[A.end], items[A.end - 1])) {
// these two ranges weren't already in order, so we'll need to merge them!
var findA: usize = undefined;
// break the remainder of A into blocks. firstA is the uneven-sized first A block
var blockA = Range.init(A.start, A.end);
var firstA = Range.init(A.start, A.start + blockA.length() % block_size);
// swap the first value of each A block with the value in buffer1
var indexA = buffer1.start;
index = firstA.end;
while (index < blockA.end) : ({
indexA += 1;
index += block_size;
}) {
mem.swap(T, &items[indexA], &items[index]);
}
// start rolling the A blocks through the B blocks!
// whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well
var lastA = firstA;
var lastB = Range.init(0, 0);
var blockB = Range.init(B.start, B.start + math.min(block_size, B.length()));
blockA.start += firstA.length();
indexA = buffer1.start;
// if the first unevenly sized A block fits into the cache, copy it there for when we go to Merge it
// otherwise, if the second buffer is available, block swap the contents into that
if (lastA.length() <= cache.len) {
mem.copy(T, cache[0..], items[lastA.start..lastA.end]);
} else if (buffer2.length() > 0) {
blockSwap(T, items, lastA.start, buffer2.start, lastA.length());
}
if (blockA.length() > 0) {
while (true) {
// if there's a previous B block and the first value of the minimum A block is <= the last value of the previous B block,
// then drop that minimum A block behind. or if there are no B blocks left then keep dropping the remaining A blocks.
if ((lastB.length() > 0 and !lessThan(items[lastB.end - 1], items[indexA])) or blockB.length() == 0) {
// figure out where to split the previous B block, and rotate it at the split
const B_split = binaryFirst(T, items, items[indexA], lastB, lessThan);
const B_remaining = lastB.end - B_split;
// swap the minimum A block to the beginning of the rolling A blocks
var minA = blockA.start;
findA = minA + block_size;
while (findA < blockA.end) : (findA += block_size) {
if (lessThan(items[findA], items[minA])) {
minA = findA;
}
}
blockSwap(T, items, blockA.start, minA, block_size);
// swap the first item of the previous A block back with its original value, which is stored in buffer1
mem.swap(T, &items[blockA.start], &items[indexA]);
indexA += 1;
// locally merge the previous A block with the B values that follow it
// if lastA fits into the external cache we'll use that (with MergeExternal),
// or if the second internal buffer exists we'll use that (with MergeInternal),
// or failing that we'll use a strictly in-place merge algorithm (MergeInPlace)
if (lastA.length() <= cache.len) {
mergeExternal(T, items, lastA, Range.init(lastA.end, B_split), lessThan, cache[0..]);
} else if (buffer2.length() > 0) {
mergeInternal(T, items, lastA, Range.init(lastA.end, B_split), lessThan, buffer2);
} else {
mergeInPlace(T, items, lastA, Range.init(lastA.end, B_split), lessThan);
}
if (buffer2.length() > 0 or block_size <= cache.len) {
// copy the previous A block into the cache or buffer2, since that's where we need it to be when we go to merge it anyway
if (block_size <= cache.len) {
mem.copy(T, cache[0..], items[blockA.start .. blockA.start + block_size]);
} else {
blockSwap(T, items, blockA.start, buffer2.start, block_size);
}
// this is equivalent to rotating, but faster
// the area normally taken up by the A block is either the contents of buffer2, or data we don't need anymore since we memcopied it
// either way, we don't need to retain the order of those items, so instead of rotating we can just block swap B to where it belongs
blockSwap(T, items, B_split, blockA.start + block_size - B_remaining, B_remaining);
} else {
// we are unable to use the 'buffer2' trick to speed up the rotation operation since buffer2 doesn't exist, so perform a normal rotation
mem.rotate(T, items[B_split .. blockA.start + block_size], blockA.start - B_split);
}
// update the range for the remaining A blocks, and the range remaining from the B block after it was split
lastA = Range.init(blockA.start - B_remaining, blockA.start - B_remaining + block_size);
lastB = Range.init(lastA.end, lastA.end + B_remaining);
// if there are no more A blocks remaining, this step is finished!
blockA.start += block_size;
if (blockA.length() == 0) break;
} else if (blockB.length() < block_size) {
// move the last B block, which is unevenly sized, to before the remaining A blocks, by using a rotation
// the cache is disabled here since it might contain the contents of the previous A block
mem.rotate(T, items[blockA.start..blockB.end], blockB.start - blockA.start);
lastB = Range.init(blockA.start, blockA.start + blockB.length());
blockA.start += blockB.length();
blockA.end += blockB.length();
blockB.end = blockB.start;
} else {
// roll the leftmost A block to the end by swapping it with the next B block
blockSwap(T, items, blockA.start, blockB.start, block_size);
lastB = Range.init(blockA.start, blockA.start + block_size);
blockA.start += block_size;
blockA.end += block_size;
blockB.start += block_size;
if (blockB.end > B.end - block_size) {
blockB.end = B.end;
} else {
blockB.end += block_size;
}
}
}
}
// merge the last A block with the remaining B values
if (lastA.length() <= cache.len) {
mergeExternal(T, items, lastA, Range.init(lastA.end, B.end), lessThan, cache[0..]);
} else if (buffer2.length() > 0) {
mergeInternal(T, items, lastA, Range.init(lastA.end, B.end), lessThan, buffer2);
} else {
mergeInPlace(T, items, lastA, Range.init(lastA.end, B.end), lessThan);
}
}
}
// when we're finished with this merge step we should have the one or two internal buffers left over, where the second buffer is all jumbled up
// insertion sort the second buffer, then redistribute the buffers back into the items using the opposite process used for creating the buffer
// while an unstable sort like quicksort could be applied here, in benchmarks it was consistently slightly slower than a simple insertion sort,
// even for tens of millions of items. this may be because insertion sort is quite fast when the data is already somewhat sorted, like it is here
insertionSort(T, items[buffer2.start..buffer2.end], lessThan);
pull_index = 0;
while (pull_index < 2) : (pull_index += 1) {
var unique = pull[pull_index].count * 2;
if (pull[pull_index].from > pull[pull_index].to) {
// the values were pulled out to the left, so redistribute them back to the right
var buffer = Range.init(pull[pull_index].range.start, pull[pull_index].range.start + pull[pull_index].count);
while (buffer.length() > 0) {
index = findFirstForward(T, items, items[buffer.start], Range.init(buffer.end, pull[pull_index].range.end), lessThan, unique);
const amount = index - buffer.end;
mem.rotate(T, items[buffer.start..index], buffer.length());
buffer.start += (amount + 1);
buffer.end += amount;
unique -= 2;
}
} else if (pull[pull_index].from < pull[pull_index].to) {
// the values were pulled out to the right, so redistribute them back to the left
var buffer = Range.init(pull[pull_index].range.end - pull[pull_index].count, pull[pull_index].range.end);
while (buffer.length() > 0) {
index = findLastBackward(T, items, items[buffer.end - 1], Range.init(pull[pull_index].range.start, buffer.start), lessThan, unique);
const amount = buffer.start - index;
mem.rotate(T, items[index..buffer.end], amount);
buffer.start -= amount;
buffer.end -= (amount + 1);
unique -= 2;
}
}
}
}
// double the size of each A and B subarray that will be merged in the next level
if (!iterator.nextLevel()) break;
}
}
// merge operation without a buffer
fn mergeInPlace(comptime T: type, items: []T, A_arg: Range, B_arg: Range, lessThan: fn (T, T) bool) void {
if (A_arg.length() == 0 or B_arg.length() == 0) return;
// this just repeatedly binary searches into B and rotates A into position.
// the paper suggests using the 'rotation-based Hwang and Lin algorithm' here,
// but I decided to stick with this because it had better situational performance
//
// (Hwang and Lin is designed for merging subarrays of very different sizes,
// but WikiSort almost always uses subarrays that are roughly the same size)
//
// normally this is incredibly suboptimal, but this function is only called
// when none of the A or B blocks in any subarray contained 2√A unique values,
// which places a hard limit on the number of times this will ACTUALLY need
// to binary search and rotate.
//
// according to my analysis the worst case is √A rotations performed on √A items
// once the constant factors are removed, which ends up being O(n)
//
// again, this is NOT a general-purpose solution – it only works well in this case!
// kind of like how the O(n^2) insertion sort is used in some places
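    // A small illustrative trace: merging A = {3, 5} into B = {1, 2, 4, 6}
    // (items = {3, 5, 1, 2, 4, 6}), binaryFirst locates the insertion point of 3
    // just before 4, and the rotation turns {3, 5, 1, 2} into {1, 2, 3, 5}; the loop
    // then repeats with the shrunken A = {5} and B = {4, 6} until A is exhausted.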
var A = A_arg;
var B = B_arg;
while (true) {
// find the first place in B where the first item in A needs to be inserted
const mid = binaryFirst(T, items, items[A.start], B, lessThan);
// rotate A into place
const amount = mid - A.end;
mem.rotate(T, items[A.start..mid], A.length());
if (B.end == mid) break;
// calculate the new A and B ranges
B.start = mid;
A = Range.init(A.start + amount, B.start);
A.start = binaryLast(T, items, items[A.start], A, lessThan);
if (A.length() == 0) break;
}
}
// merge operation using an internal buffer
fn mergeInternal(comptime T: type, items: []T, A: Range, B: Range, lessThan: fn (T, T) bool, buffer: Range) void {
// whenever we find a value to add to the final array, swap it with the value that's already in that spot
// when this algorithm is finished, 'buffer' will contain its original contents, but in a different order
var A_count: usize = 0;
var B_count: usize = 0;
var insert: usize = 0;
if (B.length() > 0 and A.length() > 0) {
while (true) {
if (!lessThan(items[B.start + B_count], items[buffer.start + A_count])) {
mem.swap(T, &items[A.start + insert], &items[buffer.start + A_count]);
A_count += 1;
insert += 1;
if (A_count >= A.length()) break;
} else {
mem.swap(T, &items[A.start + insert], &items[B.start + B_count]);
B_count += 1;
insert += 1;
if (B_count >= B.length()) break;
}
}
}
// swap the remainder of A into the final array
blockSwap(T, items, buffer.start + A_count, A.start + insert, A.length() - A_count);
}
fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_size: usize) void {
var index: usize = 0;
while (index < block_size) : (index += 1) {
mem.swap(T, &items[start1 + index], &items[start2 + index]);
}
}
// combine a linear search with a binary search to reduce the number of comparisons in situations
// where we have some idea as to how many unique values there are and where the next value might be
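// For instance, with range.length() = 64 and unique = 8, skip is 8: the loop below probes
// every 8th item until it steps past `value`, and binaryFirst then narrows the answer
// within that final 8-item window.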
fn findFirstForward(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, @as(usize, 1));
var index = range.start + skip;
while (lessThan(items[index - 1], value)) : (index += skip) {
if (index >= range.end - skip) {
return binaryFirst(T, items, value, Range.init(index, range.end), lessThan);
}
}
return binaryFirst(T, items, value, Range.init(index - skip, index), lessThan);
}
fn findFirstBackward(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, @as(usize, 1));
var index = range.end - skip;
while (index > range.start and !lessThan(items[index - 1], value)) : (index -= skip) {
if (index < range.start + skip) {
return binaryFirst(T, items, value, Range.init(range.start, index), lessThan);
}
}
return binaryFirst(T, items, value, Range.init(index, index + skip), lessThan);
}
fn findLastForward(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, @as(usize, 1));
var index = range.start + skip;
while (!lessThan(value, items[index - 1])) : (index += skip) {
if (index >= range.end - skip) {
return binaryLast(T, items, value, Range.init(index, range.end), lessThan);
}
}
return binaryLast(T, items, value, Range.init(index - skip, index), lessThan);
}
fn findLastBackward(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, @as(usize, 1));
var index = range.end - skip;
while (index > range.start and lessThan(value, items[index - 1])) : (index -= skip) {
if (index < range.start + skip) {
return binaryLast(T, items, value, Range.init(range.start, index), lessThan);
}
}
return binaryLast(T, items, value, Range.init(index, index + skip), lessThan);
}
fn binaryFirst(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
while (start < end) {
const mid = start + (end - start) / 2;
if (lessThan(items[mid], value)) {
start = mid + 1;
} else {
end = mid;
}
}
if (start == range.end - 1 and lessThan(items[start], value)) {
start += 1;
}
return start;
}
fn binaryLast(comptime T: type, items: []T, value: T, range: Range, lessThan: fn (T, T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
while (start < end) {
const mid = start + (end - start) / 2;
if (!lessThan(value, items[mid])) {
start = mid + 1;
} else {
end = mid;
}
}
if (start == range.end - 1 and !lessThan(value, items[start])) {
start += 1;
}
return start;
}
fn mergeInto(comptime T: type, from: []T, A: Range, B: Range, lessThan: fn (T, T) bool, into: []T) void {
var A_index: usize = A.start;
var B_index: usize = B.start;
const A_last = A.end;
const B_last = B.end;
var insert_index: usize = 0;
while (true) {
if (!lessThan(from[B_index], from[A_index])) {
into[insert_index] = from[A_index];
A_index += 1;
insert_index += 1;
if (A_index == A_last) {
// copy the remainder of B into the final array
mem.copy(T, into[insert_index..], from[B_index..B_last]);
break;
}
} else {
into[insert_index] = from[B_index];
B_index += 1;
insert_index += 1;
if (B_index == B_last) {
// copy the remainder of A into the final array
mem.copy(T, into[insert_index..], from[A_index..A_last]);
break;
}
}
}
}
fn mergeExternal(comptime T: type, items: []T, A: Range, B: Range, lessThan: fn (T, T) bool, cache: []T) void {
// A fits into the cache, so use that instead of the internal buffer
var A_index: usize = 0;
var B_index: usize = B.start;
var insert_index: usize = A.start;
const A_last = A.length();
const B_last = B.end;
if (B.length() > 0 and A.length() > 0) {
while (true) {
if (!lessThan(items[B_index], cache[A_index])) {
items[insert_index] = cache[A_index];
A_index += 1;
insert_index += 1;
if (A_index == A_last) break;
} else {
items[insert_index] = items[B_index];
B_index += 1;
insert_index += 1;
if (B_index == B_last) break;
}
}
}
// copy the remainder of A into the final array
mem.copy(T, items[insert_index..], cache[A_index..A_last]);
}
fn swap(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool, order: *[8]u8, x: usize, y: usize) void {
if (lessThan(items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(items[x], items[y]))) {
mem.swap(T, &items[x], &items[y]);
mem.swap(u8, &(order.*)[x], &(order.*)[y]);
}
}
// Use these to generate a comparator function for a given type. e.g. `sort(u8, slice, asc(u8))`.
pub fn asc(comptime T: type) fn (T, T) bool {
const impl = struct {
fn inner(a: T, b: T) bool {
return a < b;
}
};
return impl.inner;
}
pub fn desc(comptime T: type) fn (T, T) bool {
const impl = struct {
fn inner(a: T, b: T) bool {
return a > b;
}
};
return impl.inner;
}
test "stable sort" {
testStableSort();
comptime testStableSort();
}
fn testStableSort() void {
var expected = [_]IdAndValue{
IdAndValue{ .id = 0, .value = 0 },
IdAndValue{ .id = 1, .value = 0 },
IdAndValue{ .id = 2, .value = 0 },
IdAndValue{ .id = 0, .value = 1 },
IdAndValue{ .id = 1, .value = 1 },
IdAndValue{ .id = 2, .value = 1 },
IdAndValue{ .id = 0, .value = 2 },
IdAndValue{ .id = 1, .value = 2 },
IdAndValue{ .id = 2, .value = 2 },
};
var cases = [_][9]IdAndValue{
[_]IdAndValue{
IdAndValue{ .id = 0, .value = 0 },
IdAndValue{ .id = 0, .value = 1 },
IdAndValue{ .id = 0, .value = 2 },
IdAndValue{ .id = 1, .value = 0 },
IdAndValue{ .id = 1, .value = 1 },
IdAndValue{ .id = 1, .value = 2 },
IdAndValue{ .id = 2, .value = 0 },
IdAndValue{ .id = 2, .value = 1 },
IdAndValue{ .id = 2, .value = 2 },
},
[_]IdAndValue{
IdAndValue{ .id = 0, .value = 2 },
IdAndValue{ .id = 0, .value = 1 },
IdAndValue{ .id = 0, .value = 0 },
IdAndValue{ .id = 1, .value = 2 },
IdAndValue{ .id = 1, .value = 1 },
IdAndValue{ .id = 1, .value = 0 },
IdAndValue{ .id = 2, .value = 2 },
IdAndValue{ .id = 2, .value = 1 },
IdAndValue{ .id = 2, .value = 0 },
},
};
for (cases) |*case| {
insertionSort(IdAndValue, (case.*)[0..], cmpByValue);
for (case.*) |item, i| {
testing.expect(item.id == expected[i].id);
testing.expect(item.value == expected[i].value);
}
}
}
const IdAndValue = struct {
id: usize,
value: i32,
};
fn cmpByValue(a: IdAndValue, b: IdAndValue) bool {
return asc(i32)(a.value, b.value);
}
test "std.sort" {
const u8cases = [_][]const []const u8{
[_][]const u8{
"",
"",
},
[_][]const u8{
"a",
"a",
},
[_][]const u8{
"az",
"az",
},
[_][]const u8{
"za",
"az",
},
[_][]const u8{
"asdf",
"adfs",
},
[_][]const u8{
"one",
"eno",
},
};
for (u8cases) |case| {
var buf: [8]u8 = undefined;
const slice = buf[0..case[0].len];
mem.copy(u8, slice, case[0]);
sort(u8, slice, asc(u8));
testing.expect(mem.eql(u8, slice, case[1]));
}
const i32cases = [_][]const []const i32{
[_][]const i32{
[_]i32{},
[_]i32{},
},
[_][]const i32{
[_]i32{1},
[_]i32{1},
},
[_][]const i32{
[_]i32{ 0, 1 },
[_]i32{ 0, 1 },
},
[_][]const i32{
[_]i32{ 1, 0 },
[_]i32{ 0, 1 },
},
[_][]const i32{
[_]i32{ 1, -1, 0 },
[_]i32{ -1, 0, 1 },
},
[_][]const i32{
[_]i32{ 2, 1, 3 },
[_]i32{ 1, 2, 3 },
},
};
for (i32cases) |case| {
var buf: [8]i32 = undefined;
const slice = buf[0..case[0].len];
mem.copy(i32, slice, case[0]);
sort(i32, slice, asc(i32));
testing.expect(mem.eql(i32, slice, case[1]));
}
}
test "std.sort descending" {
const rev_cases = [_][]const []const i32{
[_][]const i32{
[_]i32{},
[_]i32{},
},
[_][]const i32{
[_]i32{1},
[_]i32{1},
},
[_][]const i32{
[_]i32{ 0, 1 },
[_]i32{ 1, 0 },
},
[_][]const i32{
[_]i32{ 1, 0 },
[_]i32{ 1, 0 },
},
[_][]const i32{
[_]i32{ 1, -1, 0 },
[_]i32{ 1, 0, -1 },
},
[_][]const i32{
[_]i32{ 2, 1, 3 },
[_]i32{ 3, 2, 1 },
},
};
for (rev_cases) |case| {
var buf: [8]i32 = undefined;
const slice = buf[0..case[0].len];
mem.copy(i32, slice, case[0]);
sort(i32, slice, desc(i32));
testing.expect(mem.eql(i32, slice, case[1]));
}
}
test "another sort case" {
var arr = [_]i32{ 5, 3, 1, 2, 4 };
sort(i32, arr[0..], asc(i32));
testing.expect(mem.eql(i32, arr, [_]i32{ 1, 2, 3, 4, 5 }));
}
test "sort fuzz testing" {
var prng = std.rand.DefaultPrng.init(0x12345678);
const test_case_count = 10;
var i: usize = 0;
while (i < test_case_count) : (i += 1) {
fuzzTest(&prng.random);
}
}
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
fn fuzzTest(rng: *std.rand.Random) void {
const array_size = rng.range(usize, 0, 1000);
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var array = fixed_allocator.allocator.alloc(IdAndValue, array_size) catch unreachable;
// populate with random data
for (array) |*item, index| {
item.id = index;
item.value = rng.range(i32, 0, 100);
}
sort(IdAndValue, array, cmpByValue);
var index: usize = 1;
while (index < array.len) : (index += 1) {
if (array[index].value == array[index - 1].value) {
testing.expect(array[index].id > array[index - 1].id);
} else {
testing.expect(array[index].value > array[index - 1].value);
}
}
}
pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool) T {
var smallest = items[0];
for (items[1..]) |item| {
if (lessThan(item, smallest)) {
smallest = item;
}
}
return smallest;
}
pub fn max(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool) T {
var biggest = items[0];
for (items[1..]) |item| {
if (lessThan(biggest, item)) {
biggest = item;
}
}
return biggest;
} | lib/std/sort.zig |
const std = @import("std");
const mem = std.mem;
const Allocator = mem.Allocator;
const bog = @import("bog.zig");
const Vm = bog.Vm;
const Module = bog.Module;
const util = @import("util.zig");
pub const Type = enum(u8) {
none = 0,
int,
num,
bool,
str,
tuple,
map,
list,
err,
range,
func,
tagged,
/// pseudo type user should not have access to via valid bytecode
iterator,
/// native being separate from .func is an implementation detail
native,
_,
};
pub const Value = union(Type) {
tuple: []*Value,
map: Map,
list: List,
err: *Value,
int: i64,
num: f64,
range: struct {
start: i64 = 0,
end: i64 = std.math.maxInt(i64),
step: i64 = 1,
},
str: String,
func: struct {
/// offset to the functions first instruction
offset: u32,
arg_count: u8,
/// module in which this function exists
module: *Module,
captures: []*Value,
},
native: Native,
tagged: struct {
name: []const u8,
value: *Value,
},
iterator: struct {
value: *Value,
i: packed union {
u: usize,
i: i64,
} = .{ .u = 0 },
pub fn next(iter: *@This(), vm: *Vm, res: *?*Value) !void {
switch (iter.value.*) {
.tuple => |tuple| {
if (iter.i.u == tuple.len) {
res.* = &Value.None;
return;
}
res.* = tuple[iter.i.u];
iter.i.u += 1;
},
.list => |*list| {
if (iter.i.u == list.items.len) {
res.* = &Value.None;
return;
}
res.* = list.items[iter.i.u];
iter.i.u += 1;
},
.str => |*str| {
if (iter.i.u == str.data.len) {
res.* = &Value.None;
return;
}
if (res.* == null)
res.* = try vm.gc.alloc();
const cp_len = std.unicode.utf8ByteSequenceLength(str.data[iter.i.u]) catch
return vm.fatal("invalid utf-8 sequence");
iter.i.u += cp_len;
res.*.?.* = .{
.str = .{
.data = str.data[iter.i.u - cp_len .. iter.i.u],
},
};
},
.map => |*map| {
if (iter.i.u == map.entries.items.len) {
res.* = &Value.None;
return;
}
if (res.* == null)
res.* = try vm.gc.alloc();
if (iter.i.u == 0) {
res.*.?.* = .{ .tuple = try vm.gc.gpa.alloc(*Value, 2) };
}
const e = &map.entries.items[iter.i.u];
const t = res.*.?.tuple;
// removing `const` on `Map` causes dependency loop??
t[0] = @intToPtr(*Value, @ptrToInt(e.key));
t[1] = e.value;
iter.i.u += 1;
},
.range => {
if (iter.i.i >= iter.value.range.end) {
res.* = &Value.None;
return;
}
if (res.* == null)
res.* = try vm.gc.alloc();
res.*.?.* = .{
.int = iter.i.i,
};
iter.i.i += iter.value.range.step;
},
else => unreachable,
}
}
},
/// always memoized
bool: bool,
none,
pub const String = @import("String.zig");
pub const Map = std.array_hash_map.ArrayHashMapUnmanaged(*const Value, *Value, hash, eql, true);
pub const List = std.ArrayListUnmanaged(*Value);
pub const Native = struct {
arg_count: u8,
func: fn (*Vm, []*Value) Vm.Error!*Value,
};
pub var None = Value{ .none = {} };
pub var True = Value{ .bool = true };
pub var False = Value{ .bool = false };
pub fn string(data: anytype) Value {
return switch (@TypeOf(data)) {
[]u8 => .{
.str = .{
.data = data,
.capacity = data.len,
},
},
else => .{
.str = .{
.data = data,
},
},
};
}
/// Frees any extra memory allocated by value.
/// Does not free values recursively.
pub fn deinit(value: *Value, allocator: *Allocator) void {
switch (value.*) {
.bool, .none => return,
.int, .num, .native, .tagged, .range, .iterator, .err => {},
.tuple => |t| allocator.free(t),
.map => |*m| m.deinit(allocator),
.list => |*l| l.deinit(allocator),
.str => |*s| s.deinit(allocator),
.func => |*f| allocator.free(f.captures),
}
value.* = undefined;
}
pub fn hash(key: *const Value) u32 {
const autoHash = std.hash.autoHash;
var hasher = std.hash.Wyhash.init(0);
autoHash(&hasher, @as(Type, key.*));
switch (key.*) {
.iterator => unreachable,
.none => {},
.int => |int| autoHash(&hasher, int),
.num => {},
.bool => |b| autoHash(&hasher, b),
.str => |*str| hasher.update(str.data),
.tuple => |tuple| {
autoHash(&hasher, tuple.len);
autoHash(&hasher, tuple.ptr);
},
.map => |*map| {
autoHash(&hasher, map.items().len);
autoHash(&hasher, map.items().ptr);
autoHash(&hasher, map.index_header);
},
.list => |*list| {
autoHash(&hasher, list.items.len);
autoHash(&hasher, list.items.ptr);
},
.err => |err| autoHash(&hasher, @as(Type, err.*)),
.range => |*range| {
autoHash(&hasher, range.start);
autoHash(&hasher, range.end);
autoHash(&hasher, range.step);
},
.func => |*func| {
autoHash(&hasher, func.offset);
autoHash(&hasher, func.arg_count);
autoHash(&hasher, func.module);
},
.native => |*func| {
autoHash(&hasher, func.arg_count);
autoHash(&hasher, func.func);
},
.tagged => |*tagged| {
hasher.update(tagged.name);
autoHash(&hasher, tagged.value);
},
}
return @truncate(u32, hasher.final());
}
pub fn eql(a: *const Value, b: *const Value) bool {
switch (a.*) {
.int => |i| return switch (b.*) {
.int => |b_val| i == b_val,
.num => |b_val| @intToFloat(f64, i) == b_val,
else => false,
},
.num => |n| return switch (b.*) {
.int => |b_val| n == @intToFloat(f64, b_val),
.num => |b_val| n == b_val,
else => false,
},
else => if (a.* != @as(@TagType(@TypeOf(b.*)), b.*)) return false,
}
return switch (a.*) {
.iterator, .int, .num => unreachable,
.none => true,
.bool => |bool_val| bool_val == b.bool,
.str => |s| s.eql(b.str),
.tuple => |t| {
const b_val = b.tuple;
if (t.len != b_val.len) return false;
for (t) |v, i| {
if (!v.eql(b_val[i])) return false;
}
return true;
},
.map => |*m| @panic("TODO eql for maps"),
.list => |*l| {
if (l.items.len != b.list.items.len) return false;
for (l.items) |v, i| {
if (!v.eql(b.list.items[i])) return false;
}
return true;
},
.err => |e| e.eql(b.err),
.range => |*r| {
return r.start == b.range.start and
r.end == b.range.end and
r.step == b.range.step;
},
.func => |*f| {
return f.offset == b.func.offset and
f.arg_count == b.func.arg_count and
f.module == b.func.module;
},
.native => |*n| n.func == b.native.func,
.tagged => |*t| {
if (!mem.eql(u8, t.name, b.tagged.name)) return false;
return t.value.eql(b.tagged.value);
},
};
}
/// Prints string representation of value to writer
pub fn dump(value: *const Value, writer: anytype, level: u32) @TypeOf(writer).Error!void {
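        // For example, dumping the list [1, [2]] with level 1 yields "[1, [...]]",
        // while level 0 abbreviates the whole value to "[...]".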
switch (value.*) {
.iterator => unreachable,
.int => |i| try writer.print("{}", .{i}),
.num => |n| try writer.print("{d}", .{n}),
.bool => |b| try writer.writeAll(if (b) "true" else "false"),
.none => try writer.writeAll("()"),
.range => |*r| {
try writer.print("{}:{}:{}", .{ r.start, r.end, r.step });
},
.tuple => |t| {
if (level == 0) {
try writer.writeAll("(...)");
} else {
try writer.writeByte('(');
for (t) |v, i| {
if (i != 0) try writer.writeAll(", ");
try v.dump(writer, level - 1);
}
try writer.writeByte(')');
}
},
.map => |*m| {
if (level == 0) {
try writer.writeAll("{...}");
} else {
try writer.writeByte('{');
for (m.items()) |*entry, i| {
if (i != 0)
try writer.writeAll(", ");
try entry.key.dump(writer, level - 1);
try writer.writeAll(": ");
try entry.value.dump(writer, level - 1);
}
try writer.writeByte('}');
}
},
.list => |*l| {
if (level == 0) {
try writer.writeAll("[...]");
} else {
try writer.writeByte('[');
for (l.items) |v, i| {
if (i != 0) try writer.writeAll(", ");
try v.dump(writer, level - 1);
}
try writer.writeByte(']');
}
},
.err => |e| {
if (level == 0) {
try writer.writeAll("error(...)");
} else {
try writer.writeAll("error(");
try e.dump(writer, level - 1);
try writer.writeByte(')');
}
},
.str => |s| try s.dump(writer),
.func => |*f| {
try writer.print("fn({})@0x{X}[{}]", .{ f.arg_count, f.offset, f.captures.len });
},
.native => |*n| {
try writer.print("native({})@0x{}", .{ n.arg_count, @ptrToInt(n.func) });
},
.tagged => |*t| {
try writer.print("@{}", .{t.name});
if (level == 0) {
try writer.writeAll("(...)");
} else {
try t.value.dump(writer, level - 1);
}
},
}
}
/// Returns value in `container` at `index`.
pub fn get(container: *const Value, vm: *Vm, index: *const Value, res: *?*Value) Vm.Error!void {
switch (container.*) {
.tuple => |tuple| switch (index.*) {
.int => {
var i = index.int;
if (i < 0)
i += @intCast(i64, tuple.len);
if (i < 0 or i >= tuple.len)
return vm.fatal("index out of bounds");
res.* = tuple[@intCast(u32, i)];
},
.range => return vm.fatal("TODO get with ranges"),
.str => |*s| {
if (res.* == null) {
res.* = try vm.gc.alloc();
}
if (mem.eql(u8, s.data, "len")) {
res.*.?.* = .{ .int = @intCast(i64, tuple.len) };
} else {
return vm.fatal("no such property");
}
},
else => return vm.fatal("invalid index type"),
},
.list => |*list| switch (index.*) {
.int => {
var i = index.int;
if (i < 0)
i += @intCast(i64, list.items.len);
if (i < 0 or i >= list.items.len)
return vm.fatal("index out of bounds");
res.* = list.items[@intCast(u32, i)];
},
.range => return vm.fatal("TODO get with ranges"),
.str => |*s| {
if (res.* == null) {
res.* = try vm.gc.alloc();
}
if (mem.eql(u8, s.data, "len")) {
res.*.?.* = .{ .int = @intCast(i64, list.items.len) };
} else if (mem.eql(u8, s.data, "append")) {
res.* = try zigToBog(vm, struct {
fn append(_vm: *Vm, val: *Value) !void {
if (_vm.last_get.* != .list)
return _vm.fatal("expected list");
try _vm.last_get.list.append(_vm.gc.gpa, try _vm.gc.dupe(val));
}
}.append);
} else {
return vm.fatal("no such property");
}
},
else => return vm.fatal("invalid index type"),
},
.map => |*map| {
res.* = map.get(index) orelse
return vm.fatal("TODO better handling undefined key");
},
.str => |*str| return str.get(vm, index, res),
.iterator => unreachable,
.range => |*r| switch (index.*) {
.str => |*s| {
if (res.* == null) {
res.* = try vm.gc.alloc();
}
if (mem.eql(u8, s.data, "start")) {
res.*.?.* = .{ .int = r.start };
} else if (mem.eql(u8, s.data, "end")) {
res.*.?.* = .{ .int = r.end };
} else if (mem.eql(u8, s.data, "step")) {
res.*.?.* = .{ .int = r.step };
} else {
return vm.fatal("no such property");
}
},
else => return vm.fatal("invalid index type"),
},
else => return vm.fatal("invalid subscript type"),
}
}
    /// Sets `index` of `container` to `new_val`. A shallow copy of the stored value is made.
pub fn set(container: *Value, vm: *Vm, index: *const Value, new_val: *const Value) Vm.Error!void {
switch (container.*) {
.tuple => |tuple| if (index.* == .int) {
var i = index.int;
if (i < 0)
i += @intCast(i64, tuple.len);
if (i < 0 or i >= tuple.len)
return vm.fatal("index out of bounds");
tuple[@intCast(u32, i)] = try vm.gc.dupe(new_val);
} else {
return vm.fatal("TODO set with ranges");
},
.map => |*map| {
_ = try map.put(vm.gc.gpa, try vm.gc.dupe(index), try vm.gc.dupe(new_val));
},
.list => |*list| if (index.* == .int) {
var i = index.int;
if (i < 0)
i += @intCast(i64, list.items.len);
if (i < 0 or i >= list.items.len)
return vm.fatal("index out of bounds");
list.items[@intCast(u32, i)] = try vm.gc.dupe(new_val);
} else {
return vm.fatal("TODO set with ranges");
},
.str => |*str| try str.set(vm, index, new_val),
.iterator => unreachable,
else => return vm.fatal("invalid subscript type"),
}
}
/// `type_id` must be valid and cannot be .err, .range, .func or .native
pub fn as(val: *Value, vm: *Vm, type_id: Type) Vm.Error!*Value {
if (type_id == .none) {
return &Value.None;
}
if (val.* == type_id) {
return val;
}
if (val.* == .str) {
return val.str.as(vm, type_id);
}
if (type_id == .bool) {
const bool_res = switch (val.*) {
.int => |int| int != 0,
.num => |num| num != 0,
.bool => unreachable,
.str => unreachable,
else => return vm.errorFmt("cannot cast {} to bool", .{@tagName(val.*)}),
};
return if (bool_res) &Value.True else &Value.False;
} else if (type_id == .str) {
return String.from(val, vm);
}
const new_val = try vm.gc.alloc();
new_val.* = switch (type_id) {
.int => .{
.int = switch (val.*) {
.int => unreachable,
.num => |num| @floatToInt(i64, num),
.bool => |b| @boolToInt(b),
.str => unreachable,
else => return vm.errorFmt("cannot cast {} to int", .{@tagName(val.*)}),
},
},
.num => .{
.num = switch (val.*) {
.num => unreachable,
.int => |int| @intToFloat(f64, int),
.bool => |b| @intToFloat(f64, @boolToInt(b)),
.str => unreachable,
else => return vm.errorFmt("cannot cast {} to num", .{@tagName(val.*)}),
},
},
.str, .bool, .none => unreachable,
.tuple,
.map,
.list,
=> return vm.fatal("TODO more casts"),
else => unreachable,
};
return new_val;
}
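    /// Returns whether `val` is of type `type_id`; native functions also count as `func`.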
pub fn is(val: *const Value, type_id: Type) bool {
if (val.* == type_id) return true;
if (type_id == .func and val.* == .native) return true;
return false;
}
/// Returns whether `container` has `val` in it.
pub fn in(val: *const Value, container: *const Value) bool {
switch (container.*) {
.str => |*str| return str.in(val),
.tuple => |tuple| {
for (tuple) |v| {
if (v.eql(val)) return true;
}
return false;
},
.list => |*list| {
for (list.items) |v| {
if (v.eql(val)) return true;
}
return false;
},
.map => |*map| return map.contains(val),
.range => |*r| {
if (val.* != .int) return false;
const int = val.int;
if (int < r.start or int > r.end) return false;
if (@rem(int - r.start, r.step) != 0) return false;
return true;
},
.iterator => unreachable,
else => unreachable,
}
}
pub fn iterator(val: *const Value, vm: *Vm) Vm.Error!*Value {
var start: ?i64 = null;
switch (val.*) {
.range => |*r| start = r.start,
.str, .tuple, .list, .map => {},
.iterator => unreachable,
else => return vm.errorFmt("cannot iterate {}", .{@tagName(val.*)}),
}
const iter = try vm.gc.alloc();
iter.* = .{
.iterator = .{
.value = try vm.gc.dupe(val),
},
};
if (start) |some| iter.iterator.i.i = some;
return iter;
}
/// Converts Zig value to Bog value. Allocates copy in the gc.
pub fn zigToBog(vm: *Vm, val: anytype) Vm.Error!*Value {
if (comptime std.meta.trait.hasFn("intoBog")(@TypeOf(val))) {
return try val.intoBog(vm);
}
switch (@TypeOf(val)) {
void => return &Value.None,
bool => return if (val) &Value.True else &Value.False,
*Value => return val,
Value => {
const ret = try vm.gc.alloc();
ret.* = val;
return ret;
},
[]const u8, []u8 => {
// assume val was allocated with vm.gc
const str = try vm.gc.alloc();
str.* = Value.string(val);
return str;
},
String => {
const str = try vm.gc.alloc();
str.* = Value{ .str = val };
return str;
},
type => switch (@typeInfo(val)) {
.Struct => |info| {
var map = Value.Map{};
errdefer map.deinit(vm.gc.gpa);
comptime var pub_decls = 0;
inline for (info.decls) |decl| {
if (decl.is_pub) pub_decls += 1;
}
try map.ensureCapacity(vm.gc.gpa, pub_decls);
inline for (info.decls) |decl| {
if (!decl.is_pub) continue;
// skip common interfaces
if (comptime std.mem.eql(u8, decl.name, "intoBog")) continue;
if (comptime std.mem.eql(u8, decl.name, "fromBog")) continue;
if (comptime std.mem.eql(u8, decl.name, "format")) continue;
const key = try vm.gc.alloc();
key.* = Value.string(decl.name);
const value = try zigToBog(vm, @field(val, decl.name));
map.putAssumeCapacityNoClobber(key, value);
}
const ret = try vm.gc.alloc();
ret.* = .{
.map = map,
};
return ret;
},
else => @compileError("unsupported type: " ++ @typeName(val)),
},
else => switch (@typeInfo(@TypeOf(val))) {
.Pointer => |info| {
if (info.size == .Slice) @compileError("unsupported type: " ++ @typeName(val));
const int = try vm.gc.alloc();
int.* = .{
.int = @bitCast(isize, @ptrToInt(val)),
};
return int;
},
.Fn => {
const native = try vm.gc.alloc();
native.* = .{
.native = wrapZigFunc(val),
};
return native;
},
.ComptimeInt, .Int => {
const int = try vm.gc.alloc();
int.* = .{
// try to implicit cast the value
.int = val,
};
return int;
},
.ComptimeFloat, .Float => {
const num = try vm.gc.alloc();
num.* = .{
// try to implicit cast the value
.num = val,
};
return num;
},
.ErrorUnion => if (val) |some| {
return zigToBog(vm, some);
} else |e| {
// capture runtime errors
if (@as(anyerror, e) == error.RuntimeError)
return error.RuntimeError;
// wrap error string
const str = try vm.gc.alloc();
str.* = Value.string(@errorName(e));
const err = try vm.gc.alloc();
err.* = .{ .err = str };
return err;
},
.Enum => {
const tag = try vm.gc.alloc();
tag.* = .{
.tagged = .{
.name = @tagName(val),
.value = &None,
},
};
return tag;
},
.Optional => if (val) |some| {
return zigToBog(vm, some);
} else {
return &Value.None;
},
else => @compileError("unsupported type: " ++ @typeName(@TypeOf(val))),
},
}
}
/// Converts Bog value to Zig value. Returned string is invalidated
/// on next garbage collection.
pub fn bogToZig(val: *Value, comptime T: type, vm: *Vm) Vm.Error!T {
if (comptime std.meta.trait.hasFn("fromBog")(T)) {
return try T.fromBog(val, vm);
}
return switch (T) {
void => {
if (val.* != .none)
return vm.fatal("expected none");
},
bool => {
if (val.* != .bool)
return vm.fatal("expected bool");
return val.bool;
},
[]const u8 => {
if (val.* != .str)
return vm.fatal("expected string");
return val.str.data;
},
*Map, *const Map => {
if (val.* != .map)
return vm.fatal("expected map");
return &val.map;
},
*Vm => vm,
*Value, *const Value => val,
Value => return val.*,
String => {
if (val.* != .str)
return vm.fatal("expected string");
return val.str;
},
[]*Value, []const *Value, []*const Value, []const *const Value => {
switch (val.*) {
.tuple => |t| return t,
.list => |*l| return l.items,
else => return vm.fatal("expected a list or a tuple"),
}
},
else => switch (@typeInfo(T)) {
.Int => if (val.* == .int) blk: {
if (val.int < std.math.minInt(T) or val.int > std.math.maxInt(T))
return vm.fatal("cannot fit int in desired type");
break :blk @intCast(T, val.int);
} else if (val.* == .num)
@floatToInt(T, val.num)
else
return vm.fatal("expected int"),
.Float => |info| switch (info.bits) {
32 => if (val.* == .num)
@floatCast(f32, val.num)
else if (val.* == .int)
@intToFloat(f32, val.int)
else
return vm.fatal("expected num"),
64 => if (val.* == .num)
val.num
else if (val.* == .int)
@intToFloat(f64, val.int)
else
return vm.fatal("expected num"),
else => @compileError("unsupported float"),
},
.Enum => {
if (val.* != .tagged)
return vm.fatal("expected tag");
const e = std.meta.stringToEnum(T, val.tagged.name) orelse
return vm.fatal("no value by such name");
if (val.tagged.value.* != .none)
return vm.fatal("expected no value");
return e;
},
else => @compileError("unsupported type: " ++ @typeName(T)),
},
};
}
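    /// Serializes the value as JSON. `none` becomes `null`, and types without a
    /// JSON equivalent (func, native, range, err, tagged) are emitted as quoted
    /// `dump` output.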
pub fn jsonStringify(val: Value, options: std.json.StringifyOptions, writer: anytype) @TypeOf(writer).Error!void {
switch (val) {
.none => try writer.writeAll("null"),
.tuple => |t| {
try writer.writeByte('[');
for (t) |e, i| {
if (i != 0) try writer.writeByte(',');
try e.jsonStringify(options, writer);
}
try writer.writeByte(']');
},
.list => |*l| {
try writer.writeByte('[');
for (l.items) |e, i| {
if (i != 0) try writer.writeByte(',');
try e.jsonStringify(options, writer);
}
try writer.writeByte(']');
},
.map => |*m| {
try writer.writeByte('{');
for (m.items()) |*entry, i| {
if (i != 0)
try writer.writeAll(", ");
try entry.key.jsonStringify(options, writer);
try writer.writeAll(":");
try entry.value.jsonStringify(options, writer);
}
try writer.writeByte('}');
},
.int,
.num,
.bool,
=> try val.dump(writer, 0),
.str => |s| {
try writer.print("\"{Z}\"", .{s.data});
},
.native,
.func,
.range,
.err,
.tagged,
=> {
try writer.writeByte('\"');
try val.dump(writer, 0);
try writer.writeByte('\"');
},
.iterator => unreachable,
}
}
};
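/// Wraps a Zig function as a `Value.Native`. `*Vm` parameters receive the VM itself;
/// every other parameter is converted from the corresponding Bog argument via
/// `bogToZig`, and the return value is converted back with `zigToBog`.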
fn wrapZigFunc(func: anytype) Value.Native {
const Fn = @typeInfo(@TypeOf(func)).Fn;
if (Fn.is_generic or Fn.is_var_args)
@compileError("unsupported function");
@setEvalBranchQuota(Fn.args.len * 1000);
const S = struct {
// cannot directly use `func` so declare a pointer to it
var _func: @TypeOf(func) = undefined;
fn native(vm: *Vm, bog_args: []*Value) Vm.Error!*Value {
var args: std.meta.ArgsTuple(@TypeOf(_func)) = undefined;
comptime var bog_arg_i: u8 = 0;
inline for (Fn.args) |arg, i| {
if (arg.arg_type.? == *Vm) {
args[i] = vm;
} else {
args[i] = try bog_args[bog_arg_i].bogToZig(arg.arg_type.?, vm);
bog_arg_i += 1;
}
}
return Value.zigToBog(vm, @call(.{}, _func, args));
}
};
S._func = func;
// TODO can't use bog_arg_i due to a stage1 bug.
comptime var bog_arg_count = 0;
comptime {
for (Fn.args) |arg| {
if (arg.arg_type != *Vm) bog_arg_count += 1;
}
}
return .{
.arg_count = bog_arg_count,
.func = S.native,
};
}
var buffer: [1024]u8 = undefined;
fn testDump(val: Value, expected: []const u8) void {
var fbs = std.io.fixedBufferStream(&buffer);
val.dump(fbs.writer(), 4) catch @panic("test failed");
std.testing.expectEqualStrings(expected, fbs.getWritten());
}
test "dump int/num" {
var int = Value{
.int = 2,
};
testDump(int, "2");
var num = Value{
.num = 2.5,
};
testDump(num, "2.5");
}
test "dump error" {
var int = Value{
.int = 2,
};
var err = Value{
.err = &int,
};
testDump(err, "error(2)");
} | src/value.zig |
const std = @import("std");
const system = std.os.linux;
const ArrayList = std.ArrayList;
const Candidate = filter.Candidate;
const File = std.fs.File;
const filter = @import("filter.zig");
// Select Graphic Rendition (SGR) attributes
const Attribute = enum(u8) {
RESET = 0,
REVERSE = 7,
FG_CYAN = 36,
FG_DEFAULT = 39,
};
pub const Terminal = struct {
tty: File,
writer: File.Writer,
termios: std.os.termios,
raw_termios: std.os.termios,
height: usize = undefined,
max_height: usize,
pub fn init(max_height: usize) !Terminal {
var tty = try std.fs.openFileAbsolute("/dev/tty", .{ .read = true, .write = true });
// store original terminal settings to restore later
var termios = try std.os.tcgetattr(tty.handle);
var raw_termios = termios;
raw_termios.iflag &= ~@as(u32, system.ICRNL);
raw_termios.lflag &= ~@as(u32, system.ICANON | system.ECHO | system.ISIG);
try std.os.tcsetattr(tty.handle, .NOW, raw_termios);
return Terminal{
.tty = tty,
.writer = tty.writer(),
.termios = termios,
.raw_termios = raw_termios,
.max_height = max_height,
};
}
pub fn nodelay(self: *Terminal, state: bool) void {
self.raw_termios.cc[system.V.MIN] = if (state) 0 else 1;
std.os.tcsetattr(self.tty.handle, .NOW, self.raw_termios) catch unreachable;
}
pub fn deinit(self: *Terminal) void {
std.os.tcsetattr(self.tty.handle, .NOW, self.termios) catch return;
self.tty.close();
}
pub fn determineHeight(self: *Terminal) void {
const win_size = self.windowSize();
self.height = std.math.clamp(self.max_height, 1, win_size.?.y - 1);
}
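    /// Writes a single ANSI CSI escape sequence of the form ESC '[' <arg> <final byte>.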
fn write(self: *Terminal, args: anytype) void {
self.writer.print("\x1b[{d}{c}", args) catch unreachable;
}
fn writeBytes(self: *Terminal, bytes: []const u8) void {
_ = std.os.write(self.tty.handle, bytes) catch unreachable;
}
pub fn clearLine(self: *Terminal) void {
self.cursorCol(1);
self.write(.{ 2, 'K' });
}
pub fn scrollDown(self: *Terminal, num: usize) void {
var i: usize = 0;
while (i < num) : (i += 1) {
_ = self.writer.write("\n") catch unreachable;
}
}
pub fn cursorUp(self: *Terminal, num: usize) void {
self.write(.{ num, 'A' });
}
pub fn cursorDown(self: *Terminal, num: usize) void {
self.write(.{ num, 'B' });
}
pub fn cursorRight(self: *Terminal, num: usize) void {
self.write(.{ num, 'C' });
}
pub fn cursorLeft(self: *Terminal, num: usize) void {
self.write(.{ num, 'D' });
}
pub fn cursorCol(self: *Terminal, col: usize) void {
self.write(.{ col, 'G' });
}
pub fn sgr(self: *Terminal, code: Attribute) void {
self.write(.{ @enumToInt(code), 'm' });
}
const WinSize = struct {
x: usize,
y: usize,
};
pub fn windowSize(self: *Terminal) ?WinSize {
var size: std.os.linux.winsize = undefined;
if (std.os.linux.ioctl(self.tty.handle, std.os.system.T.IOCGWINSZ, @ptrToInt(&size)) == -1) {
return null;
}
return WinSize{ .x = size.ws_col, .y = size.ws_row };
}
};
const Key = union(enum) {
character: u8,
control: u8,
esc,
up,
down,
left,
right,
backspace,
delete,
enter,
none,
};
fn readDelete(reader: anytype) Key {
const byte = reader.readByte() catch return .esc;
if (byte == '~') return .delete;
return .esc;
}
fn readKey(terminal: *Terminal) Key {
const reader = terminal.tty.reader();
// reading may fail (timeout)
var byte = reader.readByte() catch return .none;
// escape
if (byte == '\x1b') {
terminal.nodelay(true);
defer terminal.nodelay(false);
var seq: [2]u8 = undefined;
seq[0] = reader.readByte() catch return .esc;
seq[1] = reader.readByte() catch return .esc;
// DECCKM mode sends \x1bO* instead of \x1b[*
if (seq[0] == '[' or seq[0] == 'O') {
return switch (seq[1]) {
'A' => .up,
'B' => .down,
'C' => .right,
'D' => .left,
'3' => readDelete(reader),
else => .esc,
};
}
return .esc;
}
switch (byte) {
'\r' => return .enter,
127 => return .backspace,
else => {},
}
// control chars
if (std.ascii.isCntrl(byte)) return .{ .control = byte };
// ascii chars
if (std.ascii.isPrint(byte)) return .{ .character = byte };
return .none;
}
const State = struct {
cursor: usize,
selected: usize,
};
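/// Returns the highlight range with the smallest `start` at or after `start`
/// (preferring the longer range on ties), or null if none remain.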
fn getNextSlice(ranges: []filter.Range, start: usize) ?*filter.Range {
var min: ?*filter.Range = null;
for (ranges) |*r| {
if (r.start >= start) {
if (min == null or r.start < min.?.start) {
min = r;
} else if (r.start == min.?.start and r.end > min.?.end) {
min = r;
}
}
}
return min;
}
inline fn drawCandidate(terminal: *Terminal, candidate: Candidate, width: usize, selected: bool) void {
if (selected) terminal.sgr(.REVERSE);
defer terminal.sgr(.RESET);
const str = candidate.str[0..std.math.min(width, candidate.str.len)];
// no highlights, just draw the string
if (candidate.ranges == null) {
_ = terminal.writer.write(str) catch unreachable;
} else {
// slice into substrings for highlighting
var index: usize = 0;
while (true) {
if (getNextSlice(candidate.ranges.?, index)) |slice| {
// not at a range, draw the chars up to the range
if (index != slice.start) {
terminal.sgr(.FG_DEFAULT);
terminal.writeBytes(str[index..slice.start]);
}
terminal.sgr(.FG_CYAN);
terminal.writeBytes(str[slice.start .. slice.end + 1]);
index = slice.end + 1;
} else {
// potentially some chars left to draw
terminal.sgr(.FG_DEFAULT);
terminal.writeBytes(str[index..]);
break;
}
}
}
}
inline fn numDigits(number: usize) u16 {
if (number == 0) return 1;
return @intCast(u16, std.math.log10(number) + 1);
}
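// Quick sanity checks for numDigits; zero is treated as a single digit.
test "numDigits" {
    try std.testing.expect(numDigits(0) == 1);
    try std.testing.expect(numDigits(42) == 2);
    try std.testing.expect(numDigits(1000) == 4);
}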
fn draw(terminal: *Terminal, state: *State, query: ArrayList(u8), candidates: []Candidate, total_candidates: usize) !void {
const width = terminal.windowSize().?.x;
// draw the candidates
var line: usize = 0;
while (line < terminal.height) : (line += 1) {
terminal.cursorDown(1);
terminal.clearLine();
if (line < candidates.len) drawCandidate(terminal, candidates[line], width, line == state.selected);
}
terminal.sgr(.RESET);
terminal.cursorUp(terminal.height);
// draw the prompt
terminal.clearLine();
try terminal.writer.print("> {s}", .{query.items[0..std.math.min(width - 2, query.items.len)]});
// draw info if there is room
const prompt_width = 2;
const separator_width = 1;
const spacing = @intCast(i32, width) - @intCast(i32, prompt_width + query.items.len + numDigits(candidates.len) + numDigits(total_candidates) + separator_width);
if (spacing >= 1) {
terminal.cursorRight(@intCast(usize, spacing));
try terminal.writer.print("{}/{}", .{ candidates.len, total_candidates });
}
// position the cursor at the edit location
terminal.cursorCol(1);
terminal.cursorRight(std.math.min(width - 1, state.cursor + 2));
}
const Action = union(enum) {
byte: u8,
line_up,
line_down,
cursor_left,
cursor_right,
backspace,
delete,
delete_word,
delete_line,
select,
close,
pass,
};
fn ctrl(comptime key: u8) u8 {
return key & 0x1f;
}
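// Sanity check: with the usual ASCII convention, Ctrl-C is 0x03 and Ctrl-W is 0x17.
test "ctrl" {
    try std.testing.expect(ctrl('c') == 3);
    try std.testing.expect(ctrl('w') == 23);
}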
// TODO: for some reason this needs to be extracted to a separate function,
// perhaps related to ziglang/zig#137
fn ctrlToAction(key: u8) Action {
return switch (key) {
ctrl('c') => .close,
ctrl('w') => .delete_word,
ctrl('u') => .delete_line,
ctrl('p'), ctrl('k') => .line_up,
ctrl('n'), ctrl('j') => .line_down,
else => .pass,
};
}
fn keyToAction(key: Key) Action {
return switch (key) {
.character => |c| .{ .byte = c },
.control => |c| ctrlToAction(c),
.backspace => .backspace,
.delete => .delete,
.up => .line_up,
.down => .line_down,
.left => .cursor_left,
.right => .cursor_right,
.enter => .select,
.esc => .close,
.none => .pass,
};
}
fn charOrNull(char: u8) ?u8 {
// word separator chars for c-w word deletion
const word_chars = " -_/.";
const idx = std.mem.indexOfScalar(u8, word_chars, char);
if (idx) |i| {
return word_chars[i];
}
return null;
}
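/// Deletes the word ending at the cursor (the Ctrl-W binding), treating the
/// characters in `word_chars` as word separators.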
fn actionDeleteWord(query: *ArrayList(u8), cursor: *usize) void {
if (cursor.* > 0) {
const first_sep = charOrNull(query.items[cursor.* - 1]);
while (first_sep != null and cursor.* > 0 and first_sep.? == query.items[cursor.* - 1]) {
_ = query.pop();
cursor.* -= 1;
}
while (cursor.* > 0) {
_ = query.pop();
cursor.* -= 1;
if (cursor.* == 0) break;
const sep = charOrNull(query.items[cursor.* - 1]);
if (first_sep == null and sep != null) break;
if (first_sep != null and sep != null and first_sep.? == sep.?) break;
}
}
}
pub fn run(
allocator: std.mem.Allocator,
terminal: *Terminal,
candidates: []Candidate,
keep_order: bool,
) !?[]const u8 {
var query = ArrayList(u8).init(allocator);
defer query.deinit();
var state = State{
.cursor = 0,
.selected = 0,
};
    // ensure there is enough room to draw all lines of output by emitting blank
    // lines, effectively scrolling the view up by the UI height
terminal.determineHeight();
terminal.scrollDown(terminal.height);
terminal.cursorUp(terminal.height);
var filtered = candidates;
var old_state = state;
var old_query = try allocator.alloc(u8, query.items.len);
var redraw = true;
while (true) {
// did the query change?
if (!std.mem.eql(u8, query.items, old_query)) {
allocator.free(old_query);
old_query = try allocator.alloc(u8, query.items.len);
std.mem.copy(u8, old_query, query.items);
filtered = try filter.rankCandidates(allocator, candidates, query.items, keep_order);
redraw = true;
state.selected = 0;
}
// did the selection move?
if (redraw or state.cursor != old_state.cursor or state.selected != old_state.selected) {
old_state = state;
try draw(terminal, &state, query, filtered, candidates.len);
redraw = false;
}
const visible_rows = @intCast(i64, std.math.min(terminal.height, filtered.len));
const action = keyToAction(readKey(terminal));
switch (action) {
.byte => |b| {
try query.insert(state.cursor, b);
state.cursor += 1;
},
.delete_word => actionDeleteWord(&query, &state.cursor),
.delete_line => {
while (state.cursor > 0) {
_ = query.orderedRemove(state.cursor - 1);
state.cursor -= 1;
}
},
.backspace => {
if (query.items.len > 0 and state.cursor == query.items.len) {
_ = query.pop();
state.cursor -= 1;
                } else if (query.items.len > 0 and state.cursor > 0) {
                    // backspace removes the character before the cursor
                    _ = query.orderedRemove(state.cursor - 1);
                    state.cursor -= 1;
}
},
.delete => {
if (query.items.len > 0 and state.cursor < query.items.len) {
_ = query.orderedRemove(state.cursor);
}
},
.line_up => if (state.selected > 0) {
state.selected -= 1;
},
.line_down => if (state.selected < visible_rows - 1) {
state.selected += 1;
},
.cursor_left => if (state.cursor > 0) {
state.cursor -= 1;
},
.cursor_right => if (state.cursor < query.items.len) {
state.cursor += 1;
},
.select => {
if (filtered.len == 0) break;
return filtered[state.selected].str;
},
.close => break,
.pass => {},
}
}
return null;
}
pub fn cleanUp(terminal: *Terminal) !void {
var i: usize = 0;
while (i < terminal.height) : (i += 1) {
terminal.clearLine();
terminal.cursorDown(1);
}
terminal.clearLine();
terminal.cursorUp(terminal.height);
} | src/ui.zig |
const builtin = @import("builtin");
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const rotr = std.math.rotr;
// ----------------------------------------------------------------------------
const arm_cmse = @import("../drivers/arm_cmse.zig");
const arm_m = @import("../drivers/arm_m.zig");
const EXC_RETURN = arm_m.EXC_RETURN;
const getMspNs = arm_m.getMspNs;
const getPspNs = arm_m.getPspNs;
// ----------------------------------------------------------------------------
const constants = @import("../constants.zig");
const VEC_TABLE = constants.VEC_TABLE;
const reverse = @import("utils.zig").reverse;
const threads = @import("threads.zig");
const TCThreadCreateInfo = @import("ffi.zig").TCThreadCreateInfo;
const log = @import("debug.zig").log;
const markEvent = @import("profiler.zig").markEvent;
const options = @import("options.zig");
const SHADOW_EXC_STACK_TYPE = options.SHADOW_EXC_STACK_TYPE;
const ShadowExcStackType = options.ShadowExcStackType;
// ----------------------------------------------------------------------------
/// A copy of a portion of an exception frame.
const Frame = struct {
/// The saved (original) program counter.
pc: usize = 0,
/// The saved (original) value of the LR register.
lr: usize = 0,
/// The `EXC_RETURN` value of the corresponding exception activation.
exc_return: usize = 0,
/// The original location of the exception frame.
frame: usize = 0,
/// The saved R12 (IP). This register is used to store code pointers in
/// our shadow stack implementation.
r12: usize = 0,
const Self = @This();
fn eq(self: Self, rhs: Self) bool {
return self.pc == rhs.pc and
self.lr == rhs.lr and
self.exc_return == rhs.exc_return and
self.frame == rhs.frame and
self.r12 == rhs.r12;
}
};
/// Models a set of program counter values which are recognized as exception
/// entry.
const ExceptionEntryPCSet = struct {
start: usize = 0,
len: usize = 0,
const Self = @This();
fn setFromVtor(self: *Self, vtor: usize) void {
// Validate the Non-Secure pointer
const hdr = arm_cmse.checkSlice(usize, vtor, 2, arm_cmse.CheckOptions{}) catch |err| {
@panic("The Non-Secure exception vector table's location is invalid.");
};
// Get the number of entries (including the header)
var size = hdr[0];
if ((size & VEC_TABLE.HDR_SIGNATURE_MASK) != VEC_TABLE.HDR_SIGNATURE) {
@panic("TZmCFI magic number was not found in the Non-Secure exception vector table.");
}
size &= VEC_TABLE.HDR_SIZE_MASK;
if (size < 2) {
@panic("The Non-Secure exception vector table is too small.");
} else if (size > 256) {
@panic("The Non-Secure exception vector table is too large.");
}
if (size == 2) {
self.len = 0;
return;
}
// Find the rest of entries
const entries = arm_cmse.checkSlice(usize, vtor, size, arm_cmse.CheckOptions{}) catch |err| {
@panic("The Non-Secure exception vector table's location is invalid.");
};
const start = entries[2];
if ((start & 1) == 0) {
@panic("The address of an exception trampoline is malformed.");
}
var i: usize = 2;
while (i < size) : (i += 1) {
if (entries[i] != start + (i - 2) * VEC_TABLE.TRAMPOLINE_STRIDE) {
                @panic("Some exception trampolines are not laid out as expected.");
}
}
self.start = start & ~@as(usize, 1);
self.len = size - 2;
}
fn contains(self: *const Self, pc: usize) bool {
const rel = pc -% self.start;
// return pc < (self.len << stride_shift) and pc % VEC_TABLE.TRAMPOLINE_STRIDE == 0;
// Use a bit rotation trick to do alignment and boundary checks
// at the same time.
return rotr(usize, rel, VEC_TABLE.TRAMPOLINE_STRIDE_SHIFT) < self.len;
}
};
var g_exception_entry_pc_set = ExceptionEntryPCSet{};
/// Iterates through the chained exception stack.
const ChainedExceptionStackIterator = struct {
msp: usize,
psp: usize,
exc_return: usize,
frame: [*]const usize,
const Self = @This();
/// Start iteration.
fn new(exc_return: usize, msp: usize, psp: usize) Self {
var self = Self{
.msp = msp,
.psp = psp,
.exc_return = exc_return,
.frame = undefined,
};
self.fillFrameAddress();
return self;
}
fn fillFrameAddress(self: *Self) void {
if ((self.exc_return & EXC_RETURN.S) != 0) {
// `g_exception_entry_pc_set` only contains non-Secure exception
// entries. Thus it's invalid for `getOriginalPc()` to be included
// in `g_exception_entry_pc_set`. But we can't check that here;
// `getOriginalPc` assumes the exception frame is in a non-Secure
// stack.
// Assumption: For every exception entry chain A → B, A never
// belongs to Secure mode.
//
// This assumption makes things simpler. When a Non-Secure
            // exception is taken in Secure mode, additional state context data is
// pushed, changing the layout of an exception frame.
// We think this use case is extremely rare, so we chose to assume
// this never happens and not to introduce additional latency by
// addressing this case.
//
// Since there are no more exception entry chains, the chained
// exception stack ends here, so does unwinding. Still, we need an
// exception frame for `asFrame` to read. Just use a dummy
// exception frame here.
self.frame = @ptrCast([*]const usize, &dummy_exc_frame[0]);
} else if ((self.exc_return & EXC_RETURN.SPSEL) != 0) {
self.frame = @intToPtr([*]const usize, self.psp);
} else {
self.frame = @intToPtr([*]const usize, self.msp);
}
}
fn getOriginalPc(self: *const Self) usize {
return self.frame[6];
}
fn getOriginalLr(self: *const Self) usize {
return self.frame[5];
}
fn getOriginalR12(self: *const Self) usize {
return self.frame[4];
}
fn getFrameAddress(self: *const Self) usize {
return @ptrToInt(self.frame);
}
fn getExcReturn(self: *const Self) usize {
return self.exc_return;
}
fn asFrame(self: *const Self) Frame {
return Frame{
.pc = self.getOriginalPc(),
.lr = self.getOriginalLr(),
.frame = self.getFrameAddress(),
.exc_return = self.getExcReturn(),
.r12 = self.getOriginalR12(),
};
}
/// Moves to the next stack entry. Returns `false` if we can't proceed due
/// to one of the following reasons:
/// - We reached the end of the chained exception stack.
/// - We reached the end of the exception stack.
fn moveNext(self: *Self) bool {
if ((self.exc_return & EXC_RETURN.MODE) != 0) {
// Reached the end of the exception stack.
return false;
}
if (!g_exception_entry_pc_set.contains(self.getOriginalPc())) {
// The background context is an exception activation that already
// started running software code. Thus we reached the end of the
// chained exception stack.
// (Even if we go on, we wouldn't be able to retrieve the rest of
// the exception stack because we can't locate exception frames
// without doing DWARF CFI-based stack unwinding.)
return false;
}
const new_exc_return = self.getOriginalLr();
        // An exception context never uses PSP.
assert((self.exc_return & EXC_RETURN.SPSEL) == 0);
// Unwind the stack
const frameSize = if ((self.exc_return & EXC_RETURN.FTYPE) != 0) @as(usize, 32) else @as(usize, 104);
self.msp += frameSize;
self.exc_return = new_exc_return;
self.fillFrameAddress();
return true;
}
};
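/// A zero-filled stand-in exception frame used when the walk ends in a Secure
/// context (see `fillFrameAddress`); only the R12, LR and PC slots are ever read.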
const dummy_exc_frame = [1]usize{0} ** 7;
// The default SES implementation supporting nested exceptions.
fn fullImpl(comptime safe: bool) type {
return struct {
/// The default shadow stack
var g_default_stack_storage: [32]Frame = undefined;
/// Bundles the state of a single instance of shadow exception stack.
const StackStateImpl = struct {
current: [*]Frame,
top: [*]Frame,
limit: [*]Frame,
const Self = @This();
/// Construct a `StackStateImpl` by allocating memory from `allocator`.
pub fn new(allocator: *Allocator, create_info: *const TCThreadCreateInfo) !Self {
const frames = try allocator.alloc(Frame, 4);
var self = fromSlice(frames);
self.top[0] = Frame{
.pc = create_info.initialPC,
.lr = create_info.initialLR,
.exc_return = create_info.excReturn,
.frame = create_info.exceptionFrame,
.r12 = 0x12121212,
};
self.top += 1;
return self;
}
/// Release the memory allocated for `self`. `self` must have been created
/// by `new(allocator, _)`.
pub fn destroy(self: *const Self, allocator: *Allocator) void {
allocator.free(self.asSlice());
}
fn fromSlice(frames: []Frame) Self {
var start = @ptrCast([*]Frame, &frames[0]);
return Self{
.current = start,
.top = start,
.limit = start + frames.len,
};
}
fn asSlice(self: *const Self) []Frame {
const len = @divExact(@ptrToInt(self.limit) - @ptrToInt(self.current), @sizeOf(Frame));
return self.current[0..len];
}
};
fn createStackStateWithDefaultStorage() StackStateImpl {
return StackStateImpl.fromSlice(&g_default_stack_storage);
}
/// Perform the shadow push operation.
fn pushShadowExcStack(exc_return: usize) void {
var exc_stack = ChainedExceptionStackIterator.new(exc_return, getMspNs(), getPspNs());
const stack = &g_stack;
var new_top: [*]Frame = stack.top;
// TODO: Add bounds check using `StackStateImpl::limit`
if (safe) {
if (new_top == stack.current) {
// The shadow exception stack is empty -- push every frame we find
while (true) {
new_top.* = exc_stack.asFrame();
new_top += 1;
if (!exc_stack.moveNext()) {
break;
}
}
} else {
// Push until a known entry is encountered
const top_frame = (new_top - 1).*.frame;
while (true) {
if (exc_stack.getFrameAddress() == top_frame) {
break;
}
new_top.* = exc_stack.asFrame();
new_top += 1;
if (!exc_stack.moveNext()) {
break;
}
}
}
// The entries were inserted in a reverse order. Reverse them to be in the
// correct order.
reverse(Frame, stack.top, new_top);
} else {
// Push one frame
new_top.* = exc_stack.asFrame();
new_top += 1;
}
stack.top = new_top;
}
        /// Perform the shadow pop (assert) operation and get the `EXC_RETURN` that
/// corresponds to the current exception activation.
fn popShadowExcStack() usize {
const stack = &g_stack;
if (stack.top == stack.current) {
@panic("Exception return trampoline was called but the shadow exception stack is empty.");
}
const exc_return = (stack.top - 1)[0].exc_return;
var exc_stack = ChainedExceptionStackIterator.new(exc_return, getMspNs(), getPspNs());
// Validate *two* top entries. This is required for the soundness
// of the safe shadow exception stack algorithm. (Please see the
// paper for details.)
if (!exc_stack.asFrame().eq((stack.top - 1)[0])) {
log(.Warning, "popShadowExcStack: {} != {}\r\n", .{ exc_stack.asFrame(), (stack.top - 1)[0] });
@panic("Exception stack integrity check has failed.");
}
if (safe) {
if (exc_stack.moveNext()) {
if (stack.top == stack.current + 1) {
@panic("The number of entries in the shadow exception stack is lower than expected.");
}
if (!exc_stack.asFrame().eq((stack.top - 2)[0])) {
log(.Warning, "popShadowExcStack: {} != {}\r\n", .{ exc_stack.asFrame(), (stack.top - 2)[0] });
@panic("Exception stack integrity check has failed.");
}
}
}
stack.top -= 1;
return exc_return;
}
};
}
// The simplified SES implementation not supporting nested exceptions.
const unnested_impl = struct {
/// The default shadow stack
var g_default_stack_storage: Frame = undefined;
/// Bundles the state of a single instance of shadow exception stack.
const StackStateImpl = struct {
current: ?*Frame,
storage: *Frame,
const Self = @This();
/// Construct a `StackStateImpl` by allocating memory from `allocator`.
pub fn new(allocator: *Allocator, create_info: *const TCThreadCreateInfo) !Self {
const storage = try allocator.create(Frame);
var self = fromStorage(storage);
storage.* = Frame{
.pc = create_info.initialPC,
.lr = create_info.initialLR,
.exc_return = create_info.excReturn,
.frame = create_info.exceptionFrame,
.r12 = 0x12121212,
};
self.current = self.storage;
return self;
}
/// Release the memory allocated for `self`. `self` must have been created
/// by `new(allocator, _)`.
pub fn destroy(self: *const Self, allocator: *Allocator) void {
allocator.destroy(self.storage);
}
fn fromStorage(storage: *Frame) Self {
return Self{
.current = null,
.storage = storage,
};
}
};
fn createStackStateWithDefaultStorage() StackStateImpl {
return StackStateImpl.fromStorage(&g_default_stack_storage);
}
/// Perform the shadow push operation.
fn pushShadowExcStack(exc_return: usize) void {
const stack = &g_stack;
var exc_stack = ChainedExceptionStackIterator.new(exc_return, getMspNs(), getPspNs());
if (stack.current != null) {
@panic("Shadow exception stack is already occupied.");
}
stack.current = stack.storage;
stack.storage.* = exc_stack.asFrame();
}
    /// Perform the shadow pop (assert) operation and get the `EXC_RETURN` that
/// corresponds to the current exception activation.
fn popShadowExcStack() usize {
const stack = &g_stack;
const frame = stack.current orelse {
@panic("Exception return trampoline was called but the shadow exception stack is empty.");
};
const exc_return = frame.exc_return;
var exc_stack = ChainedExceptionStackIterator.new(exc_return, getMspNs(), getPspNs());
// Validate the entry.
if (!exc_stack.asFrame().eq(frame.*)) {
log(.Warning, "popShadowExcStack: {} != {}\r\n", .{ exc_stack.asFrame(), frame });
@panic("Exception stack integrity check has failed.");
}
stack.current = null;
return exc_return;
}
};
// The minimal SES implementation.
const min_impl = struct {
/// Bundles the state of a single instance of shadow exception stack.
const StackStateImpl = struct {
exc_return: usize = undefined,
const Self = @This();
/// Construct a `StackStateImpl` by allocating memory from `allocator`.
pub fn new(allocator: *Allocator, create_info: *const TCThreadCreateInfo) !Self {
return Self{ .exc_return = create_info.excReturn };
}
/// Release the memory allocated for `self`. `self` must have been created
/// by `new(allocator, _)`.
pub fn destroy(self: *const Self, allocator: *Allocator) void {}
};
fn createStackStateWithDefaultStorage() StackStateImpl {
return StackStateImpl{};
}
/// Perform the shadow push operation.
fn pushShadowExcStack(exc_return: usize) void {
const stack = &g_stack;
stack.exc_return = exc_return;
}
    /// Perform the shadow pop (assert) operation and get the `EXC_RETURN` that
/// corresponds to the current exception activation.
fn popShadowExcStack() usize {
const stack = &g_stack;
return stack.exc_return;
}
};
// Choose an implementation based on `SHADOW_EXC_STACK_TYPE`
const impl = if (SHADOW_EXC_STACK_TYPE == ShadowExcStackType.Safe)
fullImpl(true)
else if (SHADOW_EXC_STACK_TYPE == ShadowExcStackType.Unnested)
unnested_impl
else if (SHADOW_EXC_STACK_TYPE == ShadowExcStackType.Naive)
fullImpl(false)
else
min_impl;
pub const StackState = impl.StackStateImpl;
// TODO: Static initialize
var g_stack: StackState = undefined;
const Usizex2 = @Vector(2, usize);
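/// Called from the exception entry trampoline: records the new exception
/// activation on the shadow exception stack and returns the pair
/// `{ exc_return, isr_body }` for the trampoline to consume.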
export fn __tcEnterInterrupt(isr_body: usize, exc_return: usize) Usizex2 {
markEvent(.EnterInterrupt);
impl.pushShadowExcStack(exc_return);
// TODO: Conceal `r3` and `r4`?
var ret = [2]usize{ exc_return, isr_body };
return @bitCast(Usizex2, ret);
}
export fn __tcLeaveInterrupt() usize {
markEvent(.LeaveInterrupt);
return impl.popShadowExcStack();
}
pub fn saveState(state: *StackState) void {
state.* = g_stack;
}
pub fn loadState(state: *const StackState) void {
g_stack = state.*;
}
export fn __tcReportBadNesting() noreturn {
@panic("Nested exception is disallowed.");
}
// Non-Secure application interface
// ----------------------------------------------------------------------------
/// Implements a secure function in `Secure.h`.
pub export fn TCInitialize(ns_vtor: usize) void {
threads.init();
g_stack = impl.createStackStateWithDefaultStorage();
if (SHADOW_EXC_STACK_TYPE == ShadowExcStackType.Safe) {
g_exception_entry_pc_set.setFromVtor(ns_vtor);
}
}
/// Implements a private gateway function in `PrivateGateway.h`.
pub export fn __TCPrivateEnterInterrupt() callconv(.Naked) noreturn {
// This `asm` block provably never returns
@setRuntimeSafety(false);
if (SHADOW_EXC_STACK_TYPE == ShadowExcStackType.Unnested) {
asm volatile (
\\ sg
\\
\\ # r0 = handler function pointer
\\ mov r1, lr
\\
\\ # Deny nested exception by checking the Mode bit. A cleared Mode
\\ # bit means the exception was taken in Handler mode.
\\ tst lr, #8
\\ beq __tcReportBadNesting
\\
\\ bl __tcEnterInterrupt
\\
\\ # r0 = lr (EXC_RETURN)
\\ # r1 = handler function pointer
\\
\\ bxns r1
);
} else {
asm volatile (
\\ sg
\\
\\ # r0 = handler function pointer
\\ mov r1, lr
\\
\\ bl __tcEnterInterrupt
\\
\\ # r0 = lr (EXC_RETURN)
\\ # r1 = handler function pointer
\\
\\ bxns r1
);
}
unreachable;
}
/// Implements a private gateway function in `PrivateGateway.h`.
pub export fn __TCPrivateLeaveInterrupt() callconv(.Naked) noreturn {
// This `asm` block provably never returns
@setRuntimeSafety(false);
asm volatile (
\\ sg
\\
\\ bl __tcLeaveInterrupt
\\
\\ # r0 = EXC_RETURN
\\
\\ bx r0
);
unreachable;
}
// Export the gateway functions to Non-Secure
comptime {
@export(__TCPrivateEnterInterrupt, .{ .name = "__acle_se___TCPrivateEnterInterrupt", .linkage = .Strong, .section = ".gnu.sgstubs" });
@export(__TCPrivateLeaveInterrupt, .{ .name = "__acle_se___TCPrivateLeaveInterrupt", .linkage = .Strong, .section = ".gnu.sgstubs" });
} | src/monitor/shadowexcstack.zig |
const graphics = @import("didot-graphics");
const zlm = @import("zlm");
const std = @import("std");
usingnamespace @import("assets.zig");
const Mesh = graphics.Mesh;
const Window = graphics.Window;
const Material = graphics.Material;
const Allocator = std.mem.Allocator;
pub const GameObjectArrayList = std.ArrayList(GameObject);
/// Mesh of a plane.
pub var PrimitivePlaneMesh: Mesh = undefined;
/// Mesh of a cube.
pub var PrimitiveCubeMesh: Mesh = undefined;
/// Material must be set manually.
/// Memory is caller-owned.
/// initPrimitives() must have been called before calling this function!
pub fn createSkybox(allocator: *Allocator) !GameObject {
var go = GameObject.createCustom(allocator, "skybox", 0);
return go;
}
/// This function must be called before primitive meshes (like PrimitiveCubeMesh) can be used.
/// Since it creates meshes, it must be called after the window context is set.
/// It is also automatically called by didot-app.Application
pub fn initPrimitives() void {
var planeVert = [_]f32 {
-0.5, 0.5, 0.0, 0.0, 0.0,
0.5, 0.5, 0.0, 1.0, 0.0,
0.5, -0.5, 0.0, 1.0, 1.0,
-0.5, -0.5, 0.0, 0.0, 1.0
};
var planeElem = [_]graphics.MeshElementType {
0, 1, 2,
2, 3, 0
};
PrimitivePlaneMesh = Mesh.create(planeVert[0..], planeElem[0..]);
// position, normal, tex coords
// var cubeVert = [_]f32 {
// // front
// -0.5, 0.5, 0.5, 0.0, 0.0, 1.0, 0.0, 0.0, // upper left
// 0.5, 0.5, 0.5, 0.0, 0.0, 1.0, 1.0, 0.0, // upper right
// 0.5, -0.5, 0.5, 0.0, 0.0, 1.0, 1.0, 1.0, // bottom right
// -0.5, -0.5, 0.5, 0.0, 0.0, 1.0, 0.0, 1.0, // bottom left
// // bottom
// -0.5, -0.5, -0.5, 0.0, -1.0, 0.0, 0.0, 0.0, // bottom left
// 0.5, -0.5, -0.5, 0.0, -1.0, 0.0, 1.0, 0.0, // bottom right
// // right
// -0.5, 0.5, -0.5, 1.0, 0.0, 1.0, 1.0, 0.0, // upper left
// -0.5, -0.5, -0.5, 1.0, 0.0, -1.0, 1.0, 1.0, // bottom left
// // left
// 0.5, 0.5, -0.5, -1.0, 0.0, 0.0, 0.0, 0.0, // upper left
// 0.5, -0.5, -0.5, -1.0, 0.0, 0.0, 0.0, 1.0, // bottom left
// // top
// -0.5, 0.5, -0.5, 0.0, 1.0, 0.0, 0.0, 1.0, // top left
// 0.5, 0.5, -0.5, 0.0, 1.0, 0.0, 1.0, 1.0, // top right
// };
// var cubeElem = [_]graphics.MeshElementType {
// // front
// 0, 1, 3,
// 1, 3, 2,
// // bottom
// 3, 2, 4,
// 2, 5, 4,
// // right
// 0, 3, 6,
// 3, 6, 7,
// // left
// 1, 2, 8,
// 2, 8, 9,
// // top
// 0, 1, 10,
// 1, 11, 10,
// };
//PrimitiveCubeMesh = Mesh.create(cubeVert[0..], cubeElem[0..]);
var cubeVert = [_]f32{
// back
-0.5, -0.5, -0.5, 0.0, 0.0, -1.0, 0.0, 0.0,
0.5, -0.5, -0.5, 0.0, 0.0, -1.0, 1.0, 0.0,
0.5, 0.5, -0.5, 0.0, 0.0, -1.0, 1.0, 1.0,
0.5, 0.5, -0.5, 0.0, 0.0, -1.0, 1.0, 1.0,
-0.5, 0.5, -0.5, 0.0, 0.0, -1.0, 0.0, 1.0,
-0.5, -0.5, -0.5, 0.0, 0.0, -1.0, 0.0, 0.0,
// front
-0.5, -0.5, 0.5, 0.0, 0.0, 1.0, 0.0, 0.0,
0.5, -0.5, 0.5, 0.0, 0.0, 1.0, 1.0, 0.0,
0.5, 0.5, 0.5, 0.0, 0.0, 1.0, 1.0, 1.0,
0.5, 0.5, 0.5, 0.0, 0.0, 1.0, 1.0, 1.0,
-0.5, 0.5, 0.5, 0.0, 0.0, 1.0, 0.0, 1.0,
-0.5, -0.5, 0.5, 0.0, 0.0, 1.0, 0.0, 0.0,
// left
-0.5, 0.5, 0.5, -1.0, 0.0, 0.0, 0.0, 1.0,
-0.5, 0.5, -0.5, -1.0, 0.0, 0.0, 1.0, 1.0,
-0.5, -0.5, -0.5, -1.0, 0.0, 0.0, 1.0, 0.0,
-0.5, -0.5, -0.5, -1.0, 0.0, 0.0, 1.0, 0.0,
-0.5, -0.5, 0.5, -1.0, 0.0, 0.0, 0.0, 0.0,
-0.5, 0.5, 0.5, -1.0, 0.0, 0.0, 0.0, 1.0,
// right
0.5, 0.5, 0.5, 1.0, 0.0, 0.0, 0.0, 1.0,
0.5, 0.5, -0.5, 1.0, 0.0, 0.0, 1.0, 1.0,
0.5, -0.5, -0.5, 1.0, 0.0, 0.0, 1.0, 0.0,
0.5, -0.5, -0.5, 1.0, 0.0, 0.0, 1.0, 0.0,
0.5, -0.5, 0.5, 1.0, 0.0, 0.0, 0.0, 0.0,
0.5, 0.5, 0.5, 1.0, 0.0, 0.0, 0.0, 1.0,
// bottom
-0.5, -0.5, -0.5, 0.0, -1.0, 0.0, 0.0, 0.0,
0.5, -0.5, -0.5, 0.0, -1.0, 0.0, 0.0, 0.0,
0.5, -0.5, 0.5, 0.0, -1.0, 0.0, 0.0, 0.0,
0.5, -0.5, 0.5, 0.0, -1.0, 0.0, 0.0, 0.0,
-0.5, -0.5, 0.5, 0.0, -1.0, 0.0, 0.0, 0.0,
-0.5, -0.5, -0.5, 0.0, -1.0, 0.0, 0.0, 0.0,
// top
-0.5, 0.5, -0.5, 0.0, 1.0, 0.0, 0.0, 0.0,
0.5, 0.5, -0.5, 0.0, 1.0, 0.0, 1.0, 0.0,
0.5, 0.5, 0.5, 0.0, 1.0, 0.0, 1.0, 1.0,
0.5, 0.5, 0.5, 0.0, 1.0, 0.0, 1.0, 1.0,
-0.5, 0.5, 0.5, 0.0, 1.0, 0.0, 0.0, 1.0,
-0.5, 0.5, -0.5, 0.0, 1.0, 0.0, 0.0, 0.0,
};
PrimitiveCubeMesh = Mesh.create(cubeVert[0..], null);
}
pub const GameObject = struct {
//mesh: ?Mesh = null,
meshPath: ?[]const u8 = null,
name: []const u8 = "Game Object",
/// Functions called regularly depending on the updateTarget value of the Application.
updateFn: ?fn(allocator: *Allocator, gameObject: *GameObject, delta: f32) anyerror!void = null,
position: zlm.Vec3 = zlm.Vec3.zero,
/// In order: yaw, pitch, roll.
/// Note: this will be replaced with quaternions very soon!
rotation: zlm.Vec3 = zlm.Vec3.zero,
scale: zlm.Vec3 = zlm.Vec3.one,
childrens: GameObjectArrayList,
/// Type of object owning this game object ("camera", "scene", etc.)
objectType: ?[]const u8 = null,
/// Pointer to the struct of the object owning this game object.
/// To save space, it must be considered null when objectType is null.
objectPointer: usize = 0,
/// The allocator used to create objectPointer, if any.
objectAllocator: ?*Allocator = null,
material: Material = Material.default,
    /// To be used for game objects entirely made of other game objects as children, or for script-only game objects.
pub fn createEmpty(allocator: *Allocator) GameObject {
var childs = GameObjectArrayList.init(allocator);
return GameObject {
.childrens = childs
};
}
/// The default kind of game object, it is renderable via its mesh and material.
pub fn createObject(allocator: *Allocator, meshPath: []const u8) GameObject {
var childs = GameObjectArrayList.init(allocator);
return GameObject {
.childrens = childs,
.meshPath = meshPath
};
}
/// For cameras, scenes, etc.
pub fn createCustom(allocator: *Allocator, customType: []const u8, ptr: usize) GameObject {
var childs = GameObjectArrayList.init(allocator);
return GameObject {
.childrens = childs,
.objectType = customType,
.objectPointer = ptr,
.objectAllocator = allocator
};
}
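    /// Calls this object's `updateFn` (if any), then recursively updates all children.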
pub fn update(self: *GameObject, allocator: *Allocator, delta: f32) anyerror!void {
if (self.updateFn) |func| {
try func(allocator, self, delta);
}
for (self.childrens.items) |*child| {
try child.update(allocator, delta); // TODO: correctly handle errors
}
}
pub fn findChild(self: *const GameObject, name: []const u8) ?*GameObject {
for (self.childrens.items) |*child| {
if (std.mem.eql(u8, child.name, name)) return child;
}
return null;
}
    /// This function returns the forward (direction) vector of this game object using its rotation.
pub fn getForward(self: *const GameObject) zlm.Vec3 {
const rot = self.rotation;
return zlm.Vec3.new(
std.math.cos(rot.x) * std.math.cos(rot.y),
std.math.sin(rot.y),
std.math.sin(rot.x) * std.math.cos(rot.y)
);
}
    /// This function returns the left vector of this game object using its rotation.
pub fn getLeft(self: *const GameObject) zlm.Vec3 {
const rot = self.rotation;
return zlm.Vec3.new(
-std.math.sin(rot.x),
0,
std.math.cos(rot.x)
);
}
    pub fn look(self: *GameObject, direction: zlm.Vec3, up: zlm.Vec3) void {
        // Derive yaw and pitch from the direction vector so that getForward()
        // returns `direction` again. Roll (and the `up` vector) is ignored for now.
        self.rotation.x = std.math.atan2(f32, direction.z, direction.x);
        self.rotation.y = std.math.asin(direction.y);
        self.rotation.z = 0;
    }
pub fn lookAt(self: *GameObject, target: zlm.Vec3, up: zlm.Vec3) void {
self.look(target.sub(self.position).normalize(), up);
}
    /// Adds a game object as a child of this game object.
pub fn add(self: *GameObject, go: GameObject) !void {
try self.childrens.append(go);
}
    /// Frees the children array list (but not the children themselves!), the object associated with it, and itself.
pub fn deinit(self: *const GameObject) void {
self.childrens.deinit();
if (self.objectAllocator) |alloc| {
if (self.objectPointer != 0) {
alloc.destroy(@intToPtr(*u8, self.objectPointer));
}
}
}
/// De-init the game object and its children (recursive deinit)
pub fn deinitAll(self: *const GameObject) void {
for (self.childrens.items) |*child| {
child.deinitAll();
}
self.deinit();
}
};
pub const Camera = struct {
fov: f32,
gameObject: GameObject,
viewMatrix: zlm.Mat4,
shader: graphics.ShaderProgram,
skyboxShader: ?graphics.ShaderProgram,
/// Memory is caller-owned (de-init must be called before)
pub fn create(allocator: *Allocator, shader: graphics.ShaderProgram) !*Camera {
var camera = try allocator.create(Camera);
var go = GameObject.createCustom(allocator, "camera", @ptrToInt(camera));
go.rotation = zlm.Vec3.new(zlm.toRadians(-90.0), 0, 0);
camera.gameObject = go;
camera.shader = shader;
camera.fov = 70;
return camera;
}
pub fn deinit(self: *const Camera) void {
self.gameObject.deinit();
}
};
pub const PointLight = struct {
gameObject: GameObject,
color: graphics.Color,
/// Constant attenuation (the higher it is, the darker the light is)
constant: f32,
/// Linear attenuation
linear: f32,
/// Quadratic attenuation
quadratic: f32,
pub fn create(allocator: *Allocator) !*PointLight {
var light = try allocator.create(PointLight);
var go = GameObject.createCustom(allocator, "point_light", @ptrToInt(light));
light.gameObject = go;
light.color = zlm.Vec3.one;
light.constant = 1.0;
light.linear = 0.018;
light.quadratic = 0.016;
return light;
}
};
pub const Scene = struct {
gameObject: GameObject,
/// The camera the scene is currently using.
/// It is auto-detected at runtime before each render by looking
/// on top-level game objects to select one that corresponds
/// to the "camera" type.
camera: ?*Camera,
/// The skybox the scene is currently using.
/// It is auto-detected at runtime before each render by looking
/// on top-level game objects to select one that corresponds
/// to the "skybox" type.
skybox: ?*GameObject,
pointLight: ?*PointLight,
assetManager: AssetManager,
pub fn create(allocator: *Allocator, assetManager: ?AssetManager) !*Scene {
var scene = try allocator.create(Scene);
scene.gameObject = GameObject.createCustom(allocator, "scene", @ptrToInt(scene));
if (assetManager) |mg| {
scene.assetManager = mg;
} else {
scene.assetManager = AssetManager.init(allocator);
}
return scene;
}
pub fn loadFromFile(allocator: *Allocator, path: []const u8) !Scene {
const file = try std.fs.cwd().openFile(path, .{ .read = true });
defer file.close();
        const reader = file.reader();
        const text = try reader.readAllAlloc(allocator, std.math.maxInt(usize));
defer allocator.free(text);
return Scene.loadFromMemory(allocator, text);
}
pub fn loadFromMemory(allocator: *Allocator, json: []const u8) !Scene {
std.debug.warn("{}\n", .{json});
        return error.NotImplemented; // TODO: actually parse the JSON scene description
    }
pub fn render(self: *Scene, window: Window) !void {
var childs: GameObjectArrayList = self.gameObject.childrens;
// TODO: only do this when a new child is inserted
self.camera = null;
self.skybox = null;
self.pointLight = null;
for (childs.items) |*child| {
if (child.objectType) |objectType| {
if (std.mem.eql(u8, objectType, "camera")) {
self.camera = @intToPtr(*Camera, child.objectPointer);
self.camera.?.gameObject = child.*;
} else if (std.mem.eql(u8, objectType, "point_light")) {
self.pointLight = @intToPtr(*PointLight, child.objectPointer);
self.pointLight.?.gameObject = child.*;
} else if (std.mem.eql(u8, objectType, "skybox")) {
self.skybox = child;
}
}
}
try graphics.renderScene(self, window);
}
pub fn add(self: *Scene, go: GameObject) !void {
try self.gameObject.add(go);
}
pub fn findChild(self: *const Scene, name: []const u8) ?*GameObject {
return self.gameObject.findChild(name);
}
pub fn deinit(self: *Scene) void {
self.gameObject.deinit();
self.assetManager.deinit();
}
pub fn deinitAll(self: *Scene) void {
self.gameObject.deinitAll();
self.assetManager.deinit();
}
};
pub const ComponentOptions = struct {
/// Functions called regularly depending on the updateTarget value of the Application.
updateFn: ?fn(allocator: *Allocator, component: *Component, gameObject: *GameObject, delta: f32) anyerror!void = null
};
// TODO: redo components
pub const Component = struct {
options: ComponentOptions,
data: usize,
pub fn update(self: *Component, gameObject: *GameObject, allocator: *Allocator, delta: f32) anyerror!void {
if (self.options.updateFn) |func| {
try func(allocator, self, gameObject, delta);
}
}
};
pub fn ComponentType(comptime name: @Type(.EnumLiteral), comptime Data: @Type(.Struct), options: ComponentOptions) type {
return struct {
pub fn new() Component {
var data = Data {};
var cp = Component {
.options = options,
.data = @ptrToInt(&data)
};
return cp;
}
};
}
// Tests
const expect = std.testing.expect;
test "empty gameobject" {
var alloc = std.heap.page_allocator;
var go = GameObject.createEmpty(alloc);
expect(go.childrens.items.len == 0);
expect(go.objectType == null);
}
test "empty scene" {
var alloc = std.heap.page_allocator;
    var scene = try Scene.create(alloc, null);
expect(scene.gameObject.objectType != null);
expect(std.mem.eql(u8, scene.gameObject.objectType.?, "scene"));
scene.deinit();
}
test "default camera" {
var alloc = std.heap.page_allocator;
var cam = try Camera.create(alloc, undefined);
expect(cam.fov == 70); // default FOV
expect(cam.gameObject.objectType != null);
expect(std.mem.eql(u8, cam.gameObject.objectType.?, "camera"));
cam.deinit();
}
comptime {
std.testing.refAllDecls(@This());
std.testing.refAllDecls(GameObject);
std.testing.refAllDecls(PointLight);
} | didot-objects/objects.zig |
const std = @import("std");
const builtin = @import("builtin");
const panic = std.debug.panic;
usingnamespace @import("c.zig");
// settings
const SCR_WIDTH: u32 = 1920;
const SCR_HEIGHT: u32 = 1080;
const vertexShaderSource: [:0]const u8 =
\\#version 330 core
\\layout (location = 0) in vec3 aPos;
\\void main()
\\{
\\ gl_Position = vec4(aPos.x, aPos.y, aPos.z, 1.0);
    \\}
;
const fragmentShaderSource: [:0]const u8 =
\\#version 330 core
\\out vec4 FragColor;
\\void main()
\\{
\\ FragColor = vec4(1.0f, 0.5f, 0.2f, 1.0f);
    \\}
;
pub fn main() void {
const ok = glfwInit();
if (ok == 0) {
panic("Failed to initialise GLFW\n", .{});
}
defer glfwTerminate();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
    // glfw window creation
var window = glfwCreateWindow(SCR_WIDTH, SCR_HEIGHT, "Learn OpenGL", null, null);
if (window == null) {
panic("Failed to create GLFW window\n", .{});
}
glfwMakeContextCurrent(window);
    _ = glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
// glad: load all OpenGL function pointers
if (gladLoadGLLoader(@ptrCast(GLADloadproc, glfwGetProcAddress)) == 0) {
panic("Failed to initialise GLAD\n", .{});
}
// build and compile our shader program
// vertex shader
const vertexShader = glCreateShader(GL_VERTEX_SHADER);
const vertexSrcPtr: ?[*]const u8 = vertexShaderSource.ptr;
// const vertexSrcLen = @intCast(c_int, vertexShaderSource.len);
glShaderSource(vertexShader, 1, &vertexSrcPtr, null);
glCompileShader(vertexShader);
// check for shader compile errors
var success: c_int = undefined;
var infoLog: [512]u8 = undefined;
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &success);
if (success == 0) {
glGetShaderInfoLog(vertexShader, 512, null, &infoLog);
panic("ERROR::SHADER::VERTEX::COMPILATION_FAILED\n{s}\n", .{infoLog});
}
// fragment shader
const fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
const fragmentSrcPtr: ?[*]const u8 = fragmentShaderSource.ptr;
glShaderSource(fragmentShader, 1, &fragmentSrcPtr, null);
glCompileShader(fragmentShader);
// check for shader compile errors
glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &success);
if (success == 0) {
        glGetShaderInfoLog(fragmentShader, 512, null, &infoLog);
panic("ERROR::SHADER::FRAGMENT::COMPILATION_FAILED\n{s}\n", .{infoLog});
}
// link shaders
const shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
glLinkProgram(shaderProgram);
// check for linking errors
glGetProgramiv(shaderProgram, GL_LINK_STATUS, &success);
if (success == 0) {
        glGetProgramInfoLog(shaderProgram, 512, null, &infoLog);
panic("ERROR::SHADER::PROGRAM::LINKING_FAILED\n{s}\n", .{infoLog});
}
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
// set up vertex data (and buffer(s)) and configure vertex attributes
const vertices = [_]f32{
0.5, 0.5, 0.0, // top right
0.5, -0.5, 0.0, // bottom right
        -0.5, -0.5, 0.0, // bottom left
-0.5, 0.5, 0.0, // top left
};
const indices = [_]u32{ // note that we start from 0!
0, 1, 3, // first Triangle
1, 2, 3, // second Triangle
};
var VAO: c_uint = undefined;
var VBO: c_uint = undefined;
var EBO: c_uint = undefined;
glGenVertexArrays(1, &VAO);
defer glDeleteVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
defer glDeleteBuffers(1, &VBO);
glGenBuffers(1, &EBO);
defer glDeleteBuffers(1, &EBO);
// bind the Vertex Array Object first, then bind and set vertex buffer(s), and then configure vertex attributes(s).
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, vertices.len * @sizeOf(f32), &vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.len * @sizeOf(u32), &indices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * @sizeOf(f32), null);
glEnableVertexAttribArray(0);
// note that this is allowed, the call to glVertexAttribPointer registered VBO as the vertex attribute's bound vertex buffer object so afterwards we can safely unbind
glBindBuffer(GL_ARRAY_BUFFER, 0);
// remember: do NOT unbind the EBO while a VAO is active as the bound element buffer object IS stored in the VAO; keep the EBO bound.
//glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// You can unbind the VAO afterwards so other VAO calls won't accidentally modify this VAO, but this rarely happens. Modifying other
// VAOs requires a call to glBindVertexArray anyways so we generally don't unbind VAOs (nor VBOs) when it's not directly necessary.
glBindVertexArray(0);
// uncomment this call to draw in wireframe polygons.
//glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
// render loop
// -----------
while (glfwWindowShouldClose(window) == 0) {
// input
processInput(window);
// render
glClearColor(0.2, 0.3, 0.3, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
// draw our first triangle
glUseProgram(shaderProgram);
glBindVertexArray(VAO); // seeing as we only have a single VAO there's no need to bind it every time, but we'll do so to keep things a bit more organized
//glDrawArrays(GL_TRIANGLES, 0, 6);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, null);
// glBindVertexArray(0); // no need to unbind it every time
// glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.)
glfwSwapBuffers(window);
glfwPollEvents();
}
}
// process all input: query GLFW whether relevant keys are pressed/released this frame and react accordingly
pub fn processInput(window: ?*GLFWwindow) callconv(.C) void {
if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
glfwSetWindowShouldClose(window, 1);
}
// glfw: whenever the window size changed (by OS or user resize) this callback function executes
pub fn framebuffer_size_callback(window: ?*GLFWwindow, width: c_int, height: c_int) callconv(.C) void {
// make sure the viewport matches the new window dimensions; note that width and
// height will be significantly larger than specified on retina displays.
glViewport(0, 0, width, height);
} | src/1_2_hello_triangle.zig |
const std = @import("std");
const Process = @import("process.zig").Process;
/// Cooperative scheduler for processes. Each process is invoked once per tick. If a process terminates, it's
/// removed automatically from the scheduler and it's never invoked again. A process can also have a child. In
/// this case, the process is replaced with its child when it terminates if it returns with success. In case of errors,
/// both the process and its child are discarded. In order to invoke all scheduled processes, call the `update` member function
/// Processes add themselves by calling `attach` and must satisfy the following conditions:
/// - have a field `process: Process`
/// - have a method `initialize(self: *@This(), data: anytype) void` that initializes all fields and takes in the data passed to `attach`
/// - when initializing the `process` field it must be given an `updateFn`. All other callbacks are optional.
/// - in any callback you can get your original struct back via `process.getParent(@This())`
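/// A minimal example process (hypothetical names; this mirrors the tests at the bottom of this file):
///
///     const Printer = struct {
///         process: Process,
///         pub fn initialize(self: *@This(), _: anytype) void {
///             self.process = .{ .updateFn = update };
///         }
///         fn update(process: *Process) void {
///             process.succeed();
///         }
///     };
///
///     var scheduler = Scheduler.init(allocator);
///     _ = scheduler.attach(Printer, {});
///     scheduler.update();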
pub const Scheduler = struct {
processes: std.ArrayList(*Process),
allocator: std.mem.Allocator,
/// helper to create and prepare a process
fn createProcessHandler(comptime T: type, data: anytype, allocator: std.mem.Allocator) *Process {
var proc = allocator.create(T) catch unreachable;
proc.initialize(data);
// get a closure so that we can safely deinit this later
proc.process.deinit = struct {
fn deinit(process: *Process, alloc: std.mem.Allocator) void {
if (process.next) |next_process| {
next_process.deinit(next_process, alloc);
}
alloc.destroy(@fieldParentPtr(T, "process", process));
}
}.deinit;
return &proc.process;
}
/// returned when appending a process so that sub-processes can be added to the process
const Continuation = struct {
process: *Process,
allocator: std.mem.Allocator,
pub fn init(process: *Process, allocator: std.mem.Allocator) Continuation {
return .{ .process = process, .allocator = allocator };
}
pub fn next(self: *@This(), comptime T: type, data: anytype) *@This() {
self.process.next = createProcessHandler(T, data, self.allocator);
self.process = self.process.next.?;
return self;
}
};
pub fn init(allocator: std.mem.Allocator) Scheduler {
return .{
.processes = std.ArrayList(*Process).init(allocator),
.allocator = allocator,
};
}
pub fn deinit(self: *Scheduler) void {
self.clear();
self.processes.deinit();
}
/// Schedules a process for the next tick
pub fn attach(self: *Scheduler, comptime T: type, data: anytype) Continuation {
std.debug.assert(@hasDecl(T, "initialize"));
std.debug.assert(@hasField(T, "process"));
var process = createProcessHandler(T, data, self.allocator);
process.tick();
self.processes.append(process) catch unreachable;
return Continuation.init(process, self.allocator);
}
fn updateProcess(process: **Process, allocator: std.mem.Allocator) bool {
const current_process = process.*;
current_process.tick();
if (current_process.dead()) {
if (!current_process.rejected() and current_process.next != null) {
// grab the next process and null it out so we dont double-free it later
const next_process = current_process.next.?;
current_process.next = null;
process.* = next_process;
// kill the old Process parent
current_process.deinit(current_process, allocator);
return updateProcess(process, allocator);
} else {
return true;
}
}
return false;
}
/// Updates all scheduled processes
pub fn update(self: *Scheduler) void {
if (self.processes.items.len == 0) return;
var i: usize = self.processes.items.len - 1;
while (true) : (i -= 1) {
if (updateProcess(&self.processes.items[i], self.allocator)) {
var dead_process = self.processes.swapRemove(i);
dead_process.deinit(dead_process, self.allocator);
}
if (i == 0) break;
}
}
/// gets the number of processes still running
pub fn len(self: Scheduler) usize {
return self.processes.items.len;
}
/// resets the scheduler to its initial state and discards all the processes
pub fn clear(self: *Scheduler) void {
for (self.processes.items) |process| {
process.deinit(process, self.allocator);
}
self.processes.items.len = 0;
}
/// Aborts all scheduled processes. Unless an immediate operation is requested, the abort is scheduled for the next tick
pub fn abort(self: *Scheduler, immediately: bool) void {
for (self.processes.items) |handler| {
handler.process.abort(immediately);
}
}
};
test "" {
std.debug.print("\n", .{});
const Tester = struct {
process: Process,
fart: usize,
pub fn initialize(self: *@This(), data: anytype) void {
self.process = .{
.startFn = start,
.updateFn = update,
.abortedFn = aborted,
.failedFn = failed,
.succeededFn = succeeded,
};
self.fart = data;
}
fn start(process: *Process) void {
_ = process.getParent(@This());
// std.debug.print("start {}\n", .{self.fart});
}
fn aborted(process: *Process) void {
_ = process.getParent(@This());
// std.debug.print("aborted {}\n", .{self.fart});
}
fn failed(process: *Process) void {
_ = process.getParent(@This());
// std.debug.print("failed {}\n", .{self.fart});
}
fn succeeded(process: *Process) void {
_ = process.getParent(@This());
// std.debug.print("succeeded {}\n", .{self.fart});
}
fn update(process: *Process) void {
_ = process.getParent(@This());
// std.debug.print("update {}\n", .{self.fart});
process.succeed();
}
};
var scheduler = Scheduler.init(std.testing.allocator);
defer scheduler.deinit();
_ = scheduler.attach(Tester, 33).next(Tester, 66).next(Tester, 88).next(Tester, 99);
scheduler.update();
scheduler.update();
scheduler.update();
scheduler.update();
scheduler.update();
}
test "scheduler.clear" {
const Tester = struct {
process: Process,
pub fn initialize(self: *@This(), _: anytype) void {
self.process = .{ .updateFn = update };
}
fn update(_: *Process) void {
std.debug.assert(false);
}
};
var scheduler = Scheduler.init(std.testing.allocator);
defer scheduler.deinit();
_ = scheduler.attach(Tester, {}).next(Tester, {});
scheduler.clear();
scheduler.update();
}
test "scheduler.attach.next" {
const Tester = struct {
process: Process,
counter: *usize,
pub fn initialize(self: *@This(), data: anytype) void {
self.process = .{ .updateFn = update };
self.counter = data;
}
fn update(process: *Process) void {
const self = process.getParent(@This());
self.counter.* += 1;
process.succeed();
}
};
var scheduler = Scheduler.init(std.testing.allocator);
defer scheduler.deinit();
var counter: usize = 0;
_ = scheduler.attach(Tester, &counter).next(Tester, &counter);
scheduler.update();
scheduler.update();
try std.testing.expectEqual(counter, 2);
} | src/process/scheduler.zig |
const std = @import("std");
const os = std.os;
const assert = std.debug.assert;
const FIFO = @import("fifo.zig").FIFO;
const Time = @import("time.zig").Time;
const buffer_limit = @import("io.zig").buffer_limit;
pub const IO = struct {
kq: os.fd_t,
time: Time = .{},
timeouts: FIFO(Completion) = .{},
completed: FIFO(Completion) = .{},
io_pending: FIFO(Completion) = .{},
pub fn init(entries: u12, flags: u32) !IO {
const kq = try os.kqueue();
assert(kq > -1);
return IO{ .kq = kq };
}
pub fn deinit(self: *IO) void {
assert(self.kq > -1);
os.close(self.kq);
self.kq = -1;
}
/// Pass all queued submissions to the kernel and peek for completions.
pub fn tick(self: *IO) !void {
return self.flush(false);
}
/// Pass all queued submissions to the kernel and run for `nanoseconds`.
/// The `nanoseconds` argument is a u63 to allow coercion to the i64 used
/// in the __kernel_timespec struct.
pub fn run_for_ns(self: *IO, nanoseconds: u63) !void {
var timed_out = false;
var completion: Completion = undefined;
const on_timeout = struct {
fn callback(
timed_out_ptr: *bool,
_completion: *Completion,
_result: TimeoutError!void,
) void {
timed_out_ptr.* = true;
}
}.callback;
// Submit a timeout which sets the timed_out value to true to terminate the loop below.
self.timeout(
*bool,
&timed_out,
on_timeout,
&completion,
nanoseconds,
);
// Loop until our timeout completion is processed above, which sets timed_out to true.
// LLVM shouldn't be able to cache timed_out's value here since its address escapes above.
while (!timed_out) {
try self.flush(true);
}
}
fn flush(self: *IO, wait_for_completions: bool) !void {
var io_pending = self.io_pending.peek();
var events: [256]os.Kevent = undefined;
// Check timeouts and fill events with completions in io_pending
// (they will be submitted through kevent).
// Timeouts are expired here and possibly pushed to the completed queue.
const next_timeout = self.flush_timeouts();
const change_events = self.flush_io(&events, &io_pending);
// Only call kevent() if we need to submit io events or if we need to wait for completions.
if (change_events > 0 or self.completed.peek() == null) {
// Zero timeouts for kevent() implies a non-blocking poll
var ts = std.mem.zeroes(os.timespec);
// We need to wait (not poll) on kevent if there's nothing to submit or complete.
// We should never wait indefinitely (timeout_ptr = null for kevent) given:
// - tick() is non-blocking (wait_for_completions = false)
// - run_for_ns() always submits a timeout
if (change_events == 0 and self.completed.peek() == null) {
if (!wait_for_completions) return;
const timeout_ns = next_timeout orelse @panic("kevent() blocking forever");
ts.tv_nsec = @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s);
ts.tv_sec = @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s);
}
const new_events = try os.kevent(
self.kq,
events[0..change_events],
events[0..events.len],
&ts,
);
// Mark the io events submitted only after kevent() successfully processed them
self.io_pending.out = io_pending;
if (io_pending == null) {
self.io_pending.in = null;
}
for (events[0..new_events]) |event| {
const completion = @intToPtr(*Completion, event.udata);
self.completed.push(completion);
}
}
var completed = self.completed;
self.completed = .{};
while (completed.pop()) |completion| {
(completion.callback)(self, completion);
}
}
fn flush_io(self: *IO, events: []os.Kevent, io_pending_top: *?*Completion) usize {
for (events) |*event, flushed| {
const completion = io_pending_top.* orelse return flushed;
io_pending_top.* = completion.next;
const event_info = switch (completion.operation) {
.accept => |op| [2]c_int{ op.socket, os.EVFILT_READ },
.connect => |op| [2]c_int{ op.socket, os.EVFILT_WRITE },
.read => |op| [2]c_int{ op.fd, os.EVFILT_READ },
.write => |op| [2]c_int{ op.fd, os.EVFILT_WRITE },
.recv => |op| [2]c_int{ op.socket, os.EVFILT_READ },
.send => |op| [2]c_int{ op.socket, os.EVFILT_WRITE },
else => @panic("invalid completion operation queued for io"),
};
event.* = .{
.ident = @intCast(u32, event_info[0]),
.filter = @intCast(i16, event_info[1]),
.flags = os.EV_ADD | os.EV_ENABLE | os.EV_ONESHOT,
.fflags = 0,
.data = 0,
.udata = @ptrToInt(completion),
};
}
return events.len;
}
fn flush_timeouts(self: *IO) ?u64 {
var min_timeout: ?u64 = null;
var timeouts: ?*Completion = self.timeouts.peek();
while (timeouts) |completion| {
timeouts = completion.next;
// NOTE: We could cache `now` above the loop but monotonic() should be cheap to call.
const now = self.time.monotonic();
const expires = completion.operation.timeout.expires;
// NOTE: remove() could be O(1) here with a doubly-linked-list
// since we know the previous Completion.
if (now >= expires) {
self.timeouts.remove(completion);
self.completed.push(completion);
continue;
}
const timeout_ns = expires - now;
if (min_timeout) |min_ns| {
min_timeout = std.math.min(min_ns, timeout_ns);
} else {
min_timeout = timeout_ns;
}
}
return min_timeout;
}
/// This struct holds the data needed for a single IO operation
pub const Completion = struct {
next: ?*Completion,
context: ?*c_void,
callback: fn (*IO, *Completion) void,
operation: Operation,
};
const Operation = union(enum) {
accept: struct {
socket: os.socket_t,
flags: u32,
},
close: struct {
fd: os.fd_t,
},
connect: struct {
socket: os.socket_t,
address: std.net.Address,
initiated: bool,
},
fsync: struct {
fd: os.fd_t,
flags: u32,
},
openat: struct {
fd: os.fd_t,
path: [*:0]const u8,
flags: u32,
mode: os.mode_t,
},
read: struct {
fd: os.fd_t,
buf: [*]u8,
len: u32,
offset: u64,
},
recv: struct {
socket: os.socket_t,
buf: [*]u8,
len: u32,
flags: u32,
},
send: struct {
socket: os.socket_t,
buf: [*]const u8,
len: u32,
flags: u32,
},
timeout: struct {
expires: u64,
},
write: struct {
fd: os.fd_t,
buf: [*]const u8,
len: u32,
offset: u64,
},
};
fn submit(
self: *IO,
context: anytype,
comptime callback: anytype,
completion: *Completion,
comptime operation_tag: std.meta.Tag(Operation),
operation_data: anytype,
comptime OperationImpl: type,
) void {
const Context = @TypeOf(context);
const onCompleteFn = struct {
fn onComplete(io: *IO, _completion: *Completion) void {
                // Perform the actual operation
const op_data = &@field(_completion.operation, @tagName(operation_tag));
const result = OperationImpl.doOperation(op_data);
// Requeue onto io_pending if error.WouldBlock
switch (operation_tag) {
.accept, .connect, .read, .write, .send, .recv => {
_ = result catch |err| switch (err) {
error.WouldBlock => {
_completion.next = null;
io.io_pending.push(_completion);
return;
},
else => {},
};
},
else => {},
}
// Complete the Completion
return callback(
@intToPtr(Context, @ptrToInt(_completion.context)),
_completion,
result,
);
}
}.onComplete;
completion.* = .{
.next = null,
.context = context,
.callback = onCompleteFn,
.operation = @unionInit(Operation, @tagName(operation_tag), operation_data),
};
switch (operation_tag) {
.timeout => self.timeouts.push(completion),
else => self.completed.push(completion),
}
}
pub const AcceptError = os.AcceptError;
pub fn accept(
self: *IO,
comptime Context: type,
context: Context,
comptime callback: fn (
context: Context,
completion: *Completion,
result: AcceptError!os.socket_t,
) void,
completion: *Completion,
socket: os.socket_t,
flags: u32,
) void {
self.submit(
context,
callback,
completion,
.accept,
.{
.socket = socket,
.flags = flags,
},
struct {
fn doOperation(op: anytype) AcceptError!os.socket_t {
return os.accept(op.socket, null, null, op.flags);
}
},
);
}
pub const CloseError = error{
FileDescriptorInvalid,
DiskQuota,
InputOutput,
NoSpaceLeft,
} || os.UnexpectedError;
pub fn close(
self: *IO,
comptime Context: type,
context: Context,
comptime callback: fn (
context: Context,
completion: *Completion,
result: CloseError!void,
) void,
completion: *Completion,
fd: os.fd_t,
) void {
self.submit(
context,
callback,
completion,
.close,
.{
.fd = fd,
},
struct {
fn doOperation(op: anytype) CloseError!void {
return switch (os.errno(os.system.close(op.fd))) {
0 => {},
os.EBADF => error.FileDescriptorInvalid,
os.EINTR => {}, // A success, see https://github.com/ziglang/zig/issues/2425
os.EIO => error.InputOutput,
else => |errno| os.unexpectedErrno(errno),
};
}
},
);
}
pub const ConnectError = os.ConnectError;
pub fn connect(
self: *IO,
comptime Context: type,
context: Context,
comptime callback: fn (
context: Context,
completion: *Completion,
result: ConnectError!void,
) void,
completion: *Completion,
socket: os.socket_t,
address: std.net.Address,
) void {
self.submit(
context,
callback,
completion,
.connect,
.{
.socket = socket,
.address = address,
.initiated = false,
},
struct {
fn doOperation(op: anytype) ConnectError!void {
// Don't call connect after being rescheduled by io_pending as it gives EISCONN.
                    // Instead, check the socket error to see if it has been connected successfully.
const result = switch (op.initiated) {
true => os.getsockoptError(op.socket),
else => os.connect(op.socket, &op.address.any, op.address.getOsSockLen()),
};
op.initiated = true;
return result;
}
},
);
}
pub const FsyncError = error{
FileDescriptorInvalid,
DiskQuota,
ArgumentsInvalid,
InputOutput,
NoSpaceLeft,
ReadOnlyFileSystem,
} || os.UnexpectedError;
pub fn fsync(
self: *IO,
comptime Context: type,
context: Context,
comptime callback: fn (
context: Context,
completion: *Completion,
result: FsyncError!void,
) void,
completion: *Completion,
fd: os.fd_t,
flags: u32,
) void {
self.submit(
context,
callback,
completion,
.fsync,
.{
.fd = fd,
.flags = flags,
},
struct {
fn doOperation(op: anytype) FsyncError!void {
_ = os.fcntl(op.fd, os.F_FULLFSYNC, 1) catch return os.fsync(op.fd);
}
},
);
}
pub const OpenatError = error{
AccessDenied,
FileDescriptorInvalid,
DeviceBusy,
PathAlreadyExists,
FileTooBig,
ArgumentsInvalid,
IsDir,
SymLinkLoop,
ProcessFdQuotaExceeded,
NameTooLong,
SystemFdQuotaExceeded,
NoDevice,
FileNotFound,
SystemResources,
NoSpaceLeft,
NotDir,
FileLocksNotSupported,
WouldBlock,
} || os.UnexpectedError;
pub fn openat(
self: *IO,
comptime Context: type,
context: Context,
comptime callback: fn (
context: Context,
completion: *Completion,
result: OpenatError!os.fd_t,
) void,
completion: *Completion,
fd: os.fd_t,
path: [*:0]const u8,
flags: u32,
mode: os.mode_t,
) void {
self.submit(
context,
callback,
completion,
.openat,
.{
.fd = fd,
.path = path,
.mode = mode,
.flags = flags,
},
struct {
fn doOperation(op: anytype) OpenatError!os.fd_t {
return os.openatZ(op.fd, op.path, op.flags, op.mode);
}
},
);
}
pub const ReadError = error{
WouldBlock,
NotOpenForReading,
ConnectionResetByPeer,
Alignment,
InputOutput,
IsDir,
SystemResources,
Unseekable,
} || os.UnexpectedError;
pub fn read(
self: *IO,
comptime Context: type,
context: Context,
comptime callback: fn (
context: Context,
completion: *Completion,
result: ReadError!usize,
) void,
completion: *Completion,
fd: os.fd_t,
buffer: []u8,
offset: u64,
) void {
self.submit(
context,
callback,
completion,
.read,
.{
.fd = fd,
.buf = buffer.ptr,
.len = @intCast(u32, buffer_limit(buffer.len)),
.offset = offset,
},
struct {
fn doOperation(op: anytype) ReadError!usize {
while (true) {
const rc = os.system.pread(
op.fd,
op.buf,
op.len,
@bitCast(isize, op.offset),
);
return switch (os.errno(rc)) {
0 => @intCast(usize, rc),
os.EINTR => continue,
os.EAGAIN => error.WouldBlock,
os.EBADF => error.NotOpenForReading,
os.ECONNRESET => error.ConnectionResetByPeer,
os.EFAULT => unreachable,
os.EINVAL => error.Alignment,
os.EIO => error.InputOutput,
os.EISDIR => error.IsDir,
os.ENOBUFS => error.SystemResources,
os.ENOMEM => error.SystemResources,
os.ENXIO => error.Unseekable,
os.EOVERFLOW => error.Unseekable,
os.ESPIPE => error.Unseekable,
else => |err| os.unexpectedErrno(err),
};
}
}
},
);
}
pub const RecvError = os.RecvFromError;
pub fn recv(
self: *IO,
comptime Context: type,
context: Context,
comptime callback: fn (
context: Context,
completion: *Completion,
result: RecvError!usize,
) void,
completion: *Completion,
socket: os.socket_t,
buffer: []u8,
flags: u32,
) void {
self.submit(
context,
callback,
completion,
.recv,
.{
.socket = socket,
.buf = buffer.ptr,
.len = @intCast(u32, buffer_limit(buffer.len)),
.flags = flags,
},
struct {
fn doOperation(op: anytype) RecvError!usize {
return os.recv(op.socket, op.buf[0..op.len], op.flags);
}
},
);
}
pub const SendError = os.SendError;
pub fn send(
self: *IO,
comptime Context: type,
context: Context,
comptime callback: fn (
context: Context,
completion: *Completion,
result: SendError!usize,
) void,
completion: *Completion,
socket: os.socket_t,
buffer: []const u8,
flags: u32,
) void {
self.submit(
context,
callback,
completion,
.send,
.{
.socket = socket,
.buf = buffer.ptr,
.len = @intCast(u32, buffer_limit(buffer.len)),
.flags = flags,
},
struct {
fn doOperation(op: anytype) SendError!usize {
return os.send(op.socket, op.buf[0..op.len], op.flags);
}
},
);
}
pub const TimeoutError = error{Canceled} || os.UnexpectedError;
pub fn timeout(
self: *IO,
comptime Context: type,
context: Context,
comptime callback: fn (
context: Context,
completion: *Completion,
result: TimeoutError!void,
) void,
completion: *Completion,
nanoseconds: u63,
) void {
self.submit(
context,
callback,
completion,
.timeout,
.{
.expires = self.time.monotonic() + nanoseconds,
},
struct {
fn doOperation(_: anytype) TimeoutError!void {
return; // timeouts don't have errors for now
}
},
);
}
pub const WriteError = os.PWriteError;
pub fn write(
self: *IO,
comptime Context: type,
context: Context,
comptime callback: fn (
context: Context,
completion: *Completion,
result: WriteError!usize,
) void,
completion: *Completion,
fd: os.fd_t,
buffer: []const u8,
offset: u64,
) void {
self.submit(
context,
callback,
completion,
.write,
.{
.fd = fd,
.buf = buffer.ptr,
.len = @intCast(u32, buffer_limit(buffer.len)),
.offset = offset,
},
struct {
fn doOperation(op: anytype) WriteError!usize {
return os.pwrite(op.fd, op.buf[0..op.len], op.offset);
}
},
);
}
}; | src/io_darwin.zig |
const std = @import("std");
const helper = @import("helper.zig");
const Allocator = std.mem.Allocator;
const HashMap = std.AutoHashMap;
const ArrayList = std.ArrayList;
const input = @embedFile("../inputs/day13.txt");
pub fn run(alloc: Allocator, stdout_: anytype) !void {
var parsed = try Input.init(alloc, input);
defer parsed.deinit();
const num_points = @intCast(usize, parsed.count());
var point_cache = try ArrayList(Input.Point).initCapacity(alloc, num_points);
defer point_cache.deinit();
try parsed.fold(parsed.rules.items[0], &point_cache);
const res1 = parsed.count();
for (parsed.rules.items[1..]) |rule| {
try parsed.fold(rule, &point_cache);
}
if (stdout_) |stdout| {
try stdout.print("Part 1: {}\n", .{res1});
try stdout.print("Part 2:\n", .{});
try parsed.printGrid(stdout);
}
}
const Input = struct {
points: HashMap(Point, void),
rules: ArrayList(Rule),
const Self = @This();
pub fn init(alloc: Allocator, inp: []const u8) !Self {
var points = HashMap(Point, void).init(alloc);
var rules = ArrayList(Rule).init(alloc);
var lines = tokenize(u8, inp, "\r\n");
while (lines.next()) |line| {
const point = try Self.parsePoint(line);
try points.put(point, {});
if (lines.rest()[0] == 'f') break;
}
while (lines.next()) |line| {
const rule = try Rule.init(line);
try rules.append(rule);
}
return Self{ .points = points, .rules = rules };
}
pub fn fold(self: *Self, rule: Rule, point_cache: *ArrayList(Point)) !void {
point_cache.clearRetainingCapacity();
var point_iter = self.points.keyIterator();
while (point_iter.next()) |point| {
try point_cache.append(point.*);
}
for (point_cache.items) |point| {
try self.foldPoint(point, rule);
}
}
pub fn count(self: Self) HashMap(Point, void).Size {
return self.points.count();
}
pub fn printGrid(self: Self, stdout: anytype) !void {
var max = Point{ .x = 0, .y = 0 };
var point_iter = self.points.keyIterator();
while (point_iter.next()) |point| {
max.x = std.math.max(max.x, point.x + 1);
max.y = std.math.max(max.y, point.y + 1);
}
var j: usize = 0;
while (j < max.y) : (j += 1) {
var i: usize = 0;
while (i < max.x) : (i += 1) {
const point = Point{ .x = i, .y = j };
const ch: u8 = if (self.points.contains(point)) '#' else ' ';
try stdout.print("{c}", .{ch});
}
try stdout.print("\n", .{});
}
}
fn foldPoint(self: *Self, point_: Point, rule: Rule) !void {
var point = point_;
switch (rule) {
.fold_x => |num| {
if (point.x < num) return;
_ = self.points.remove(point);
if (point.x > num) {
const offset = point.x - num;
point.x = num - offset;
try self.points.put(point, {});
}
},
.fold_y => |num| {
if (point.y < num) return;
_ = self.points.remove(point);
if (point.y > num) {
const offset = point.y - num;
point.y = num - offset;
try self.points.put(point, {});
}
},
}
}
pub fn deinit(self: *Self) void {
self.points.deinit();
self.rules.deinit();
}
fn parsePoint(line: []const u8) !Point {
var splut = split(u8, line, ",");
const x_str = splut.next().?;
const y_str = splut.next().?;
if (splut.next() != null) unreachable;
const x = try parseUnsigned(usize, x_str, 10);
const y = try parseUnsigned(usize, y_str, 10);
return Point{ .x = x, .y = y };
}
const Point = struct { x: usize, y: usize };
};
const Rule = union(enum) {
fold_x: usize,
fold_y: usize,
const Self = @This();
pub fn init(line: []const u8) !Self {
var tokens = tokenize(u8, line, " =");
_ = tokens.next().?; // fold
_ = tokens.next().?; // along
const letter = tokens.next().?; // x/y
const num_str = tokens.next().?;
if (tokens.next() != null) unreachable;
const num: usize = try parseUnsigned(usize, num_str, 10);
if (letter[0] == 'x') {
return Self{ .fold_x = num };
} else if (letter[0] == 'y') {
return Self{ .fold_y = num };
} else {
unreachable;
}
}
};
const eql = std.mem.eql;
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const count = std.mem.count;
const parseUnsigned = std.fmt.parseUnsigned;
const parseInt = std.fmt.parseInt;
const sort = std.sort.sort; | src/day13.zig |
const std = @import("std");
const fs = std.fs;
const Builder = std.build.Builder;
const LibExeObjStep = std.build.LibExeObjStep;
const CrossTarget = std.zig.CrossTarget;
const Mode = std.builtin.Mode;
const builtin = @import("builtin");
pub fn build(b: *Builder) void {
const target = b.standardTargetOptions(.{});
const mode = b.standardReleaseOptions();
if (target.getCpu().arch != .wasm32) {
buildNative(b, target, mode) catch unreachable;
} else {
buildWasm(b, target, mode) catch |err| {
std.log.err("{s}", .{err});
};
}
}
// this is the regular build for all native platforms
fn buildNative(b: *Builder, target: CrossTarget, mode: Mode) !void {
const tracy = b.option([]const u8, "tracy", "Enable Tracy integration. Supply path to Tracy source");
const tracy_callstack = b.option(bool, "tracy-callstack", "Include callstack information with Tracy data. Does nothing if -Dtracy is not provided") orelse false;
const tracy_allocation = b.option(bool, "tracy-allocation", "Include allocation information with Tracy data. Does nothing if -Dtracy is not provided") orelse false;
const exe = b.addExecutable("pacman", "src/pacman.zig");
const cross_compiling_to_darwin = target.isDarwin() and (target.getOsTag() != builtin.os.tag);
exe.setTarget(target);
exe.setBuildMode(mode);
exe.addPackagePath("sokol", "src/sokol/sokol.zig");
exe.linkLibrary(libSokol(b, target, mode, cross_compiling_to_darwin, ""));
if (cross_compiling_to_darwin) {
addDarwinCrossCompilePaths(b, exe);
}
const exe_options = b.addOptions();
exe.addOptions("build_options", exe_options);
exe_options.addOption(bool, "enable_tracy", tracy != null);
exe_options.addOption(bool, "enable_tracy_callstack", tracy_callstack);
exe_options.addOption(bool, "enable_tracy_allocation", tracy_allocation);
if (tracy) |tracy_path| {
const client_cpp = fs.path.join(
b.allocator,
&[_][]const u8{ tracy_path, "TracyClient.cpp" },
) catch unreachable;
// On mingw, we need to opt into windows 7+ to get some features required by tracy.
const tracy_c_flags: []const []const u8 = if (target.isWindows() and target.getAbi() == .gnu)
&[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined", "-D_WIN32_WINNT=0x601" }
else
&[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" };
exe.addIncludeDir(tracy_path);
exe.addCSourceFile(client_cpp, tracy_c_flags);
exe.linkSystemLibraryName("c++");
exe.linkLibC();
if (target.isWindows()) {
exe.linkSystemLibrary("dbghelp");
exe.linkSystemLibrary("ws2_32");
}
}
exe.install();
b.step("run", "Run pacman").dependOn(&exe.run().step);
// for iOS generate a valid app bundle directory structure
if (target.getOsTag() == .ios) {
const install_path = try fs.path.join(b.allocator, &.{ b.install_path, "bin", "pacman" });
defer b.allocator.free(install_path);
b.installFile(install_path, "bin/Pacman.app/pacman");
b.installFile("src/ios/Info.plist", "bin/Pacman.app/Info.plist");
}
}
// building for WASM/HTML5 requires a couple of hacks and workarounds:
//
// - emcc must be used as linker instead of the zig linker to implement
// the additional "Emscripten magic" (e.g. generating the .html and .js
// file, setting up the web API shims, etc...)
// - the Sokol C headers must be compiled as target wasm32-emscripten, otherwise
// the EMSCRIPTEN_KEEPALIVE and EM_JS macro magic doesn't work
// - an additional header search path into Emscripten's sysroot
// must be set so that the C code compiled with Zig finds the Emscripten
// sysroot headers
// - the Zig code must *not* be compiled with wasm32-emscripten, because parts
// of the Zig stdlib doesn't compile, so instead use wasm32-freestanding
// - the game code in pacman.zig is compiled into a library, and a
// C file (src/emscripten/entry.c) is used as entry point, which then
// calls an exported entry function "emsc_main()" in pacman.zig instead
// of the regular zig main function.
//
fn buildWasm(b: *Builder, target: CrossTarget, mode: Mode) !void {
if (b.sysroot == null) {
        std.log.err("Please build with 'zig build -Dtarget=wasm32-emscripten --sysroot [path/to/emsdk]/upstream/emscripten/cache/sysroot'", .{});
return error.SysRootExpected;
}
// derive the emcc and emrun paths from the provided sysroot:
const emcc_path = try fs.path.join(b.allocator, &.{ b.sysroot.?, "../../emcc" });
defer b.allocator.free(emcc_path);
const emrun_path = try fs.path.join(b.allocator, &.{ b.sysroot.?, "../../emrun" });
defer b.allocator.free(emrun_path);
// for some reason, the sysroot/include path must be provided separately
const include_path = try fs.path.join(b.allocator, &.{ b.sysroot.?, "include" });
defer b.allocator.free(include_path);
// sokol must be built with wasm32-emscripten
var wasm32_emscripten_target = target;
wasm32_emscripten_target.os_tag = .emscripten;
const libsokol = libSokol(b, wasm32_emscripten_target, mode, false, "");
libsokol.defineCMacro("__EMSCRIPTEN__", "1");
libsokol.addIncludeDir(include_path);
libsokol.install();
// the game code must be build as library with wasm32-freestanding
var wasm32_freestanding_target = target;
wasm32_freestanding_target.os_tag = .freestanding;
const libgame = b.addStaticLibrary("game", "src/pacman.zig");
libgame.setTarget(wasm32_freestanding_target);
libgame.setBuildMode(mode);
libgame.addPackagePath("sokol", "src/sokol/sokol.zig");
libgame.install();
// call the emcc linker step as a 'system command' zig build step which
// depends on the libsokol and libgame build steps
try fs.cwd().makePath("zig-out/web");
const emcc = b.addSystemCommand(&.{
emcc_path,
"-Os",
"--closure",
"1",
"src/emscripten/entry.c",
"-ozig-out/web/pacman.html",
"--shell-file",
"src/emscripten/shell.html",
"-Lzig-out/lib/",
"-lgame",
"-lsokol",
"-sNO_FILESYSTEM=1",
"-sMALLOC='emmalloc'",
"-sASSERTIONS=0",
"-sEXPORTED_FUNCTIONS=['_malloc','_free','_main']",
});
emcc.step.dependOn(&libsokol.install_step.?.step);
emcc.step.dependOn(&libgame.install_step.?.step);
// get the emcc step to run on 'zig build'
b.getInstallStep().dependOn(&emcc.step);
    // a separate run step using emrun
const emrun = b.addSystemCommand(&.{ emrun_path, "zig-out/web/pacman.html" });
emrun.step.dependOn(&emcc.step);
b.step("run", "Run pacman").dependOn(&emrun.step);
}
fn libSokol(b: *Builder, target: CrossTarget, mode: Mode, cross_compiling_to_darwin: bool, comptime prefix_path: []const u8) *LibExeObjStep {
const lib = b.addStaticLibrary("sokol", null);
lib.setTarget(target);
lib.setBuildMode(mode);
lib.linkLibC();
const sokol_path = prefix_path ++ "src/sokol/sokol.c";
if (lib.target.isDarwin()) {
lib.addCSourceFile(sokol_path, &.{"-ObjC"});
lib.linkFramework("MetalKit");
lib.linkFramework("Metal");
lib.linkFramework("AudioToolbox");
if (target.getOsTag() == .ios) {
lib.linkFramework("UIKit");
lib.linkFramework("AVFoundation");
lib.linkFramework("Foundation");
} else {
lib.linkFramework("Cocoa");
lib.linkFramework("QuartzCore");
}
} else {
lib.addCSourceFile(sokol_path, &.{});
if (lib.target.isLinux()) {
lib.linkSystemLibrary("X11");
lib.linkSystemLibrary("Xi");
lib.linkSystemLibrary("Xcursor");
lib.linkSystemLibrary("GL");
lib.linkSystemLibrary("asound");
} else if (lib.target.isWindows()) {
lib.linkSystemLibrary("kernel32");
lib.linkSystemLibrary("user32");
lib.linkSystemLibrary("gdi32");
lib.linkSystemLibrary("ole32");
lib.linkSystemLibrary("d3d11");
lib.linkSystemLibrary("dxgi");
}
}
// setup cross-compilation search paths
if (cross_compiling_to_darwin) {
addDarwinCrossCompilePaths(b, lib);
}
return lib;
}
fn addDarwinCrossCompilePaths(b: *Builder, step: *LibExeObjStep) void {
checkDarwinSysRoot(b);
step.addLibPath("/usr/lib");
step.addSystemIncludeDir("/usr/include");
step.addFrameworkDir("/System/Library/Frameworks");
}
fn checkDarwinSysRoot(b: *Builder) void {
if (b.sysroot == null) {
std.log.warn("===================================================================================", .{});
        std.log.warn("You haven't set the path to the Apple SDK, which may lead to build errors.", .{});
        std.log.warn("Hint: you can set the path to the Apple SDK with the --sysroot <path> flag like so:", .{});
std.log.warn(" zig build --sysroot $(xcrun --sdk iphoneos --show-sdk-path) -Dtarget=aarch64-ios", .{});
std.log.warn("or:", .{});
std.log.warn(" zig build --sysroot $(xcrun --sdk iphonesimulator --show-sdk-path) -Dtarget=aarch64-ios-simulator", .{});
std.log.warn("===================================================================================", .{});
}
} | build.zig |
const std = @import("std");
const zs = @import("zstack.zig");
// Puts an upper bound on a game of 49.7 days for 1 ms tick cycle, or 397 days for
// a more usual 8ms tick cycle.
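// (Since `tick` below is a u32: 2^32 ticks * 1 ms ≈ 49.7 days, and 2^32 ticks * 8 ms ≈ 397 days.)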
pub const ReplayInput = packed struct {
tick: u32,
keys: u32,
};
pub const ReplayInputIterator = struct {
const Self = @This();
// Bytes are stored as little-endian.
raw: []const u8,
pos: usize,
pub fn init(bytes: []const u8) !Self {
if (bytes.len % @sizeOf(ReplayInput) != 0) {
return error.InvalidInputLength;
}
return Self{
.raw = bytes,
.pos = 0,
};
}
pub fn next(self: *Self) ?ReplayInput {
if (self.pos >= self.raw.len) return null;
const input = ReplayInput{
.tick = std.mem.readIntSliceLittle(u32, self.raw[self.pos..]),
.keys = std.mem.readIntSliceLittle(u32, self.raw[self.pos + 4 ..]),
};
self.pos += 8;
return input;
}
};
// A callback is provided to the engine which is used to periodically dump an array
// of inputs with their tickstamps. This can be used to store the partial data or
// write to disk immediately.
fn replayCallback(inputs: []ReplayInput) void {
//
}
// We need to serialize the following content for a replay:
//
// - file version
// - random seed
// - game options
// - input sequence
// - statistics
//
// A replay also indicates overall statistics in the same file.
// 1832 01 // Magic number followed by file version
// seed=120397124
// rotation_system=srs // options are specified via key:value pairs, can we re-use config parser?
// <ffff>
// <binary inputs>
// <ffff>
// time_ticks=19383
// kpt=302983
// finesse=34
// Format is as follows:
//
// ```
// ZS1
// seed=12301824
// ```
pub const v1 = struct {
const header = "ZS1\n";
const marker = []u8{0xff} ** 8;
// var r = Reader.init();
// r.readOptions();
// var keys = r.readInput(engine.tick); // return the input for the current tick
// TODO: Make a Reader and take an instream instead. Can use a SliceInStream if needed.
pub fn read(options: *zs.Options, inputs: *ReplayInputIterator, game: []const u8) !void {
        if (game.len < header.len or !std.mem.eql(u8, game[0..header.len], header)) {
            return error.InvalidV1Header;
        }
const maybe_replay_inputs = std.mem.indexOf(u8, game, marker);
const replay_inputs = maybe_replay_inputs orelse return error.NoInputsFound;
const replay_options = game[header.len..replay_inputs];
try zs.config.parseIni(options, replay_options);
inputs.* = try ReplayInputIterator.init(game[replay_inputs + marker.len ..]);
}
pub fn Writer(comptime Error: type) type {
return struct {
const Self = @This();
stream: *std.io.OutStream(Error),
last_keys: u32,
pub fn init(stream: *std.io.OutStream(Error)) Self {
return Self{ .stream = stream, .last_keys = std.math.maxInt(u32) };
}
pub fn writeHeader(self: Self, options: zs.Options) !void {
try self.stream.write(header);
// write options
// TODO: Don't require [game] header if possible
try self.stream.print("[game]\n");
inline for (@typeInfo(@typeOf(options)).Struct.fields) |field| {
switch (@typeInfo(field.field_type)) {
.Enum => {
try self.stream.print("{}={}\n", field.name, @tagName(@field(options, field.name)));
},
else => {
try self.stream.print("{}={}\n", field.name, @field(options, field.name));
},
}
}
try self.stream.writeIntLittle(u32, 0xffffffff); // tick marker
try self.stream.writeIntLittle(u32, 0xffffffff); // keys marker
}
pub fn writeInputs(self: Self, inputs: []const ReplayInput) !void {
for (inputs) |input| {
try self.stream.writeIntLittle(u32, input.tick);
try self.stream.writeIntLittle(u32, input.keys);
}
}
pub fn writeKeys(self: Self, tick: u32, keys: u32) !void {
if (keys != self.last_keys) {
try self.stream.writeIntLittle(u32, tick);
try self.stream.writeIntLittle(u32, keys);
self.last_keys = keys;
}
}
};
}
};
test "v1.read" {
const game_replay =
\\ZS1
\\;TODO: Handle no group specification or don't use ini parser
\\[game]
\\rotation_system =dtet
\\goal=10
++ v1.marker ++ "\x12\x03\x00\x00\x98\x01\x00\x30";
var options = zs.Options{};
var inputs: ReplayInputIterator = undefined;
try v1.read(&options, &inputs, game_replay);
std.testing.expectEqual(options.rotation_system, .dtet);
std.testing.expectEqual(options.goal, 10);
const expected_inputs = []ReplayInput{ReplayInput{ .tick = 786, .keys = 0x30000198 }};
var i: usize = 0;
while (inputs.next()) |input| {
std.testing.expect(i < expected_inputs.len);
std.testing.expectEqual(input.tick, expected_inputs[i].tick);
std.testing.expectEqual(input.keys, expected_inputs[i].keys);
i += 1;
}
std.testing.expectEqual(i, expected_inputs.len);
}
test "v1.write" {
const options = zs.Options{};
const inputs = []ReplayInput{ReplayInput{ .tick = 786, .keys = 0x30000198 }};
var storage: [1024]u8 = undefined;
var slice_out_stream = std.io.SliceOutStream.init(storage[0..]);
var writer = v1.Writer(std.io.SliceOutStream.Error).init(&slice_out_stream.stream);
try writer.writeHeader(options);
try writer.writeInputs(inputs[0..]);
const replay_output = slice_out_stream.getWritten();
var read_options: zs.Options = undefined;
var read_inputs: ReplayInputIterator = undefined;
try v1.read(&read_options, &read_inputs, replay_output);
inline for (@typeInfo(zs.Options).Struct.fields) |field| {
std.testing.expectEqual(@field(read_options, field.name), @field(options, field.name));
}
var i: usize = 0;
while (read_inputs.next()) |read_input| {
std.testing.expectEqual(read_input.tick, inputs[i].tick);
std.testing.expectEqual(read_input.keys, inputs[i].keys);
i += 1;
}
}
// TODO: Statistics are stored separately and point to a filename. We can then
// merge all hiscore details into a single file. | src/replay.zig |
const builtin = @import("builtin");
const is_test = builtin.is_test;
const low = switch (builtin.endian) {
builtin.Endian.Big => 1,
builtin.Endian.Little => 0,
};
const high = 1 - low;
pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt {
@setRuntimeSafety(is_test);
const SingleInt = @IntType(false, @divExact(DoubleInt.bit_count, 2));
const SignedDoubleInt = @IntType(true, DoubleInt.bit_count);
const Log2SingleInt = @import("std").math.Log2Int(SingleInt);
const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421
const d = @ptrCast(*const [2]SingleInt, &b).*; // TODO issue #421
var q: [2]SingleInt = undefined;
var r: [2]SingleInt = undefined;
var sr: c_uint = undefined;
// special cases, X is unknown, K != 0
if (n[high] == 0) {
if (d[high] == 0) {
// 0 X
// ---
// 0 X
if (maybe_rem) |rem| {
rem.* = n[low] % d[low];
}
return n[low] / d[low];
}
// 0 X
// ---
// K X
if (maybe_rem) |rem| {
rem.* = n[low];
}
return 0;
}
// n[high] != 0
if (d[low] == 0) {
if (d[high] == 0) {
// K X
// ---
// 0 0
if (maybe_rem) |rem| {
rem.* = n[high] % d[low];
}
return n[high] / d[low];
}
// d[high] != 0
if (n[low] == 0) {
// K 0
// ---
// K 0
if (maybe_rem) |rem| {
r[high] = n[high] % d[high];
r[low] = 0;
rem.* = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
}
return n[high] / d[high];
}
// K K
// ---
// K 0
if ((d[high] & (d[high] - 1)) == 0) {
// d is a power of 2
if (maybe_rem) |rem| {
r[low] = n[low];
r[high] = n[high] & (d[high] - 1);
rem.* = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
}
return n[high] >> @intCast(Log2SingleInt, @ctz(SingleInt, d[high]));
}
// K K
// ---
// K 0
sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high])));
// 0 <= sr <= SingleInt.bit_count - 2 or sr large
if (sr > SingleInt.bit_count - 2) {
if (maybe_rem) |rem| {
rem.* = a;
}
return 0;
}
sr += 1;
// 1 <= sr <= SingleInt.bit_count - 1
// q.all = a << (DoubleInt.bit_count - sr);
q[low] = 0;
q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
// r.all = a >> sr;
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
} else {
// d[low] != 0
if (d[high] == 0) {
// K X
// ---
// 0 K
if ((d[low] & (d[low] - 1)) == 0) {
// d is a power of 2
if (maybe_rem) |rem| {
rem.* = n[low] & (d[low] - 1);
}
if (d[low] == 1) {
return a;
}
sr = @ctz(SingleInt, d[low]);
q[high] = n[high] >> @intCast(Log2SingleInt, sr);
q[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
return @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421
}
// K X
// ---
// 0 K
sr = 1 + SingleInt.bit_count + @as(c_uint, @clz(SingleInt, d[low])) - @as(c_uint, @clz(SingleInt, n[high]));
// 2 <= sr <= DoubleInt.bit_count - 1
// q.all = a << (DoubleInt.bit_count - sr);
// r.all = a >> sr;
if (sr == SingleInt.bit_count) {
q[low] = 0;
q[high] = n[low];
r[high] = 0;
r[low] = n[high];
} else if (sr < SingleInt.bit_count) {
// 2 <= sr <= SingleInt.bit_count - 1
q[low] = 0;
q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
} else {
// SingleInt.bit_count + 1 <= sr <= DoubleInt.bit_count - 1
q[low] = n[low] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr);
q[high] = (n[high] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count));
r[high] = 0;
r[low] = n[high] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count);
}
} else {
// K X
// ---
// K K
sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high])));
// 0 <= sr <= SingleInt.bit_count - 1 or sr large
if (sr > SingleInt.bit_count - 1) {
if (maybe_rem) |rem| {
rem.* = a;
}
return 0;
}
sr += 1;
// 1 <= sr <= SingleInt.bit_count
// q.all = a << (DoubleInt.bit_count - sr);
// r.all = a >> sr;
q[low] = 0;
if (sr == SingleInt.bit_count) {
q[high] = n[low];
r[high] = 0;
r[low] = n[high];
} else {
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
}
}
}
// Not a special case
// q and r are initialized with:
// q.all = a << (DoubleInt.bit_count - sr);
// r.all = a >> sr;
// 1 <= sr <= DoubleInt.bit_count - 1
var carry: u32 = 0;
var r_all: DoubleInt = undefined;
while (sr > 0) : (sr -= 1) {
// r:q = ((r:q) << 1) | carry
r[high] = (r[high] << 1) | (r[low] >> (SingleInt.bit_count - 1));
r[low] = (r[low] << 1) | (q[high] >> (SingleInt.bit_count - 1));
q[high] = (q[high] << 1) | (q[low] >> (SingleInt.bit_count - 1));
q[low] = (q[low] << 1) | carry;
// carry = 0;
// if (r.all >= b)
// {
// r.all -= b;
// carry = 1;
// }
r_all = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
const s: SignedDoubleInt = @intCast(SignedDoubleInt, b -% r_all -% 1) >> (DoubleInt.bit_count - 1);
carry = @intCast(u32, s & 1);
r_all -= b & @bitCast(DoubleInt, s);
r = @ptrCast(*[2]SingleInt, &r_all).*; // TODO issue #421
}
const q_all = ((@ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*) << 1) | carry; // TODO issue #421
if (maybe_rem) |rem| {
rem.* = r_all;
}
return q_all;
} | lib/std/special/compiler_rt/udivmod.zig |
const std = @import("std");
const warn = std.debug.warn;
const pf = @import("parse-float.zig");
const c = @cImport({
@cInclude("lzma_header.h");
@cInclude("easylzma/simple.h");
});
// Taken from std/debug/leb128.zig
fn readULEB128(comptime T: type, in_stream: var) !T {
const ShiftT = std.meta.IntType(false, std.math.log2(T.bit_count));
var result: T = 0;
var shift: usize = 0;
while (true) {
const byte = try in_stream.readByte();
if (shift > T.bit_count)
return error.Overflow;
var operand: T = undefined;
if (@shlWithOverflow(T, byte & 0x7f, @intCast(ShiftT, shift), &operand))
return error.Overflow;
result |= operand;
if ((byte & 0x80) == 0)
return result;
shift += 7;
}
}
fn readString(in_stream: var, alloc: *std.mem.Allocator) ![]u8 {
const is_str_present = try in_stream.readByte();
if (is_str_present == 0x0b) {
const len = try readULEB128(u64, in_stream);
var data: []u8 = try alloc.alloc(u8, len);
try in_stream.readNoEof(data);
return data;
}
return "";
}
inline fn isDigit(ch: u8) bool {
return (ch >= '0' and ch <= '9') or ch == '-' or ch == '.';
}
pub const ReplayAction = struct {
time_since_previous: i64 = undefined,
x: f64 = undefined,
y: f64 = undefined,
key_mouse_set: i32 = undefined,
fn init(data: []const u8) !ReplayAction {
var res = ReplayAction{};
// Entries are in "w|x|y|z" format, first is a long int,
// second and third are floats, fourth is a int
var it = std.mem.tokenize(data, "|");
var current_var: usize = 0;
while (it.next()) |item| : (current_var += 1) {
switch (current_var) {
0 => res.time_since_previous = try std.fmt.parseInt(i64, item, 10),
1 => res.x = try pf.parse_float(item),
2 => res.y = try pf.parse_float(item),
3 => res.key_mouse_set = try std.fmt.parseInt(i32, item, 10),
else => return error.InvalidReplayFile,
}
}
return res;
}
};
pub const PlayMode = packed enum {
Standard,
Taiko,
CTB,
Mania,
};
pub const OsuReplay = struct {
play_mode: PlayMode,
game_version: i32,
beatmap_hash: []const u8,
player_name: []const u8,
replay_hash: []const u8,
count_300s: i16,
count_100s: i16,
count_50s: i16,
count_gekis: i16,
count_katus: i16,
count_misses: i16,
total_score: i32,
max_combo: i16,
is_fc: bool,
mod_list: i32,
life_bar_graph: []const u8,
unix_timestamp: i64,
replay_data: std.ArrayList(ReplayAction) = undefined,
online_score_id: i64 = 0,
alloc: *std.mem.Allocator,
pub fn init(st: var, alloc: *std.mem.Allocator) !OsuReplay {
var r = OsuReplay{
.play_mode = @intToEnum(PlayMode, @truncate(u2, try st.readByte())),
.game_version = try st.readIntLittle(i32),
.beatmap_hash = try readString(st, alloc),
.player_name = try readString(st, alloc),
.replay_hash = try readString(st, alloc),
.count_300s = try st.readIntLittle(i16),
.count_100s = try st.readIntLittle(i16),
.count_50s = try st.readIntLittle(i16),
.count_gekis = try st.readIntLittle(i16),
.count_katus = try st.readIntLittle(i16),
.count_misses = try st.readIntLittle(i16),
.total_score = try st.readIntLittle(i32),
.max_combo = try st.readIntLittle(i16),
.is_fc = (try st.readByte()) != 0,
.mod_list = try st.readIntLittle(i32),
.life_bar_graph = try readString(st, alloc),
.unix_timestamp = @divFloor(((try st.readIntLittle(i64)) - 621355968000000000), 10000000),
.alloc = alloc,
};
// Read length of LZMA-compressed data
const in_data_len = try st.readIntLittle(u32);
// Allocate and read compressed data into the buffer
var lzma_data = try alloc.alloc(u8, in_data_len);
defer alloc.free(lzma_data);
try st.readNoEof(lzma_data);
// Variables for output from the C function
var out_data: []u8 = undefined;
var out_len: usize = undefined;
// Decompress LZMA-compressed string using easylzma C library
const error_code = c.simpleDecompress(c.elzma_file_format.ELZMA_lzma, &lzma_data[0], in_data_len, @ptrCast([*c][*c]u8, &out_data.ptr), &out_len);
if (error_code != 0) {
return error.InvalidReplayFile;
}
// Create an array list which we will use to hold all parsed ReplayAction objects
r.replay_data = std.ArrayList(ReplayAction).init(alloc);
// Tokenize the string by commas and parse separate items
var it = std.mem.tokenize(out_data[0..out_len], ",");
while (it.next()) |item| {
try r.replay_data.append(try ReplayAction.init(item));
}
// Free LZMA decompressed string
alloc.free(out_data[0..out_len]);
r.online_score_id = try st.readIntLittle(i64);
return r;
}
fn deinit(self: *OsuReplay) void {
// Free all strings (they need to be freed since read_string uses alloc)
self.alloc.free(self.beatmap_hash);
self.alloc.free(self.player_name);
self.alloc.free(self.replay_hash);
self.alloc.free(self.life_bar_graph);
// Free replay_data array list
self.replay_data.deinit();
}
};
test "Replay parsing" {
const fs = std.fs;
const io = std.io;
const c_alloc = std.heap.c_allocator;
const expect = std.testing.expect;
var file = try fs.cwd().openFile("resources/cookiezi817.osr", .{});
const file_len = try file.getEndPos();
var file_data: []u8 = try c_alloc.alloc(u8, file_len);
defer c_alloc.free(file_data);
_ = try file.read(file_data);
file.close();
var buf_stream = io.fixedBufferStream(file_data);
const st = &buf_stream.inStream();
var r = try OsuReplay.init(st, c_alloc);
defer r.deinit();
expect(r.play_mode == PlayMode.Standard);
expect(r.game_version == 20151228);
expect(std.mem.eql(u8, r.beatmap_hash, "d7e1002824cb188bf318326aa109469d"));
expect(std.mem.eql(u8, r.player_name, "Cookiezi"));
expect(r.count_300s == 1165);
expect(r.count_100s == 8);
expect(r.count_50s == 0);
expect(r.count_gekis == 254);
expect(r.count_katus == 7);
expect(r.count_misses == 0);
expect(r.total_score == 72389038);
expect(r.max_combo == 1773);
expect(r.is_fc == false);
expect(r.replay_data.len == 16160);
} | src/osureplay.zig |
usingnamespace @import("root").preamble;
const log = lib.output.log.scoped(.{
.prefix = "AHCI",
.filter = .info,
}).write;
const memory = os.memory;
const thread = os.thread;
const platform = os.platform;
const bf = lib.util.bitfields;
const libalign = lib.util.libalign;
const abar_size = 0x1100;
const port_control_registers_size = 0x80;
const Port = packed struct {
command_list_base: [2]u32,
fis_base: [2]u32,
interrupt_status: u32,
interrupt_enable: u32,
command_status: extern union {
raw: u32,
start: bf.Boolean(u32, 0),
recv_enable: bf.Boolean(u32, 4),
fis_recv_running: bf.Boolean(u32, 14),
command_list_running: bf.Boolean(u32, 15),
},
reserved_0x1C: u32,
task_file_data: extern union {
raw: u32,
transfer_requested: bf.Boolean(u32, 3),
interface_busy: bf.Boolean(u32, 7),
},
signature: u32,
sata_status: u32,
sata_control: u32,
sata_error: u32,
sata_active: u32,
command_issue: u32,
sata_notification: u32,
fis_switching_control: u32,
device_sleep: u32,
reserved_0x48: [0x70 - 0x48]u8,
vendor_0x70: [0x80 - 0x70]u8,
fn getCommandHeaders(self: *const volatile @This()) *volatile [32]CommandTableHeader {
const addr = read_u64(&self.command_list_base);
const cmd_list = os.platform.phys_ptr(*volatile CommandList).from_int(addr).get_uncached();
return &cmd_list.command_headers;
}
pub fn startCommandEngine(self: *volatile @This()) void {
log(.debug, "Starting command engine for port at 0x{X}", .{@ptrToInt(self)});
self.waitRdy();
self.command_status.start.write(false);
self.command_status.recv_enable.write(false);
const status = &self.command_status;
while (status.command_list_running.read() or status.fis_recv_running.read()) {
thread.scheduler.yield();
}
self.command_status.recv_enable.write(true);
self.command_status.start.write(true);
}
pub fn stopCommandEngine(self: *volatile @This()) void {
log(.debug, "Stopping command engine for port at 0x{X}", .{@ptrToInt(self)});
self.command_status.start.write(false);
while (self.command_status.command_list_running.read()) {
thread.scheduler.yield();
}
self.command_status.recv_enable.write(false);
while (self.command_status.fis_recv_running.read()) {
thread.scheduler.yield();
}
}
pub fn waitRdy(self: *volatile @This()) void {
const task_file_data = &self.task_file_data;
while (task_file_data.transfer_requested.read() or task_file_data.interface_busy.read()) {
thread.scheduler.yield();
}
}
pub fn issueCommands(self: *volatile @This(), slot_bits: u32) void {
log(.debug, "Sending {d} command(s) to port 0x{X}", .{
@popCount(u32, slot_bits),
@ptrToInt(self),
});
self.waitRdy();
self.command_issue |= slot_bits;
while ((self.command_issue & slot_bits) != 0) {
thread.scheduler.yield();
}
}
pub fn getCommandHeader(self: *const volatile @This(), slot: u5) *volatile CommandTableHeader {
return &self.getCommandHeaders()[slot];
}
pub fn getCommandTable(self: *const volatile @This(), slot: u5) *volatile CommandTable {
return self.getCommandHeader(slot).table();
}
pub fn getFis(self: *volatile @This(), slot: u5) *volatile CommandFis {
return &self.getCommandTable(slot).command_fis;
}
pub fn makeH2D(self: *volatile @This(), slot: u5) *volatile FisH2D {
const fis = &self.getFis(slot).h2d;
fis.fis_type = 0x27;
return fis;
}
pub fn getPrd(self: *volatile @This(), slot: u5, prd_idx: usize) *volatile PRD {
return &self.getCommandTable(slot).prds[prd_idx];
}
pub fn buffer(self: *volatile @This(), slot: u5, prd_idx: usize) []u8 {
const prd_ptr = self.getPrd(slot, 0);
const buf_addr = read_u64(&prd_ptr.data_base_addr);
const buf_size = @as(usize, prd_ptr.sizem1) + 1;
return os.platform.phys_slice(u8).init(buf_addr, buf_size).to_slice_writeback();
}
pub fn read_single_sector(self: *volatile @This(), slot: u5) void {
self.issueCommands(1 << slot);
}
};
comptime {
std.debug.assert(@sizeOf(Port) == 0x80);
}
const Abar = struct {
hba_capabilities: u32,
global_hba_control: u32,
interrupt_status: u32,
ports_implemented: u32,
version: extern union {
value: u32,
major: bf.Bitfield(u32, 16, 16),
minor_high: bf.Bitfield(u32, 8, 8),
minor_low: bf.Bitfield(u32, 0, 8),
pub fn format(
self: *const @This(),
fmt: anytype,
) void {
fmt("{d}.{d}", .{ self.major.read(), self.minor_high.read() });
if (self.minor_low.read() != 0) {
fmt(".{d}", .{self.minor_low.read()});
}
}
comptime {
std.debug.assert(@sizeOf(@This()) == 4);
std.debug.assert(@bitSizeOf(@This()) == 32);
}
},
command_completion_coalescing_control: u32,
command_completion_coalescing_port: u32,
enclosure_management_location: u32,
enclosure_management_control: u32,
hba_capabilities_extended: u32,
bios_handoff: extern union {
value: u32,
bios_owned: bf.Boolean(u32, 4),
os_owned: bf.Boolean(u32, 1),
bios_busy: bf.Boolean(u32, 0),
fn setHandoff(self: *volatile @This()) void {
self.os_owned.write(true);
}
fn checkHandoff(self: *volatile @This()) bool {
if (self.bios_owned.read())
return false;
if (self.bios_busy.read())
return false;
if (self.os_owned.read())
return true;
return false;
}
fn tryToClaim(self: *volatile @This()) bool {
self.setHandoff();
return self.checkHandoff();
}
comptime {
std.debug.assert(@sizeOf(@This()) == 4);
std.debug.assert(@bitSizeOf(@This()) == 32);
}
},
reserved_0x2C: u32,
reserved_0x30: [0xA0 - 0x30]u8,
vendor_0xA0: [0x100 - 0xA0]u8,
ports: [32]Port,
};
comptime {
std.debug.assert(@sizeOf(Abar) == 0x1100);
}
const SataPortType = enum {
ata,
atapi,
semb,
pm,
};
const CommandTableHeader = packed struct {
command_fis_length: u5,
atapi: u1,
write: u1,
prefetchable: u1,
sata_reset_control: u1,
bist: u1,
clear: u1,
_res1_3: u1,
pmp: u4,
pdrt_count: u16,
command_table_byte_size: u32,
command_table_addr: [2]u32,
reserved: [4]u32,
pub fn table(self: *volatile @This()) *volatile CommandTable {
const addr = read_u64(&self.command_table_addr);
return os.platform.phys_ptr(*volatile CommandTable).from_int(addr).get_uncached();
}
};
comptime {
std.debug.assert(@sizeOf(CommandTableHeader) == 0x20);
}
const CommandList = struct {
command_headers: [32]CommandTableHeader,
};
const RecvFis = struct {
dma_setup: [0x1C]u8,
_res1C: [0x20 - 0x1C]u8,
pio_setup: [0x14]u8,
_res34: [0x40 - 0x34]u8,
d2h_register: [0x14]u8,
_res54: [0x58 - 0x54]u8,
set_device_bits: [8]u8,
unknown_fis: [0x40]u8,
_resA0: [0x100 - 0xA0]u8,
};
comptime {
std.debug.assert(@offsetOf(RecvFis, "dma_setup") == 0);
std.debug.assert(@offsetOf(RecvFis, "pio_setup") == 0x20);
std.debug.assert(@offsetOf(RecvFis, "d2h_register") == 0x40);
std.debug.assert(@offsetOf(RecvFis, "set_device_bits") == 0x58);
std.debug.assert(@offsetOf(RecvFis, "unknown_fis") == 0x60);
std.debug.assert(@sizeOf(RecvFis) == 0x100);
}
const PRD = packed struct {
data_base_addr: [2]u32,
_res08: u32,
sizem1: u22,
_res10_22: u9,
completion_interrupt: u1,
};
comptime {
std.debug.assert(@sizeOf(PRD) == 0x10);
}
const FisH2D = packed struct {
fis_type: u8,
pmport: u4,
_res1_4: u3,
c: u1,
command: u8,
feature_low: u8,
lba_low: u24,
device: u8,
lba_high: u24,
feature_high: u8,
count: u16,
icc: u8,
control: u8,
};
comptime {
std.debug.assert(@offsetOf(FisH2D, "command") == 2);
}
const CommandFis = extern union {
bytes: [0x40]u8,
h2d: FisH2D,
//d2h: FisD2H,
};
const CommandTable = struct {
command_fis: CommandFis,
atapi_command: [0x10]u8,
_res50: [0x80 - 0x50]u8,
// TODO: Maybe more(?)
// First buffer should always be pointing to a single preallocated page
// when this command table is unused. Make sure to restore it if you overwrite it
prds: [8]PRD,
};
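// Hedged sketch, not part of the original driver: one way to honour the comment inside
// CommandTable above when a command slot's first PRD is temporarily pointed at a
// caller-owned physical buffer. It leans on the read_u64/write_u64 helpers and the Port
// methods defined elsewhere in this file; the function name and exact flow are
// illustrative assumptions, not the driver's established API.
fn issueWithTemporaryBuffer(port: *volatile Port, slot: u5, buf_phys: u64, buf_len: usize) void {
    const prd = port.getPrd(slot, 0);
    // Save the preallocated single-page buffer this PRD normally points at.
    const saved_addr = read_u64(&prd.data_base_addr);
    const saved_sizem1 = prd.sizem1;
    // Point the PRD at the caller's buffer for this one command.
    write_u64(&prd.data_base_addr, buf_phys);
    prd.sizem1 = @intCast(u22, buf_len - 1);
    port.issueCommands(@as(u32, 1) << slot);
    // Restore the preallocated page so later users of the slot see the expected buffer.
    write_u64(&prd.data_base_addr, saved_addr);
    prd.sizem1 = saved_sizem1;
}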
const ReadOrWrite = enum {
read,
write,
};
// Our own structure for keeping track of everything we need for a port
const PortState = struct {
mmio: *volatile Port = undefined,
num_sectors: usize = undefined,
sector_size: usize = 512,
port_type: SataPortType = undefined,
pub fn init(port: *volatile Port) !PortState {
var result: PortState = .{};
result.mmio = port;
result.port_type =
switch (result.mmio.signature) {
0x00000101 => .ata,
//0xEB140101 => .atapi, // Drop atapi for now
else => return error.UnknownSignature,
};
result.mmio.stopCommandEngine();
try result.setupCommandHeaders();
try result.setupPrdts();
result.mmio.startCommandEngine();
try result.identify();
return result;
}
fn setupCommandHeaders(self: *@This()) !void {
const port_io_size = @sizeOf(CommandList) + @sizeOf(RecvFis);
const commands_phys = try memory.pmm.allocPhys(port_io_size);
const fis_phys = commands_phys + @sizeOf(CommandList);
@memset(
os.platform.phys_ptr([*]u8).from_int(commands_phys).get_uncached(),
0,
port_io_size,
);
write_u64(&self.mmio.command_list_base, commands_phys);
write_u64(&self.mmio.fis_base, fis_phys);
}
fn setupPrdts(self: *@This()) !void {
const page_size = os.platform.paging.page_sizes[0];
var current_table_addr: usize = undefined;
var remaining_table_size: usize = 0;
for (self.mmio.getCommandHeaders()) |*header| {
if (remaining_table_size < @sizeOf(CommandTable)) {
remaining_table_size = page_size;
current_table_addr = try memory.pmm.allocPhys(page_size);
@memset(
os.platform.phys_ptr([*]u8).from_int(current_table_addr).get_uncached(),
0,
page_size,
);
}
write_u64(&header.command_table_addr, current_table_addr);
header.pdrt_count = 1;
header.command_fis_length = @sizeOf(FisH2D) / @sizeOf(u32);
header.atapi = if (self.port_type == .atapi) 1 else 0;
current_table_addr += @sizeOf(CommandTable);
remaining_table_size -= @sizeOf(CommandTable);
// First PRD is just a small preallocated single page buffer
const buf = try memory.pmm.allocPhys(page_size);
@memset(os.platform.phys_ptr([*]u8).from_int(buf).get_uncached(), 0, page_size);
write_u64(&header.table().prds[0].data_base_addr, buf);
header.table().prds[0].sizem1 = @intCast(u22, page_size - 1);
}
}
fn identifyCommand(self: *@This()) u8 {
return switch (self.port_type) {
.ata => 0xEC,
.atapi => 0xA1,
else => unreachable,
};
}
fn identify(self: *@This()) !void {
log(.debug, "Identifying drive...", .{});
const identify_fis = self.mmio.makeH2D(0);
identify_fis.command = self.identifyCommand();
identify_fis.c = 1;
identify_fis.device = 0;
self.mmio.issueCommands(1);
const buf = self.mmio.buffer(0, 0);
//os.hexdump(buf[0..256]);
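// Field offsets below follow the ATA IDENTIFY DEVICE layout (byte offset = 2 * word index):
// word 106 (byte 212) holds the sector-size validity flags, words 117-118 (byte 234) the
// logical sector size, words 100-103 (byte 200) the 48-bit sector count and words 60-61
// (byte 120) the 28-bit sector count.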
const data_valid = std.mem.readIntLittle(u16, buf[212..][0..2]);
var read_sector_size = true;
read_sector_size = read_sector_size and (data_valid & (1 << 15) == 0);
read_sector_size = read_sector_size and (data_valid & (1 << 14) != 0);
read_sector_size = read_sector_size and (data_valid & (1 << 12) != 0);
if (read_sector_size) {
self.sector_size = std.mem.readIntLittle(u32, buf[234..][0..4]);
}
self.num_sectors = std.mem.readIntLittle(u64, buf[200..][0..8]);
if (self.num_sectors == 0)
self.num_sectors = std.mem.readIntLittle(u32, buf[120..][0..4]);
if (self.num_sectors == 0) {
return error.NoSectors;
}
log(.info, "Disk has 0x{X} sectors of size {d}", .{
self.num_sectors,
self.sector_size,
});
}
fn issueCommandOnPort(self: *@This(), command_slot: u5) void {
// TODO: Call this from command slot task and
// make this dispatch work to the port task
self.mmio.issueCommands(@as(u32, 1) << command_slot);
}
fn finalizeIo(
self: *@This(),
command_slot: u5,
lba: u48,
sector_count: u16,
mode: ReadOrWrite,
) void {
const fis = self.mmio.makeH2D(command_slot);
fis.command =
switch (self.port_type) {
.ata => switch (mode) {
.read => @as(u8, 0x25),
.write => 0x35,
},
else => unreachable,
};
fis.device = 0xA0 | (1 << 6);
fis.control = 0x08;
fis.lba_low = @truncate(u24, lba);
fis.lba_high = @truncate(u24, lba >> 24);
fis.count = sector_count;
self.issueCommandOnPort(command_slot);
}
// All the following functions will sooner or later be moved out into a general
// block dev interface, and this will just have a simple dma interface instead.
pub fn offsetToSector(self: *@This(), offset: usize) u48 {
return @intCast(u48, offset / self.sector_size);
}
fn doSmallWrite(
self: *@This(),
command_slot: u5,
buffer: []const u8,
lba: u48,
offset: usize,
) void {
self.mmio.getCommandHeader(command_slot).pdrt_count = 1;
// Read in the data we're not going to overwrite
self.finalizeIo(command_slot, lba, 1, .read);
// Overwrite the part we care about with the contents of `buffer`
for (buffer) |b, i| {
self.mmio.buffer(command_slot, 0)[offset + i] = b;
}
// Write buffer to disk
self.finalizeIo(command_slot, lba, 1, .write);
}
fn doSmallRead(self: *@This(), command_slot: u5, buffer: []u8, lba: u48, offset: usize) void {
self.finalizeIo(command_slot, lba, 1, .read);
for (buffer) |*b, i|
b.* = self.mmio.buffer(command_slot, 0)[offset + i];
}
fn doLargeWrite(self: *@This(), command_slot: u5, buffer: []const u8, lba: u48) void {
for (buffer[0..self.sector_size]) |b, i|
self.mmio.buffer(command_slot, 0)[i] = b;
self.finalizeIo(command_slot, lba, 1, .write);
}
fn doLargeRead(self: *@This(), command_slot: u5, buffer: []u8, lba: u48) void {
self.finalizeIo(command_slot, lba, 1, .read);
for (buffer[0..self.sector_size]) |*b, i|
b.* = self.mmio.buffer(command_slot, 0)[i];
}
fn iterateByteSectors(
self: *@This(),
command_slot: u5,
buffer_in: anytype,
disk_offset_in: usize,
small_callback: anytype,
large_callback: anytype,
) void {
if (buffer_in.len == 0)
return;
self.mmio.getCommandHeader(command_slot).pdrt_count = 1;
var first_sector = self.offsetToSector(disk_offset_in);
const last_sector = self.offsetToSector(disk_offset_in + buffer_in.len - 1);
if (first_sector == last_sector) {
small_callback(
self,
command_slot,
buffer_in,
first_sector,
disk_offset_in % self.sector_size,
);
return;
}
var disk_offset = disk_offset_in;
var buffer = buffer_in;
// We need to preserve data on the first sector
if (!libalign.isAligned(usize, self.sector_size, disk_offset)) {
const step = libalign.alignUp(usize, self.sector_size, disk_offset) - disk_offset;
small_callback(
self,
command_slot,
buffer[0..step],
first_sector,
self.sector_size - step,
);
buffer.ptr += step;
buffer.len -= step;
disk_offset += step;
first_sector += 1;
}
// Now we're sector aligned, we can do the transfer sector by sector
// TODO: make this faster, doing multiple sectors at a time
while (buffer.len > self.sector_size) {
log(.debug, "Doing entire sector {}", .{first_sector});
large_callback(self, command_slot, buffer, first_sector);
buffer.ptr += self.sector_size;
buffer.len -= self.sector_size;
first_sector += 1;
}
if (buffer.len == 0)
return;
log(.debug, "Doing last partial sector {}", .{first_sector});
// Last sector, partial
small_callback(self, command_slot, buffer, first_sector, 0);
}
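// Worked example (illustrative): with a 0x200-byte sector size, a 0x204-byte transfer
// starting at disk offset 0x1FE touches three sectors as 2 bytes (partial first sector),
// 0x200 bytes (whole sector) and 2 bytes (partial last sector), which is exactly the
// split iterateByteSectors above produces.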
pub fn doIOBytesWrite(
self: *@This(),
command_slot: u5,
buffer: []const u8,
disk_offset: usize,
) void {
self.iterateByteSectors(command_slot, buffer, disk_offset, doSmallWrite, doLargeWrite);
}
pub fn doIOBytesRead(
self: *@This(),
command_slot: u5,
buffer: []u8,
disk_offset: usize,
) void {
self.iterateByteSectors(command_slot, buffer, disk_offset, doSmallRead, doLargeRead);
}
};
comptime {
std.debug.assert(@sizeOf(CommandTable) == 0x100);
}
fn write_u64(mmio: anytype, value: u64) void {
mmio[0] = @truncate(u32, value);
mmio[1] = @truncate(u32, value >> 32);
}
fn read_u64(mmio: anytype) u64 {
return @as(u64, mmio[0]) | (@as(u64, mmio[1]) << 32);
}
fn claim_controller(abar: *volatile Abar) void {
{
const version = abar.version;
log(.info, "Version: {}", .{version});
if (version.major.read() < 1 or version.minor_high.read() < 2) {
log(.debug, "Handoff not supported (version)", .{});
return;
}
}
if (abar.hba_capabilities_extended & 1 == 0) {
log(.debug, "Handoff not supported (capabilities)", .{});
return;
}
while (!abar.bios_handoff.tryToClaim()) {
thread.scheduler.yield();
}
log(.debug, "Got handoff!", .{});
}
fn command(port: *volatile Port, slot: u5) void {}
fn commandWithBuffer(port: *volatile Port, slot: u5, buf: usize, bufsize: usize) void {
const header = &port.getCommandHeaders()[slot];
//const oldbuf = header.;
//const oldsize = ;
}
fn sataPortTask(port_type: SataPortType, port: *volatile Port) !void {
switch (port_type) {
.ata, .atapi => {},
else => return,
}
log(.debug, "{s} task started for port at 0x{X}", .{ @tagName(port_type), @ptrToInt(port) });
var port_state = try PortState.init(port);
// Put 0x204 'x's across 3 sectors if sector size is 0x200
//port_state.doIOBytesWrite(0, "x" ** 0x204, port_state.sector_size - 2);
//port_state.finalizeIo(0, 0, 3, .read);
//const sector_size = port_state.sector_size;
//os.hexdump(port_state.mmio.buffer(0, 0)[sector_size - 0x10..sector_size * 2 + 0x10]);
// Read first disk sector
port_state.finalizeIo(0, 0, 1, .read);
//os.hexdump(port_state.mmio.buffer(0, 0)[0..port_state.sector_size]);
// Read first sector into buffer
//port_state.doIOBytesRead(0, test_buf[0..512], 0);
//os.hexdump(test_buf[0..512]);
}
fn controllerTask(abar: *volatile Abar) !void {
claim_controller(abar);
log(.debug, "Claimed controller.", .{});
const ports_implemented = abar.ports_implemented;
for (abar.ports) |*port, i| {
if ((ports_implemented >> @intCast(u5, i)) & 1 == 0) {
continue;
}
{
const sata_status = port.sata_status;
{
const com_status = sata_status & 0xF;
if (com_status == 0)
continue;
if (com_status != 3) {
log(.warn, "Unknown port com_status: {X}", .{com_status});
continue;
}
}
{
const ipm_status = (sata_status >> 8) & 0xF;
if (ipm_status != 1) {
log(.warn, "Device sleeping: {X}", .{ipm_status});
continue;
}
}
}
switch (port.signature) {
0x00000101 => try thread.scheduler.spawnTask("SATA port task", sataPortTask, .{ .ata, port }),
//0xEB140101 => try thread.scheduler.spawnTask(sataPortTask, .{.atapi, port}),
0xC33C0101, 0x96690101 => {
log(.notice, "Known TODO port signature: 0x{X}", .{port.signature});
//thread.scheduler.spawnTask(sataPortTask, .{.semb, port})
//thread.scheduler.spawnTask(sataPortTask, .{.pm, port})
},
else => {
log(.warn, "Unknown port signature: 0x{X}", .{port.signature});
return;
},
}
}
}
pub fn registerController(addr: platform.pci.Addr) void {
if (comptime (!config.drivers.block.ahci.enable))
return;
// Enable memory space access and bus mastering
addr.command().write(addr.command().read() | 0x6);
const AbarPhysPtr = os.platform.phys_ptr(*volatile Abar);
const abar = AbarPhysPtr.from_int(addr.barinfo(5).phy & 0xFFFFF000).get_uncached();
const cap = abar.hba_capabilities;
if ((cap & (1 << 31)) == 0) {
log(.crit, "Controller is 32 bit only, ignoring.", .{});
return;
}
if (abar.global_hba_control & (1 << 31) == 0) {
log(.crit, "AE not set!", .{});
return;
}
thread.scheduler.spawnTask("AHCI controller task", controllerTask, .{abar}) catch |err| {
log(.crit, "Failed to make controller task: {s}", .{@errorName(err)});
};
} | subprojects/flork/src/drivers/block/ahci.zig |
const std = @import("std");
const engine = @import("kiragine");
usingnamespace engine.kira.log;
const gl = engine.kira.gl;
const c = engine.kira.c;
const windowWidth = 1024;
const windowHeight = 768;
const title = "Textures";
const targetfps = 60;
var texture: engine.Texture = undefined;
const rect: engine.Rectangle = .{ .x = 500, .y = 380, .width = 32 * 3, .height = 32 * 3 };
const rect2: engine.Rectangle = .{ .x = 300, .y = 400, .width = 32 * 6, .height = 32 * 6 };
const srcrect: engine.Rectangle = .{ .x = 0, .y = 0, .width = 32, .height = 32 };
fn draw() !void {
engine.clearScreen(0.1, 0.1, 0.1, 1.0);
//const texture = engine.Texture{
//.id = atlas.textureid,
//};
// Enable the texture batch with given texture
engine.enableTextureBatch2D(texture);
// Push the quad batch; it cannot be mixed with other batch types because it is textured
try engine.pushBatch2D(engine.Renderer2DBatchTag.quads);
// Draw texture rotated
try engine.drawTextureRotated(rect2, srcrect, .{ .x = 16, .y = 16 }, engine.kira.math.deg2radf(45), engine.Colour.rgba(255, 0, 0, 255));
// Draw texture
try engine.drawTexture(rect, srcrect, engine.Colour.rgba(255, 255, 255, 255));
// Pops the current batch
try engine.popBatch2D();
// Disable the texture batch
engine.disableTextureBatch2D();
}
pub fn main() !void {
const callbacks = engine.Callbacks{
.draw = draw,
};
try engine.init(callbacks, windowWidth, windowHeight, title, targetfps, std.heap.page_allocator);
var file: std.fs.File = undefined;
file = try std.fs.cwd().openFile("assets/test.png", .{});
const testpnglen = try file.getEndPos();
const stream = file.reader();
const testpng = try stream.readAllAlloc(std.heap.page_allocator, testpnglen);
file.close();
defer std.heap.page_allocator.free(testpng);
file = try std.fs.cwd().openFile("assets/test2.png", .{});
const testpng2len = try file.getEndPos();
const sstream = file.reader();
const testpng2 = try sstream.readAllAlloc(std.heap.page_allocator, testpng2len);
file.close();
defer std.heap.page_allocator.free(testpng2);
file = try std.fs.cwd().createFile("testbuf", .{});
try file.writeAll(testpng);
try file.writeAll("\n\n");
try file.writeAll(testpng2);
file.close();
file = try std.fs.cwd().openFile("testbuf", .{});
const testbufferlen = try file.getEndPos();
const ssstream = file.reader();
const testbuffer = try ssstream.readAllAlloc(std.heap.page_allocator, testpng2len + testpnglen + 2);
const mem = testbuffer[testpnglen + 2 ..];
const mem2 = testbuffer[0..testpnglen];
file.close();
defer std.heap.page_allocator.free(testbuffer);
var nrchannels: i32 = 0;
var data: ?*u8 = c.stbi_load_from_memory(@ptrCast([*c]const u8, mem2), @intCast(i32, mem2.len), &texture.width, &texture.height, &nrchannels, 4);
defer c.stbi_image_free(data);
texture = engine.Texture{
.id = 0,
.width = 32,
.height = 32,
};
gl.texturesGen(1, @ptrCast([*]u32, &texture.id));
gl.textureBind(gl.TextureType.t2D, texture.id);
gl.textureTexParameteri(gl.TextureType.t2D, gl.TextureParamaterType.min_filter, gl.TextureParamater.filter_nearest);
gl.textureTexParameteri(gl.TextureType.t2D, gl.TextureParamaterType.mag_filter, gl.TextureParamater.filter_nearest);
gl.textureTexParameteri(gl.TextureType.t2D, gl.TextureParamaterType.wrap_s, gl.TextureParamater.wrap_repeat);
gl.textureTexParameteri(gl.TextureType.t2D, gl.TextureParamaterType.wrap_t, gl.TextureParamater.wrap_repeat);
gl.textureTexImage2D(gl.TextureType.t2D, 0, gl.TextureFormat.rgba8, 32, 32, 0, gl.TextureFormat.rgba, u8, @ptrCast(?*c_void, data));
gl.textureBind(gl.TextureType.t2D, 0);
try engine.open();
try engine.update();
texture.destroy();
try engine.deinit();
} | src/tests/atlaspacker.zig |
const std = @import("../std.zig");
const io = std.io;
/// Provides `io.Reader`, `io.Writer`, and `io.SeekableStream` for in-memory buffers as
/// well as files.
/// For memory sources, if the supplied byte buffer is const, then `io.Writer` is not available.
/// The error set of the stream functions is the error set of the corresponding file functions.
pub const StreamSource = union(enum) {
const has_file = (std.builtin.os.tag != .freestanding);
/// The stream access is redirected to this buffer.
buffer: io.FixedBufferStream([]u8),
/// The stream access is redirected to this buffer.
/// Writing to the source will always yield `error.AccessDenied`.
const_buffer: io.FixedBufferStream([]const u8),
/// The stream access is redirected to this file.
/// On freestanding, this must never be initialized!
file: if (has_file) std.fs.File else void,
pub const ReadError = io.FixedBufferStream([]u8).ReadError || (if (has_file) std.fs.File.ReadError else error{});
pub const WriteError = error{AccessDenied} || io.FixedBufferStream([]u8).WriteError || (if (has_file) std.fs.File.WriteError else error{});
pub const SeekError = io.FixedBufferStream([]u8).SeekError || (if (has_file) std.fs.File.SeekError else error{});
pub const GetSeekPosError = io.FixedBufferStream([]u8).GetSeekPosError || (if (has_file) std.fs.File.GetSeekPosError else error{});
pub const Reader = io.Reader(*StreamSource, ReadError, read);
pub const Writer = io.Writer(*StreamSource, WriteError, write);
pub const SeekableStream = io.SeekableStream(
*StreamSource,
SeekError,
GetSeekPosError,
seekTo,
seekBy,
getPos,
getEndPos,
);
pub fn read(self: *StreamSource, dest: []u8) ReadError!usize {
switch (self.*) {
.buffer => |*x| return x.read(dest),
.const_buffer => |*x| return x.read(dest),
.file => |x| if (!has_file) unreachable else return x.read(dest),
}
}
pub fn write(self: *StreamSource, bytes: []const u8) WriteError!usize {
switch (self.*) {
.buffer => |*x| return x.write(bytes),
.const_buffer => return error.AccessDenied,
.file => |x| if (!has_file) unreachable else return x.write(bytes),
}
}
pub fn seekTo(self: *StreamSource, pos: u64) SeekError!void {
switch (self.*) {
.buffer => |*x| return x.seekTo(pos),
.const_buffer => |*x| return x.seekTo(pos),
.file => |x| if (!has_file) unreachable else return x.seekTo(pos),
}
}
pub fn seekBy(self: *StreamSource, amt: i64) SeekError!void {
switch (self.*) {
.buffer => |*x| return x.seekBy(amt),
.const_buffer => |*x| return x.seekBy(amt),
.file => |x| if (!has_file) unreachable else return x.seekBy(amt),
}
}
pub fn getEndPos(self: *StreamSource) GetSeekPosError!u64 {
switch (self.*) {
.buffer => |*x| return x.getEndPos(),
.const_buffer => |*x| return x.getEndPos(),
.file => |x| if (!has_file) unreachable else return x.getEndPos(),
}
}
pub fn getPos(self: *StreamSource) GetSeekPosError!u64 {
switch (self.*) {
.buffer => |*x| return x.getPos(),
.const_buffer => |*x| return x.getPos(),
.file => |x| if (!has_file) unreachable else return x.getPos(),
}
}
pub fn reader(self: *StreamSource) Reader {
return .{ .context = self };
}
pub fn writer(self: *StreamSource) Writer {
return .{ .context = self };
}
pub fn seekableStream(self: *StreamSource) SeekableStream {
return .{ .context = self };
}
};
test "StreamSource (refs)" {
std.testing.refAllDecls(StreamSource);
}
test "StreamSource (mutable buffer)" {
var buffer: [64]u8 = undefined;
var source = StreamSource{ .buffer = std.io.fixedBufferStream(&buffer) };
var writer = source.writer();
try writer.writeAll("Hello, World!");
try std.testing.expectEqualStrings("Hello, World!", source.buffer.getWritten());
}
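test "StreamSource (seeking, sketch)" {
    // Hedged addition, not part of the original test suite: a small check of the
    // SeekableStream interface over a mutable in-memory buffer.
    var buffer: [64]u8 = undefined;
    var source = StreamSource{ .buffer = std.io.fixedBufferStream(&buffer) };
    var stream = source.seekableStream();
    try std.testing.expectEqual(@as(u64, 64), try stream.getEndPos());
    try stream.seekTo(4);
    try std.testing.expectEqual(@as(u64, 4), try stream.getPos());
}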
test "StreamSource (const buffer)" {
const buffer: [64]u8 = "Hello, World!".* ++ ([1]u8{0xAA} ** 51);
var source = StreamSource{ .const_buffer = std.io.fixedBufferStream(&buffer) };
var reader = source.reader();
var dst_buffer: [13]u8 = undefined;
try reader.readNoEof(&dst_buffer);
try std.testing.expectEqualStrings("Hello, World!", &dst_buffer);
} | lib/std/io/stream_source.zig |
const std = @import("std");
const c = @import("internal/c.zig");
const internal = @import("internal/internal.zig");
const log = std.log.scoped(.git);
const git = @import("git.zig");
pub const Transaction = opaque {
/// Free the resources allocated by this transaction
///
/// If any references remain locked, they will be unlocked without any changes made to them.
pub fn deinit(self: *Transaction) !void {
log.debug("Transaction.deinit called", .{});
c.git_transaction_free(@ptrCast(*c.git_transaction, self));
log.debug("transaction freed successfully", .{});
}
/// Lock a reference
///
/// Lock the specified reference. This is the first step to updating a reference
///
/// ## Parameters
/// * `refname` - The reference to lock
pub fn lockReference(self: *Transaction, refname: [:0]const u8) !void {
log.debug("Transaction.lockReference called, refname={s}", .{refname});
try internal.wrapCall("git_transaction_lock_ref", .{
@ptrCast(*c.git_transaction, self),
refname.ptr,
});
log.debug("successfully locked reference", .{});
}
/// Set the target of a reference
///
/// Set the target of the specified reference. This reference must be locked.
///
/// ## Parameters
/// * `refname` - The reference to lock
/// * `target` - Target to set the reference to
/// * `signature` - Signature to use in the reflog; pass `null` to read the identity from the config
/// * `message` - Message to use in the reflog
pub fn setTarget(
self: *Transaction,
refname: [:0]const u8,
target: *const git.Oid,
signature: ?*const git.Signature,
message: [:0]const u8,
) !void {
log.debug("Transaction.setTarget called, refname={s}, target={*}, signature={*}, message={s}", .{
refname,
target,
signature,
message,
});
try internal.wrapCall("git_transaction_set_target", .{
@ptrCast(*c.git_transaction, self),
refname.ptr,
@ptrCast(*const c.git_oid, target),
@ptrCast(?*const c.git_signature, signature),
message.ptr,
});
log.debug("successfully set target", .{});
}
/// Set the target of a reference
///
/// Set the target of the specified reference. This reference must be locked.
///
/// ## Parameters
/// * `refname` - The reference to lock
/// * `target` - Target to set the reference to
/// * `signature` - Signature to use in the reflog; pass `null` to read the identity from the config
/// * `message` - Message to use in the reflog
pub fn setSymbolicTarget(
self: *Transaction,
refname: [:0]const u8,
target: [:0]const u8,
signature: ?*const git.Signature,
message: [:0]const u8,
) !void {
log.debug("Transaction.setSymbolicTarget called, refname={s}, target={s}, signature={*}, message={s}", .{
refname,
target,
signature,
message,
});
try internal.wrapCall("git_transaction_set_symbolic_target", .{
@ptrCast(*c.git_transaction, self),
refname.ptr,
target.ptr,
@ptrCast(?*const c.git_signature, signature),
message.ptr,
});
log.debug("successfully set target", .{});
}
/// Set the reflog of a reference
///
/// Set the specified reference's reflog. If this is combined with setting the target, that update won't be written to the
/// reflog.
///
/// ## Parameters
/// * `refname` - The reference to lock
/// * `reflog` - The reflog as it should be written out
pub fn setReflog(self: *Transaction, refname: [:0]const u8, reflog: *const git.Reflog) !void {
log.debug("Transaction.setReflog called, refname={s}, reflog={*}", .{ refname, reflog });
try internal.wrapCall("git_transaction_set_reflog", .{
@ptrCast(*c.git_transaction, self),
refname.ptr,
@ptrCast(?*const c.git_reflog, reflog),
});
log.debug("successfully set reflog", .{});
}
/// Remove a reference
///
/// ## Parameters
/// * `refname` - The reference to remove
pub fn remove(self: *Transaction, refname: [:0]const u8) !void {
log.debug("Transaction.remove called, refname={s}", .{refname});
try internal.wrapCall("git_transaction_remove", .{
@ptrCast(*c.git_transaction, self),
refname.ptr,
});
log.debug("successfully removed reference", .{});
}
/// Commit the changes from the transaction
///
/// Perform the changes that have been queued. The updates will be made one by one, and the first failure will stop the
/// processing.
pub fn commit(self: *Transaction) !void {
log.debug("Transaction.commit called", .{});
try internal.wrapCall("git_transaction_commit", .{
@ptrCast(*c.git_transaction, self),
});
log.debug("successfully commited transaction", .{});
}
comptime {
std.testing.refAllDecls(@This());
}
};
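// Hedged usage sketch, not part of these bindings: libgit2 transactions are used by locking
// each reference first, queueing updates, then committing. The `repo.transactionNew()` call,
// `new_oid`, and the refname below are illustrative assumptions, not verified API of this
// wrapper.
//
//     var tx = try repo.transactionNew();
//     defer tx.deinit() catch {};
//     try tx.lockReference("refs/heads/main");
//     try tx.setTarget("refs/heads/main", &new_oid, null, "update main");
//     try tx.commit();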
comptime {
std.testing.refAllDecls(@This());
} | src/transaction.zig |