pub fn main() !void {
const file_name = std.fs.path.basename(@src().file);
print(" file: {}\n", .{file_name});
}
test "string concat at comptime" {
const str1 = "one";
const str2: []const u8 = "two";
const str3 = str1 ++ " " ++ str2;
print("str3={}\n", .{str3});
expect(eql(u8, str3, "one two"));
}
test "string eql" {
const str = "string";
expect(eql(u8, str, "string"));
}
test "string startsWith" {
const str = "string";
expect(startsWith(u8, str, "stri"));
}
test "string endsWith" {
const str = "string";
expect(endsWith(u8, str, "ing"));
}
test "muliline string" {
const str =
\\line one
\\line two
\\line three
; // end
expect(endsWith(u8, str, "two\nline three"));
}
// joining requires an allocator to create a new string.
test "join strings" {
var buffer: [64]u8 = undefined;
var fba = std.heap.FixedBufferAllocator.init(&buffer); // or buffer[0..]
var allocator = &fba.allocator;
const str1 = try std.mem.join(allocator, ", ", &[_][]const u8{ "jill", "jack", "jane", "john" });
defer allocator.free(str1);
const str2 = try std.mem.join(allocator, "... ", &[_][]const u8{ "four", "three", "two", "one" });
defer allocator.free(str2);
expect(eql(u8, str1, "jill, jack, <NAME>"));
expect(eql(u8, str2, "four... three... two... one"));
}
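// A small added sketch: searching and trimming need no allocator, unlike join
// above (assumes the std.mem.trim/indexOf signatures of the Zig version this
// file targets).
test "string trim and indexOf" {
const str = "  string  ";
const trimmed = std.mem.trim(u8, str, " ");
expect(eql(u8, trimmed, "string"));
expect(std.mem.indexOf(u8, trimmed, "ring").? == 2);
}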
test "iterating utf8 runes" {
const str1: []const u8 = "こんにちは!";
print("str1={}\n", .{str1});
const view = try std.unicode.Utf8View.init(str1);
var itr = view.iterator();
var idx: u8 = 0;
while (itr.nextCodepointSlice()) |rune| : (idx += 1) {
if (idx == 0) {
expect(eql(u8, rune, "こ"));
}
if (idx == 1) {
expect(eql(u8, rune, "ん"));
}
if (idx == 4) {
expect(eql(u8, rune, "は"));
}
if (idx == 5) {
expect(eql(u8, rune, "!"));
}
}
}
// imports
const std = @import("std");
const print = std.debug.print;
const expect = std.testing.expect;
const eql = std.mem.eql;
const startsWith = std.mem.startsWith;
const endsWith = std.mem.endsWith; | 11_strings.zig |
const std = @import("std");
pub fn build(b: *std.build.Builder) void {
// Standard target options allows the person running `zig build` to choose
// what target to build for. Here we do not override the defaults, which
// means any target is allowed, and the default is native. Other options
// for restricting supported target set are available.
const target = b.standardTargetOptions(.{});
// Standard release options allow the person running `zig build` to select
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
const mode = b.standardReleaseOptions();
const exe = b.addExecutable("life", "src/life.zig");
const exe_opt = b.addOptions();
exe.addOptions("build_options", exe_opt);
exe_opt.addOption([]const u8, "pattern", p_95_206_595m);
// p_95_206_595m // pattern used in benchmarking (Cordership, non-symmetric, terminating)
// p_19_659_494m // symmetric quasi-crystal Cordership reaction.
// p_1_700_000m // my current favorite pattern, use cursor keys and watch at -n,-n where n<1400 or so.
// p_850_000m // at about 450k a glider starts towards the engines, at 650k it destroys them, at 750k the ash is penetrated
// p_max // fastest growth possible
// p_52513m // longest-running methuselah currently known (March 2021)
exe_opt.addOption(u32,"Threads",3); // Threads excluding display update thread - we also use a thread for display updates.
//exe_opt.addOption(u32,"Threads",@intCast(u32,std.math.max(2, std.Thread.getCpuCount() catch 2))-1); // highest performance
exe_opt.addOption(u32,"staticSize",4); // Size of static tiles, must be a power of 2 (4 is optimal for most patterns)
// If a pattern consists of almost all still lives, increase this value (try 8)
exe_opt.addOption(u32,"chunkSize",255); // The number of cells to use when balancing alive arrays. Smaller gives better balanced arrays,
// larger better performance up to a point (testing shows 255 is a good choise).
exe_opt.addOption(u32,"numChunks",4); // initial memory to allocation. Optimally, the starting population should be less than
// Threads*chunkSize*NumChunks
exe_opt.addOption(u32,"origin",7_500_000); // This number is important, it position cells for middle squares to work well. We combine the high
// bits of the low 32bits of X & Y values squared to index cells. A number that, when squared, has its
// middle bits in those high bits works better (between 2^22 and 2^24 is ideal, up to 2^26 work okay).
// Use bigger origins when following the patterns for more than 2*origin generations (try 15m or 30m).
exe.addPackagePath("zbox","../zbox/src/box.zig");
//exe.addPackagePath("tracy","../zig-tracy/src/lib.zig");
//exe.addIncludeDir("../zig-tracy/.zigmod/deps/v/git/github.com/wolfpld/tracy/tag-v0.7.8");
//exe.addCSourceFile("../zig-tracy/.zigmod/deps/v/git/github.com/wolfpld/tracy/tag-v0.7.8/TracyClient.cpp", &[_][]const u8{ "-DTRACY_ENABLE=1", "-fno-sanitize=undefined" });
//exe.linkSystemLibrary("c++");
//exe.linkSystemLibrary("pthread");
//exe.linkSystemLibrary("c"); // exe.linkLibC() to use c_allocator. looks like we are not allocator limited - its no faster...
exe.setTarget(target); // use the best options for the cpu we are building on
//exe.setTarget(.{ // generic x86_64 - about 8% slower on my box
// .cpu_arch = .x86_64,
// .os_tag = .linux,
// .abi = .gnu,
// .cpu_model = .baseline, // .baseline encompasses more old cpus
//});
exe.setBuildMode(mode);
//exe.pie = true;
//exe.setBuildMode(std.builtin.Mode.ReleaseFast); // to hard code ReleaseFast/ReleaseSafe etc
exe.setOutputDir(".");
exe.install();
const run_cmd = exe.run();
run_cmd.step.dependOn(b.getInstallStep());
if (b.args) |args| {
run_cmd.addArgs(args);
}
const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step);
}
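// A minimal added sketch (illustrative, not used by the build): counts the live
// ('o') cells in one of the RLE pattern strings below. In this RLE dialect a
// digit run is a repeat count, 'b' is a dead cell, 'o' a live cell, '$' ends a
// row and '!' ends the pattern.
fn countAlive(rle: []const u8) usize {
var alive: usize = 0;
var run: usize = 0;
for (rle) |c| {
switch (c) {
'0'...'9' => run = run * 10 + (c - '0'),
'o' => {
alive += if (run == 0) 1 else run;
run = 0;
},
'!' => break,
// 'b', '$' and line breaks end any pending run without adding live cells
else => run = 0,
}
}
return alive;
}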
// add rle encoded patterns
const p_chaotic1 =
\\9bobo$8bo$9bo2bo$11b3o8$2bo$2ob2o$2ob2o$2o$2b2obo$3bo2bo$3bo2bo$4b2o!
;
const p_chaotic2 =
\\23bo$21b2ob2o$21b2ob2o$21b2o$23b2obo$24bo2bo$24bo2bo$25b2o11$bobo$o$bo2bo$3b3o!
;
// non-symmetric quasicrystal switch engine reaction
// by 1300k generations the switch engines have been stopped; the residual gliders keep the ash active till about 1700k.
const p_1_700_000m =
\\7bobo$6bo$7bo2bo$9b3o7$3o2$bo$b2o25b2o$2b2o24bo$o2b2o!
;
// another non-symmetric quasicrystal switch engine reaction
// at about generation 450k a glider starts towards the switch engines, at about 650k it stops them. At about 750k
// the residual gliders break through the ash and the last of them exit the ash around 850k
const p_850_000m =
\\7bobo$6bo$7bo2bo$9b3o7$3o2$bo$b2o$2b2o$o2b2o6$38b2o$38bo!
;
const p_95_206_595m = // non-symmetric quasicrystal Cordership reaction
//x = 16, y = 16, rule = 23/3
\\bbbbbbbobooboobo$
\\bbbboboobbobbobb$
\\bbobbboobboobboo$
\\booobbbooooooobb$
\\oobobbooobobbboo$
\\bobobbobobooobbb$
\\obobbobboobbobob$
\\boobbobbobbooobo$
\\bbbobboboooobobb$
\\ooboobbbobobbbob$
\\obbbobobboobbbbo$
\\bbobbbobbooooobb$
\\oobooooobboooooo$
\\booooobbooobobob$
\\boooobooobboooob$
\\oboobbobooboobbb!
;
const p_52513m =
\\4b2o$5bo$2bobo$2bo10$4bobo$5bo$5bo5$b2o$b2o5$4b2o$4bobo$o6bo$bo2bo2bo
\\$2bo4bo$o5bo$bo3bo$2b3o!
;
const p_pony_express =
\\5o4bob2ob2o$2bob3o3bobobo$2b3obo5bob2o$bobobobob2obob2o$bo3b6ob2obo$o
\\5b2ob2o2b2o$2bo2bobobob2o$ob2obobob2o2bo$o4b3o$b2ob3o2bo2bo$2obo2bob2o
\\2b3o$bobob2obo2bo2b2o$b2o9b2o$ob3o4bobo2b2o$b3o2b2o2b2o3bo$obob3o3bobo!
;
const p_c5_puffer_p1450 =
\\3o$3bo$2o2bo$2obob2o$2o2bo2bo$bobo3bo$2bob2obo$4bobo$3b4o$3b5o$3bo3b2o
\\20b3o$6bo2bo22bo$7bo2bo18b2o2bo$11bo17b2obob2o$9bob2o2b3o11b2o2bo2bo$
\\10bob3o3bo11bobo3bo$12b2obo2bo12bob2obo$12b4obobo13bobo$11b3o2bobobo
\\11b4o16bo$15bo5bo10b5o15b2o$16b4obo10bo3b2o16bo$17b3obo13bo2bo12b2ob3o
\\$36bo2bo10bo3bob2o$40bo13b3obo$38bob2o2b3o4b2obobob2o$39bob3o3bo5bob2o
\\bo$41b2obo2bo10bo$41b4obobo$40b3o2bobobo3bo5bo$44bo5bo3bo4bo$45b4obo6b
\\o2bo$46b3obo10bo$62b2o3bo$65b4o$61bo7bo$66b5o$66bobobo$62bo4b5o$63bo2b
\\o5bo$67bo2bob2o$67bo2bo$69bo6$62bo$62b2o$64bo$61b2ob3o$60bo3bob2o$64b
\\3obo$61b2obobob2o$63bob2obo$68bo2$63bo5bo$64bo4bo$67bo2bo$71bo$72b2o3b
\\o$75b4o$71bo7bo$76b5o$76bobobo$72bo4b5o$73bo2bo5bo$77bo2bob2o$77bo2bo$
\\29b2o48bo$28bobo$30bo!
;
const p_noahs_ark =
\\9bobo$10b2o$10bo6bo$16bo$16b3o$20b2o$20bobo$20bo2$o$b2o$2o5$3b2o$2bobo
\\$4bo2$5b3o$5bo$6bo!
;
const p_hacksaw =
\\17b2o180b$18bo180b$91bo107b$91bobo105b$94b2o6b2o95b$52b2o24bo15b2o3bo
\\4bo94b$53bo23b2o15b2o3b2o4bo93b$34b2o17bobo5b2o13b2o13bobo11bo9bo83b$
\\33bobo18b2o5b3o11b3o13bo13bo8b2o83b$23b2o7b3o12bo15b2obo9b2o26bo94b$
\\23bo7b3o13b4o12bo2bo10b2o23b2o95b$16b3o13b3o4bobo6b4o11b2obo11bo120b$
\\15bo3bo13bobo3b2o7bo2bo9b3o135b$14bo5bo13b2o4bo7b4o9b2o136b$15bo3bo27b
\\4o4b2o17b2o123b$16b3o28bo7bobo16bo124b$16b3o38bo141b$57b2o140b$40b2o9b
\\2o64b2o80b$39bo2bo7bo2bo63bo81b$39b3o9b3o61bobo81b$14b3o25b9o64b2o82b$
\\13b2ob2o23bo2b5o2bo147b$13b2ob2o23b2o2b3o2b2o134bo12b$13b5o169bo11b$
\\12b2o3b2o163bo4bo11b$46bo136b5o11b$47bo151b$43bo3bo12bo138b$44b4o13bo
\\137b$56bo4bo123b2o12b$57b5o120b3ob2o11b$177b3o3b4o12b$184b2o13b$15bo
\\183b$15b2o182b$67b2o115bo14b$66bobo116bo13b$9bobo40bo12b3o8b2o101bo5bo
\\13b$7bo3bo40b4o8b3o10bo102b6o13b$2o5bo10b2o3b2o28b4o8b3o24bo106b$o5bo
\\10bobo3bo2bo15bo10bo2bo9bobo18b3ob2o3bo102b$7bo11bo7bo7bo6b2o9b4o10b2o
\\18b4o4b2o102b$7bo3bo15bo6b2o16b4o35b2o106b$9bobo15bo24bo146b$23bo2bo
\\38b2o16b2o114b$23b2o40bo16bobo114b$81b3o115b$81b2o79bo36b$84b2o77bo35b
\\$83b3o71bo5bo35b$158b6o35b2$84bo114b$83b2o114b2$57bo126b2o13b$56b2o
\\122b4ob2o12b$45b2o8b2o11bo26bo84b6o13b$45bo8b3o9bobo26b2o84b4o12bob$
\\55b2o8bobo12bo54b5o58bo$56b2o2b2o2bo2bo11b2o14bo38bo4bo54bo3bo$57bo2bo
\\4bobo26bobo42bo55b4o$66bobo25bo2bo40bo20b4o27b3o6b$68bo26bo2bo59bo3bo
\\25b2ob2o6b$162bo21b3o3b3o6b$95bo65bo33b4o$95b2o97bo3bo$172b2o24bo$171b
\\4o22bob$171b2ob2o23b$66b2o105b2o6b6o12b$65b3o27bo73bo10bo5bo12b$55b2o
\\5bob2o12bo15b2o71bobo16bo12b$55bo6bo2bo12b2o29b2o58bo15bo13b$62bob2o
\\13b2o27bo64b2o24b$65b3o11b3o11bo13bo9b2o52b2ob2o23b$66b2o11b2o10bobo
\\13bo10bo17b2o33b4o24b$78b2o9b2o16bo27bo2bo33b2o25b$78bo10b2o17bo29bo
\\60b$89b2o18b2o27bo22b6o32b$91bobo41b2obo21bo5bo32b$93bo42bo29bo32b$
\\149b2obo2bo9bo33b$148b2o6bo42b$137bo12bobo3bo42b$136b2o15b4o42b4$168bo
\\30b$137bo29b2o30b$137b2o12bo14b2o8b2o21b$125bo23bobo13b3o9bo21b$125bob
\\o20bobo15b2o31b$114b2o12b2o9b2o6bo2bo16b2o30b$114bo13b2o9bo8bobo17bo
\\30b$128b2o6b2o6b2o3bobo47b$125bobo7b3o7bo5bo47b$125bo10b2o61b$139bo59b
\\$139b2o!
;
const p_cord_puller =
\\28b3o67b$27bo3bo66b$26bo4bo8b2o3b3o50b$26bo2bobo7b4o55b$26b2obobo6bo3b
\\2o54b$28b2obo2bo4b2obobo53b$18b3o9b2o2bo63b$17bo2bo10b3o13bo6b2o42b$
\\16bo4bo22bo2bo6bo43b$16bo2b3o23b3o50b$16bo5bo75b$17b7o74b$23bo74b$23bo
\\74b$21b2o75b$62b2o34b$62bo35b2$24b3obo69b$24b3obo69b$25bo8b2o62b$26bo
\\3b3ob2o62b$27bo6bo63b$28bobobobo35b2o26b$29bo40bo9bo17b$61bo16b3o17b$
\\2b3o55bobo14bo20b$bo3bo53b2ob2o13b2o19b$o4bo53b2ob2o34b$o2bobo52b3o37b
\\$2obobo52b3o3bo33b$2b2obo2bo50b2o37b$4b2o2bo50bobo36b$5b3o64b2o3b2o19b
\\$72bobobobo19b$73b5o20b$74b3o21b$75bo22b3$47b2o49b$45b6o47b$44b6o48b$
\\43bo6bo26b2o19b$44b3o30bo20b$8b2o35b2o31b3o17b$8bo39bo8bo22bo17b$53b2o
\\2b2ob3o35b$53bo5b4o35b$57b2o39b2$35bo15bo29bo2bo13b$34bobo12b3o28bo3b
\\2o2b2o8b$16b2o15b2ob2o10bo31bo7bo9b$16bo16b2ob2o10b2o19bo11b4o8b2o3b$
\\32b3o32b3o23bo4b$32b3o3bo27bo12bo18b$33b2o31b2o3bo6b2o18b$33bobo35b3o
\\4bobo17b$74bo23b$73b2o23b$24b2o17b2o3b2o48b$24bo27b3o43b$43bo5bo4bo6b
\\2o3b2o30b$53bo8b5o31b$44b2ob2o14b3o32b$46bo17bo8b2o3b2o12b5ob$58b3o12b
\\o5bo11bob3obo$58bo33bo3bob$48bo10bo14bo3bo14b3o2b$47bobo25b3o16bo3b$
\\46bo3bo41b2o4b$46b5o40bobo4b$45b2o3b2o39bobo4b$46b5o41bo5b$47b3o11b3o
\\34b$48bo49b$61bobo14bo10b2obob2o2b$60b5o11b2ob2o8bo5bo2b$59b2o3b2o24bo
\\3bo3b$59b2o3b2o9bo5bo9b3o4b2$75b2o3b2o16b3$48b2o48b$48bo49b2$61b2o28b
\\2o5b$61bo29bo6b2$78b2o18b$78bo!
;
const p_acorn =
\\bo5b$3bo3b$2o2b3o!
;
const p_2_engine_cordership =
\\19b2o$19b4o$19bob2o2$20bo$19b2o$19b3o$21bo$33b2o$33b2o7$36bo$35b2o$34b
\\o3bo$35b2o2bo$40bo$37bobo$38bo$38bo$38b2o$38b2o3$13bo10bo$12b5o5bob2o
\\11bo$11bo10bo3bo9bo$12b2o8b3obo9b2o$13b2o9b2o12bo$2o13bo21b3o$2o35b3o
\\7$8b2o$8b2o11b2o$19b2o2bo$24bo3bo$18bo5bo3bo$19bo2b2o3bobo$20b3o5bo$
\\28bo!
;
const p_glider_making_switch_engine =
\\bobo24b$o27b$bo2bo23b$3b3o22b$26b2o$26b2o!
;
const p_slow_puffer_1 =
\\76b2o4b$75b2ob4o$76b6o$77b4ob$64b3o15b$63b5o14b$62b2ob3o14b$52b2o9b2o
\\17b$51b2ob4o24b$52b6o24b$53b4o25b3$44b2o36b$43b2ob2o26b2o6b$44b4o24bo
\\4bo4b$45b2o24bo10b$24b6o41bo5bo4b$24bo5bo40b6o5b$24bo57b$25bo4bo51b$
\\27b2o53b2$54b4o24b$53b6o23b$52b2ob4o23b$3bo49b2o27b$bo3bo76b$o81b$o4bo
\\76b$5o77b3$20bo61b$b2o10b2obo2bobo60b$2ob3o6bobob4obo60b$b4o3b3obo69b$
\\2b2o8bobob4obo60b$13b2obo2bobo60b$5b2o13bo61b$3bo4bo73b$2bo79b$2bo5bo
\\73b$2b6o74b3$53b2o27b$52b2ob4o23b$53b6o23b$54b4o24b2$27b2o53b$25bo4bo
\\51b$24bo57b$24bo5bo40b6o5b$24b6o41bo5bo4b$45b2o24bo10b$44b4o24bo4bo4b$
\\43b2ob2o26b2o6b$44b2o36b3$53b4o25b$52b6o24b$51b2ob4o24b$52b2o9b2o17b$
\\62b2ob3o14b$63b5o14b$64b3o15b$77b4ob$76b6o$75b2ob4o$76b2o!
;
const p_rake_sp2 =
\\o$4bo9b2o$2bo3bo6b2ob3o83b2o15b2o$bo12b5o55b5o22b4o13b2ob4o$bo4bo8b3o56bo4b
\\o20b2ob2o14b6o$b5o68bo26b2o17b4o$75bo3bo$77bo$21bo$2b2o10b2obo2bobo98b2o$b
\\2ob3o6bobob4obo97bo2bo$2b4o3b3obo106bobo$3b2o8bobob4obo97b2o$14b2obo2bobo
\\97bo$6b2o13bo96bobo$4bo4bo107b2ob2o$3bo76b4o34bobo$3bo5bo69b6o34bo$3b6o6b3o
\\60b2ob4o$14b5o60b2o$13b2ob3o$14b2o3$88b2o$87b2ob4o$88b6o$89b4o4$103b2o$102b
\\2ob4o$103b6o$104b4o41$180bo$179bo$179b3o!
;
const p_52444M =
\\11bo$10bob2o$9b2o11$11bobo$12bo$12bo10$15bo$13b2o$8b2o4bo$9b2o$10bo6bo
\\$17b2o4$bo$b2o$obo!
;
const p_birthday_puffer =
\\101bo2$81b2o$80bobo$56b3o22bo$2o$2o8bo$4bo4bobo11bo41b2o$4bo4bobo10bob
\\o24bo15b2o$4bo5bo11b2o24bobo$48bo2bo$6b3o40b2o33b2o$68bo15b2o$68bo$68b
\\o57b3o4$26bo78b2o18b2o$26bo71b2o5b2o18b2o$26bo71b2o32b2o$132b2o3$107b
\\2o$107b2o$124b2o$30b3o90bo2bo$41b2o80bo2bo30bo$41b2o81b2o31bo$157bo2$
\\190bo$189bobo$48b2o37bo101bobo$48b2o36bobo101bo$33bo32bo19bobo$32bobo
\\30bobo19bo$32b2o32b2o$b2o156bo$obo155bobo29b2o$2o157b2o29b2o$112b2o6b
\\2o3b2o$36b2o73bo2bo5b2o3b2o50b2o$36bobo73b2o63bobo$19b2o16b2o139bo$19b
\\2o130b2o$130b2o19b2o$84bo45b2o$11b2o70bobo49b2o$11b2o70bobo29b2o7b2o9b
\\2o$78b3o3bo30bobo5bo2bo$43b2o71b2o6b2o$42bo2bo22b2o21b3o$43b2o22bo2bo$
\\68bobo$69bo$95bo$61b3o31bo43b2o$39b2o54bo43b2o14b2o$39b2o40bo73b2o$80b
\\obo84bo$48bo31b2o84bobo$47bobo17b3o97bo$48b2o2$65bo110b3o$65bo34bo39bo
\\$53b2o10bo33b3o18b3o16bobo$53b2o43b5o36b2o$97b3ob2o$33b2o52bo9b2ob2o
\\46b2o$32bobo51bobo10bo13bo31b2o3bo$32b2o23bo28b2o24bobo29b4obo$56b3o
\\53bo2bo33b2o$55b2ob2o53b2o28bob3o5b2o$55bo3b2o6b2o74b5obo4bo$57bob2o6b
\\2o82bo17b2o$53bo3bobo91bo16b2o20b3o$53bo3b2o66bo43bobobo$54b2o68bobo
\\19bo7bo15b2ob2o$43b2o55b2o23bo18b2o8bo15b2obo$43b2o14bo40b2o43b2o8bo
\\16bo16b2o$57b2ob2o127b2o$80b2o8bo90b2o13b2o$56bobo21b2o8bo12b2o38bo37b
\\2o13b2o$56bo3bo29bo8b2o2b2o37bobo$56b2o2bo38b2o41bobo$57b3o83bo25bo$
\\58bo16bobo74bo15bobo$76b2o74bo15bo$76bo75bo16bo3bo$221bo$171bobo47bo$
\\172bo17b3o28bo2$254bo$84bo49b2o117bobo$83bobo48bobo11bo30b2o66bo5bobo$
\\83bo2bo48b2o10bobo28bobo65bobo5bo$81bo3bo61bobo30b2o64bobo$80bo67bo24b
\\o7bo65bo$81bobo68bo17b2obo7b2o$88b2o16bo29bo14bobo23b2o2bobo39bo18b2o$
\\76b2o10b2o15bobo27bobo13b2o16bo2bo3bobo5bo37bobo16bo2bo9b2o$76b2o27b2o
\\28b2o39b3obo42b2o17b2o10b2o$173bobo4bo4bo3b2o$87bo83bo4bo2b3obobo3b2o
\\59bo$86bo85bo5b2o70bo$85b3o85bo2b3o71bo$85bo92bo36b2o$90b2o42b2o40bo
\\17b2o19b2o$89b2obo32bo8b2o39b3o16b2o48b2o$90bobo31bobo72b2o43b2o$91bo
\\32b2o62b2o9b2o$180b2o5bo2bo$82bo97b2o6b2o$82b2o61bo8b2o$82b3o60bo8b2o$
\\85b2o58bo84bo$84b3o19b2o121bobo$69b2o4bo11bo17bo2bo120bo2bo$69b2o4b2o
\\3bo3b4o18b2o38b2o82b2o$77b6o3b2o12bo16bo28b2o$75b2ob2o3b2obo13bo15bobo
\\$75bo4bo17b3o15bobo109b2o$80bo2bo12bo20bo109bo2bo$92bo2b3o65b2o63bobo$
\\65bo27bo11bo57b2o64bo$64bobo14b2o3b2o3bo5bo6bobo$64bobo13bobo3bobo3b3o
\\2bo6bobo102b2o$65bo14bo7bo6b2o8bo102bobo$80bo3bo3bo28bo66b3o22bo$81bo
\\5bo12b2o6bo7bobo$82bo3bo12bo2bo4bobo6bobo19bo59bo$83b3o14b2o4bo2bo2b2o
\\3bo14bo4bobo53b2o2bobo$107b2o3b2o18bo4bobo53b2o2bobo$132bo5bo6b2o51bo
\\5bo$145b2o56bobo$96b2o27b2o7b3o66bobo5b2o$95bobo7b3o16bobo51b3o19b2o2b
\\2o4bo2bo21bo$94bobo16bo9bobo74b2ob3o5b2o13b4o3b3o$73b3o19bo17bo10bo66b
\\3o4b2o4b2o18bo3b3o3bo19b3o$113bo83b2o5b2o9b3o6bo5b2o$86b2o101bo5bob3o
\\14bo3bo5b2obo$86b2o21b3o77bo5bo27bo$189bo5bo9b2o7b2ob2o5bo28b2o$205b2o
\\4b2o3bo7bo5b3o20b2o$179bo11b3o17b2o16b6o25b2o$178bobo47bo2b2obo25b2o$
\\178b2o48bo2bobo$226bob2obob3o$226b3o$200bo36bo$199bobo31bo$158b3o39b2o
\\30bo5bo$227bo5bo$227b3o3bobo28b2o$227bo2bo6b2o15b3o7b2o$227bo2b2o2bo$
\\227b4o4b3o8bo$185b2o41b2o7bo8bo$185b2o37b2o20bo$224b2o$199bo$160b2o36b
\\obo$160b2o8bo28b2o29bo$132b2o30bo4bobo11bo46bo$132b2o11bo18bo4bobo10bo
\\bo11bo33bo$144bobo17bo5bo11b2o11bobo$128b2o15b2o49b2o42b2o6b2o3b2o$
\\127bo2bo35b3o70bo2bo5b2o3b2o$128b2o110b2o39bo$199b2o18bo60bobo$199b2o
\\18bo61b2o$132b2o85bo38b2o$131bo2bo54bo22bo45b2o$132b2o54bobo20bobo49b
\\2o$188b2o21bobo29b2o7b2o9b2o$206b3o3bo30bobo5bo2bo$220bo23b2o6b2o38b2o
\\$219bobo56b2o12b2o$220b2o56b2o5$277b3o$225b2ob2o14bo32b4o19b2o$197b3o
\\25bo3bo13b3o6b2o25b3o18b2o$226b3o14b3o6b2o25bobo$226b3o10b2o6bo23bo4b
\\2obobo$237bo2bo5bobo26bo3b2o$236bobobo4bo$223bo11b3obo6bobo$222bobo10b
\\2o35bo4bo$222b2o13bo2bo32b3o3b3o$192b2o45b2o61b3o$192b2o8bo36bo16bobo
\\20bobo20bobo$161b2o33bo4bobo11bo29bo9bo2bo21bo21b3o$160bobo33bo4bobo
\\10bobo27bobo8bo2b2o$160b2o34bo5bo11b2o31bo7b5o20b2o16b2o$238b2o3b2obo
\\9b3o3b2o15bo2bo$198b3o36bobo7bo9bo21bo2bo15bo$238bo7b2obob2o26bo3bo12b
\\obo$179b2o66bo3b2o27bobo14bobo$179b2o64b2o6bo27bo15bobo13bo$246bob5obo
\\38b2o3bobo11b2o$221bo32bo33b2o3b2o5bo11bobo$171b2o47bobo24b2o7bo31b2o
\\8bobo12bo$171b2o47b2o27b2o4bo42b2o12bobo$249bo4b2o56bobo$248bo3bo$248b
\\3o$248bo45b5o11b5o$259b2o35b3o10bob4o$259b2o47bo$309b2o2$312b3o7$255bo
\\$254bobo$254b2o$224b2o$224b2o8bo$193b2o33bo4bobo11bo24bo$192bobo33bo4b
\\obo10bobo22bobo$192b2o34bo5bo11b2o24b2o2$230b3o2$211b2o$211b2o$283b2o$
\\253bo15b2o12b2o$203b2o47bobo14b2o$203b2o47b2o2$265b2o$265b2o$270b2o2b
\\2o$274b3o3b2obo7b2o$269bo10bo3b2o5b2o$276bobo2bo4b2o$269bo2bob4o6bo$
\\263b2o5b3ob3o$263b2o7$271b2o16bo$271b2o13b5o$286b3ob2o$281b2o4b4o3bo$
\\280b2o7bob3obo$281b3o4b2ob3ob2o$281b3o5b2ob4o$291bo6$225bo$223bobo$
\\224b2o62$289bo$287bobo$288b2o62$353bo$351bobo$352b2o!
;
const p_c5_diagonal_puffer =
\\82bo2b$81b2o2b$80bo4b$78b3ob2ob$77b2obo3bo$76bob3o4b$75b2obobob2ob$76b
\\ob2obo3b$76bo8b2$53bo21bo5bo3b$52b2o21bo4bo4b$51bo22bo2bo7b$49b3ob2o
\\18bo11b$48b2obo3bo11bo3b2o12b$47bob3o14b4o15b$46b2obobob2o10bo7bo11b$
\\47bob2obo11b5o16b$47bo16bobobo16b$30b2o31b5o4bo12b$30b2o14bo5bo9bo5bo
\\2bo13b$29bo2bo13bo4bo9b2obo2bo17b$26b2obo2bo12bo2bo15bo2bo17b$32bo11bo
\\20bo19b$24b2o3bo2bo5bo3b2o41b$24b2o5bo5b4o44b$25bob5o4bo7bo40b$26bo8b
\\5o45b$35bobobo45b$34b5o4bo41b$23b3o7bo5bo2bo42b$23bo8b2obo2bo46b$21b2o
\\12bo2bo46b$15b2o4bo14bo48b$15b3o3bo63b$13bo4bo66b$13bo3bo67b$17bo67b$
\\12b2obobo67b$10b2o5bo67b$10b2o4b2o67b$12b4o69b2$25b2o58b$24b2o59b$26bo
\\58b3$20b2o63b$20b2o63b$19bo2bo62b$16b2obo2bo62b$22bo62b$14b2o3bo2bo62b
\\$14b2o5bo63b$15bob5o63b$16bo68b3$13b3o69b$13bo71b$11b2o72b$5b2o4bo73b$
\\5b3o3bo73b$3bo4bo76b$3bo3bo77b$7bo77b$2b2obobo77b$2o5bo77b$2o4b2o77b$
\\2b4o!
;
const p_bubblegum =
\\9bo11bo$8b3o9b3o$10bo9bo$6bob2o11b2obo$6bo4b3o3b3o4bo$5bob3ob3o3b3ob3o
\\bo2$4b3ob4obo3bob4ob3o$4b2o7bo3bo7b2o$4b3o2bob2o5b2obo2b3o2$9bo2bo5bo
\\2bo$9bo11bo$7b2o2bobo3bobo2b2o$7bo3b3o3b3o3bo$5b2o4bo2bobo2bo4b2o$3bob
\\2obob2o2bobo2b2obob2obo$2b2obo3bo3bo3bo3bo3bob2o$bobo3b2o13b2o3bobo$b
\\2o8bo7bo8b2o$2bo7b3o5b3o7bo$10b2obo3bob2o$2o2b2o5b3o3b3o5b2o2b2o$obo3b
\\o5b2o3b2o5bo3bobo$4bob2o15b2obo$8bo13bo$bo5b3o11b3o5bo$7b2o13b2o$3bo
\\23bo$5b2o17b2o$5b6o9b6o$6bobo13bobo$2b3o4b2o4bo4b2o4b3o$2b2o4b3o3b3o3b
\\3o4b2o$3b2obo6bo3bo6bob2o$12bobobobo$11bob2ob2obo$11bob5obo$9b2o9b2o$
\\8b2o11b2o$8bo13bo2$6bobo13bobo$9bo11bo$5bo3bo11bo3bo$5bo4bo9bo4bo$5bo
\\4bo9bo4bo$6bo17bo$8b3o9b3o$8bo13bo2$8bo13bo2$10bo9bo$9b3o7b3o$10b2o7b
\\2o$10b2o7b2o$7bo2bo9bo2bo$7bobobo7bobobo$11bo7bo3$8b3o9b3o2$9bo11bo$9b
\\o11bo$9bo11bo2$6bo5bo5bo5bo$5bobo3bobo3bobo3bobo$5b2o5b2o3b2o5b2o!
;
const p_p720_dirty_puffer =
\\40bo9bo48bo9bo$38b3ob3oboobboo46boobboob3ob3o$34booboobbo4boobboboo4b
\\oo30boo4boobobboo4bobbooboo$34boobbo3boobobbobbobboobboboo26boobobboo
\\bbobbobboboo3bobboo$33bobbo14boo4bobbob3o20b3obobbo4boo14bobbo$55boobb
\\3o4b3o12b3o4b3obboo$59bob3obobobo10bobobob3obo$30bo15bo13b4obobobo10bo
\\bobob4o13bo15bo$29boo15boo10bobbo4bobo12bobo4bobbo10boo15boo$11bo7b4o
\\5bobb5o4boo4boboboo7bo30bo7boobobo4boo4b5obbo5b4o7bo$4boob3ob3o5bobob
\\oobo4bobb3o3booboobboo3bo14bo14bo14bo3boobbooboo3b3obbo4boboobobo5b3ob
\\3oboo$4boo4bobbooboobbo4boo3b5o4bo3boo3bo4bo13bo14bo13bo4bo3boo3bo4b5o
\\3boo4bobbooboobbo4boo$3bobboboo3bobboo6bo3bobboo3bobbobboboobo5bo42bo
\\5boboobobbobbo3boobbo3bo6boobbo3boobobbo$15bo20b4obo7b3oboo13b3o8b3o
\\13boob3o7bob4o20bo$49bobobobo15boobooboo15bobobobo$53bob3obo8boo4boo4b
\\oo8bob3obo$53bo5b3o8boob4oboo8b3o5bo$7boo18boo5bo19bob3obboo7boo6boo7b
\\oobb3obo19bo5boo18boo$4booboboo9boo6boo3boobooboob3obo16bo10boo10bo16b
\\ob3oboobooboo3boo6boo9booboboo$b4obbo4boob3oboobbobooboo3boobboboo4bob
\\oboo7boo3boobo7boo7boboo3boo7boobobo4boobobboo3booboobobboob3oboo4bobb
\\4o$o4bo6boobob3o4bobobb4obo4bo3bo5boo4bobboboob3obo5bobbo5bob3oboobobb
\\o4boo5bo3bo4bob4obbobo4b3oboboo6bo4bo$boo7bobbobo3bobobbobo5bo8bo7boo
\\4b3o6bo3bo5boo5bo3bo6b3o4boo7bo8bo5bobobbobo3bobobbo7boo$17bo11bo3bob
\\oo6booboo7boboboboboo8bobbo8boobobobobo7booboo6boobo3bo11bo$32booboo5b
\\o3boo3boobbo7bobobb3o3boo3b3obbobo7bobboo3boo3bo5booboo$51b4o3boobo3bo
\\bobboboobboobobbobo3boboo3b4o$23boo28boobo5booboo3boboboobobo3booboo5b
\\oboo28boo$11boboo7bob3o38boob5o4b5oboo38b3obo7boobo$7b3obooboboo3boo3b
\\oo17bo19bo4bo3boo3bo4bo19bo17boo3boo3booboboob3o$6bo6boobb5obobbobbo5b
\\ob3ob3ob4o17bo16bo17b4ob3ob3obo5bobbobbob5obboo6bo$7boo3bo3bo3bo6bobbo
\\3boob3o3boobboob3o12b5ob3obb3ob5o12b3oboobboo3b3oboo3bobbo6bo3bo3bo3b
\\oo$19bo10bobbo3bobboo11bo3bo3booboobb3ob6ob3obbooboo3bo3bo11boobbo3bo
\\bbo10bo$31b3obo8b3o3b3o3booboobo8bo6bo8bobooboo3b3o3b3o8bob3o$29boobo
\\11boob4oboo10boo18boo10boob4oboo11boboo$14boobobb3o10boo10bob3o3bobbo
\\14b8o14bobbo3b3obo10boo$11boobooboo4bo29bo19b4o19bo31b3obboboo$11bobbo
\\bo6b3o47b4o50bo4boobooboo$5boo4boo5boobobboobooboo5bo27b3o57b3o6bobobb
\\o$5booboo12bobobo3boboobb4o5bo3b3o8booboobobo3boboobo5b3o27bo5booboob
\\oobboboo5boo4boo$4bo3boo17bobbo4bo3boo3bobobo6boo3boo5bo3boobooboo3bob
\\obooboo8b3o3bo5b4obboobo3bobobo12booboo$8boo25b3o3bobo4bo4b3obobb5o7bo
\\4bo5bo5boo3boo6bobobo3boo3bo4bobbo17boo3bo$23boo10b3obobobo3b3obboo3b
\\oo27b5obbob3o4bo4bobo3b3o25boo$22bobbo6bobb3o9bobbobboboboobboo29boo3b
\\oobb3o3bobobob3o10boo$8boo9boobo3bo5b3o7boobooboobobbo3bo29boobboobobo
\\bbobbo9b3obbo6bobbo$5booboo9boo7b6o5b4o6boboboobb3obbo29bo3bobbobooboo
\\boo7b3o5bo3boboo9boo$bb4obboo4boobboboob6obbobbo3bo5bo4bob3o4bobobo25b
\\obb3obboobobo6b4o5b6o7boo9booboo$bo4bo4booboobbo6bo3bo6boo5bo3b3o3bobb
\\oo30bobobo4b3obo4bo5bo3bobbobb6oboobobboo4boobb4o$bboo7boo3bobo3bo14bo
\\6bo3booboo40boobbo3b3o3bo5boo6bo3bo6bobbooboo4bo4bo$11boo5bo79booboo3b
\\o6bo14bo3bobo3boo7boo$18boboo110bo5boo$89boo38boobo$89booboo$83boo4boo
\\bb4o$83booboo4bo4bo$82bo3boo7boo$86boo!
;
// When we hit memory limits, this will abort - the population grows very fast.
const p_max =
\\18bo8b$17b3o7b$12b3o4b2o6b$11bo2b3o2bob2o4b$10bo3bobo2bobo5b$10bo4bobo
\\bobob2o2b$12bo4bobo3b2o2b$4o5bobo4bo3bob3o2b$o3b2obob3ob2o9b2ob$o5b2o
\\5bo13b$bo2b2obo2bo2bob2o10b$7bobobobobobo5b4o$bo2b2obo2bo2bo2b2obob2o
\\3bo$o5b2o3bobobo3b2o5bo$o3b2obob2o2bo2bo2bob2o2bob$4o5bobobobobobo7b$
\\10b2obo2bo2bob2o2bob$13bo5b2o5bo$b2o9b2ob3obob2o3bo$2b3obo3bo4bobo5b4o
\\$2b2o3bobo4bo12b$2b2obobobobo4bo10b$5bobo2bobo3bo10b$4b2obo2b3o2bo11b$
\\6b2o4b3o12b$7b3o17b$8bo!
;
const p_19_659_494m = // symmetric quasicrystal Cordership reaction
\\bboooboobobboooooooobboboobooobb$
\\bbobboobbbobobbbbbbobobbboobbobb$
\\ooobbbbboobbobboobbobboobbbbbooo$
\\obbbbooooobbboboobobbbooooobbbbo$
\\obbbobbobobboobbbboobbobobbobbbo$
\\bobobobboobbboboobobbboobbobobob$
\\oobobbbbbbobboooooobbobbbbbboboo$
\\obboobbboobooobbbboooboobbboobbo$
\\bbooboboooobooobbooobooooboboobb$
\\oboooobooobbboboobobbboooboooobo$
\\bobbbbobobbbobbbbbbobbbobobbbbob$
\\bbbbbbbobbbbbobbbbobbbbbobbbbbbb$
\\ooobobboobobooboobooboboobbobooo$
\\obboooooooboooooooooobooooooobbo$
\\obbbbbobobbbbobbbbobbbbobobbbbbo$
\\obooboobbobboobbbboobbobbooboobo$
\\obooboobbobboobbbboobbobbooboobo$
\\obbbbbobobbbbobbbbobbbbobobbbbbo$
\\obboooooooboooooooooobooooooobbo$
\\ooobobboobobooboobooboboobbobooo$
\\bbbbbbbobbbbbobbbbobbbbbobbbbbbb$
\\bobbbbobobbbobbbbbbobbbobobbbbob$
\\oboooobooobbboboobobbboooboooobo$
\\bbooboboooobooobbooobooooboboobb$
\\obboobbboobooobbbboooboobbboobbo$
\\oobobbbbbbobboooooobbobbbbbboboo$
\\bobobobboobbboboobobbboobbobobob$
\\obbbobbobobboobbbboobbobobbobbbo$
\\obbbbooooobbboboobobbbooooobbbbo$
\\ooobbbbboobbobboobbobboobbbbbooo$
\\bbobboobbbobobbbbbbobobbboobbobb$
\\bboooboobobboooooooobboboobooobb!
;
const p_5_931_548m = // symmetric quasicrystal Cordership reaction
\\obbbbooboobbooobbooobbooboobbbbo$
\\bbbobobobbbobbboobbbobbbobobobbb$
\\bbbobbbobbboooboobooobbbobbbobbb$
\\booboobboboobooooooboobobbooboob$
\\bbboobbboobbbbboobbbbboobbboobbb$
\\oobobbbbboooboooooobooobbbbboboo$
\\obbbbbbobboboobooboobobbobbbbbbo$
\\boobbboobobooboooobooboboobbboob$
\\obboobbboobbooobbooobboobbboobbo$
\\obbboobooobbooobbooobboooboobbbo$
\\bbboboobbbbbobboobbobbbbboobobbb$
\\booobobobbbobobbbbobobbbobobooob$
\\obobbboooooboobbbboobooooobbbobo$
\\obooboobooboooobbooooboobooboobo$
\\obbobobooobbboboobobbbooobobobbo$
\\booooooobbobbbobbobbbobbooooooob$
\\booooooobbobbbobbobbbobbooooooob$
\\obbobobooobbboboobobbbooobobobbo$
\\obooboobooboooobbooooboobooboobo$
\\obobbboooooboobbbboobooooobbbobo$
\\booobobobbbobobbbbobobbbobobooob$
\\bbboboobbbbbobboobbobbbbboobobbb$
\\obbboobooobbooobbooobboooboobbbo$
\\obboobbboobbooobbooobboobbboobbo$
\\boobbboobobooboooobooboboobbboob$
\\obbbbbbobboboobooboobobbobbbbbbo$
\\oobobbbbboooboooooobooobbbbboboo$
\\bbboobbboobbbbboobbbbboobbboobbb$
\\booboobboboobooooooboobobbooboob$
\\bbbobbbobbboooboobooobbbobbbobbb$
\\bbbobobobbbobbboobbbobbbobobobbb$
\\obbbbooboobbooobbooobbooboobbbbo!
;
const p_2_230_963m = // symmetric quasicrystal Cordership reaction
\\obobbobbboooboobboobooobbbobbobo$
\\bboobooboboooooooooooobobooboobb$
\\oobboooboooboooooooobooobooobboo$
\\bobbobbboboobboooobboobobbbobbob$
\\bboobboobbbbbboooobbbbbboobboobb$
\\ooobbboboobbbobbbbobbboobobbbooo$
\\boobooobooboboooooobobooboooboob$
\\bbbbobbbbboobbboobbboobbbbbobbbb$
\\boooboobboooobboobboooobboobooob$
\\obobboobobbbbobbbbobbbboboobbobo$
\\oooobbboobooobobboboooboobbboooo$
\\oobobbooobobbbbbbbbbbobooobboboo$
\\boobbbbbobobooobbooobobobbbbboob$
\\ooobboobbobbobbbbbbobbobboobbooo$
\\ooooobobbboboboooobobobbbobooooo$
\\boooobooobbbbboooobbbbboooboooob$
\\boooobooobbbbboooobbbbboooboooob$
\\ooooobobbboboboooobobobbbobooooo$
\\ooobboobbobbobbbbbbobbobboobbooo$
\\boobbbbbobobooobbooobobobbbbboob$
\\oobobbooobobbbbbbbbbbobooobboboo$
\\oooobbboobooobobboboooboobbboooo$
\\obobboobobbbbobbbbobbbboboobbobo$
\\boooboobboooobboobboooobboobooob$
\\bbbbobbbbboobbboobbboobbbbbobbbb$
\\boobooobooboboooooobobooboooboob$
\\ooobbboboobbbobbbbobbboobobbbooo$
\\bboobboobbbbbboooobbbbbboobboobb$
\\bobbobbboboobboooobboobobbbobbob$
\\oobboooboooboooooooobooobooobboo$
\\bboobooboboooooooooooobobooboobb$
\\obobbobbboooboobboobooobbbobbobo!
;
//#N Caber tosser 1
//#O <NAME>
//#C A smaller version of the first caber tosser found. Uses a 7-engine
//#C Cordership. Originally found on May 1, 1991, smaller form found on
//#C September 2, 1994.
//#C www.conwaylife.com/wiki/index.php?title=Caber_tosser_1
//x = 145, y = 114, rule = 23/3
const caber_tosser =
\\31bo113b$32bob2obobo105b$26b3o3b4o3bo105b$32bo2b2ob2o105b$31bo113b$29b
\\3o113b$29b3o17b2o94b$49bo95b3$14bo130b$14bo130b$14bo130b$17b2o126b$17b
\\2o38b2o86b$12bo3b3o38bo87b$13b3o129b$14bo130b$13b2o130b$13b3o129b$15bo
\\8bo120b$13bo8b2ob2o118b$15bo9b2o7bo30b2o78b$13b3o6b3o10b2o28bo79b$24bo
\\3bo5b2o109b$23bo3bo117b$5bo18b3obo116b$6bob2obobo12b2o117b$3o3b4o3bo
\\11bobo117b$6bo2b2ob2o12bo26b2o2bo87b$5bo19b2o29bobo86b$3b3o20bo28bo89b
\\$3b3o21bo117b$57b2o86b2$58b2o85b$57bo87b$55b2ob2o85b$58b2o85b$43bo12bo
\\88b$43bo101b$45bo5bo93b$44bo6bobo91b$43bo3bo2bo94b$3b2o39bo2bobob2o92b
\\$3bo45bob2o92b7$11b2o132b$11bo133b2$27b2o2bo113b$30bobo112b$29bo115b2$
\\31b2o112b$19b2o124b$19bo12b2o111b$31bo113b$29b2ob2o111b$32b2o111b$30bo
\\114b3$27b2o116b$27bo117b14$122bo22b$121bobo21b$121b2obo7b2o11b$121b2ob
\\2o6bobo10b$121b2obob3o6bo9b$111b2o8bobo2bo2bo2bo2bo8bo$110bobo9bo4b2o
\\6bo7b2o$110bo21bobo10b$109b2o21b2o11b6$93b2o15bo34b$94bo15bo34b$110bo
\\34b2$81bo22b2o39b$81bobo20bo40b$70b2o12b2o9b2o5bobo20bobo17b$70bo13b2o
\\9bo6b2o19bo3bo17b$84b2o6b2o17b2o10bo12b2o7b$81bobo7b3o17bo2bo7bo14bo7b
\\$81bo10b2o21bo7bo21b$95bo19bo7bo3bo17b$95b2o18bo9bobo17b$104b2o5bo2bo
\\30b$103bobo5b2o32b$103bo41b$102b2o!
;
const fireship =
\\27b2o2b2o80b2o2b2o$27b2o2b2o80b2o2b2o$26b3o2b3o78b3o2b3o$26b3o2b3o78b
\\3o2b3o2$26bo6bo78bo6bo$26bo2b2o2bo78bo2b2o2bo$27b6o80b6o$10b2o2b2o114b
\\2o2b2o$10b2o2b2o114b2o2b2o$9b3o2b3o12b2o84b2o12b3o2b3o$9b3o2b3o11bo2bo
\\82bo2bo11b3o2b3o$28b4o82b4o$9bo6bo10b2o2b2o80b2o2b2o10bo6bo$9bo2b2o2bo
\\10bo4bo80bo4bo10bo2b2o2bo$10b6o11bo4bo80bo4bo11b6o$28b4o82b4o2$12b2o
\\118b2o9b2o$11bo2bo116bo2bo7bo2bo$11b4o14b2o100b4o7bo2bo$10b2o2b2o15bo
\\98b2o2b2o5bo4bo$10bo4bo14b2o98bo4bo5bo4bo$10bo4bo14bobo97bo4bo6b4o$11b
\\4o116b4o6b2o2b2o$4b2o135bo4bo$3b4o134bo4bo2$2b6o$3b4o135b4o$143b2o$2b
\\2o2b2o$2obo2bob2o130b2o4b2o$3bo2bo132bo2bo2bo2bo$140b3o2b3o$140b2o4b2o
\\$4b2o$4b2o2$bobo2bobo$o2bo2bo2bo$o8bo$o8bo$2o6b2o$2b6o$74bo$74b2o$73bo
\\bo2$9b2o2b2o116b2o2b2o$9b2o2b2o116b2o2b2o$8b3o2b3o114b3o2b3o$8b3o2b3o
\\114b3o2b3o2$8bo6bo114bo6bo$8bo2b2o2bo114bo2b2o2bo$9b6o101b2o13b6o$116b
\\obo$117b3o$11b2o104bo2bo12b2o$10bo2bo103bo3bo10bo2bo$10b4o102b2o3bo10b
\\4o$9b2o2b2o101bo3b2o9b2o2b2o$9bo4bo102b3o11bo4bo$9bo4bo102b3o11bo4bo$
\\10b4o118b4o5$14b2o114b2o$13b4o112b4o2$12b6o110b6o$13b4o112b4o2$12b2o2b
\\2o25b2o56b2o25b2o2b2o$10b2obo2bob2o21b2o2b2o52b2o2b2o21b2obo2bob2o$13b
\\o2bo26b2o56b2o26bo2bo$40bo6bo50bo6bo$41bo4bo52bo4bo$14b2o27b2o56b2o27b
\\2o$14b2o24b3o2b3o50b3o2b3o24b2o$40b2o4b2o50b2o4b2o$11bobo2bobo108bobo
\\2bobo$10bo2bo2bo2bo106bo2bo2bo2bo$10bo8bo23b2o56b2o23bo8bo$10bo8bo22bo
\\2bo54bo2bo22bo8bo$10b2o6b2o106b2o6b2o$12b6o4b2o17bo4bo52bo4bo17b2o4b6o
\\$20b2o2b2o14b2o4b2o50b2o4b2o14b2o2b2o$22b2o15bo2bo2bo2bo48bo2bo2bo2bo
\\15b2o$19bo6bo12b2ob4ob2o48b2ob4ob2o12bo6bo$20bo4bo15bo4bo52bo4bo15bo4b
\\o$22b2o98b2o$19b3o2b3o92b3o2b3o$19b2o4b2o92b2o4b2o3$22b2o98b2o$21bo2bo
\\96bo2bo2$20bo4bo94bo4bo$19b2o4b2o92b2o4b2o$18bo2bo2bo2bo90bo2bo2bo2bo$
\\18b2ob4ob2o90b2ob4ob2o$20bo4bo94bo4bo!
; | build.zig |
const std = @import("std");
const os = @import("root").os;
const Order = std.math.Order;
const rb = os.lib.rbtree;
const sbrk = os.memory.vmm.sbrk;
const Mutex = os.thread.Mutex;
const min_materialize_size = 64 * 1024;
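// One sbrk'd block of node storage holds 0x1000 / 16 = 256 Range nodes.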
const node_block_size = 0x1000 * @sizeOf(Range) / 16;
const debug = false;
const rb_features: rb.Features = .{
.enable_iterators_cache = true,
.enable_kth_queries = false,
.enable_not_associatve_augment = false,
};
const addr_config: rb.Config = .{
.features = rb_features,
.augment_callback = null,
.comparator = compare_addr,
};
const size_config: rb.Config = .{
.features = rb_features,
.augment_callback = null,
.comparator = compare_size,
};
const AddrNode = rb.Node(rb_features);
const SizeNode = rb.Node(rb_features);
const AddrTree = rb.Tree(Range, "addr_node", addr_config);
const SizeTree = rb.Tree(Range, "size_node", size_config);
const PlacementResult = struct {
effective_size: u64,
offset: u64,
pub fn format(self: *const @This(), fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
try writer.print("size=0x{X}, offset=0x{X}", .{ self.effective_size, self.offset });
}
};
const RangePlacement = struct {
range: *Range,
placement: PlacementResult,
pub fn format(self: *const @This(), fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
try writer.print("Placement{{{}, within freenode {}}}", .{ self.placement, self.range });
}
};
const Range = struct {
size_node: SizeNode = undefined,
addr_node: AddrNode = undefined,
base: usize,
size: usize,
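// Classic align-up: rounds `base` up to the next multiple of `alignment`
// (e.g. base 0x1003 with alignment 0x10 yields 0x1010).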
fn returned_base(self: *const @This(), alignment: usize) usize {
if (alignment == 0)
return self.base;
return ((self.base + alignment - 1) / alignment) * alignment;
}
fn effective_size(size: usize, size_alignment: usize) usize {
if (size_alignment == 0)
return size;
return ((size + size_alignment - 1) / size_alignment) * size_alignment;
}
pub fn try_place(self: *const @This(), size: usize, alignment: usize, size_alignment: usize) ?PlacementResult {
const rbase = self.returned_base(alignment);
const es = effective_size(size, size_alignment);
const offset = rbase - self.base;
if (offset + es <= self.size) {
return PlacementResult{
.effective_size = es,
.offset = offset,
};
}
return null;
}
pub fn format(self: *const @This(), fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
try writer.print("[base: 0x{X} size: 0x{X}]", .{ self.base, self.size });
}
};
const compare_addr = struct {
pub fn compare(self: *const @This(), left: *const Range, right: *const Range) bool {
return left.base < right.base;
}
};
const compare_size = struct {
pub fn compare(self: *const @This(), left: *const Range, right: *const Range) bool {
return left.size < right.size;
}
};
pub const RangeAlloc = struct {
range_node_head: ?*Range = null,
allocator: std.mem.Allocator = .{
.allocFn = alloc,
.resizeFn = resize,
},
by_addr: AddrTree = AddrTree.init(.{}, {}),
by_size: SizeTree = SizeTree.init(.{}, {}),
mutex: Mutex = .{},
materialize_bytes: fn (usize) anyerror![]u8,
fn dump_state(self: *@This()) void {
if (!debug)
return;
os.log("Dumping range_alloc state\n", .{});
var range = self.by_addr.iterators.first();
while (range) |r| : (range = self.by_addr.iterators.next(r)) {
os.log("{}\n", .{r});
}
}
fn alloc_impl(self: *@This(), len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) ![]u8 {
if (debug) {
os.log("Calling alloc(len=0x{X},pa=0x{X},la=0x{X})\n", .{ len, ptr_align, len_align });
self.dump_state();
}
const placement = try self.find_placement(len, ptr_align, len_align);
const range = placement.range;
const pmt = placement.placement;
// Return value
const ret = @intToPtr([*]u8, range.base + pmt.offset)[0..len];
// Node maintenance
const has_data_before = pmt.offset != 0;
const has_data_after = pmt.offset + pmt.effective_size < range.size;
if (debug)
os.log("Chose {}\n", .{placement});
if (has_data_before and has_data_after) {
if (debug)
os.log("Has data before and after\n", .{});
// Add the new range
const new_range_offset = pmt.offset + pmt.effective_size;
_ = try self.add_range(.{
.base = range.base + new_range_offset,
.size = range.size - new_range_offset,
});
// Overwrite the old entry
// Update size node (requires reinsertion)
self.by_size.remove(range);
range.size = pmt.offset;
self.by_size.insert(range);
} else if (has_data_after or has_data_before) {
// Reuse the single node
if (has_data_before) {
if (debug)
os.log("Has data left before\n", .{});
// Cool, only the size has changed, no reinsertion on the addr node
range.size = pmt.offset;
} else {
if (debug)
os.log("Has data left after\n", .{});
// Update addr node and reinsert
self.by_addr.remove(range);
range.size -= pmt.effective_size;
range.base += pmt.effective_size;
self.by_addr.insert(range);
}
// No matter what, we have to update the size node.
self.by_size.remove(range);
self.by_size.insert(range);
} else {
if (debug)
os.log("Removing the node\n", .{});
// Remove the node entirely
self.by_addr.remove(range);
self.by_size.remove(range);
self.free_range(range);
}
if (debug)
self.dump_state();
return ret;
}
fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
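// Recover the RangeAlloc that embeds this std.mem.Allocator interface field.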
const self = @fieldParentPtr(@This(), "allocator", allocator);
self.mutex.lock();
defer self.mutex.unlock();
return self.alloc_impl(len, ptr_align, len_align, ret_addr) catch |err| {
switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => {
os.log("Alloc returned error: {}", .{err});
@panic("Alloc error");
},
}
};
}
fn resize(allocator: *std.mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, len_align: u29, ret_addr: usize) std.mem.Allocator.Error!usize {
const self = @fieldParentPtr(@This(), "allocator", allocator);
self.mutex.lock();
defer self.mutex.unlock();
if (new_size != 0) {
os.log("Todo: RangeAlloc.resize(): actually resize\n", .{});
@panic("");
}
// Free this address
const new_range = self.add_range(.{
.base = @ptrToInt(old_mem.ptr),
.size = old_mem.len,
}) catch |err| {
switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => {
os.log("Error while making new nodes for free(): {}\n", .{err});
@panic("");
},
}
};
// Attempt to merge nodes
self.merge_ranges(new_range);
return 0;
}
fn locate_addr_node(self: *@This(), size_node: *Range) *Range {
const node = self.by_addr.lookup(&size_node.node);
if (node) |n| {
return @fieldParentPtr(Range, "node", n);
}
os.log("Could not locate addr node for {}\n", .{size_node});
@panic("");
}
fn find_placement(self: *@This(), size: usize, alignment: usize, size_alignment: usize) !RangePlacement {
{
const size_finder: struct {
size: usize,
pub fn check(finder: *const @This(), range: *Range) bool {
return range.size >= finder.size;
}
} = .{ .size = size };
var current_range = self.by_size.lower_bound(@TypeOf(size_finder), &size_finder);
while (current_range) |range| {
if (range.try_place(size, alignment, size_alignment)) |placement| {
return RangePlacement{
.range = range,
.placement = placement,
};
} else if (debug) {
os.log("Could not place into {}\n", .{range});
}
current_range = self.by_size.iterators.next(range);
}
}
// We found nothing, make a new one
if (debug)
os.log("Existing range not found, creating a new one\n", .{});
const range = try self.make_range(size);
if (range.try_place(size, alignment, size_alignment)) |placement| {
return RangePlacement{
.range = range,
.placement = placement,
};
} else if (debug) {
os.log("Could not place size = 0x{X}, alignment = {}, size_alignment = {} in new allocation {}\n", .{ size, alignment, size_alignment, range });
}
return error.OutOfMemory;
}
fn free_range(self: *@This(), node: *Range) void {
const new_head = @ptrCast(*?*Range, node);
new_head.* = self.range_node_head;
self.range_node_head = node;
}
fn consume_node_bytes(self: *@This()) !void {
const base = try sbrk(node_block_size);
for (@ptrCast([*]Range, @alignCast(@alignOf(Range), base))[0 .. node_block_size / @sizeOf(Range)]) |*n|
self.free_range(n);
}
fn add_range(self: *@This(), in_range: Range) !*Range {
const range = try self.alloc_range();
range.* = in_range;
self.by_size.insert(range);
self.by_addr.insert(range);
return range;
}
fn make_range(self: *@This(), minBytes: usize) !*Range {
const page_size = os.memory.paging.kernel_context.page_size(0, os.memory.pmm.phys_to_write_back_virt(0));
const size = os.lib.libalign.align_up(
usize,
page_size,
std.math.max(min_materialize_size, minBytes),
);
const result: Range = .{
.base = @ptrToInt((try self.materialize_bytes(size)).ptr),
.size = size,
};
return self.add_range(result);
}
fn alloc_range(self: *@This()) !*Range {
if (self.range_node_head == null) {
try self.consume_node_bytes();
}
if (self.range_node_head) |head| {
const ret = head;
self.range_node_head = @ptrCast(*?*Range, head).*;
return ret;
}
@panic("No nodes!");
}
// Needs to be a node in the addr tree
fn merge_ranges(self: *@This(), range_in: *Range) void {
var current = range_in;
// Try to merge to the left
while (self.by_addr.iterators.prev(current)) |prev| {
if (self.try_merge(prev, current)) {
self.by_addr.remove(current);
self.by_size.remove(current);
self.free_range(current);
current = prev;
} else {
break;
}
}
// Try to merge to the right
while (self.by_addr.iterators.next(current)) |next| {
if (self.try_merge(current, next)) {
self.by_addr.remove(next);
self.by_size.remove(next);
self.free_range(next);
} else {
break;
}
}
}
fn try_merge(self: *@This(), low: *Range, high: *const Range) bool {
if (low.base + low.size == high.base) {
low.size += high.size;
return true;
}
return false;
}
fn init(self: *@This()) void {
self.mutex.init();
}
}; | src/lib/range_alloc.zig |
const sf = struct {
pub usingnamespace @import("../sfml.zig");
pub usingnamespace sf.system;
};
const math = @import("std").math;
pub fn Rect(comptime T: type) type {
return packed struct {
const Self = @This();
/// The CSFML rect type equivalent
const CsfmlEquivalent = switch (T) {
c_int => sf.c.sfIntRect,
f32 => sf.c.sfFloatRect,
else => void,
};
/// Creates a rect (just for convenience)
pub fn init(left: T, top: T, width: T, height: T) Self {
return Self{
.left = left,
.top = top,
.width = width,
.height = height,
};
}
/// Makes a CSFML rect with this rect (only if the corresponding type exists)
/// This is mainly for the inner workings of this wrapper
pub fn _toCSFML(self: Self) CsfmlEquivalent {
if (CsfmlEquivalent == void) @compileError("This rectangle type doesn't have a CSFML equivalent.");
return @bitCast(CsfmlEquivalent, self);
}
/// Creates a rect from a CSFML one (only if the corresponding type exists)
/// This is mainly for the inner workings of this wrapper
pub fn _fromCSFML(rect: CsfmlEquivalent) Self {
if (CsfmlEquivalent == void) @compileError("This rectangle type doesn't have a CSFML equivalent.");
return @bitCast(Self, rect);
}
/// Checks if a point is inside this rectangle
pub fn contains(self: Self, vec: sf.Vector2(T)) bool {
// Shamelessly stolen
var min_x: T = math.min(self.left, self.left + self.width);
var max_x: T = math.max(self.left, self.left + self.width);
var min_y: T = math.min(self.top, self.top + self.height);
var max_y: T = math.max(self.top, self.top + self.height);
return (vec.x >= min_x and
vec.x < max_x and
vec.y >= min_y and
vec.y < max_y);
}
/// Checks if two rectangles have a common intersection, if yes returns that zone, if not returns null
pub fn intersects(self: Self, other: Self) ?Self {
// Shamelessly stolen too
var r1_min_x: T = math.min(self.left, self.left + self.width);
var r1_max_x: T = math.max(self.left, self.left + self.width);
var r1_min_y: T = math.min(self.top, self.top + self.height);
var r1_max_y: T = math.max(self.top, self.top + self.height);
var r2_min_x: T = math.min(other.left, other.left + other.width);
var r2_max_x: T = math.max(other.left, other.left + other.width);
var r2_min_y: T = math.min(other.top, other.top + other.height);
var r2_max_y: T = math.max(other.top, other.top + other.height);
var inter_left: T = math.max(r1_min_x, r2_min_x);
var inter_top: T = math.max(r1_min_y, r2_min_y);
var inter_right: T = math.min(r1_max_x, r2_max_x);
var inter_bottom: T = math.min(r1_max_y, r2_max_y);
if (inter_left < inter_right and inter_top < inter_bottom) {
return Self.init(inter_left, inter_top, inter_right - inter_left, inter_bottom - inter_top);
} else {
return null;
}
}
/// Checks if two rectangles are the same
pub fn equals(self: Self, other: Self) bool {
return (self.left == other.left and
self.top == other.top and
self.width == other.width and
self.height == other.height);
}
/// Gets a vector with left and top components inside
pub fn getCorner(self: Self) sf.Vector2(T) {
return sf.Vector2(T){ .x = self.left, .y = self.top };
}
/// Gets a vector with the bottom right corner coordinates
pub fn getOtherCorner(self: Self) sf.Vector2(T) {
return self.getCorner().add(self.getSize());
}
/// Gets a vector with width and height components inside
pub fn getSize(self: Self) sf.Vector2(T) {
return sf.Vector2(T){ .x = self.width, .y = self.height };
}
/// x component of the top left corner
left: T,
/// y component of the top left corner
top: T,
/// width of the rectangle
width: T,
/// height of the rectangle
height: T,
};
}
test "rect: intersect" {
const tst = @import("std").testing;
var r1 = Rect(c_int).init(0, 0, 10, 10);
var r2 = Rect(c_int).init(6, 6, 20, 20);
var r3 = Rect(c_int).init(-5, -5, 10, 10);
try tst.expectEqual(@as(?Rect(c_int), null), r2.intersects(r3));
var inter1: sf.c.sfIntRect = undefined;
var inter2: sf.c.sfIntRect = undefined;
try tst.expectEqual(sf.c.sfIntRect_intersects(&r1._toCSFML(), &r2._toCSFML(), &inter1), 1);
try tst.expectEqual(sf.c.sfIntRect_intersects(&r1._toCSFML(), &r3._toCSFML(), &inter2), 1);
try tst.expectEqual(Rect(c_int)._fromCSFML(inter1), r1.intersects(r2).?);
try tst.expectEqual(Rect(c_int)._fromCSFML(inter2), r1.intersects(r3).?);
}
test "rect: contains" {
const tst = @import("std").testing;
var r1 = Rect(f32).init(0, 0, 10, 10);
try tst.expect(r1.contains(.{ .x = 0, .y = 0 }));
try tst.expect(r1.contains(.{ .x = 9, .y = 9 }));
try tst.expect(!r1.contains(.{ .x = 5, .y = -1 }));
try tst.expect(!r1.contains(.{ .x = 10, .y = 5 }));
}
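// An added sanity check (illustrative) for the corner/size getters defined above.
test "rect: corner and size getters" {
const tst = @import("std").testing;
const r = Rect(f32).init(1, 2, 3, 4);
try tst.expectEqual(@as(f32, 1), r.getCorner().x);
try tst.expectEqual(@as(f32, 2), r.getCorner().y);
try tst.expectEqual(@as(f32, 3), r.getSize().x);
try tst.expectEqual(@as(f32, 4), r.getSize().y);
}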
test "rect: sane from/to CSFML rect" {
const tst = @import("std").testing;
inline for ([_]type{ c_int, f32 }) |T| {
const rect = Rect(T).init(1, 3, 5, 10);
const crect = rect._toCSFML();
try tst.expectEqual(rect.left, crect.left);
try tst.expectEqual(rect.top, crect.top);
try tst.expectEqual(rect.width, crect.width);
try tst.expectEqual(rect.height, crect.height);
const rect2 = Rect(T)._fromCSFML(crect);
try tst.expectEqual(rect, rect2);
}
} | src/sfml/graphics/rect.zig |
pub const std = @import("std");
pub const lir = @import("ir.zig");
pub const LufType = @import("Value.zig").Type;
const Allocator = std.mem.Allocator;
const leb = std.leb;
const testing = std.testing;
const Op = enum(u8) {
@"unreachable" = 0x00,
nop = 0x01,
block = 0x02,
loop = 0x03,
@"if" = 0x04,
@"else" = 0x05,
end = 0x0B,
@"break" = 0x0C,
break_if = 0x0D,
break_table = 0x0E,
@"return" = 0x0F,
call = 0x10,
call_indirect = 0x11,
drop = 0x1A,
select = 0x1B,
local_get = 0x20,
local_set = 0x21,
local_tee = 0x22,
global_get = 0x23,
global_set = 0x24,
i64_load = 0x29,
i64_store = 0x37,
mem_size = 0x3F,
mem_grow = 0x40,
i32_const = 0x41,
i64_const = 0x42,
i32_eqz = 0x45,
i32_eq = 0x46,
i32_ne = 0x47,
i32_lt_s = 0x48,
i64_eqz = 0x50,
i64_eq = 0x51,
i64_ne = 0x52,
i64_lt = 0x53,
i64_gt = 0x55,
i64_le = 0x57,
i64_ge = 0x59,
i64_add = 0x7C,
i64_sub = 0x7D,
i64_mul = 0x7E,
i64_div = 0x7F,
i64_rem = 0x81,
i64_and = 0x83,
i64_or = 0x84,
i64_xor = 0x85,
i64_shl = 0x86,
i64_shr = 0x87,
/// Generates the Opcode for a specific tag and its type,
/// i.e. generates .i64_add if `wanted` = .add and `ty` = .integer
fn fromTagAndType(wanted: lir.Inst.Tag, ty: LufType) Op {
return switch (wanted) {
.add, .assign_add => .i64_add,
.sub, .assign_sub => .i64_sub,
.mul, .assign_mul => .i64_mul,
.div, .assign_div => .i64_div,
.eql => .i64_eq,
.nql => .i64_ne,
.lt => .i64_lt,
.gt => .i64_gt,
.eql_lt => .i64_le,
.eql_gt => .i64_ge,
.bitwise_xor => .i64_xor,
.bitwise_or => .i64_or,
.bitwise_and => .i64_and,
.shift_left => .i64_shl,
.shift_right => .i64_shr,
.mod => .i64_rem,
else => unreachable,
};
}
};
/// Section id's as described at:
/// https://webassembly.github.io/spec/core/binary/modules.html#sections
const SectionType = enum {
custom = 0,
@"type" = 1,
import = 2, // not used in Luf, everything is compiled to 1 compile unit
func = 3,
table = 4,
memory = 5,
global = 6,
@"export" = 7,
start = 8,
element = 9,
code = 10,
data = 11,
};
const Section = struct {
/// `SectionType` that is represented by this `Section`
ty: SectionType,
/// The full bytecode contained within this section.
/// use emit() to transfer to another writer
code: std.ArrayList(u8),
/// The amount of additions done to this section.
/// i.e. the amount of function bodies inside the 'code section'
count: u32,
/// Initializes a section
fn init(ty: SectionType, gpa: *Allocator) Section {
return .{ .ty = ty, .code = std.ArrayList(u8).init(gpa), .count = 0 };
}
/// Emits all code from the section into the writer
/// This invalidates the `code` saved into section itself
fn emit(self: *Section, writer: anytype) !void {
if (self.count == 0) return;
// section id
try leb.writeULEB128(writer, @enumToInt(self.ty));
// start section always has 1 element, therefore no need to emit counter
if (self.ty == .start) {
try leb.writeULEB128(writer, @intCast(u32, self.code.items.len));
} else {
// full payload length; the + 1 accounts for `count`, assuming it encodes to a single LEB128 byte (count < 128)
try leb.writeULEB128(writer, @intCast(u32, self.code.items.len + 1));
try leb.writeULEB128(writer, self.count);
}
try writer.writeAll(self.code.items);
self.code.deinit();
}
};
/// Contains all possible types as described at
/// https://webassembly.github.io/spec/core/binary/types.html
const Types = struct {
const block: i7 = -0x40; // void block
const func: i7 = -0x20;
const table: u8 = 0x70;
/// Limits as described at:
/// https://webassembly.github.io/spec/core/binary/types.html#limits
const Limits = enum(u1) {
zero,
one,
};
/// Wasm Value types as described at:
/// https://webassembly.github.io/spec/core/binary/types.html#value-types
const Value = enum(i7) {
I32 = -0x01,
I64 = -0x02,
F32 = -0x03,
F64 = -0x04,
/// returns the integer value of the enum;
/// a method-style shorthand for @enumToInt
fn val(self: Value) i7 {
return @enumToInt(self);
}
};
/// Export sections as described at:
/// http://webassembly.github.io/spec/core/binary/modules.html#export-section
const Export = enum(u7) {
func = 0,
table = 1,
mem = 2,
global = 3,
};
};
// Magic constants, required for a valid Wasm module
const module_header = [_]u8{ 0x00, 0x61, 0x73, 0x6D }; // 0x00asm
const module_version = [_]u8{ 0x01, 0x00, 0x00, 0x00 }; // v1
/// Utility struct with helper functions to make it easier
/// to manage instructions
pub const Wasm = struct {
/// buffer that will contain all instructions
buffer: std.ArrayList(u8),
/// array of all sections that are optional and may contain instructions
sections: [12]Section,
/// number of entries in `sections` that have been initialized (`sections` will be sorted)
section_size: usize,
/// allocator used to generate the instructions
gpa: *Allocator,
/// current function, used to check for return types in blocks
func: *lir.Inst.Function,
/// funcidx of the main function. Cannot be 'null' after iterating our declarations
main_index: ?usize,
/// Struct to save strings for use in Data section
data: Data,
/// Struct to handle strings inside data section
/// returns the offset of a particular string if found
const Data = struct {
/// map of strings as key and offset as value
strings: std.StringHashMapUnmanaged(i32),
/// wasm offset expects i32
offset: i32,
/// Returns `true` if the string already exists
fn contains(self: Data, key: []const u8) bool {
return self.strings.contains(key);
}
/// Returns a string's offset if found, else returns null
fn get(self: Data, key: []const u8) ?u32 {
return self.strings.get(key);
}
/// Puts a new key string into `strings`, asserts key does not exist
fn put(self: *Data, gpa: *Allocator, key: []const u8) !void {
try self.strings.putNoClobber(gpa, key, self.offset);
self.offset += @intCast(i32, key.len);
}
};
/// Errors emitted while generating the Wasm bytecode
pub const Error = error{
OutOfMemory,
InvalidType,
ParametersDisallowed,
};
/// Creates a new instance of `Wasm`
pub fn init(gpa: *Allocator) Wasm {
return .{
.buffer = std.ArrayList(u8).init(gpa),
.sections = undefined,
.section_size = 0,
.gpa = gpa,
.func = undefined,
.main_index = null,
.data = .{
.strings = std.StringHashMapUnmanaged(i32){},
.offset = 0,
},
};
}
/// Creates a new `Wasm` instance as well as generating the bytecode from Luf's IR
pub fn fromCu(gpa: *Allocator, cu: lir.CompileUnit) Error![]const u8 {
var wasm = init(gpa);
const writer = wasm.buffer.writer();
try writer.writeAll(module_header[0..]);
try writer.writeAll(module_version[0..]);
// build the bytecode
for (cu.instructions) |inst| {
if (inst.tag != .decl) continue; // only declarations
const decl = inst.as(lir.Inst.Decl);
if (decl.value.tag == .func)
try wasm.emitFunc(inst.as(lir.Inst.Decl))
else
try wasm.emitGlobal(decl);
}
// if main function defined, insert it into start section
if (wasm.main_index) |start_index| {
const start = wasm.section(.start);
try wasm.emitUnsigned(start.code.writer(), start_index);
start.count += 1;
}
// emit memory and export it
{
const mem = wasm.section(.memory);
try wasm.emitUnsigned(mem.code.writer(), @as(u1, 0));
try wasm.emitUnsigned(mem.code.writer(), @as(u32, 1));
mem.count += 1;
}
// sort our sections so they will be emitted in correct order
std.sort.sort(Section, wasm.sections[0..wasm.section_size], {}, sortSections);
// emit the sections into the final bytecode
for (wasm.sections[0..wasm.section_size]) |*sec| {
try sec.emit(writer);
}
return wasm.final();
}
/// Returns a section based on the given section type
/// if the section does not exist yet, a new one will be created
fn section(self: *Wasm, id: SectionType) *Section {
for (self.sections[0..self.section_size]) |*s| if (s.ty == id) return s;
// not found, so create a new Section and return a pointer to it
self.sections[self.section_size] = Section.init(id, self.gpa);
defer self.section_size += 1;
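// The deferred increment runs at return, so the pointer taken below still refers to the freshly initialized slot.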
return &self.sections[self.section_size];
}
/// Returns the current instruction set as Wasm
/// This invalidates the current `buffer` of instructions
/// It is not required to call deinit() after calling this
pub fn final(self: *Wasm) []const u8 {
return self.buffer.toOwnedSlice();
}
/// Frees the instruction buffer. This function is not required
/// if final() has been called
pub fn deinit(self: *Wasm) void {
self.buffer.deinit();
}
/// Resolves the Wasm's value type given the LufType
fn resolveValType(ty: LufType) Types.Value {
return switch (ty) {
.integer => Types.Value.I64,
.boolean => Types.Value.I32,
else => @panic("TODO: Implement more types for wasm"),
};
}
/// Generates and appends instructions based on the given IR instruction
fn gen(self: *Wasm, writer: anytype, inst: *lir.Inst) Error!void {
switch (inst.tag) {
.add,
.sub,
.mul,
.div,
.eql,
.nql,
.lt,
.gt,
.bitwise_xor,
.bitwise_or,
.bitwise_and,
.shift_left,
.shift_right,
.@"and",
.@"or",
.eql_lt,
.eql_gt,
.mod,
=> try self.emitInfix(writer, inst.as(lir.Inst.Double)),
.not, .bitwise_not, .negate => try self.emitPrefix(writer, inst.as(lir.Inst.Single)),
.int => try self.emitInt(writer, inst.as(lir.Inst.Int)),
.string => try self.emitString(inst.as(lir.Inst.String)),
.primitive => {},
.ident => try self.emitIdent(writer, inst.as(lir.Inst.Ident)),
.expr => try self.gen(writer, inst.as(lir.Inst.Single).rhs),
.decl => try self.emitDecl(writer, inst.as(lir.Inst.Decl)),
.@"return" => try self.emitRet(writer, inst.as(lir.Inst.Single)),
.assign,
.assign_add,
.assign_sub,
.assign_mul,
.assign_div,
=> try self.emitAssign(writer, inst.as(lir.Inst.Double)),
.store => {},
.load => {},
.list, .map => {},
.pair => {},
.range => {},
.import => {},
.@"enum" => {},
.condition => try self.emitCond(writer, inst.as(lir.Inst.Condition)),
.block => try self.emitBlock(writer, inst.as(lir.Inst.Block)),
.func => {},
.call => try self.emitCall(writer, inst.as(lir.Inst.Call)),
.@"while" => try self.emitWhile(writer, inst.as(lir.Inst.Double)),
.@"switch" => {},
.branch => {},
.@"break" => {},
.@"continue" => {},
.@"for" => {},
.slice => {},
.comment, .type_def, .func_arg => {}, //ignore those
}
}
/// Writes an unsigned integer value
fn emitUnsigned(self: *Wasm, writer: anytype, value: anytype) !void {
try leb.writeULEB128(writer, value);
}
/// Writes a signed integer value
fn emitSigned(self: *Wasm, writer: anytype, value: anytype) !void {
try leb.writeILEB128(writer, value);
}
/// Emits an integer
fn emitInt(self: *Wasm, writer: anytype, int: *lir.Inst.Int) !void {
try self.emit(writer, .i64_const);
try self.emitSigned(writer, @intCast(i64, int.value));
}
/// Saves the string in the Data section
fn emitString(self: *Wasm, string: *lir.Inst.String) !void {
if (self.data.contains(string.value)) return;
// string does not exist yet, so add it to our data section, as well as to
// our data struct
const data = self.section(.data);
const writer = data.code.writer();
// index is always 0
try self.emitUnsigned(writer, @as(u32, 0));
// offset
try self.emit(writer, .i32_const);
try self.emitSigned(writer, self.data.offset);
// length + value
try self.emitUnsigned(writer, @intCast(u32, string.value.len));
try writer.writeAll(string.value);
try self.emit(writer, .end);
data.count += 1;
// save it in our data struct to calculate the offset for later
try self.data.put(self.gpa, string.value);
}
/// Emits a single opcode
fn emit(self: *Wasm, writer: anytype, op: Op) !void {
try writer.writeByte(@enumToInt(op));
}
/// Emits a binary operator opcode based on the given types
fn emitInfix(self: *Wasm, writer: anytype, double: *lir.Inst.Double) !void {
try self.gen(writer, double.lhs);
try self.gen(writer, double.rhs);
switch (double.base.tag) {
.@"and", .@"or" => @panic("TODO Add support for logical AND/OR"),
else => try self.emit(writer, Op.fromTagAndType(double.base.tag, double.lhs.ty)),
}
}
/// Emits a prefix operator such as negate, mod, etc
fn emitPrefix(self: *Wasm, writer: anytype, single: *lir.Inst.Single) !void {
// Wasm does not have opcodes for prefix operators,
// so we lower them manually
switch (single.base.tag) {
.negate => {
try self.emit(writer, .i64_const);
try self.emitSigned(writer, @as(i64, 0));
try self.gen(writer, single.rhs);
try self.emit(writer, .i64_sub);
},
.not => {
try self.gen(writer, single.rhs);
try self.emit(writer, .i32_eqz);
},
.bitwise_not => {
try self.gen(writer, single.rhs);
try self.emit(writer, .i64_const);
try self.emitSigned(writer, @as(i64, -1));
try self.emit(writer, .i64_xor);
},
else => unreachable,
}
}
/// Generates the instructions contained in a block
fn emitBlock(self: *Wasm, writer: anytype, block: *lir.Inst.Block) !void {
for (block.instructions) |inst| try self.gen(writer, inst);
}
/// Loads a local or global variable onto the stack
fn emitIdent(self: *Wasm, writer: anytype, ident: *lir.Inst.Ident) !void {
try self.emit(writer, if (ident.scope == .global) Op.global_get else Op.local_get);
try self.emitUnsigned(writer, ident.index);
}
/// Emits a return statement in Wasm
fn emitRet(self: *Wasm, writer: anytype, ret: *lir.Inst.Single) !void {
try self.gen(writer, ret.rhs);
try self.emit(writer, .@"return");
}
/// Emits bytecode that first retrieves the local or global and then assigns
/// a new value to it
fn emitAssign(self: *Wasm, writer: anytype, double: *lir.Inst.Double) !void {
const ident = double.lhs.as(lir.Inst.Ident);
// get the local/global
try self.gen(writer, double.lhs);
// generate the new value
try self.gen(writer, double.rhs);
// if operator, first apply it
switch (double.base.tag) {
.assign => {},
else => try self.emit(writer, Op.fromTagAndType(double.base.tag, ident.base.ty)),
}
// set the new value to the local/global
try self.emit(writer, if (ident.scope == .global) Op.global_set else Op.local_set);
try self.emitUnsigned(writer, ident.index);
}
/// Emits Wasm for the declaration's value to put it on the stack
/// and then generates a .local_set or .global_set based on its scope
fn emitDecl(self: *Wasm, writer: anytype, decl: *lir.Inst.Decl) !void {
try self.gen(writer, decl.value);
try self.emit(writer, .local_set); // globals are handled separately, therefore this is a local
try self.emitUnsigned(writer, decl.index);
}
/// Emits Wasm to perform a while loop. This first opens an outer block
/// and then a loop inside it
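/// Informal sketch of the emitted structure:
///   block
///     loop
///       <condition> i32.eqz
///       br_if 1   ;; leave the outer block when the condition is false
///       <body>
///       br 0      ;; jump back to the start of the loop
///     end
///   end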
fn emitWhile(self: *Wasm, writer: anytype, loop: *lir.Inst.Double) !void {
// begin block
try self.emit(writer, .block);
try self.emitSigned(writer, Types.block);
// begin loop
try self.emit(writer, .loop);
try self.emitSigned(writer, Types.block);
// generate the condition
try self.gen(writer, loop.lhs);
// break loop if condition = false
try self.emit(writer, .i32_eqz);
try self.emit(writer, .break_if);
try self.emitUnsigned(writer, @as(u32, 1));
// finally, generate its body
try self.gen(writer, loop.rhs);
// continue at loop label
try self.emit(writer, .@"break");
try self.emitUnsigned(writer, @as(u32, 0));
// end loop
try self.emit(writer, .end);
// end block
try self.emit(writer, .end);
}
/// Emits Wasm for an if-statement
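/// Informal sketch: <condition> if <blocktype> <then-block> [else <else-block>] end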
fn emitCond(self: *Wasm, writer: anytype, cond: *lir.Inst.Condition) !void {
// generate the condition to determine if or else
try self.gen(writer, cond.cond);
// start our if statement with an implicit 'then block'
try self.emit(writer, .@"if");
if (self.func.ret_type.ty == ._void)
try self.emitSigned(writer, Types.block)
else
try self.emitSigned(writer, resolveValType(self.func.ret_type.ty).val());
try self.gen(writer, cond.then_block);
if (cond.else_block) |alt| {
try self.emit(writer, .@"else");
try self.gen(writer, alt);
}
// end our if statement
try self.emit(writer, .end);
}
/// Emits the bytecode for a global variable by appending it to the
/// 'global' section of the module
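/// Each entry is laid out as: valtype, mutability flag, init expression, end
/// (e.g. \x7e\x00\x42\x05\x0b for an immutable integer global initialized to 5, see the Globals test)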
fn emitGlobal(self: *Wasm, decl: *lir.Inst.Decl) !void {
const sec = self.section(.global);
const writer = sec.code.writer();
// emit its type
try self.emitSigned(writer, resolveValType(decl.value.ty).val());
// mutability: 0 immutable, 1 mutable
try self.emitUnsigned(writer, @boolToInt(decl.is_mut));
switch (decl.value.ty) {
.integer => try self.emitInt(writer, decl.value.as(lir.Inst.Int)),
.boolean => {
try self.emit(writer, .i32_const);
const prim = decl.value.as(lir.Inst.Primitive);
try self.emitSigned(writer, @as(i32, if (prim.prim_type == .@"true") 1 else 0));
},
else => @panic("TODO: Implement globals for non-integer/bool types"),
}
// emit 'end' so wasm is aware where our global ends
try self.emit(writer, .end);
// TODO: Save the globalidx somewhere
sec.count += 1;
}
/// Emits Wasm bytecode to call a function
fn emitCall(self: *Wasm, writer: anytype, call: *lir.Inst.Call) !void {
// generate arguments
for (call.args) |arg| try self.gen(writer, arg);
// find funcidx and emit .call (0x10)
const ident = call.func.as(lir.Inst.Ident);
try self.emit(writer, .call);
// functions are compiled first by the compiler, therefore their indices
// will match Wasm's funcidx values. This means we can use the index directly
try self.emitUnsigned(writer, ident.index);
}
/// Emits a Wasm function. This expects a declaration rather than a function,
/// as we need information regarding its index and name
fn emitFunc(self: *Wasm, decl: *lir.Inst.Decl) !void {
const func = decl.value.as(lir.Inst.Function);
const name = decl.name;
self.func = func;
// if main is declared, ensure it has no parameters and no return type
const is_main = if (std.mem.eql(u8, "main", name) and decl.is_pub) blk: {
if (func.ret_type.ty != ._void) return Error.InvalidType;
if (func.args.len > 0) return Error.ParametersDisallowed;
break :blk true;
} else false;
// register the function type
const type_section = self.section(.@"type");
const type_idx = try emitFuncType(type_section, func);
// register the function itself using the type idx created above
const func_section = self.section(.func);
const func_idx = func_section.count;
try leb.writeULEB128(func_section.code.writer(), func_idx);
func_section.count += 1; // manually increase as no helper function here
if (is_main) self.main_index = func_idx;
// register the code section, this will contain the body of the function
const code_section = self.section(.code);
try self.emitFuncBody(code_section, func);
if (decl.is_pub) {
const export_section = self.section(.@"export");
try exportFunc(export_section, decl);
}
}
/// Appends a function body into the given section
/// Will increase the section's count by 1
fn emitFuncBody(self: *Wasm, sec: *Section, func: *lir.Inst.Function) !void {
var func_body = std.ArrayList(u8).init(self.gpa);
defer func_body.deinit();
const writer = func_body.writer();
// generate bytecode for locals
if (func.locals.len - func.args.len > 0) {
var locals_map = std.AutoArrayHashMap(Types.Value, u32).init(self.gpa);
defer locals_map.deinit();
for (func.locals[func.args.len..]) |local| {
const ty = resolveValType(local.ty);
const entry = try locals_map.getOrPut(ty);
if (entry.found_existing)
entry.entry.value += 1
else
entry.entry.value = 1;
}
// write the locals: emit the number of distinct value types, then a (count, valtype) pair per type
try self.emitUnsigned(writer, locals_map.items().len);
for (locals_map.items()) |entry| {
try self.emitUnsigned(writer, entry.value);
try self.emitSigned(writer, entry.key.val());
}
} else try self.emitUnsigned(writer, @as(u32, 0));
const body = func.body.as(lir.Inst.Block);
// generate the bytecode for the body
for (body.instructions) |inst| {
try self.gen(writer, inst);
}
// "end" byte that concludes the function body
try self.emit(writer, .end);
const sec_writer = sec.code.writer();
try leb.writeULEB128(sec_writer, @intCast(u32, func_body.items.len));
try sec_writer.writeAll(func_body.items);
sec.count += 1;
}
};
/// Emits a function type and appends it to the given section
/// Returns the typeidx and increases the section's count by 1
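/// Each entry is encoded as 0x60 followed by a vector of parameter types and a vector of result types
/// (e.g. \x60\x02\x7e\x7e\x01\x7e for fn(x: int, y: int) int, see the Functions test below)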
fn emitFuncType(sec: *Section, func: *lir.Inst.Function) !u32 {
const writer = sec.code.writer();
// tell wasm it's a function type
try leb.writeILEB128(writer, Types.func);
// emit arguments length and their types
try leb.writeULEB128(writer, @intCast(u32, func.args.len));
for (func.args) |arg| try leb.writeILEB128(writer, Wasm.resolveValType(arg.ty).val());
// Result types -> Wasm only allows for 1 result type, currently
// if return type is void, provide no return type
if (func.ret_type.ty == ._void) {
try leb.writeULEB128(writer, @as(u1, 0));
} else {
try leb.writeULEB128(writer, @as(u1, 1));
try leb.writeILEB128(writer, Wasm.resolveValType(func.ret_type.ty).val());
}
// Make sure we increase the size of the section
sec.count += 1;
// the typeidx
return sec.count - 1;
}
/// Emits a function into the 'export' section
fn exportFunc(sec: *Section, decl: *lir.Inst.Decl) !void {
const writer = sec.code.writer();
try leb.writeULEB128(writer, @intCast(u32, decl.name.len));
try writer.writeAll(decl.name);
try writer.writeByte(@enumToInt(Types.Export.func));
try leb.writeULEB128(writer, decl.index);
sec.count += 1;
}
/// function used to sort sections by their type
fn sortSections(context: void, lhs: Section, rhs: Section) bool {
return @enumToInt(lhs.ty) < @enumToInt(rhs.ty);
}
/// When `file_name` is set to a value, it will generate a wasm binary file
/// which can be used by external tools to inspect the output
const TestOutput = struct {
file_name: ?[]const u8 = null,
print_output: bool = false,
};
/// compiles input and checks if it matches the expected output
fn testWasm(input: []const u8, expected: []const u8, with_output: TestOutput) !void {
const alloc = testing.allocator;
var err = @import("error.zig").Errors.init(alloc);
defer err.deinit();
var cu = try @import("compiler.zig").compile(alloc, input, &err);
defer cu.deinit();
const wasm = try Wasm.fromCu(alloc, cu);
defer alloc.free(wasm);
if (with_output.file_name) |name| {
var file = try std.fs.cwd().createFile(name, .{});
defer file.close();
try file.writeAll(wasm);
}
if (with_output.print_output) {
for (wasm) |c| std.debug.print("\\x{x:0>2}", .{c});
std.debug.print("\n", .{});
}
try testing.expectEqualSlices(u8, expected, wasm);
}
const magic_bytes = &[_]u8{ 0, 'a', 's', 'm', 1, 0, 0, 0 };
test "IR to Wasm - Functions" {
const input = "pub const add = fn(x: int, y: int) int { return x + y }";
const expected = magic_bytes ++ // \0asm (module
"\x01\x07\x01\x60\x02\x7e\x7e\x01\x7e" ++ // (type (func (param i64 i64) (result i64)))
"\x03\x02\x01\x00" ++ // (func (type 0))
"\x05\x03\x01\x00\x01" ++ // (memory 0 1)
"\x07\x07\x01\x03\x61\x64\x64\x00\x00" ++ // (export "add" (func 0))
"\x0a\x0a\x01\x08\x00\x20\x00\x20\x01\x7c\x0f\x0b"; // (code: local.get 0, local.get 1, i64.add, return, end)
try testWasm(input, expected, .{});
}
test "IR to Wasm - Conditional" {
const input =
\\pub const con = fn(x: int) int {
\\ if (x == 2) {
\\ return 5
\\ } else {
\\ return 10
\\ }
\\}
;
const expected = magic_bytes ++
"\x01\x06\x01\x60\x01\x7e\x01\x7e" ++
"\x03\x02\x01\x00" ++
"\x05\x03\x01\x00\x01" ++
"\x07\x07\x01\x03\x63\x6f\x6e\x00\x00" ++
"\x0a\x13\x01\x11\x00\x20\x00\x42\x02\x51\x04\x7e\x42\x05\x0f\x05\x42\x0a\x0f\x0b\x0b";
try testWasm(input, expected, .{});
}
test "IR to Wasm - Function locals" {
const input =
\\pub const loc = fn(x: int) int {
\\ const y = 20
\\ if (x == 2) {
\\ return 5
\\ } else {
\\ return 10
\\ }
\\}
;
const expected = magic_bytes ++
"\x01\x06\x01\x60\x01\x7e\x01\x7e" ++
"\x03\x02\x01\x00" ++
"\x05\x03\x01\x00\x01" ++
"\x07\x07\x01\x03\x6c\x6f\x63\x00\x00" ++
"\x0a\x19\x01\x17\x01\x01\x7E\x42\x14\x21\x01" ++ // locals section
"\x20\x00\x42\x02\x51\x04\x7e\x42\x05\x0f\x05\x42\x0a\x0f\x0b\x0b";
try testWasm(input, expected, .{});
}
test "IR to Wasm - Globals" {
const input =
\\const x = 5
\\pub const test = fn()void{}
;
const expected = magic_bytes ++
"\x00\x61\x73\x6d" ++
"\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00" ++
"\x03\x02\x01\x00" ++
"\x05\x03\x01\x00\x01" ++
"\x06\x0b\x02\x7e\x00\x42\x05\x0b\x7e\x00\x42\x05\x0b" ++ // globals section
"\x07\x08\x01\x04\x74\x65\x73\x74\x00\x01" ++
"\x0a\x04\x01\x02\x00\x0b";
//try testWasm(input, expected, .{.print_output = true});
}
test "IR to Wasm - main func" {
const input = "pub const main = fn()void{}";
const expected = magic_bytes ++
"\x01\x04\x01\x60\x00\x00" ++
"\x03\x02\x01\x00" ++
"\x05\x03\x01\x00\x01" ++
"\x07\x08\x01\x04\x6d\x61\x69\x6e\x00\x00" ++
"\x08\x01\x00" ++ // start section
"\x0a\x04\x01\x02\x00\x0b";
try testWasm(input, expected, .{});
}
test "IR to Wasm - Function call" {
const input =
\\pub const addOne = fn(x: int) int {
\\ return x + 1
\\}
\\pub const main = fn() void {
\\ const x = addOne(1)
\\}
;
const expected = magic_bytes ++
"\x01\x09\x02\x60\x01\x7e\x01\x7e\x60\x00\x00" ++
"\x03\x03\x02\x00\x01" ++
"\x05\x03\x01\x00\x01" ++
"\x07\x11\x02\x06\x61\x64\x64\x4f\x6e\x65\x00\x00\x04\x6d\x61\x69\x6e\x00\x01" ++
"\x08\x01\x01\x0a\x15\x02\x08\x00\x20\x00\x42\x01\x7c\x0f\x0b" ++
"\x0a\x01\x01\x7e\x42\x01\x10\x00\x21\x00\x0b";
try testWasm(input, expected, .{});
}
test "IR to Wasm - Loop" {
const input =
\\pub const loop = fn() void {
\\ const x = 5
\\ mut i = 0
\\ while(i < x) {
\\ i = i + 1
\\ }
\\}
;
const expected = magic_bytes ++
"\x01\x04\x01\x60\x00\x00" ++
"\x03\x02\x01\x00" ++
"\x05\x03\x01\x00\x01" ++
"\x07\x08\x01\x04\x6c\x6f\x6f\x70\x00\x00" ++
"\x0a\x27\x01\x25\x01\x02\x7e\x42\x05\x21\x00\x42\x00\x21\x01\x02\x40" ++
"\x03\x40\x20\x01\x20\x00\x53\x45\x0d\x01\x20\x01\x20\x01\x42\x01\x7c\x21\x01\x0c\x00\x0b\x0b\x0b"; // loop starts at 0x03
try testWasm(input, expected, .{});
} | src/wasm.zig |
const std = @import("std");
const ast = @import("ast.zig");
const interpreter = @import("interpreter.zig");
const mem = @import("gc.zig");
const SourceLocation = @import("sourcelocation.zig").SourceLocation;
const Env = ast.Env;
const Expr = ast.Expr;
const ExprValue = ast.ExprValue;
const ExprType = ast.ExprType;
const ExprErrors = ast.ExprErrors;
const Interpreter = interpreter.Interpreter;
// Intrinsic symbol and function expressions. These expressions are not registered with
// the GC; they are considered pinned and are never deallocated.
pub var expr_atom_last_eval = Expr{ .val = ExprValue{ .sym = "#?" } };
pub var expr_atom_last_try_err = Expr{ .val = ExprValue{ .sym = "#!" } };
pub var expr_atom_last_try_value = Expr{ .val = ExprValue{ .sym = "#value" } };
pub var expr_atom_quote = Expr{ .val = ExprValue{ .sym = "quote" } };
pub var expr_atom_quasi_quote = Expr{ .val = ExprValue{ .sym = "quasiquote" } };
pub var expr_atom_unquote = Expr{ .val = ExprValue{ .sym = "unquote" } };
pub var expr_atom_unquote_splicing = Expr{ .val = ExprValue{ .sym = "unquote-splicing" } };
pub var expr_atom_list = Expr{ .val = ExprValue{ .sym = "list" } };
pub var expr_atom_if = Expr{ .val = ExprValue{ .sym = "if" } };
pub var expr_atom_cond = Expr{ .val = ExprValue{ .sym = "cond" } };
pub var expr_atom_begin = Expr{ .val = ExprValue{ .sym = "begin" } };
pub var expr_atom_false = Expr{ .val = ExprValue{ .sym = "#f" } };
pub var expr_atom_true = Expr{ .val = ExprValue{ .sym = "#t" } };
pub var expr_atom_nil = Expr{ .val = ExprValue{ .sym = "nil" } };
pub var expr_atom_rest = Expr{ .val = ExprValue{ .sym = "&rest" } };
pub var expr_atom_mut = Expr{ .val = ExprValue{ .sym = "&mut" } };
pub var expr_atom_break = Expr{ .val = ExprValue{ .sym = "&break" } };
pub var expr_std_math_pi = Expr{ .val = ExprValue{ .num = std.math.pi } };
pub var expr_std_math_e = Expr{ .val = ExprValue{ .num = std.math.e } };
pub var expr_std_import = Expr{ .val = ExprValue{ .fun = stdImport } };
pub var expr_std_exit = Expr{ .val = ExprValue{ .fun = stdExit } };
pub var expr_std_verbose = Expr{ .val = ExprValue{ .fun = stdVerbose } };
pub var expr_std_assert_true = Expr{ .val = ExprValue{ .fun = stdAssertTrue } };
pub var expr_std_is_number = Expr{ .val = ExprValue{ .fun = stdIsNumber } };
pub var expr_std_is_symbol = Expr{ .val = ExprValue{ .fun = stdIsSymbol } };
pub var expr_std_is_list = Expr{ .val = ExprValue{ .fun = stdIsList } };
pub var expr_std_is_hashmap = Expr{ .val = ExprValue{ .fun = stdIsHashmap } };
pub var expr_std_is_err = Expr{ .val = ExprValue{ .fun = stdIsError } };
pub var expr_std_is_callable = Expr{ .val = ExprValue{ .fun = stdIsCallable } };
pub var expr_std_gensym = Expr{ .val = ExprValue{ .fun = stdGenSym } };
pub var expr_std_quote = Expr{ .val = ExprValue{ .fun = stdQuote } };
pub var expr_std_unquote = Expr{ .val = ExprValue{ .fun = stdUnquote } };
pub var expr_std_unquote_splicing = Expr{ .val = ExprValue{ .fun = stdUnquoteSplicing } };
pub var expr_std_quasi_quote = Expr{ .val = ExprValue{ .fun = stdQuasiQuote } };
pub var expr_std_double_quote = Expr{ .val = ExprValue{ .fun = stdDoubleQuote } };
pub var expr_std_len = Expr{ .val = ExprValue{ .fun = stdLen } };
pub var expr_std_range = Expr{ .val = ExprValue{ .fun = stdRange } };
pub var expr_std_rotate_left = Expr{ .val = ExprValue{ .fun = stdRotateLeft } };
pub var expr_std_item_at = Expr{ .val = ExprValue{ .fun = stdItemAt } };
pub var expr_std_item_set = Expr{ .val = ExprValue{ .fun = stdItemSet } };
pub var expr_std_item_remove = Expr{ .val = ExprValue{ .fun = stdItemRemove } };
pub var expr_std_string = Expr{ .val = ExprValue{ .fun = stdString } };
pub var expr_std_print = Expr{ .val = ExprValue{ .fun = stdPrint } };
pub var expr_std_env = Expr{ .val = ExprValue{ .fun = stdEnv } };
pub var expr_std_self = Expr{ .val = ExprValue{ .fun = stdSelf } };
pub var expr_std_define = Expr{ .val = ExprValue{ .fun = stdDefine } };
pub var expr_std_lambda = Expr{ .val = ExprValue{ .fun = stdLambda } };
pub var expr_std_macro = Expr{ .val = ExprValue{ .fun = stdMacro } };
pub var expr_std_eval_string = Expr{ .val = ExprValue{ .fun = stdEvalString } };
pub var expr_std_eval = Expr{ .val = ExprValue{ .fun = stdEval } };
pub var expr_std_apply = Expr{ .val = ExprValue{ .fun = stdApply } };
pub var expr_std_list = Expr{ .val = ExprValue{ .fun = stdList } };
pub var expr_std_iterate = Expr{ .val = ExprValue{ .fun = stdIterate } };
pub var expr_std_map_new = Expr{ .val = ExprValue{ .fun = stdHashmapNew } };
pub var expr_std_map_put = Expr{ .val = ExprValue{ .fun = stdHashmapPut } };
pub var expr_std_map_get = Expr{ .val = ExprValue{ .fun = stdHashmapGet } };
pub var expr_std_map_remove = Expr{ .val = ExprValue{ .fun = stdHashmapRemove } };
pub var expr_std_map_clear = Expr{ .val = ExprValue{ .fun = stdHashmapClear } };
pub var expr_std_loop = Expr{ .val = ExprValue{ .fun = stdLoop } };
pub var expr_std_split = Expr{ .val = ExprValue{ .fun = stdSplit } };
pub var expr_std_append = Expr{ .val = ExprValue{ .fun = stdAppend } };
pub var expr_std_unset = Expr{ .val = ExprValue{ .fun = stdUnset } };
pub var expr_std_error = Expr{ .val = ExprValue{ .fun = stdError } };
pub var expr_std_try = Expr{ .val = ExprValue{ .fun = stdTry } };
pub var expr_std_set = Expr{ .val = ExprValue{ .fun = stdSet } };
pub var expr_std_sum = Expr{ .val = ExprValue{ .fun = stdSum } };
pub var expr_std_sub = Expr{ .val = ExprValue{ .fun = stdSub } };
pub var expr_std_mul = Expr{ .val = ExprValue{ .fun = stdMul } };
pub var expr_std_div = Expr{ .val = ExprValue{ .fun = stdDiv } };
pub var expr_std_pow = Expr{ .val = ExprValue{ .fun = stdPow } };
pub var expr_std_time_now = Expr{ .val = ExprValue{ .fun = stdTimeNow } };
pub var expr_std_floor = Expr{ .val = ExprValue{ .fun = stdFloor } };
pub var expr_std_round = Expr{ .val = ExprValue{ .fun = stdRound } };
pub var expr_std_min = Expr{ .val = ExprValue{ .fun = stdMin } };
pub var expr_std_max = Expr{ .val = ExprValue{ .fun = stdMax } };
pub var expr_std_as = Expr{ .val = ExprValue{ .fun = stdAs } };
pub var expr_std_split_atom = Expr{ .val = ExprValue{ .fun = stdSplitAtom } };
pub var expr_std_order = Expr{ .val = ExprValue{ .fun = stdOrder } };
pub var expr_std_eq = Expr{ .val = ExprValue{ .fun = stdEq } };
pub var expr_std_eq_approx = Expr{ .val = ExprValue{ .fun = stdEqApprox } };
pub var expr_std_eq_reference = Expr{ .val = ExprValue{ .fun = stdEqReference } };
pub var expr_std_run_gc = Expr{ .val = ExprValue{ .fun = stdRunGc } };
pub var expr_std_file_open = Expr{ .val = ExprValue{ .fun = stdFileOpen } };
pub var expr_std_file_close = Expr{ .val = ExprValue{ .fun = stdFileClose } };
pub var expr_std_file_read_line = Expr{ .val = ExprValue{ .fun = stdFileReadLine } };
pub var expr_std_file_write_line = Expr{ .val = ExprValue{ .fun = stdFileWriteLine } };
pub var expr_std_file_read_byte = Expr{ .val = ExprValue{ .fun = stdFileReadByte } };
pub fn requireExactArgCount(args_required: usize, args: []const *Expr) !void {
if (args.len != args_required) {
return ExprErrors.InvalidArgumentCount;
}
}
pub fn requireMinimumArgCount(args_required: usize, args: []const *Expr) !void {
if (args.len < args_required) {
return ExprErrors.InvalidArgumentCount;
}
}
pub fn requireType(ev: *Interpreter, expr: *Expr, etype: ExprType) !void {
if (expr.val != etype) {
try ev.printErrorFmt(&expr.src, "Expected {}, got argument type: {}, actual value:\n", .{ etype, std.meta.activeTag(expr.val) });
try expr.print();
return ExprErrors.AlreadyReported;
}
}
/// Signals the interpreter to terminate with an exit code. We don't simply terminate
/// the process here, as we're running with leak detection; instead we set an exit code
/// that causes the eval loop to exit. This helps stress-test the GC/cleanup logic.
pub fn stdExit(ev: *Interpreter, _: *Env, args: []const *Expr) anyerror!*Expr {
var exit_code: u8 = 0;
if (args.len > 0 and args[0].val == ExprType.num) {
exit_code = @floatToInt(u8, args[0].val.num);
}
ev.exit_code = exit_code;
return &expr_atom_nil;
}
/// Open a file for reading and writing, creating it if necessary. This produces an "any"
/// expression, where the value is an opaque pointer that is cast back to its actual
/// type when needed, such as in stdFileReadLine.
pub fn stdFileOpen(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(1, args);
const filename_expr = try ev.eval(env, args[0]);
if (filename_expr.val == ExprType.sym) {
var path = try std.fs.path.resolve(mem.allocator, &.{filename_expr.val.sym});
defer mem.allocator.free(path);
var file = try mem.allocator.create(std.fs.File);
errdefer mem.allocator.destroy(file);
file.* = std.fs.createFileAbsolute(path, .{ .truncate = false, .read = true }) catch |err| {
try ev.printErrorFmt(&filename_expr.src, "Could not open file: {}\n", .{err});
return err;
};
var expr = try Expr.create(true);
expr.val = ExprValue{ .any = @ptrToInt(file) };
return expr;
}
return &expr_atom_nil;
}
/// Close a file, and deallocate the associated File object
pub fn stdFileClose(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(1, args);
const file_ptr = try ev.eval(env, args[0]);
try requireType(ev, file_ptr, ExprType.any);
const file = @intToPtr(*std.fs.File, file_ptr.val.any);
file.close();
mem.allocator.destroy(file);
return &expr_atom_nil;
}
/// Reads a byte from the given file
pub fn stdFileReadByte(ev: *Interpreter, env: *Env, args: []const *Expr) !*Expr {
try requireExactArgCount(1, args);
const file_ptr = try ev.eval(env, args[0]);
try requireType(ev, file_ptr, ExprType.any);
const file = @intToPtr(*std.fs.File, file_ptr.val.any);
if (file.reader().readByte()) |byte| {
return ast.makeAtomByDuplicating(&.{byte});
} else |e| switch (e) {
error.EndOfStream => return try ast.makeError(try ast.makeAtomByDuplicating("EOF")),
else => return try ast.makeError(try ast.makeAtomByDuplicating("Could not read from file")),
}
}
/// Reads a line from the given file, or from stdin if no argument is given
pub fn stdFileReadLine(ev: *Interpreter, env: *Env, args: []const *Expr) !*Expr {
var reader: std.fs.File.Reader = std.io.getStdIn().reader();
if (args.len > 0) {
try requireExactArgCount(1, args);
const file_ptr = try ev.eval(env, args[0]);
try requireType(ev, file_ptr, ExprType.any);
const file = @intToPtr(*std.fs.File, file_ptr.val.any);
reader = file.reader();
}
if (reader.readUntilDelimiterOrEofAlloc(mem.allocator, '\n', std.math.maxInt(usize))) |maybe| {
if (maybe) |line| {
return ast.makeAtomAndTakeOwnership(line);
} else {
return try ast.makeError(try ast.makeAtomByDuplicating("EOF"));
}
} else |_| {
return try ast.makeError(try ast.makeAtomByDuplicating("Could not read from file"));
}
}
/// Appends a line to the file, or to stdout if no argument is given
pub fn stdFileWriteLine(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireMinimumArgCount(1, args);
const line_to_write = try ev.eval(env, args[args.len - 1]);
try requireType(ev, line_to_write, ExprType.sym);
var writer: std.fs.File.Writer = std.io.getStdOut().writer();
if (args.len > 1) {
try requireExactArgCount(2, args);
const file_ptr = try ev.eval(env, args[0]);
try requireType(ev, file_ptr, ExprType.any);
const file = @intToPtr(*std.fs.File, file_ptr.val.any);
try file.seekFromEnd(0);
writer = file.writer();
}
try writer.writeAll(line_to_write.val.sym);
_ = try writer.write("\n");
return &expr_atom_nil;
}
/// Import and evaluate a Bio file
pub fn stdImport(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(1, args);
const filename_expr = try ev.eval(env, args[0]);
if (filename_expr.val == ExprType.sym) {
var out: [std.fs.MAX_PATH_BYTES]u8 = undefined;
var path = std.fs.realpath(filename_expr.val.sym, &out) catch |err| switch (err) {
std.os.RealPathError.FileNotFound => {
try ev.printErrorFmt(&filename_expr.src, "File not found: {s}\n", .{filename_expr.val.sym});
return ExprErrors.AlreadyReported;
},
else => return err,
};
try SourceLocation.push(path[0..]);
defer SourceLocation.pop();
const file = std.fs.openFileAbsolute(path, .{}) catch |err| {
try ev.printErrorFmt(&filename_expr.src, "Could not open file: {}\n", .{err});
return err;
};
defer file.close();
const reader = file.reader();
var res: *Expr = &expr_atom_nil;
while (!ev.has_errors) {
if (ev.readBalancedExpr(&reader, "")) |maybe| {
if (maybe) |input| {
defer mem.allocator.free(input);
if (try ev.parseAndEvalExpression(input)) |e| {
res = e;
try ev.env.put("#?", res);
}
} else {
break;
}
} else |err| {
try ev.printErrorFmt(SourceLocation.current(), "", .{});
try ev.printError(err);
break;
}
}
return res;
} else {
return ExprErrors.InvalidArgumentType;
}
}
pub fn stdRunGc(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
_ = &.{ ev, env, args };
try mem.gc.run(true);
return &expr_atom_nil;
}
pub fn stdVerbose(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
_ = &.{ env, args };
ev.verbose = !ev.verbose;
const bool_str = if (ev.verbose) "on " else "off";
try std.io.getStdOut().writer().print("Verbosity is now {s}\n", .{bool_str});
return &expr_atom_nil;
}
pub fn stdAssertTrue(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(1, args);
if ((try ev.eval(env, args[0])) != &expr_atom_true) {
try std.io.getStdOut().writer().print("Assertion failed {s} line {d}\n", .{ args[0].src.file, args[0].src.line });
std.process.exit(0);
}
return &expr_atom_true;
}
/// Renders the expression as a string and returns an owned slice.
/// For now, only newline escapes are handled (this should be extended to cover the remaining escapes)
fn render(ev: *Interpreter, env: *Env, expr: *Expr) ![]u8 {
_ = &.{ ev, env };
const str = try expr.toStringAlloc();
defer mem.allocator.free(str);
return try std.mem.replaceOwned(u8, mem.allocator, str, "\\n", "\n");
}
/// Implements (string expr...), i.e. rendering of expressions as strings
pub fn stdString(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
var builder = std.ArrayList(u8).init(mem.allocator);
defer builder.deinit();
const writer = builder.writer();
for (args) |expr| {
const value = try ev.eval(env, expr);
const rendered = try render(ev, env, value);
defer mem.allocator.free(rendered);
try writer.writeAll(rendered);
}
return ast.makeAtomByDuplicating(builder.items);
}
/// Implements (print expr...)
pub fn stdPrint(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
for (args) |expr, index| {
const value = try ev.eval(env, expr);
const rendered = try render(ev, env, value);
defer mem.allocator.free(rendered);
try std.io.getStdOut().writer().print("{s}", .{rendered});
if (index + 1 < args.len) {
try std.io.getStdOut().writer().print(" ", .{});
}
}
return &expr_atom_nil;
}
/// Returns the current environment as an expression, allowing the user to make constructs
/// such as modules and object instances.
pub fn stdSelf(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
_ = &.{ ev, args };
var expr = try Expr.create(true);
expr.val = ExprValue{ .env = env };
return expr;
}
/// Print environments. Runs the GC to minimize the environment listing, unless no-gc is passed.
pub fn stdEnv(ev: *Interpreter, _: *Env, args: []const *Expr) anyerror!*Expr {
if (!(args.len > 0 and args[0].val == ExprType.sym and std.mem.eql(u8, args[0].val.sym, "no-gc"))) {
try mem.gc.run(false);
}
// Print out all environments, including which environment is the parent.
for (mem.gc.registered_envs.items) |registered_env| {
try std.io.getStdOut().writer().print("Environment for {s}: {*}\n", .{ registered_env.name, registered_env });
var iter = registered_env.map.iterator();
while (iter.next()) |item| {
try std.io.getStdOut().writer().writeByteNTimes(' ', 4);
try std.io.getStdOut().writer().print("{s} = ", .{item.key_ptr.*.val.sym});
try item.value_ptr.*.print();
if (ev.verbose) {
try std.io.getStdOut().writer().print(", env {*}", .{item.value_ptr.*.env});
}
try std.io.getStdOut().writer().print("\n", .{});
}
if (registered_env.parent) |parent| {
try std.io.getStdOut().writer().writeByteNTimes(' ', 4);
try std.io.getStdOut().writer().print("Parent environment is {s}: {*}\n", .{ parent.name, parent });
}
try std.io.getStdOut().writer().print("\n", .{});
}
return &expr_atom_nil;
}
/// Like (if), but the branch is chosen based on error state
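/// Usage sketch (surface binding name assumed): (try expr on-ok on-err?) evaluates expr;
/// on error the result is bound to #! and on-err (if present) is evaluated, otherwise the
/// result is bound to #value and on-ok is evaluated.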
pub fn stdTry(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireMinimumArgCount(2, args);
// Select branch based on the presence of an error
var branch: usize = 1;
const result = try ev.eval(env, args[0]);
if (result.val == ExprType.err) {
try ev.env.put(expr_atom_last_try_err.val.sym, result);
branch += 1;
} else {
try ev.env.put(expr_atom_last_try_value.val.sym, result);
}
// The error branch is optional
if (branch < args.len) {
return try ev.eval(env, args[branch]);
} else {
return &expr_atom_nil;
}
}
/// Create an error expression
pub fn stdError(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(1, args);
return try ast.makeError(try ev.eval(env, args[0]));
}
pub fn isEmptyList(expr: *Expr) bool {
return (expr.val == ExprType.lst and expr.val.lst.items.len == 0);
}
pub fn isError(expr: *Expr) bool {
return expr.val == ExprType.err;
}
pub fn isFalsy(expr: *Expr) bool {
return expr == &expr_atom_false or expr == &expr_atom_nil or isEmptyList(expr) or isError(expr);
}
/// Ordering, where lists are compared recursively
fn order(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!std.math.Order {
try requireExactArgCount(2, args);
const op1 = args[0];
const op2 = args[1];
if (op1.val == ExprType.num and op2.val == ExprType.num) {
return std.math.order(op1.val.num, op2.val.num);
} else if (op1 == &expr_atom_nil and op2 == &expr_atom_nil) {
return std.math.Order.eq;
} else if (op1 == &expr_atom_nil and op2 == &expr_atom_false) {
return std.math.Order.eq;
} else if (op1 == &expr_atom_true and op2 == &expr_atom_true) {
return std.math.Order.eq;
} else if (op1 == &expr_atom_true) {
return std.math.Order.lt;
} else if (op1 == &expr_atom_false and isFalsy(op2)) { // and (op2 == &expr_atom_false or op2 == &expr_atom_nil)) {
return std.math.Order.eq;
} else if (op1 == &expr_atom_false) {
return std.math.Order.lt;
} else if (op1.val == ExprType.sym and op2.val == ExprType.sym) {
return std.mem.order(u8, op1.val.sym, op2.val.sym);
} else if (op1 == &expr_atom_nil) {
return if (isEmptyList(op2)) std.math.Order.eq else std.math.Order.lt;
} else if (op2 == &expr_atom_nil) {
return if (isEmptyList(op1)) std.math.Order.eq else std.math.Order.gt;
} else if (op1.val == ExprType.lst and op2.val == ExprType.lst) {
var res = std.math.order(op1.val.lst.items.len, op2.val.lst.items.len);
if (res == std.math.Order.eq) {
for (op1.val.lst.items) |item, index| {
res = try order(ev, env, &.{ item, op2.val.lst.items[index] });
if (res != std.math.Order.eq) {
return res;
}
}
}
return res;
} else {
return ExprErrors.InvalidArgumentType;
}
}
/// Returns a numeric expression with value -1, 0, or 1 to represent <, =, > respectively
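/// Example (assuming the intrinsic is bound as `order`): (order 1 2) -> -1, (order 'b 'a) -> 1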
pub fn stdOrder(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(2, args);
return switch (try order(ev, env, &.{ try ev.eval(env, args[0]), try ev.eval(env, args[1]) })) {
std.math.Order.lt => return ast.makeNumExpr(-1),
std.math.Order.eq => return ast.makeNumExpr(0),
std.math.Order.gt => return ast.makeNumExpr(1),
};
}
/// Turn a boolean into #f or #t
fn boolExpr(val: bool) *Expr {
return if (val) &expr_atom_true else &expr_atom_false;
}
/// Check for equality. If the order operation fails, for instance due to incompatible types, false is returned.
pub fn stdEq(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(2, args);
return boolExpr((order(ev, env, &.{ try ev.eval(env, args[0]), try ev.eval(env, args[1]) }) catch return &expr_atom_false) == std.math.Order.eq);
}
/// Returns #t if the two arguments evaluate to the exact same object
/// This is mostly useful for debugging Bio itself
/// (^= nil nil) -> #t
pub fn stdEqReference(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(2, args);
return boolExpr((try ev.eval(env, args[0])) == (try ev.eval(env, args[1])));
}
/// Compare floats using a relative epsilon comparison (default tolerance 1e-7). An optional third argument overrides the tolerance.
pub fn stdEqApprox(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireMinimumArgCount(2, args);
const op1 = try ev.eval(env, args[0]);
const op2 = try ev.eval(env, args[1]);
var tolerance: f64 = 1e-7;
if (args.len == 3) {
const tolerance_expr = try ev.eval(env, args[2]);
if (tolerance_expr.val == ExprType.num) {
tolerance = tolerance_expr.val.num;
}
}
if (op1.val == ExprType.num and op2.val == ExprType.num) {
return boolExpr(std.math.approxEqRel(f64, op1.val.num, op2.val.num, tolerance));
} else {
return ExprErrors.InvalidArgumentType;
}
}
pub fn stdIsNumber(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
const arg = try ev.eval(env, args[0]);
return switch (arg.val) {
ExprType.num => &expr_atom_true,
else => &expr_atom_false,
};
}
pub fn stdIsSymbol(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
const arg = try ev.eval(env, args[0]);
return switch (arg.val) {
ExprType.sym => &expr_atom_true,
else => &expr_atom_false,
};
}
pub fn stdIsList(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
const arg = try ev.eval(env, args[0]);
return boolExpr(arg.val == ExprType.lst);
}
pub fn stdIsHashmap(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
const arg = try ev.eval(env, args[0]);
return boolExpr(arg.val == ExprType.map);
}
pub fn stdIsError(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
const arg = try ev.eval(env, args[0]);
return boolExpr(arg.val == ExprType.err);
}
pub fn stdIsCallable(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
const arg = try ev.eval(env, args[0]);
return switch (arg.val) {
ExprType.fun, ExprType.lam, ExprType.mac => &expr_atom_true,
else => &expr_atom_false,
};
}
/// (gensym) will generate a unique identifier
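/// Generated symbols have the form gensym_N, where N is an increasing per-interpreter counter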
pub fn stdGenSym(ev: *Interpreter, _: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(0, args);
ev.gensym_seq += 1;
const sym = try std.fmt.allocPrint(mem.allocator, "gensym_{d}", .{ev.gensym_seq});
return ast.makeAtomAndTakeOwnership(sym);
}
/// Renders the expression and wraps it in double quotes, returning a new atom
pub fn stdDoubleQuote(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(1, args);
const arg = try ev.eval(env, args[0]);
const rendered = try render(ev, env, arg);
defer mem.allocator.free(rendered);
const double_quoted = try std.fmt.allocPrint(mem.allocator, "\"{s}\"", .{rendered});
return ast.makeAtomAndTakeOwnership(double_quoted);
}
/// Returns the first argument unevaluated. Passing multiple arguments is an error,
/// though the single argument may itself be a list. (quote (1 2 3)) -> '(1 2 3)
pub fn stdQuote(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
_ = &.{ ev, env };
try requireExactArgCount(1, args);
// We must make a fresh copy for quoted lists. Consider (var lst '(1 2 3)) in a lambda. Without a copy,
// the list would be memoized between calls. This is not a problem for (list 1 2 3) since the list function
// by definition creates a new list on every evaluation.
if (args[0].val == ExprType.lst) {
return try ast.makeListExpr(args[0].val.lst.items);
}
return args[0];
}
/// Unquote is only useful in combination with quasiquoting, see stdQuasiQuote
pub fn stdUnquote(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
_ = &.{ env, args };
try ev.printErrorFmt(SourceLocation.current(), "Can only use unquote inside a quasiquote expression\n", .{});
return ExprErrors.AlreadyReported;
}
/// Unquote with splicing is only useful in combination with quasiquoting, see stdQuasiQuote
pub fn stdUnquoteSplicing(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
_ = &.{ env, args };
try ev.printErrorFmt(SourceLocation.current(), "Can only use unquote-splicing inside a quasiquote expression\n", .{});
return ExprErrors.AlreadyReported;
}
/// Recursive quasi-quote expansion
/// (quasiquote (1 2 (unquote (+ 1 2)) 4)) -> '(1 2 3 4)
/// (quasiquote (1 (unquote-splicing (list 2 3)) 4)) -> '(1 2 3 4)
pub fn stdQuasiQuote(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(1, args);
const qq_expander = struct {
fn expand(ev_inner: *Interpreter, env_inner: *Env, expr: *Expr) anyerror!*Expr {
if (expr.val == ExprType.lst) {
var result_list = try ast.makeListExpr(null);
for (expr.val.lst.items) |item| {
// May encounter empty lists, such as lambda ()
if (item.val == ExprType.lst and item.val.lst.items.len > 0) {
if (item.val.lst.items[0] == &expr_atom_unquote) {
try result_list.val.lst.append(try ev_inner.eval(env_inner, item.val.lst.items[1]));
} else if (item.val.lst.items[0] == &expr_atom_unquote_splicing) {
const list = try ev_inner.eval(env_inner, item.val.lst.items[1]);
try requireType(ev_inner, list, ExprType.lst);
for (list.val.lst.items) |list_item| {
try result_list.val.lst.append(list_item);
}
} else {
try result_list.val.lst.append(try expand(ev_inner, env_inner, item));
}
} else {
if (item.val == ExprType.sym and item == &expr_atom_unquote) {
return try ev_inner.eval(env_inner, expr.val.lst.items[1]);
} else if (item.val == ExprType.sym and item == &expr_atom_unquote_splicing) {
try ev_inner.printErrorFmt(&expr.src, "unquotes-splice must be called from within a list\n", .{});
return ExprErrors.AlreadyReported;
} else {
try result_list.val.lst.append(item);
}
}
}
return result_list;
} else {
return expr;
}
}
};
if (ev.verbose) {
try args[0].print();
try std.io.getStdOut().writer().print("\n ^ quasiquote pre-expand\n", .{});
}
const res = try qq_expander.expand(ev, env, args[0]);
if (ev.verbose) {
try res.print();
try std.io.getStdOut().writer().print("\n ^ quasiquote post-expand\n", .{});
}
return res;
}
/// Implements (item-at index list)
/// If index is out of bounds, nil is returned
pub fn stdItemAt(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(2, args);
const indexArg = try ev.eval(env, args[0]);
const listArg = try ev.eval(env, args[1]);
try requireType(ev, indexArg, ExprType.num);
try requireType(ev, listArg, ExprType.lst);
const index = @floatToInt(isize, indexArg.val.num);
const list = &listArg.val.lst;
return if (index >= 0 and index < list.items.len) list.items[@intCast(usize, index)] else &expr_atom_nil;
}
/// Implements list mutation. If the index is out of bounds, the item is appended or prepended accordingly.
/// The previous value is returned, or nil if index was out of bounds.
/// (item-set 4 list newitem)
pub fn stdItemSet(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(3, args);
const indexArg = try ev.eval(env, args[0]);
const listArg = try ev.eval(env, args[1]);
const newItem = try ev.eval(env, args[2]);
try requireType(ev, indexArg, ExprType.num);
try requireType(ev, listArg, ExprType.lst);
// Index may be negative to prepend, so we use isize
const index = @floatToInt(isize, indexArg.val.num);
var list = &listArg.val.lst;
if (index >= 0 and index < list.items.len) {
var old = list.items[@intCast(usize, index)];
list.items[@intCast(usize, index)] = newItem;
return old;
} else if (index >= list.items.len) {
try list.append(newItem);
return &expr_atom_nil;
} else {
try list.insert(0, newItem);
return &expr_atom_nil;
}
}
/// In-place removal of the n'th item (using swapRemove, so element order is not preserved). The removed item is returned, or nil if the index is out of bounds.
pub fn stdItemRemove(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(2, args);
const indexArg = try ev.eval(env, args[0]);
const listArg = try ev.eval(env, args[1]);
try requireType(ev, indexArg, ExprType.num);
try requireType(ev, listArg, ExprType.lst);
// Index may be negative, so we use isize (a negative index simply yields nil here)
const index = @floatToInt(isize, indexArg.val.num);
var list = &listArg.val.lst;
if (index >= 0 and index < list.items.len) {
return list.swapRemove(@intCast(usize, index));
}
return &expr_atom_nil;
}
/// In-place rotate a list left by the given amount
/// (rotate-left list amount)
pub fn stdRotateLeft(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(2, args);
const listArg = try ev.eval(env, args[0]);
const amountArg = try ev.eval(env, args[1]);
try requireType(ev, listArg, ExprType.lst);
try requireType(ev, amountArg, ExprType.num);
std.mem.rotate(*Expr, listArg.val.lst.items, @floatToInt(usize, amountArg.val.num));
return listArg;
}
/// Implements (range list start? end?) where negative indices are end-relative
/// If both start and end are missing, return the first element as in (car list)
/// For range results, this produces a new list
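/// For example: (range '(1 2 3 4) 1 3) -> (2 3) and (range '(1 2 3 4) -2) -> (3 4)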
pub fn stdRange(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireMinimumArgCount(1, args);
const listArg = try ev.eval(env, args[0]);
try requireType(ev, listArg, ExprType.lst);
const list = &listArg.val.lst;
const size = @intCast(isize, list.items.len);
if (args.len > 1) {
const startArg = try ev.eval(env, args[1]);
try requireType(ev, startArg, ExprType.num);
var start = @floatToInt(isize, startArg.val.num);
var end = val: {
if (args.len > 2) {
const endArg = try ev.eval(env, args[2]);
try requireType(ev, endArg, ExprType.num);
break :val std.math.min(@intCast(isize, list.items.len), @floatToInt(isize, endArg.val.num));
} else break :val @intCast(isize, list.items.len);
};
if (start < 0) {
start = size + start;
}
if (end < 0) {
end = size + end;
}
var res = try ast.makeListExpr(null);
if (size > 0 and end > 0 and start >= 0 and start < size and end <= size) {
try res.val.lst.appendSlice(list.items[@intCast(usize, start)..@intCast(usize, end)]);
return res;
} else {
return &expr_atom_nil;
}
} else {
return if (size > 0) list.items[0] else &expr_atom_nil;
}
}
pub fn stdSum(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
var sum: f64 = 0;
for (args) |expr| {
const arg = try ev.eval(env, expr);
switch (arg.val) {
ExprType.num => |num| sum += num,
else => return ExprErrors.ExpectedNumber,
}
}
return ast.makeNumExpr(sum);
}
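/// Subtracts the remaining operands from the first; with a single operand the result is its negation.
/// Example (assuming the binding is `-`): (- 10 2 3) -> 5, (- 5) -> -5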
pub fn stdSub(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
var res: f64 = 0;
for (args) |expr, index| {
const arg = try ev.eval(env, expr);
switch (arg.val) {
ExprType.num => |num| {
// In the unary case, 0 is the implicit first operand
if (index == 0 and args.len > 1) {
res = num;
} else {
res -= num;
}
},
else => return ExprErrors.ExpectedNumber,
}
}
return ast.makeNumExpr(res);
}
pub fn stdMul(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
var sum: f64 = 1;
for (args) |expr| {
const arg = try ev.eval(env, expr);
switch (arg.val) {
ExprType.num => |num| sum *= num,
else => return ExprErrors.ExpectedNumber,
}
}
return ast.makeNumExpr(sum);
}
pub fn stdDiv(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
var res: f64 = 0;
for (args) |expr, index| {
const arg = try ev.eval(env, expr);
switch (arg.val) {
ExprType.num => |num| {
if (index == 0) {
res = num;
} else {
if (num == 0) {
try ev.printErrorFmt(&expr.src, "Division by zero\n", .{});
return &expr_atom_nil;
}
res /= num;
}
},
else => return ExprErrors.ExpectedNumber,
}
}
return ast.makeNumExpr(res);
}
pub fn stdPow(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(2, args);
const base = try ev.eval(env, args[0]);
const exp = try ev.eval(env, args[1]);
try requireType(ev, base, ExprType.num);
try requireType(ev, exp, ExprType.num);
return ast.makeNumExpr(std.math.pow(f64, base.val.num, exp.val.num));
}
pub fn stdTimeNow(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
_ = &.{ ev, env, args };
return ast.makeNumExpr(@intToFloat(f64, std.time.milliTimestamp()));
}
/// Returns the length of a list, map or symbol, otherwise nil
/// If the input is nil, 0 is returned
pub fn stdLen(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(1, args);
const expr = try ev.eval(env, args[0]);
if (expr == &expr_atom_nil) return try ast.makeNumExpr(@intToFloat(f64, 0));
switch (expr.val) {
ExprType.sym => return try ast.makeNumExpr(@intToFloat(f64, expr.val.sym.len)),
ExprType.lst => {
return try ast.makeNumExpr(@intToFloat(f64, expr.val.lst.items.len));
},
ExprType.map => {
return try ast.makeNumExpr(@intToFloat(f64, expr.val.map.count()));
},
else => {
try ev.printErrorFmt(&expr.src, "len function only works on lists, maps and symbols\n", .{});
return &expr_atom_nil;
},
}
}
/// Splits an atom's constituents into a list
/// (atom.split 123) -> '(1 2 3)
/// (atom.split 'abc) -> '(a b c)
/// (atom.split "a string") -> (list "a" " " "s" "t" "r" "i" "n" "g")
pub fn stdSplitAtom(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(1, args);
const expr = try ev.eval(env, args[0]);
switch (expr.val) {
ExprType.sym => {
const list = try ast.makeListExpr(null);
for (expr.val.sym) |item| {
try list.val.lst.append(try ast.makeAtomByDuplicating(&.{item}));
}
return list;
},
ExprType.num => {
@panic("Splitting numbers is not supported yet");
},
else => return &expr_atom_nil,
}
}
/// Convert between symbols, numbers and lists. Example: (as number (io.read-line))
/// Returns nil if the conversion fails
pub fn stdAs(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(2, args);
const target_type = args[0];
const expr = try ev.eval(env, args[1]);
try requireType(ev, target_type, ExprType.sym);
if (std.mem.eql(u8, target_type.val.sym, "number")) {
switch (expr.val) {
ExprType.num => return expr,
ExprType.sym => {
const float = std.fmt.parseFloat(f64, expr.val.sym) catch {
return &expr_atom_nil;
};
return try ast.makeNumExpr(float);
},
else => return &expr_atom_nil,
}
} else if (std.mem.eql(u8, target_type.val.sym, "symbol")) {
switch (expr.val) {
ExprType.sym => return expr,
ExprType.num => |num| {
const val = try std.fmt.allocPrint(mem.allocator, "{d}", .{num});
return ast.makeAtomLiteral(val, true);
},
ExprType.lst => |lst| {
var res = std.ArrayList(u8).init(mem.allocator);
var exprWriter = res.writer();
defer res.deinit();
for (lst.items) |item| {
const str = try item.toStringAlloc();
defer mem.allocator.free(str);
try exprWriter.writeAll(str);
}
return ast.makeAtomByDuplicating(res.items);
},
else => return &expr_atom_nil,
}
} else if (std.mem.eql(u8, target_type.val.sym, "list")) {
if (expr.val == ExprType.lst) {
return expr;
}
const list = try ast.makeListExpr(null);
try list.val.lst.append(expr);
return list;
} else {
try ev.printErrorFmt(&expr.src, "Invalid target type in (as): {s}. Must be number, symbol or list\n", .{target_type.val.sym});
return &expr_atom_nil;
}
}
/// Split a symbol into a list of symbols by splitting on one or more separators
/// (split "a,b,c;d" ",;") => '(a b c d)
pub fn stdSplit(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(2, args);
const input = try ev.eval(env, args[0]);
const needle = try ev.eval(env, args[1]);
try requireType(ev, input, ExprType.sym);
try requireType(ev, needle, ExprType.sym);
const list = try ast.makeListExpr(null);
var it = std.mem.tokenize(u8, input.val.sym, needle.val.sym);
while (it.next()) |item| {
if (item.len == 0) {
try list.val.lst.append(&expr_atom_nil);
} else {
try list.val.lst.append(try ast.makeAtomByDuplicating(item));
}
}
return list;
}
pub fn stdFloor(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(1, args);
const arg = try ev.eval(env, args[0]);
try requireType(ev, arg, ExprType.num);
return ast.makeNumExpr(@floor(arg.val.num));
}
pub fn stdRound(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(1, args);
const arg = try ev.eval(env, args[0]);
try requireType(ev, arg, ExprType.num);
return ast.makeNumExpr(@round(arg.val.num));
}
pub fn minMax(ev: *Interpreter, env: *Env, args: []const *Expr, use_order: std.math.Order) anyerror!*Expr {
try requireExactArgCount(1, args);
const arg = try ev.eval(env, args[0]);
try requireType(ev, arg, ExprType.lst);
var winner: *Expr = arg.val.lst.items[0];
for (arg.val.lst.items[1..]) |item| {
if (use_order == try order(ev, env, &.{ try ev.eval(env, winner), try ev.eval(env, item) })) {
winner = item;
}
}
return winner;
}
/// Find the smallest value in a list
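/// Example (assuming the binding is `min`): (min '(3 1 2)) -> 1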
pub fn stdMin(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
return minMax(ev, env, args, std.math.Order.gt);
}
/// Find the largest value in a list
pub fn stdMax(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
return minMax(ev, env, args, std.math.Order.lt);
}
/// This is called when a lambda is defined, not when it's invoked
/// The first argument must be a list, namely the lambda arguments
pub fn stdLambda(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireMinimumArgCount(2, args);
try requireType(ev, args[0], ExprType.lst);
var expr = try ast.makeLambdaExpr(env);
try expr.val.lam.appendSlice(args);
return expr;
}
/// This is called when a macro is defined
/// The first argument must be a list, namely the macro arguments
pub fn stdMacro(ev: *Interpreter, _: *Env, args: []const *Expr) anyerror!*Expr {
try requireMinimumArgCount(2, args);
try requireType(ev, args[0], ExprType.lst);
var expr = try ast.makeMacroExpr();
try expr.val.mac.appendSlice(args);
return expr;
}
/// Evaluate the arguments, returning the last one as the result. If quote and quasiquote
/// expressions are encountered, these are unquoted before evaluation.
pub fn stdEval(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
var res: *Expr = &expr_atom_nil;
for (args) |arg| {
if (arg.val == ExprType.lst and (arg.val.lst.items[0] == &expr_atom_quote or arg.val.lst.items[0] == &expr_atom_quasi_quote)) {
res = try ev.eval(env, try ev.eval(env, arg));
} else {
res = try ev.eval(env, arg);
}
}
return res;
}
/// Parses and evaluates a string (that is, a symbol containing Bio source code)
/// The argument can be a literal source string, or an expression producing a source string
pub fn stdEvalString(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireMinimumArgCount(1, args);
const arg = args[0];
const input = expr: {
if (arg.val == ExprType.lst) {
if (arg.val == ExprType.lst and (arg.val.lst.items[0] == &expr_atom_quote or arg.val.lst.items[0] == &expr_atom_quasi_quote)) {
break :expr try ev.eval(env, try ev.eval(env, arg));
} else {
break :expr try ev.eval(env, arg);
}
} else if (arg.val == ExprType.sym) {
if (env.lookup(arg.val.sym, true)) |looked_up| {
break :expr looked_up;
} else {
break :expr arg;
}
} else {
break :expr arg;
}
};
if (input.val == ExprType.sym) {
return try ev.eval(env, try ev.parse(input.val.sym));
} else {
return input;
}
}
/// (apply fn arg1 ... args) where the last argument must be a list, which is the common contract in Lisps
/// Behaves as if fn is called with arguments from the list produced by (append (list arg1 ...) args)
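/// e.g. (apply f 1 2 '(3 4)) behaves like (f 1 2 3 4), where f is any callable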
pub fn stdApply(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireMinimumArgCount(2, args);
const last_arg_list = try ev.eval(env, args[args.len - 1]);
try requireType(ev, last_arg_list, ExprType.lst);
var fncall = try ast.makeListExpr(null);
try fncall.val.lst.append(args[0]);
if (args.len > 2) {
for (args[1 .. args.len - 1]) |arg| {
try fncall.val.lst.append(arg);
}
}
for (last_arg_list.val.lst.items) |item| {
try fncall.val.lst.append(item);
}
return try ev.eval(env, fncall);
}
/// Create a new list: (list 1 2 3 'a (* 3 x))
pub fn stdList(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
var list = try ast.makeListExpr(null);
for (args) |arg| {
try list.val.lst.append(try ev.eval(env, arg));
}
return list;
}
/// Create a new hashmap: (hashmap.new '(1 2) '(a 3))
pub fn stdHashmapNew(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
var hmap = try ast.makeHashmapExpr(null);
for (args) |arg| {
try requireType(ev, arg, ExprType.lst);
try hmap.val.map.put(try ev.eval(env, arg.val.lst.items[0]), try ev.eval(env, arg.val.lst.items[1]));
}
return hmap;
}
/// (iterate mymap (lambda (key value) ...))
/// (iterate mylst (lambda (item) ...))
/// (iterate mystr (lambda (char) ...))
pub fn stdIterate(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(2, args);
var container = try ev.eval(env, args[0]);
const callable = args[1];
switch (container.val) {
ExprType.map => {
var it = container.val.map.iterator();
while (it.next()) |entry| {
var call = try ast.makeListExpr(null);
try call.val.lst.append(callable);
try call.val.lst.append(entry.key_ptr.*);
try call.val.lst.append(entry.value_ptr.*);
_ = try ev.eval(env, call);
}
},
ExprType.lst => {
for (container.val.lst.items) |entry| {
var call = try ast.makeListExpr(null);
try call.val.lst.append(callable);
try call.val.lst.append(entry);
_ = try ev.eval(env, call);
}
},
ExprType.sym => {
for (container.val.sym) |byte| {
var call = try ast.makeListExpr(null);
try call.val.lst.append(callable);
try call.val.lst.append(try ast.makeListExpr(&.{ &expr_atom_quote, try ast.makeAtomByDuplicating(&.{byte}) }));
_ = try ev.eval(env, call);
}
},
else => {
try ev.printErrorFmt(&container.src, "Invalid container type in (iterate). Must be map, list or symbol\n", .{});
},
}
return &expr_atom_nil;
}
/// (map.put mymap 1 "abc")
/// Returns the previous value if present, or nil
pub fn stdHashmapPut(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(3, args);
const m = try ev.eval(env, args[0]);
try requireType(ev, m, ExprType.map);
const k = try ev.eval(env, args[1]);
const v = try ev.eval(env, args[2]);
const previous = m.val.map.get(k);
try m.val.map.put(k, v);
return if (previous) |p| p else &expr_atom_nil;
}
/// (map.get mymap 1)
/// Returns the matching value, otherwise nil
pub fn stdHashmapGet(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(2, args);
const m = try ev.eval(env, args[0]);
try requireType(ev, m, ExprType.map);
const k = try ev.eval(env, args[1]);
const v = m.val.map.get(k);
return if (v) |val| val else &expr_atom_nil;
}
/// (map.remove mymap 1)
/// Returns #t if the entry existed and was removed, otherwise #f
pub fn stdHashmapRemove(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(2, args);
const m = try ev.eval(env, args[0]);
try requireType(ev, m, ExprType.map);
const k = try ev.eval(env, args[1]);
return boolExpr(m.val.map.swapRemove(k));
}
/// Removes all items and returns the number of items removed
pub fn stdHashmapClear(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(1, args);
const m = try ev.eval(env, args[0]);
try requireType(ev, m, ExprType.map);
const count = try ast.makeNumExpr(@intToFloat(f64, m.val.map.count()));
m.val.map.clearAndFree();
return count;
}
/// Loop from n to m or until &break is encountered
/// (loop '(0 9) body goes here) -> loops 10 times
/// (loop '(9 0) body goes here) -> loops 10 times
/// (loop idx '(9 0) body goes here) -> loops 10 times, current iteration count goes into the idx variable
/// (loop '() body goes here (if cond &break)) -> loops until &break is encountered
pub fn stdLoop(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireMinimumArgCount(2, args);
    // loop_arg1 is either a loop index variable name (a symbol we don't look up) or the criteria list.
    // If it is the former, define it via putEnv, i.e. the same as (var idx 0)
var loop_arg1 = try ev.eval(env, args[0]);
var critera: ?*Expr = null;
var index_variable: ?*Expr = null;
var index_increment: f64 = 1;
if (loop_arg1.val == ExprType.lst) {
critera = try ev.eval(env, args[0]);
} else if (loop_arg1.val == ExprType.sym) {
var num_expr = try ast.makeNumExpr(0);
index_variable = try putEnv(ev, env, &.{ loop_arg1, num_expr }, true);
critera = try ev.eval(env, args[1]);
try requireType(ev, critera.?, ExprType.lst);
} else {
return ExprErrors.InvalidArgumentType;
}
// Criteria is a list of 2 items, giving start and stop indices, or an empty list for infinite loops
if (critera.?.val.lst.items.len > 0) try requireMinimumArgCount(2, critera.?.val.lst.items);
var start: f64 = 0;
var end: f64 = 0;
var infinite = true;
if (critera.?.val.lst.items.len > 0) {
infinite = false;
try requireMinimumArgCount(2, critera.?.val.lst.items);
var first = try ev.eval(env, critera.?.val.lst.items[0]);
var second = try ev.eval(env, critera.?.val.lst.items[1]);
try requireType(ev, first, ExprType.num);
try requireType(ev, second, ExprType.num);
start = std.math.min(first.val.num, second.val.num);
end = std.math.max(first.val.num, second.val.num);
// Countdown?
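            // The internal counter always runs upward from min to max; when counting
            // down, only the user-visible index variable is stepped backwards.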
if (first.val.num > second.val.num) {
if (index_variable) |iv| {
index_increment = -1;
iv.val.num = end - 1;
}
}
}
var last: *Expr = &expr_atom_nil;
done: while (infinite or (start < end)) : (start += 1) {
// Evaluate loop body
for (args[1..]) |item| {
last = try ev.eval(env, item);
if (ev.break_seen) {
ev.break_seen = false;
break :done;
}
}
if (index_variable) |iv| {
iv.val.num += index_increment;
}
}
return last;
}
/// Creates a new list, or updates an existing one if used with &mut, and populates it with the arguments.
/// If any arguments are lists, their elements are spliced into the result list. nil arguments are ignored.
/// (append '(1 2) '(3 4)) -> (1 2 3 4)
/// (append '(1 2 3) '4) -> (1 2 3 4)
/// (append &mut mylist '(3 4))
pub fn stdAppend(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireMinimumArgCount(2, args);
const Handler = struct {
pub fn handle(ip: *Interpreter, environment: *Env, list: *Expr, arg: *Expr) !void {
const evaled = try ip.eval(environment, arg);
if (evaled.val == ExprType.lst) {
for (evaled.val.lst.items) |item| {
try list.val.lst.append(item);
}
} else {
                // arg was already evaluated above; reuse that result.
                if (evaled != &expr_atom_nil) {
                    try list.val.lst.append(evaled);
}
}
}
};
var target_list: ?*Expr = null;
var start_index: usize = 0;
if (args[0] == &expr_atom_mut) {
target_list = try ev.eval(env, args[1]);
if (target_list.?.val == ExprType.lst) {
start_index = 2;
} else {
target_list = try ast.makeListExpr(null);
start_index = 1;
}
} else {
target_list = try ast.makeListExpr(null);
}
for (args[start_index..]) |arg| {
try Handler.handle(ev, env, target_list.?, arg);
}
return target_list.?;
}
/// Put a variable into the given environment. If the symbol is already in the environment, the
/// `allow_redefinition` flag decides between overwriting and emitting an error.
fn putEnv(ev: *Interpreter, env: *Env, args: []const *Expr, allow_redefinition: bool) anyerror!*Expr {
if (env.lookup(args[0].val.sym, false) != null and !allow_redefinition) {
try ev.printErrorFmt(&args[0].src, "{s} is already defined\n", .{args[0].val.sym});
return ExprErrors.AlreadyReported;
}
if (args.len > 1) {
var value = try ev.eval(env, args[1]);
try env.putWithSymbol(args[0], value);
return value;
} else {
try env.putWithSymbol(args[0], &expr_atom_nil);
return &expr_atom_nil;
}
}
/// Adds a new binding to the current environment
pub fn stdDefine(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireMinimumArgCount(1, args);
try requireType(ev, args[0], ExprType.sym);
return try putEnv(ev, env, args, false);
}
/// Replace binding
pub fn stdSet(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(2, args);
try requireType(ev, args[0], ExprType.sym);
return env.replace(args[0], try ev.eval(env, args[1]));
}
/// Remove binding if it exists
pub fn stdUnset(ev: *Interpreter, env: *Env, args: []const *Expr) anyerror!*Expr {
try requireExactArgCount(1, args);
try requireType(ev, args[0], ExprType.sym);
_ = env.replace(args[0], null);
return &expr_atom_nil;
} | src/intrinsics.zig |
const std = @import("std");
const aoc = @import("aoc-lib.zig");
fn indexOf(h: []const u8, n: u8, o: usize) ?usize {
var i = o;
while (i < h.len) : (i += 1) {
if (h[i] == n) {
return i;
}
}
return null;
}
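// Recursive flood fill: marks every cell of the basin containing p as '9'
// so it is not visited again, and returns the number of cells filled.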
fn flood(l: []u8, p: usize, w: usize, h: usize) anyerror!usize {
var x: usize = p % w;
var y: usize = p / w;
if (l[p] >= '9') {
return 0;
}
l[p] = '9';
var size: usize = 1;
if (x > 0) {
size += try flood(l, p - 1, w, h);
}
if (x < w - 2) {
size += try flood(l, p + 1, w, h);
}
if (y > 0) {
size += try flood(l, p - w, w, h);
}
if (y < h - 1) {
size += try flood(l, p + w, w, h);
}
return size;
}
fn pp(l: []u8, w: usize, h: usize) !void {
var y: usize = 0;
while (y < h) : (y += 1) {
try aoc.print("{s}", .{l[y * w .. (1 + y) * w]});
}
}
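// Part 1: sum of the risk levels (1 + height) of all low points.
// Part 2: product of the three largest basin sizes.
// Heights are scanned in ascending order, so each basin is first reached at its
// lowest cell; flood() then consumes the whole basin so it is counted only once.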
fn lava(alloc: std.mem.Allocator, in: []const u8) ![2]usize {
var l: []u8 = alloc.dupe(u8, in) catch unreachable;
defer alloc.free(l);
var w = indexOf(l, '\n', 0).? + 1;
var h = in.len / w;
var r = [2]usize{ 0, 0 };
var ch: u8 = '0';
var sizes = std.ArrayList(usize).init(alloc);
defer sizes.deinit();
//print("\n", .{}) catch unreachable;
while (ch < '9') : (ch += 1) {
var p: usize = 0;
while (p < l.len - 1) : (p += 1) {
if (l[p] != ch) {
continue;
}
r[0] += 1 + ch - '0';
var size: usize = try flood(l, p, w, h);
//print("{},{} s={}\n", .{ p % w, p / w, size }) catch unreachable;
try sizes.append(size);
}
}
std.sort.sort(usize, sizes.items, {}, aoc.usizeLessThan);
r[1] = (sizes.items[sizes.items.len - 3] *
sizes.items[sizes.items.len - 2] *
sizes.items[sizes.items.len - 1]);
return r;
}
test "examples" {
var test1 = try lava(aoc.talloc, aoc.test1file);
try aoc.assertEq(@as(usize, 15), test1[0]);
var real = try lava(aoc.talloc, aoc.inputfile);
try aoc.assertEq(@as(usize, 456), real[0]);
try aoc.assertEq(@as(usize, 1134), test1[1]);
try aoc.assertEq(@as(usize, 1047744), real[1]);
}
fn day09(inp: []const u8, bench: bool) anyerror!void {
var p = try lava(aoc.halloc, inp);
if (!bench) {
try aoc.print("Part 1: {}\nPart 2: {}\n", .{ p[0], p[1] });
}
}
pub fn main() anyerror!void {
try aoc.benchme(aoc.input(), day09);
} | 2021/09/aoc.zig |
const kernel = @import("../../kernel.zig");
const x86_64 = @import("../x86_64.zig");
const DescriptorTable = @import("descriptor_table.zig");
const TSS = @import("tss.zig");
const log = kernel.log.scoped(.GDT);
pub const Table = packed struct {
null_entry: Entry = 0, // 0x00
code_16: Entry = 0x00009a000000ffff, // 0x08
data_16: Entry = 0x000093000000ffff, // 0x10
code_32: Entry = 0x00cf9a000000ffff, // 0x18
data_32: Entry = 0x00cf93000000ffff, // 0x20
code_64: Entry = 0x00af9b000000ffff, // 0x28
data_64: Entry = 0x00af93000000ffff, // 0x30
user_code_64: Entry = 0x00affb000000ffff, // 0x38
user_data_64: Entry = 0x00aff3000000ffff, // 0x40
tss: TSS.Descriptor, // 0x48
comptime {
kernel.assert_unsafe(@sizeOf(Table) == 9 * @sizeOf(Entry) + @sizeOf(TSS.Descriptor));
kernel.assert_unsafe(@offsetOf(Table, "code_64") == 0x28);
kernel.assert_unsafe(@offsetOf(Table, "data_64") == 0x30);
kernel.assert_unsafe(@offsetOf(Table, "user_code_64") == 0x38);
kernel.assert_unsafe(@offsetOf(Table, "user_data_64") == 0x40);
kernel.assert_unsafe(@offsetOf(Table, "tss") == 9 * @sizeOf(Entry));
}
pub fn initial_setup(gdt: *Table) void {
log.debug("Loading GDT...", .{});
gdt.* = Table{
.tss = bootstrap_tss.get_descriptor(),
};
log.debug("GDT loaded", .{});
gdt.load();
log.debug("GDT loaded", .{});
x86_64.flush_segments_kernel();
x86_64.set_current_cpu(&kernel.cpus[0]);
_ = x86_64.get_current_cpu();
}
pub inline fn load(gdt: *Table) void {
const register = DescriptorTable.Register{
.limit = @sizeOf(Table) - 1,
.address = @ptrToInt(gdt),
};
asm volatile (
\\ lgdt %[gdt_register]
:
            : [gdt_register] "*p" (&register),
);
}
pub inline fn update_tss(gdt: *Table, tss: *TSS.Struct) void {
gdt.tss = tss.get_descriptor();
const tss_selector: u16 = @offsetOf(Table, "tss");
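        // Load the task register with the selector of the TSS descriptor in this GDT.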
asm volatile (
\\ltr %[tss_selector]
:
: [tss_selector] "r" (tss_selector),
);
log.debug("Updated TSS", .{});
}
};
const bootstrap_tss = TSS.Struct{};
const Entry = u64;
pub fn save() DescriptorTable.Register {
var register: DescriptorTable.Register = undefined;
asm volatile ("sgdt %[gdt_register]"
:
        : [gdt_register] "*p" (&register),
);
return register;
} | src/kernel/arch/x86_64/gdt.zig |
const std = @import("std");
const bytecode = @import("bytecode.zig");
const Value = @import("value.zig").Value;
const Str = []const u8;
pub fn disassemble(chunk: bytecode.Chunk, name: Str) void {
print("== {s} ==\n", .{name});
var offset: usize = 0;
while (offset < chunk.code.items.len) {
offset = disassembleInst(chunk, offset);
}
}
pub fn disassembleInst(chunk: bytecode.Chunk, offset: usize) usize {
print("{d:0>4} ", .{offset});
// print("({d:0>4}:{d:0>4}) ", .{ chunk.locs.items[offset].line, chunk.locs.items[offset].col });
switch (@intToEnum(bytecode.Op, chunk.code.items[offset])) {
.Constant => return constantInst("OP_CONSTANT", chunk, offset),
.DefineGlobal => return constantInst("OP_DEFINE_GLOBAL", chunk, offset),
.GetGlobal => return constantInst("OP_GET_GLOBAL", chunk, offset),
.SetGlobal => return constantInst("OP_SET_GLOBAL", chunk, offset),
.Return => return simpleInst("OP_RETURN", offset),
.True => return simpleInst("OP_TRUE", offset),
.False => return simpleInst("OP_FALSE", offset),
.Nil => return simpleInst("OP_NIL", offset),
.Add => return simpleInst("OP_ADD", offset),
.Sub => return simpleInst("OP_SUB", offset),
.Mul => return simpleInst("OP_MUL", offset),
.Div => return simpleInst("OP_DIV", offset),
.Neg => return simpleInst("OP_NEG", offset),
.Not => return simpleInst("OP_NOT", offset),
.And => return simpleInst("OP_AND", offset),
.Or => return simpleInst("OP_OR", offset),
.Xor => return simpleInst("OP_XOR", offset),
.Equal => return simpleInst("OP_EQUAL", offset),
.NotEqual => return simpleInst("OP_NOT_EQUAL", offset),
.Greater => return simpleInst("OP_GREATER", offset),
.GreaterEqual => return simpleInst("OP_GREATER_EQUAL", offset),
.Less => return simpleInst("OP_LESS", offset),
.LessEqual => return simpleInst("OP_LESS_EQUAL", offset),
.Print => return simpleInst("OP_PRINT", offset),
.Pop => return simpleInst("OP_POP", offset),
}
unreachable;
}
fn simpleInst(inst: Str, offset: usize) usize {
print("{s}\n", .{inst});
return offset + 1;
}
fn constantInst(inst: Str, chunk: bytecode.Chunk, offset: usize) usize {
const value = chunk.getConstant(offset + 1);
print("{s: <16} {}\n", .{ inst, value });
return offset + 3;
}
const print = std.debug.print; | src/debug.zig |
//--------------------------------------------------------------------------------
// Section: Types (6)
//--------------------------------------------------------------------------------
const CLSID_WaaSAssessor_Value = Guid.initString("098ef871-fa9f-46af-8958-c083515d7c9c");
pub const CLSID_WaaSAssessor = &CLSID_WaaSAssessor_Value;
pub const UpdateImpactLevel = enum(i32) {
None = 0,
Low = 1,
Medium = 2,
High = 3,
};
pub const UpdateImpactLevel_None = UpdateImpactLevel.None;
pub const UpdateImpactLevel_Low = UpdateImpactLevel.Low;
pub const UpdateImpactLevel_Medium = UpdateImpactLevel.Medium;
pub const UpdateImpactLevel_High = UpdateImpactLevel.High;
pub const UpdateAssessmentStatus = enum(i32) {
Latest = 0,
NotLatestSoftRestriction = 1,
NotLatestHardRestriction = 2,
NotLatestEndOfSupport = 3,
NotLatestServicingTrain = 4,
NotLatestDeferredFeature = 5,
NotLatestDeferredQuality = 6,
NotLatestPausedFeature = 7,
NotLatestPausedQuality = 8,
NotLatestManaged = 9,
NotLatestUnknown = 10,
NotLatestTargetedVersion = 11,
};
pub const UpdateAssessmentStatus_Latest = UpdateAssessmentStatus.Latest;
pub const UpdateAssessmentStatus_NotLatestSoftRestriction = UpdateAssessmentStatus.NotLatestSoftRestriction;
pub const UpdateAssessmentStatus_NotLatestHardRestriction = UpdateAssessmentStatus.NotLatestHardRestriction;
pub const UpdateAssessmentStatus_NotLatestEndOfSupport = UpdateAssessmentStatus.NotLatestEndOfSupport;
pub const UpdateAssessmentStatus_NotLatestServicingTrain = UpdateAssessmentStatus.NotLatestServicingTrain;
pub const UpdateAssessmentStatus_NotLatestDeferredFeature = UpdateAssessmentStatus.NotLatestDeferredFeature;
pub const UpdateAssessmentStatus_NotLatestDeferredQuality = UpdateAssessmentStatus.NotLatestDeferredQuality;
pub const UpdateAssessmentStatus_NotLatestPausedFeature = UpdateAssessmentStatus.NotLatestPausedFeature;
pub const UpdateAssessmentStatus_NotLatestPausedQuality = UpdateAssessmentStatus.NotLatestPausedQuality;
pub const UpdateAssessmentStatus_NotLatestManaged = UpdateAssessmentStatus.NotLatestManaged;
pub const UpdateAssessmentStatus_NotLatestUnknown = UpdateAssessmentStatus.NotLatestUnknown;
pub const UpdateAssessmentStatus_NotLatestTargetedVersion = UpdateAssessmentStatus.NotLatestTargetedVersion;
pub const UpdateAssessment = extern struct {
status: UpdateAssessmentStatus,
impact: UpdateImpactLevel,
daysOutOfDate: u32,
};
pub const OSUpdateAssessment = extern struct {
isEndOfSupport: BOOL,
assessmentForCurrent: UpdateAssessment,
assessmentForUpToDate: UpdateAssessment,
securityStatus: UpdateAssessmentStatus,
assessmentTime: FILETIME,
releaseInfoTime: FILETIME,
currentOSBuild: ?PWSTR,
currentOSReleaseTime: FILETIME,
upToDateOSBuild: ?PWSTR,
upToDateOSReleaseTime: FILETIME,
};
// TODO: this type is limited to platform 'windows10.0.15063'
const IID_IWaaSAssessor_Value = Guid.initString("2347bbef-1a3b-45a4-902d-3e09c269b45e");
pub const IID_IWaaSAssessor = &IID_IWaaSAssessor_Value;
pub const IWaaSAssessor = extern struct {
pub const VTable = extern struct {
base: IUnknown.VTable,
GetOSUpdateAssessment: fn(
self: *const IWaaSAssessor,
result: ?*OSUpdateAssessment,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
// NOTE: method is namespaced with interface name to avoid conflicts for now
pub fn IWaaSAssessor_GetOSUpdateAssessment(self: *const T, result: ?*OSUpdateAssessment) callconv(.Inline) HRESULT {
return @ptrCast(*const IWaaSAssessor.VTable, self.vtable).GetOSUpdateAssessment(@ptrCast(*const IWaaSAssessor, self), result);
}
};}
pub usingnamespace MethodMixin(@This());
};
//--------------------------------------------------------------------------------
// Section: Functions (0)
//--------------------------------------------------------------------------------
//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
pub usingnamespace switch (@import("../zig.zig").unicode_mode) {
.ansi => struct {
},
.wide => struct {
},
.unspecified => if (@import("builtin").is_test) struct {
} else struct {
},
};
//--------------------------------------------------------------------------------
// Section: Imports (6)
//--------------------------------------------------------------------------------
const Guid = @import("../zig.zig").Guid;
const BOOL = @import("../foundation.zig").BOOL;
const FILETIME = @import("../foundation.zig").FILETIME;
const HRESULT = @import("../foundation.zig").HRESULT;
const IUnknown = @import("../system/com.zig").IUnknown;
const PWSTR = @import("../foundation.zig").PWSTR;
test {
@setEvalBranchQuota(
@import("std").meta.declarations(@This()).len * 3
);
// reference all the pub declarations
if (!@import("builtin").is_test) return;
inline for (@import("std").meta.declarations(@This())) |decl| {
if (decl.is_pub) {
_ = decl;
}
}
} | win32/system/update_assessment.zig |
const std = @import("std");
const ArrayList = std.ArrayList;
const V5 = [5]i64;
const num_teaspoons: u32 = 100;
fn addV5(u: V5, v: V5) V5 {
return V5{
u[0] + v[0],
u[1] + v[1],
u[2] + v[2],
u[3] + v[3],
u[4] + v[4],
};
}
fn scaleV5(c: i64, v: V5) V5 {
return V5{ v[0] * c, v[1] * c, v[2] * c, v[3] * c, v[4] * c };
}
fn clampV5(lo: i64, v: V5) V5 {
return V5{ @maximum(lo, v[0]), @maximum(lo, v[1]), @maximum(lo, v[2]), @maximum(lo, v[3]), @maximum(lo, v[4]) };
}
fn product(xs: []i64) i64 {
var total: i64 = 1;
for (xs) |x| {
total *= x;
}
return total;
}
fn sum(xs: []i64) i64 {
var total: i64 = 0;
for (xs) |x| {
total += x;
}
return total;
}
fn score(ingredients: []V5, teaspoons: []i64) i64 {
var v: V5 = [_]i64{0} ** 5;
var i: usize = 0;
while (i < teaspoons.len) : (i += 1) {
v = addV5(v, scaleV5(teaspoons[i], ingredients[i]));
}
return product(clampV5(0, v)[0..4]);
}
fn calories(ingredients: []V5, teaspoons: []i64) i64 {
var total: i64 = 0;
var i: usize = 0;
while (i < teaspoons.len) : (i += 1) {
total += teaspoons[i] * ingredients[i][4];
}
return total;
}
// Only teaspoons[0..index] is valid when this is called
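// Recursively enumerates every split of num_teaspoons across the ingredients;
// the last ingredient gets whatever is left. Returns the best score, optionally
// restricted to mixtures whose calorie total equals target_calories.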
fn solve(ingredients: []V5, teaspoons: []i64, index: usize, target_calories: ?i64) i64 {
var used_teaspoons: i64 = sum(teaspoons[0..index]);
if (index + 1 == ingredients.len) {
teaspoons[index] = num_teaspoons - used_teaspoons;
if (target_calories) |cals| {
if (calories(ingredients, teaspoons) == cals) {
return score(ingredients, teaspoons);
} else {
return 0;
}
} else {
return score(ingredients, teaspoons);
}
} else {
var max_score: i64 = 0;
teaspoons[index] = 0;
while (teaspoons[index] + used_teaspoons <= num_teaspoons) : (teaspoons[index] += 1) {
max_score = @maximum(max_score, solve(ingredients, teaspoons, index + 1, target_calories));
}
return max_score;
}
}
pub fn main() !void {
const file =
try std.fs.cwd().openFile("inputs/day15.txt", .{ .read = true });
defer file.close();
var reader = std.io.bufferedReader(file.reader()).reader();
var buffer: [1024]u8 = undefined;
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
var vs = ArrayList(V5).init(&gpa.allocator);
defer vs.deinit();
while (try reader.readUntilDelimiterOrEof(&buffer, '\n')) |line| {
var tokens = std.mem.tokenize(u8, line, ":");
_ = tokens.next(); // "Ingredient: "
// Split in to property-value tokens
tokens = std.mem.tokenize(u8, tokens.next().?, ",");
var v: V5 = undefined;
var index: u32 = 0;
while (tokens.next()) |token| : (index += 1) {
// Split property and value
var pair_tokens = std.mem.tokenize(u8, token, " ");
_ = pair_tokens.next(); // property
v[index] = try std.fmt.parseInt(i64, pair_tokens.next().?, 10);
}
try vs.append(v);
}
var teaspoons = ArrayList(i64).init(&gpa.allocator);
try teaspoons.resize(vs.items.len);
defer teaspoons.deinit();
std.debug.print("{d}\n", .{solve(vs.items, teaspoons.items, 0, null)});
std.debug.print("{d}\n", .{solve(vs.items, teaspoons.items, 0, 500)});
} | src/day15.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const nitori = @import("nitori");
const communication = nitori.communication;
const interface = nitori.interface;
const EventChannel = communication.EventChannel;
//;
// TODO maybe have a *const System in the contexts?
pub const modules = struct {
usingnamespace @import("modules/sine.zig");
usingnamespace @import("modules/utility.zig");
// usingnamespace @import("modules/sample_player.zig");
};
//;
pub const Module = struct {
const Self = @This();
pub const InBuffer = struct {
id: usize,
buf: []const f32,
};
pub const FrameContext = struct {
now: u64,
};
pub const ComputeContext = struct {
sample_rate: u32,
frame_len: usize,
inputs: []const InBuffer,
output: []f32,
};
pub const VTable = struct {
frame: fn (Self, FrameContext) void = _frame,
compute: fn (Self, ComputeContext) void,
pub fn _frame(module: Self, _ctx: FrameContext) void {}
};
impl: interface.Impl,
vtable: *const VTable,
};
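/// Wraps a module implementation T with an event channel: a Controller can send
/// timestamped T.Message values, which are drained and passed to T.takeMessage()
/// at the start of every frame() before T's own frame hook runs.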
pub fn Controlled(comptime T: type) type {
return struct {
const Self = @This();
const MsgChannel = EventChannel(T.Message);
pub const Controller = struct {
tx: MsgChannel.Sender,
pub fn send(self: *Controller, now: u64, msg: T.Message) MsgChannel.Error!void {
return self.tx.send(now, msg);
}
};
inner: *T,
channel: MsgChannel,
rx: MsgChannel.Receiver,
pub fn init(
self: *Self,
allocator: *Allocator,
message_ct: usize,
inner: *T,
) Allocator.Error!void {
self.inner = inner;
self.channel = try MsgChannel.init(allocator, message_ct);
self.rx = self.channel.makeReceiver();
}
pub fn deinit(self: *Self) void {
self.channel.deinit();
}
pub fn makeController(self: *Self) Controller {
return Controller{
.tx = self.channel.makeSender(),
};
}
//;
pub fn module(self: *Self) Module {
return .{
.impl = interface.Impl.init(self),
.vtable = &comptime Module.VTable{
.frame = frame,
.compute = compute,
},
};
}
pub fn frame(m: Module, ctx: Module.FrameContext) void {
var self = m.impl.cast(Self);
while (self.rx.tryRecv(ctx.now)) |event| {
self.inner.takeMessage(event.data);
}
if (@hasDecl(T, "frame")) {
self.inner.frame(ctx);
}
}
pub fn compute(m: Module, ctx: Module.ComputeContext) void {
var self = m.impl.cast(Self);
if (@hasDecl(T, "compute")) {
self.inner.compute(ctx);
}
}
};
} | src/module.zig |
const std = @import("../std.zig");
const Cpu = std.Target.Cpu;
pub const Feature = enum {
@"64bit",
a,
c,
d,
e,
f,
m,
relax,
};
pub usingnamespace Cpu.Feature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= Cpu.Feature.Set.needed_bit_count);
var result: [len]Cpu.Feature = undefined;
result[@enumToInt(Feature.@"64bit")] = .{
.llvm_name = "64bit",
.description = "Implements RV64",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.a)] = .{
.llvm_name = "a",
.description = "'A' (Atomic Instructions)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.c)] = .{
.llvm_name = "c",
.description = "'C' (Compressed Instructions)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.d)] = .{
.llvm_name = "d",
.description = "'D' (Double-Precision Floating-Point)",
.dependencies = featureSet(&[_]Feature{
.f,
}),
};
result[@enumToInt(Feature.e)] = .{
.llvm_name = "e",
.description = "Implements RV32E (provides 16 rather than 32 GPRs)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.f)] = .{
.llvm_name = "f",
.description = "'F' (Single-Precision Floating-Point)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.m)] = .{
.llvm_name = "m",
.description = "'M' (Integer Multiplication and Division)",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.relax)] = .{
.llvm_name = "relax",
.description = "Enable Linker relaxation.",
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
for (result) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
break :blk result;
};
pub const cpu = struct {
pub const baseline_rv32 = Cpu{
.name = "baseline_rv32",
.llvm_name = "generic-rv32",
.features = featureSet(&[_]Feature{
.a,
.c,
.d,
.f,
.m,
.relax,
}),
};
pub const baseline_rv64 = Cpu{
.name = "baseline_rv64",
.llvm_name = "generic-rv64",
.features = featureSet(&[_]Feature{
.@"64bit",
.a,
.c,
.d,
.f,
.m,
.relax,
}),
};
pub const generic_rv32 = Cpu{
.name = "generic_rv32",
.llvm_name = "generic-rv32",
.features = featureSet(&[_]Feature{}),
};
pub const generic_rv64 = Cpu{
.name = "generic_rv64",
.llvm_name = "generic-rv64",
.features = featureSet(&[_]Feature{
.@"64bit",
}),
};
};
/// All riscv CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const Cpu{
&cpu.baseline_rv32,
&cpu.baseline_rv64,
&cpu.generic_rv32,
&cpu.generic_rv64,
}; | lib/std/target/riscv.zig |
const std = @import("std");
const warn = std.debug.warn;
const c = @cImport({
@cInclude("string.h");
@cInclude("stdio.h");
@cInclude("ui.h");
});
extern fn onClosing(w: ?*c.uiWindow, data: ?*c_void) c_int {
c.uiQuit();
return 1;
}
extern fn onShouldQuit(data: ?*c_void) c_int {
var mainwin2: ?*c.uiWindow = @ptrCast(?*c.uiWindow, data);
c.uiControlDestroy(toUiControl(mainwin2));
return 1;
}
fn makeBasicControlsPage() [*c]c.uiControl {
var vbox = c.uiNewVerticalBox();
c.uiBoxSetPadded(vbox, 1);
var hbox = c.uiNewHorizontalBox();
c.uiBoxSetPadded(hbox, 1);
c.uiBoxAppend(vbox, toUiControl(hbox), 0);
c.uiBoxAppend(hbox, toUiControl(c.uiNewButton(c"Button")), 0);
c.uiBoxAppend(hbox, toUiControl(c.uiNewCheckbox(c"Checkbox")), 0);
c.uiBoxAppend(vbox, toUiControl(c.uiNewLabel(c"This is a label. Right now, labels can only span one line.")), 0);
c.uiBoxAppend(vbox, toUiControl(c.uiNewHorizontalSeparator()), 0);
var group = c.uiNewGroup(c"Entries");
c.uiGroupSetMargined(group, 1);
c.uiBoxAppend(vbox, toUiControl(group), 1);
var entryForm = c.uiNewForm();
c.uiFormSetPadded(entryForm, 1);
c.uiGroupSetChild(group, toUiControl(entryForm));
c.uiFormAppend(entryForm, c"Entry", toUiControl(c.uiNewEntry()), 0);
c.uiFormAppend(entryForm, c"Password Entry", toUiControl(c.uiNewPasswordEntry()), 0);
c.uiFormAppend(entryForm, c"Search Entry", toUiControl(c.uiNewSearchEntry()), 0);
c.uiFormAppend(entryForm, c"Multiline Entry", toUiControl(c.uiNewMultilineEntry()), 1);
c.uiFormAppend(entryForm, c"Multiline Entry No Wrap", toUiControl(c.uiNewNonWrappingMultilineEntry()), 1);
return toUiControl(vbox);
}
var spinbox: ?*c.uiSpinbox = undefined;
var slider: ?*c.uiSlider = undefined;
var pbar: ?*c.uiProgressBar = undefined;
extern fn onSpinboxChanged(s: ?*c.uiSpinbox, data: ?*c_void) void {
c.uiSliderSetValue(slider, c.uiSpinboxValue(s));
c.uiProgressBarSetValue(pbar, c.uiSpinboxValue(s));
}
extern fn onSliderChanged(s: ?*c.uiSlider, data: ?*c_void) void {
c.uiSpinboxSetValue(spinbox, c.uiSliderValue(s));
c.uiProgressBarSetValue(pbar, c.uiSliderValue(s));
}
fn makeNumbersPage() [*c]c.uiControl {
var hbox = c.uiNewHorizontalBox();
c.uiBoxSetPadded(hbox, 1);
var group = c.uiNewGroup(c"Numbers");
c.uiGroupSetMargined(group, 1);
c.uiBoxAppend(hbox, toUiControl(group), 1);
var vbox = c.uiNewVerticalBox();
c.uiBoxSetPadded(vbox, 1);
c.uiGroupSetChild(group, toUiControl(vbox));
spinbox = c.uiNewSpinbox(0, 100);
slider = c.uiNewSlider(0, 100);
pbar = c.uiNewProgressBar();
c.uiSpinboxOnChanged(spinbox, onSpinboxChanged, null);
c.uiSliderOnChanged(slider, onSliderChanged, null);
c.uiBoxAppend(vbox, toUiControl(spinbox), 0);
c.uiBoxAppend(vbox, toUiControl(slider), 0);
c.uiBoxAppend(vbox, toUiControl(pbar), 0);
var ip = c.uiNewProgressBar();
c.uiProgressBarSetValue(ip, -1);
c.uiBoxAppend(vbox, toUiControl(ip), 0);
group = c.uiNewGroup(c"Lists");
c.uiGroupSetMargined(group, 1);
c.uiBoxAppend(hbox, toUiControl(group), 1);
vbox = c.uiNewVerticalBox();
c.uiBoxSetPadded(vbox, 1);
c.uiGroupSetChild(group, toUiControl(vbox));
var cbox = c.uiNewCombobox();
c.uiComboboxAppend(cbox, c"Combobox Item 1");
c.uiComboboxAppend(cbox, c"Combobox Item 2");
c.uiComboboxAppend(cbox, c"Combobox Item 3");
c.uiBoxAppend(vbox, toUiControl(cbox), 0);
var ecbox = c.uiNewEditableCombobox();
c.uiEditableComboboxAppend(ecbox, c"Editable Item 1");
c.uiEditableComboboxAppend(ecbox, c"Editable Item 2");
c.uiEditableComboboxAppend(ecbox, c"Editable Item 3");
c.uiBoxAppend(vbox, toUiControl(ecbox), 0);
var rb = c.uiNewRadioButtons();
c.uiRadioButtonsAppend(rb, c"Radio Button 1");
c.uiRadioButtonsAppend(rb, c"Radio Button 2");
c.uiRadioButtonsAppend(rb, c"Radio Button 3");
c.uiBoxAppend(vbox, toUiControl(rb), 0);
return toUiControl(hbox);
}
var mainwin: ?*c.uiWindow = undefined;
extern fn onOpenFileClicked(b: ?*c.uiButton, data: ?*c_void) void {
var entry = @ptrCast(?*c.uiEntry, data);
var filename = c.uiOpenFile(mainwin);
if (filename == null) {
c.uiEntrySetText(entry, c"(cancelled)");
return;
}
c.uiEntrySetText(entry, filename);
c.uiFreeText(filename);
}
extern fn onSaveFileClicked(b: ?*c.uiButton, data: ?*c_void) void {
var entry = @ptrCast(?*c.uiEntry, data);
var filename = c.uiSaveFile(mainwin);
if (filename == null) {
c.uiEntrySetText(entry, c"(cancelled)");
return;
}
c.uiEntrySetText(entry, filename);
c.uiFreeText(filename);
}
extern fn onMsgBoxClicked(b: ?*c.uiButton, data: ?*c_void) void {
c.uiMsgBox(mainwin, c"This is a normal message box.", c"More detailed information can be shown here.");
}
extern fn onMsgBoxErrorClicked(b: ?*c.uiButton, data: ?*c_void) void {
c.uiMsgBoxError(mainwin, c"This message box describes an error.", c"More detailed information can be shown here.");
}
/// Converts any control to the uiControl type
fn toUiControl(data: var) [*c]c.uiControl {
return @ptrCast([*c]c.uiControl, @alignCast(@alignOf(c.uiControl), data));
}
fn makeDataChoosersPage() [*c]c.uiControl {
var hbox = c.uiNewHorizontalBox();
c.uiBoxSetPadded(hbox, 1);
var vbox = c.uiNewVerticalBox();
c.uiBoxSetPadded(vbox, 1);
c.uiBoxAppend(hbox, toUiControl(vbox), 0);
c.uiBoxAppend(vbox, toUiControl(c.uiNewDatePicker()), 0);
c.uiBoxAppend(vbox, toUiControl(c.uiNewTimePicker()), 0);
c.uiBoxAppend(vbox, toUiControl(c.uiNewDateTimePicker()), 0);
c.uiBoxAppend(vbox, toUiControl(c.uiNewFontButton()), 0);
c.uiBoxAppend(vbox, toUiControl(c.uiNewColorButton()), 0);
c.uiBoxAppend(hbox, toUiControl(c.uiNewVerticalSeparator()), 0);
vbox = c.uiNewVerticalBox();
c.uiBoxSetPadded(vbox, 1);
c.uiBoxAppend(hbox, toUiControl(vbox), 1);
var grid = c.uiNewGrid();
c.uiGridSetPadded(grid, 1);
c.uiBoxAppend(vbox, toUiControl(grid), 0);
var button = c.uiNewButton(c"Open File");
var entry = c.uiNewEntry();
c.uiEntrySetReadOnly(entry, 1);
c.uiButtonOnClicked(button, onOpenFileClicked, @ptrCast(?*c_void, entry));
c.uiGridAppend(grid, toUiControl(button), 0, 0, 1, 1, 0, c.uiAlign(c.uiAlignFill), 0, c.uiAlign(c.uiAlignFill));
c.uiGridAppend(grid, toUiControl(entry), 1, 0, 1, 1, 1, c.uiAlign(c.uiAlignFill), 0, c.uiAlign(c.uiAlignFill));
button = c.uiNewButton(c"Save File");
entry = c.uiNewEntry();
c.uiEntrySetReadOnly(entry, 1);
c.uiButtonOnClicked(button, onSaveFileClicked, @ptrCast(?*c_void, entry));
c.uiGridAppend(grid, toUiControl(button), 0, 1, 1, 1, 0, c.uiAlign(c.uiAlignFill), 0, c.uiAlign(c.uiAlignFill));
c.uiGridAppend(grid, toUiControl(entry), 1, 1, 1, 1, 1, c.uiAlign(c.uiAlignFill), 0, c.uiAlign(c.uiAlignFill));
var msggrid = c.uiNewGrid();
c.uiGridSetPadded(msggrid, 1);
c.uiGridAppend(grid, toUiControl(msggrid), 0, 2, 2, 1, 0, c.uiAlign(c.uiAlignCenter), 0, c.uiAlign(c.uiAlignStart));
button = c.uiNewButton(c"Message Box");
c.uiButtonOnClicked(button, onMsgBoxClicked, null);
c.uiGridAppend(msggrid, toUiControl(button), 0, 0, 1, 1, 0, c.uiAlign(c.uiAlignFill), 0, c.uiAlign(c.uiAlignFill));
button = c.uiNewButton(c"Error Box");
c.uiButtonOnClicked(button, onMsgBoxErrorClicked, null);
c.uiGridAppend(msggrid, toUiControl(button), 1, 0, 1, 1, 0, c.uiAlign(c.uiAlignFill), 0, c.uiAlign(c.uiAlignFill));
return toUiControl(hbox);
}
pub fn main() u8 {
var options = c.uiInitOptions{ .Size = 0 };
var err = c.uiInit(&options);
if (err != null) {
warn("error initializing libui: {}\n", err);
c.uiFreeInitError(err);
return 1;
}
mainwin = c.uiNewWindow(c"libui Control Gallery", 640, 480, 1);
c.uiWindowOnClosing(mainwin, onClosing, null);
c.uiOnShouldQuit(onShouldQuit, @ptrCast(?*c_void, mainwin));
var tab = c.uiNewTab();
c.uiWindowSetChild(mainwin, toUiControl(tab));
c.uiWindowSetMargined(mainwin, 1);
c.uiTabAppend(tab, c"Basic Controls", makeBasicControlsPage());
c.uiTabSetMargined(tab, 0, 1);
c.uiTabAppend(tab, c"Numbers and Lists", makeNumbersPage());
c.uiTabSetMargined(tab, 1, 1);
c.uiTabAppend(tab, c"Data Choosers", makeDataChoosersPage());
c.uiTabSetMargined(tab, 2, 1);
c.uiControlShow(toUiControl(mainwin));
c.uiMain();
return 0;
} | src/main.zig |
const std = @import("std");
const unicode = @import("unicode");
const io = std.io;
const mem = std.mem;
const utf8 = unicode.utf8;
const warn = std.debug.warn;
const Allocator = mem.Allocator;
pub const WriteError = io.BufferOutStream.Error;
/// WriterCommon returns a csv Writer type that writes to an OutStream
/// parameterized with the given Error type.
pub fn WriterCommon(comptime Error: type) type {
    return struct {
        const Self = @This();
        pub const BufferedOutStream = io.BufferedOutStream(Error);
buffer_stream: BufferedOutStream,
comma: u8,
use_crlf: bool,
pub fn init(stream: *BufferedOutStream.Stream) Self {
return Self{
.buffer_stream = BufferedOutStream.init(stream),
.comma = ',',
.use_crlf = false,
};
}
pub fn flush(self: *Self) !void {
try self.buffer_stream.flush();
}
pub fn write(self: *Self, records: []const []const u8) !void {
var stream = &self.buffer_stream.stream;
if (!validDelim(self.comma)) {
return error.InvalidDelim;
}
for (records) |field, n| {
if (n > 0) {
try stream.writeByte(self.comma);
}
// If we don't have to have a quoted field then just
// write out the field and continue to the next field.
if (!fieldNeedsQuotes(self.comma, field)) {
try stream.write(field);
continue;
}
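            // Quoted field: emit the opening quote, then copy the field while
            // escaping embedded quotes and normalizing line endings.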
try stream.writeByte('"');
var f = field;
while (f.len > 0) {
var i = f.len;
if (mem.indexOfAny(u8, f, "\"\r\n")) |idx| {
i = idx;
}
try stream.write(f[0..i]);
f = f[i..];
if (f.len > 0) {
switch (f[0]) {
'"' => {
try stream.write(
\\""
);
},
'\r' => {
if (!self.use_crlf) {
try stream.writeByte('\r');
}
},
'\n' => {
if (self.use_crlf) {
try stream.write("\r\n");
} else {
try stream.writeByte('\n');
}
},
else => {},
}
f = f[1..];
}
}
try stream.writeByte('"');
}
if (self.use_crlf) {
try stream.write("\r\n");
} else {
try stream.writeByte('\n');
}
}
};
}
/// Writer that can write to streams from io.BufferOutStream.
///
/// Please see WriterCommon if you want to write to a custom stream
/// implementation.
pub const Writer = WriterCommon(WriteError);
fn validDelim(r: u8) bool {
return r != 0 and r != '"' and r != '\r' and r != '\n';
}
/// fieldNeedsQuotes reports whether our field must be enclosed in quotes.
/// Fields with a Comma, fields with a quote or newline, and
/// fields which start with a space must be enclosed in quotes.
/// We used to quote empty strings, but we do not anymore (as of Go 1.4).
/// The two representations should be equivalent, but Postgres distinguishes
/// quoted vs non-quoted empty string during database imports, and it has
/// an option to force the quoted behavior for non-quoted CSV but it has
/// no option to force the non-quoted behavior for quoted CSV, making
/// CSV with quoted empty strings strictly less useful.
/// Not quoting the empty string also makes this package match the behavior
/// of Microsoft Excel and Google Drive.
/// For Postgres, quote the data terminating string `\.`.
fn fieldNeedsQuotes(comma: u8, field: []const u8) bool {
if (field.len == 0) return false;
const back_dot =
\\\.
;
if (mem.eql(u8, field, back_dot) or
mem.indexOfScalar(u8, field, comma) != null or
mem.indexOfAny(u8, field, "\"\r\n") != null)
{
return true;
}
const rune = utf8.decodeRune(field) catch |err| {
return false;
};
return unicode.isSpace(rune.value);
}
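/// Collects the fields of a single CSV record. The embedded arena provides
/// scratch storage (see ga()); reset() clears the field list and frees the arena.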
pub const Record = struct {
arena: std.heap.ArenaAllocator,
lines: Lines,
pub fn init(allocator: *Allocator) Record {
return Record{
.arena = std.heap.ArenaAllocator.init(allocator),
            .lines = Lines.init(allocator),
};
}
pub fn append(self: *Record, line: []const u8) !void {
try self.lines.append(line);
}
    pub fn reset(self: *Record) !void {
try self.lines.resize(0);
self.arena.deinit();
self.arena.buffer_list.first = null;
}
pub fn size(self: *Record) usize {
return self.lines.len;
}
pub fn ga(self: *Record) *Allocator {
return &self.arena.allocator;
}
};
pub const Lines = std.ArrayList([]const u8);
pub const ParserError = struct {
start_line: usize,
line: usize,
column: usize,
err: []const u8,
pub fn init(
start_line: usize,
line: usize,
column: usize,
err: []const u8,
) ParserError {
return ParserError{
.start_line = start_line,
.line = line,
.column = column,
.err = err,
};
}
};
/// Error is the error set of the input stream that the reader will be reading from.
pub fn ReaderCommon(comptime Error: type) type {
return struct {
const Self = @This();
/// Comma is the field delimiter.
/// It is set to comma (',') by NewReader.
/// Comma must be a valid rune and must not be \r, \n,
/// or the Unicode replacement character (0xFFFD).
comma: u8,
/// Comment, if not 0, is the comment character. Lines beginning with the
/// Comment character without preceding whitespace are ignored.
/// With leading whitespace the Comment character becomes part of the
/// field, even if TrimLeadingSpace is true.
/// Comment must be a valid rune and must not be \r, \n,
/// or the Unicode replacement character (0xFFFD).
/// It must also not be equal to Comma.
comment: u8,
/// fields_per_record is the number of expected fields per record.
/// If FieldsPerRecord is positive, Read requires each record to
/// have the given number of fields. If FieldsPerRecord is 0, Read sets it to
/// the number of fields in the first record, so that future records must
/// have the same field count. If fields_per_record is null, no check is
/// made and records may have a variable number of fields.
fields_per_record: ?usize,
/// If lazy_quotes is true, a quote may appear in an unquoted field and a
/// non-doubled quote may appear in a quoted field.
lazy_quotes: bool,
/// If TrimLeadingSpace is true, leading white space in a field is ignored.
/// This is done even if the field delimiter, Comma, is white space.
trim_leading_space: bool,
/// The stream of csv data
in_stream: *InStream,
num_line: usize,
record_buffer: std.Buffer,
field_index: std.ArrayList(usize),
pub const InStream = io.InStream(Error);
pub fn init(allocator: *Allocator, stream: *InStream) Self {
return Self{
.comma = ',',
.comment = 0,
.fields_per_record = 0,
.lazy_quotes = false,
.trim_leading_space = false,
.in_stream = stream,
.num_line = 0,
                .record_buffer = std.Buffer.init(allocator, "") catch unreachable,
                .field_index = std.ArrayList(usize).init(allocator),
};
}
pub fn deinit(self: *Self) void {
self.record_buffer.deinit();
self.field_index.deinit();
}
pub fn read(self: *Self, record: *Record) !void {
if (self.comma == self.comment or
!validDelim(self.comma) or
self.comment != 0 and !validDelim(self.comment))
{
return error.InvalidDelim;
}
var line_buffer = &try std.Buffer.init(record.ga(), "");
            var full_line: []const u8 = "";
            var line: []const u8 = "";
while (true) {
try self.readLine(line_buffer);
line = line_buffer.toSlice();
if (self.comment != 0 and nextRune(line, self.comment)) {
line = "";
continue;
}
if (line.len == 0) {
continue; //empty line
}
                full_line = line;
                break;
}
const comma_len: usize = 1;
const quote_len: usize = 1;
var record_line = self.num_line;
try self.record_buffer.resize(0);
try self.field_index.resize(0);
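            // Fields are accumulated back to back in record_buffer; field_index
            // records the end offset of each field so the record can be sliced
            // apart once the whole line has been parsed.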
parse_field: while (true) {
if (self.trim_leading_space) {
line = trimLeft(line);
}
                if (line.len == 0 or line[0] != '"') {
// Non-quoted string field
var field = line;
var ix: ?usize = null;
if (indexRune(line, self.comma)) |i| {
field = field[0..i];
ix = i;
}
if (!self.lazy_quotes) {
                        if (mem.indexOfScalar(u8, field, '"')) |i| {
                            // Column of the bare quote within the current line.
                            const col = full_line.len - line.len + i;
                            const e = ParserError.init(
                                record_line,
                                self.num_line,
                                col,
"BareQuote",
);
warn("csv: {}\n", e);
return error.BareQuote;
}
}
try self.record_buffer.append(field);
try self.field_index.append(self.record_buffer.len());
if (ix) |i| {
line = line[i + comma_len ..];
continue :parse_field;
}
break :parse_field;
} else {
line = line[quote_len..];
while (true) {
if (mem.indexOfScalar(u8, line, '"')) |i| {
try self.record_buffer.append(line[0..i]);
line = line[i + quote_len ..];
                            if (line.len > 0) {
                                if (line[0] == '"') {
                                    // Escaped quote ("") - emit a single quote.
                                    try self.record_buffer.appendByte('"');
                                    line = line[quote_len..];
                                } else if (line[0] == self.comma) {
                                    // End of the quoted field.
                                    line = line[comma_len..];
                                    try self.field_index.append(self.record_buffer.len());
                                    continue :parse_field;
                                } else if (self.lazy_quotes) {
                                    // Bare quote inside a quoted field is accepted in lazy mode.
                                    try self.record_buffer.appendByte('"');
                                } else {
                                    const col = full_line.len - line.len - quote_len;
                                    const e = ParserError.init(
                                        record_line,
                                        self.num_line,
                                        col,
                                        "Quote",
                                    );
                                    warn("csv: {}\n", e);
                                    return error.Quote;
                                }
                            } else {
                                try self.field_index.append(self.record_buffer.len());
                                break :parse_field;
                            }
                        } else if (line.len > 0) {
                            // Hit end of line: copy what we have and read the next line.
                            try self.record_buffer.append(line);
                            try self.readLine(line_buffer);
                            line = line_buffer.toSlice();
                            full_line = line;
                        } else {
                            // Abrupt end of the quoted field: keep what was read so far.
                            try self.field_index.append(self.record_buffer.len());
                            break :parse_field;
                        }
}
}
}
var pre_id: usize = 0;
const src = self.record_buffer.toSlice();
try record.reset();
            for (self.field_index.toSlice()) |idx| {
try record.append(src[pre_id..idx]);
pre_id = idx;
}
            if (self.fields_per_record) |fpr| {
                if (fpr > 0) {
                    if (record.size() != fpr) {
                        const e = ParserError.init(
                            record_line,
                            record_line,
                            0, // no specific column for a field-count mismatch
                            "FieldCount",
                        );
                        warn("csv: {}\n", e);
                        return error.FieldCount;
                    }
                } else {
                    // First record: remember its field count for later records.
                    self.fields_per_record = record.size();
                }
            }
}
//trims space at the beginning of s
fn trimLeft(s: []const u8) []const u8 {
var i: usize = 0;
while (i < s.len) {
if (!std.ascii.isSpace(s[i])) {
break;
}
i += 1;
}
return s[i..];
}
fn indexRune(s: []const u8, rune: u8) ?usize {
return mem.indexOfScalar(u8, s, rune);
}
        fn readLine(self: *Self, buf: *std.Buffer) !void {
readLineInternal(self, buf) catch |err| {
if (buf.len() > 0 and err == error.EndOfStream) {
                    if (buf.endsWith("\r")) {
try buf.resize(buf.len() - 1);
}
self.num_line += 1;
return;
}
return err;
};
self.num_line += 1;
//TODO normalize \r\n
}
const max_line_size: usize = 1024 * 5;
        fn readLineInternal(self: *Self, buf: *std.Buffer) !void {
            try buf.resize(0);
            try self.in_stream.readUntilDelimiterBuffer(buf, '\n', max_line_size);
        }
};
}
fn nextRune(s: []const u8, rune: u8) bool {
if (s.len > 0) return s[0] == rune;
return false;
} | src/encoding/csv/csv.zig |
const std = @import("std");
const builtin = @import("builtin");
const argsParser = @import("args");
const ihex = @import("ihex");
const spu = @import("spu-mk2");
const common = @import("shared.zig");
extern "kernel32" fn SetConsoleMode(hConsoleHandle: std.os.windows.HANDLE, dwMode: std.os.windows.DWORD) callconv(.Stdcall) std.os.windows.BOOL;
pub fn dumpState(emu: *spu.SpuMk2(common.WasmDemoMachine)) !void {
const stdout = std.io.getStdOut().writer();
try stdout.print(
"\r\nstate: IP={X:0>4} SP={X:0>4} BP={X:0>4} FR={X:0>4}\r\n",
.{
emu.ip,
emu.sp,
emu.bp,
@bitCast(u16, emu.fr),
// emu.bus_addr,
// @tagName(emu.stage),
},
);
try stdout.writeAll("stack:\r\n");
var offset: i8 = -6;
while (offset <= 6) : (offset += 1) {
const msg: []const u8 = if (offset == 0) " <-" else ""; // workaround for tuple bug
const addr = if (offset < 0) emu.sp -% @intCast(u8, -2 * offset) else emu.sp +% @intCast(u8, 2 * offset);
const msg_2: []const u8 = if (addr == emu.bp) " (BASE)" else "";
const value = emu.readWord(addr) catch @as(u16, 0xAAAA);
try stdout.print(" {X:0>4}: [SP{:0>2}]={X:0>4}{s}{s}\r\n", .{
addr,
offset,
value,
msg,
msg_2,
});
}
}
var termios_bak: std.os.termios = undefined;
fn outputErrorMsg(emu: *spu.SpuMk2(common.WasmDemoMachine), err: anyerror) !u8 {
const stdin = std.io.getStdIn();
// reset terminal before outputting error messages
if (builtin.os.tag == .linux) {
try std.os.tcsetattr(stdin.handle, .NOW, termios_bak);
}
// const time = timer.read();
try std.io.getStdOut().writer().print("\nerror: {s}\n", .{
@errorName(err),
});
try dumpState(emu);
switch (err) {
error.BusError, error.UserBreak => return 1,
else => return err,
}
unreachable;
}
pub fn main() !u8 {
const cli_args = argsParser.parseForCurrentProcess(struct {
help: bool = false,
@"entry-point": u16 = 0x0000,
trace: bool = false,
@"memory-dump": ?[]const u8 = null,
pub const shorthands = .{
.h = "help",
.e = "entry-point",
};
}, std.heap.page_allocator, .print) catch return 1;
defer cli_args.deinit();
if (cli_args.options.help or cli_args.positionals.len == 0) {
try std.io.getStdOut().writer().writeAll(
\\emulator [--help] initialization.hex […]
\\Emulates the Ashet Home Computer, based on the SPU Mark II.
\\Each file passed as an argument will be loaded into the memory
\\and provides an initialization for ROM and RAM.
\\
\\-h, --help Displays this help text.
\\
);
return if (cli_args.options.help) @as(u8, 0) else @as(u8, 1);
}
var debugger = Debugger{};
var emu = spu.SpuMk2(common.WasmDemoMachine).init(.{});
const memory = &emu.memory;
if (cli_args.options.trace) {
emu.debug_interface = &debugger.interface;
}
const hexParseMode = ihex.ParseMode{ .pedantic = true };
for (cli_args.positionals) |path| {
var file = try std.fs.cwd().openFile(path, .{ .read = true, .write = false });
defer file.close();
// Emulator will always start at address 0x0000 or CLI given entry point.
_ = try ihex.parseData(file.reader(), hexParseMode, memory, common.WasmDemoMachine.LoaderError, common.WasmDemoMachine.loadHexRecord);
}
emu.ip = cli_args.options.@"entry-point";
const stdin = std.io.getStdIn();
if (builtin.os.tag == .linux) {
const original = try std.os.tcgetattr(stdin.handle);
var modified_raw = original;
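        // Octal termios flag values (matching the Linux <termios.h> definitions),
        // used below to switch stdin into raw, non-canonical mode.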
const IGNBRK = 0o0000001;
const BRKINT = 0o0000002;
const PARMRK = 0o0000010;
const ISTRIP = 0o0000040;
const INLCR = 0o0000100;
const IGNCR = 0o0000200;
const ICRNL = 0o0000400;
const IXON = 0o0002000;
const OPOST = 0o0000001;
const ECHO = 0o0000010;
const ECHONL = 0o0000100;
const ICANON = 0o0000002;
const ISIG = 0o0000001;
const IEXTEN = 0o0100000;
const CSIZE = 0o0000060;
const PARENB = 0o0000400;
const CS8 = 0o0000060;
// Note that this will also disable break signals!
_ = 0;
_ = 0;
_ = 0 | ISIG;
modified_raw.iflag &= ~@as(std.os.linux.tcflag_t, BRKINT | IGNBRK | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON);
modified_raw.oflag &= ~@as(std.os.linux.tcflag_t, OPOST);
modified_raw.lflag &= ~@as(std.os.linux.tcflag_t, ECHO | ECHONL | ICANON | IEXTEN);
modified_raw.cflag &= ~@as(std.os.linux.tcflag_t, CSIZE | PARENB);
modified_raw.cflag |= CS8;
try std.os.tcsetattr(stdin.handle, .NOW, modified_raw);
termios_bak = original;
}
defer if (builtin.os.tag == .linux) {
std.os.tcsetattr(stdin.handle, .NOW, termios_bak) catch {
std.log.err("Failed to reset stdin. Please call stty sane to get back a proper terminal experience!", .{});
};
};
if (builtin.os.tag == .windows) {
if (SetConsoleMode(stdin.handle, 0) == 0)
return error.FailedToSetConsole;
}
// defer std.debug.warn("Executed {} instructions!\n", .{emu.count});
main_loop: while (true) {
emu.runBatch(10_000) catch |err| switch (err) {
error.CpuHalted => break :main_loop,
error.DebugBreak => try dumpState(&emu),
else => |e| return try outputErrorMsg(&emu, e),
};
}
if (cli_args.options.@"memory-dump") |file_name| {
try std.fs.cwd().writeFile(file_name, &emu.memory.memory);
}
return 0;
}
const Debugger = struct {
interface: spu.DebugInterface = .{
.traceInstructionFn = traceInstruction,
.traceAddressFn = traceAddress,
},
fn traceInstruction(interface: *spu.DebugInterface, ip: u16, instruction: spu.Instruction, input0: u16, input1: u16, output: u16) void {
const self = @fieldParentPtr(Debugger, "interface", interface);
_ = self;
const stdout = std.io.getStdOut().writer();
stdout.print("offset={X:0>4} instr={}\tinput0={X:0>4}\tinput1={X:0>4}\toutput={X:0>4}\r\n", .{
ip,
instruction,
input0,
input1,
output,
}) catch {};
}
fn traceAddress(interface: *spu.DebugInterface, virt: u16) spu.DebugInterface.TraceError!void {
const self = @fieldParentPtr(Debugger, "interface", interface);
_ = self;
_ = virt;
//
}
};
pub const SerialEmulator = struct {
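    /// Non-blocking read from stdin: returns the next byte if one is pending,
    /// 0xFFFF when no input is available, or error.UserBreak on Ctrl-C (0x03).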
pub fn read() !u16 {
const stdin = std.io.getStdIn();
if (builtin.os.tag == .linux) {
var fds = [1]std.os.pollfd{
.{
.fd = stdin.handle,
.events = std.os.POLL.IN,
.revents = 0,
},
};
_ = try std.os.poll(&fds, 0);
if ((fds[0].revents & std.os.POLL.IN) != 0) {
const val = @as(u16, try stdin.reader().readByte());
if (val == 0x03) // CTRL_C
return error.UserBreak;
return val;
}
}
if (builtin.os.tag == .windows) {
std.os.windows.WaitForSingleObject(stdin.handle, 0) catch |err| switch (err) {
error.WaitTimeOut => return 0xFFFF,
else => return err,
};
const val = @as(u16, try stdin.reader().readByte());
if (val == 0x03) // CTRL_C
return error.UserBreak;
return val;
}
return 0xFFFF;
}
pub fn write(value: u16) !void {
try std.io.getStdOut().writer().print("{c}", .{@truncate(u8, value)});
// std.time.sleep(50 * std.time.millisecond);
}
}; | tools/emulator/pc-main.zig |
const inputFile = @embedFile("./input/day08.txt");
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const assert = std.debug.assert;
// 0(6): abcefg
// 1(2): cf
// 2(5): acdeg
// 3(5): acdfg
// 4(4): bcdf
// 5(5): abdfg
// 6(6): abdefg
// 7(3): acf
// 8(7): abcdefg
// 9(6): abcdfg
/// 1(2): cf
/// 4(4): bcdf
/// 7(3): acf
/// 8(7): abcdefg
const digitLengths = struct {
One: usize = 2,
Four: usize = 4,
Seven: usize = 3,
Eight: usize = 7,
}{};
const PartOneResult = struct {
n1: u32,
n4: u32,
n7: u32,
n8: u32,
};
pub fn partOne(lines: []Line) PartOneResult {
var result = std.mem.zeroes(PartOneResult);
for (lines) |line| {
for (line.out) |outDigit| {
switch (outDigit.len) {
digitLengths.One => result.n1 += 1,
digitLengths.Four => result.n4 += 1,
digitLengths.Seven => result.n7 += 1,
digitLengths.Eight => result.n8 += 1,
else => continue,
}
}
}
return result;
}
pub fn partTwo(lines: []Line) u32 {
var result: u32 = 0;
const powTens = [_]u32{ 1000, 100, 10, 1 };
for (lines) |line| {
const digits = inferDigits(line.digits);
for (line.out) |outStr, i| {
for (digits) |digitStr, digit| {
if (std.mem.eql(u8, digitStr, outStr)) {
result += @intCast(u32, digit) * powTens[i];
break;
}
}
}
}
return result;
}
fn debugPrintInference(digits: [10][]const u8) void {
std.debug.print("Inferred ", .{});
for (digits) |digit, i| {
std.debug.print("{s} - {d} ", .{ digit, i });
}
std.debug.print("\n", .{});
}
/// # Algorithm:
/// First, identify all known digits (1, 4, 7, 8)
/// Compare 4 and all 6-len strings. 4 is a subset of 9 only. This gives you 9.
/// Compare the other two 6 length strings and 1. 1 is a subset of 0 only. This gives you 0 and 6.
/// Compare 1 and all 5-len strings. 1 is a subset of 3 only. This gives you 3
/// Compute (8 - 6). The missing character is cc
/// Compare two remaining 5-len strings. The one with cc is 2, the other is 5.
fn inferDigits(digits: [10][]const u8) [10][]const u8 {
var one: []const u8 = undefined;
var four: []const u8 = undefined;
var seven: []const u8 = undefined;
var eight: []const u8 = undefined;
var len5: [3][]const u8 = undefined;
var len6: [3][]const u8 = undefined;
// Step 1: Identify all known digits
{
var len5Count: usize = 0;
var len6Count: usize = 0;
for (digits) |digit| {
switch (digit.len) {
digitLengths.One => one = digit,
digitLengths.Four => four = digit,
digitLengths.Seven => seven = digit,
digitLengths.Eight => eight = digit,
6 => {
len6[len6Count] = digit;
len6Count += 1;
},
5 => {
len5[len5Count] = digit;
len5Count += 1;
},
else => unreachable,
}
}
assert(len5Count == 3);
assert(len6Count == 3);
}
// Step 2: Filter out 6 len strings
const nineIdx = findSubset(len6, four);
const zeroIdx = blk: {
for (len6) |slic, i| {
if (i == nineIdx) continue;
if (isSubsetOf(one, slic)) break :blk i;
}
unreachable;
};
const sixIdx = oddOneOut(nineIdx, zeroIdx);
const six = len6[sixIdx];
// Step 3: five len strings
const threeIdx = findSubset(len5, one);
const twoIdx = blk: {
const cc = digitDiff(eight, six);
for (len5) |slic, i| {
if (i == threeIdx) continue;
if (contains(slic, cc)) break :blk i;
}
unreachable;
};
const fiveIdx = oddOneOut(threeIdx, twoIdx);
return [_][]const u8{
len6[zeroIdx],
one,
len5[twoIdx],
len5[threeIdx],
four,
len5[fiveIdx],
six,
seven,
eight,
len6[nineIdx],
};
}
fn contains(x: []const u8, c: u8) bool {
for (x) |xc| {
if (xc == c) return true;
}
return false;
}
// Returns the char present in x not in y
fn digitDiff(x: []const u8, y: []const u8) u8 {
assert(x.len - y.len == 1);
for (x) |c, i| {
if (i == x.len - 1 or y[i] != c) return c;
}
unreachable;
}
// from 0 - 2, return the one that's not a or b
fn oddOneOut(a: usize, b: usize) usize {
var i: usize = 0;
while (i < 3) : (i += 1) {
if (i != a and i != b) return i;
}
unreachable;
}
fn findSubset(arr: [3][]const u8, subset: []const u8) usize {
for (arr) |slic, i| {
if (isSubsetOf(subset, slic)) return i;
}
unreachable;
}
// x, y are sorted and x.len < y.len
fn isSubsetOf(x: []const u8, y: []const u8) bool {
assert(x.len < y.len);
var yi: usize = 0;
for (x) |xc| {
// Try to scan forward and find the char
while (true) {
if (yi == y.len) return false;
if (y[yi] == xc) break;
yi += 1;
}
}
return true;
}
test "isSubsetOf" {
try std.testing.expect(isSubsetOf("afg", "abcfsdg"));
try std.testing.expect(!isSubsetOf("afgh", "abcfsdg"));
try std.testing.expect(!isSubsetOf("agh", "abcfsdg"));
}
pub fn main() !void {
// Standard boilerplate for Aoc problems
const stdout = std.io.getStdOut().writer();
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const gpaAllocator = gpa.allocator();
defer assert(!gpa.deinit()); // Check for memory leaks
var arena = std.heap.ArenaAllocator.init(gpaAllocator);
defer arena.deinit();
    const allocator = arena.allocator(); // use an arena
// don't free, will be freed in arena
const lines = try parseInput(inputFile, allocator);
const part1 = partOne(lines);
const part1Total = part1.n1 + part1.n4 + part1.n7 + part1.n8;
try stdout.print("Part 1: {any}, total: {d}\n", .{ part1, part1Total });
try stdout.print("Part 2: {d}\n", .{partTwo(lines)});
}
const Line = struct {
digits: [10][]const u8,
out: [4][]const u8,
pub fn deinit(self: @This(), allocator: Allocator) void {
for (self.digits) |digit| {
allocator.free(digit);
}
for (self.out) |x| {
allocator.free(x);
}
}
};
const ascU8 = std.sort.asc(u8); // somehow std.sort.asc_u8 isn't exported?
/// Caller is responsible for freeing memory
/// Since this creates a lot of small allocations (we sort the input strings),
/// caller is recommended to use a Arena Allocator
fn parseInput(input: []const u8, allocator: Allocator) ![]Line {
var start: usize = 0;
var lines = ArrayList(Line).init(allocator);
errdefer lines.deinit();
errdefer for (lines.items) |line| {
line.deinit(allocator);
};
// A line consists of exactly 10 slices then a | then four more slices
while (std.mem.indexOfScalarPos(u8, input, start, '\n')) |lineEnd| : (start = lineEnd + 1) {
//
// Fill in Digits
//
var digits: [10][]const u8 = undefined;
// Number of allocated digits so far (could fail at any point)
var digitsCount: usize = 0;
errdefer while (digitsCount > 0) {
digitsCount -= 1;
allocator.free(digits[digitsCount]);
};
while (digitsCount < 10) : (digitsCount += 1) {
const sliceEnd = std.mem.indexOfScalarPos(u8, input, start, ' ').?;
defer start = sliceEnd + 1;
const slice = try allocator.dupe(u8, input[start..sliceEnd]);
std.sort.sort(u8, slice, {}, ascU8);
digits[digitsCount] = slice;
}
assert(input[start] == '|');
assert(input[start + 1] == ' ');
start += 2;
//
// Fill in out
//
var out: [4][]const u8 = undefined;
var outCount: usize = 0;
errdefer while (outCount > 0) {
outCount -= 1;
allocator.free(out[outCount]);
};
while (outCount < 4) : (outCount += 1) {
const sliceEnd = std.mem.indexOfAnyPos(u8, input, start, &.{ ' ', '\n' }).?;
defer start = sliceEnd + 1;
const slice = try allocator.dupe(u8, input[start..sliceEnd]);
std.sort.sort(u8, slice, {}, ascU8);
out[outCount] = slice;
}
try lines.append(Line{
.digits = digits,
.out = out,
});
}
return lines.toOwnedSlice();
}
///
/// TESTING
///
const testInput =
\\be cfbegad cbdgef fgaecd cgeb fdcge agebfd fecdb fabcd edb | fdgacbe cefdb cefbgd gcbe
\\edbfga begcd cbg gc gcadebf fbgde acbgfd abcde gfcbed gfec | fcgedb cgb dgebacf gc
\\fgaebd cg bdaec gdafb agbcfd gdcbef bgcad gfac gcb cdgabef | cg cg fdcagb cbg
\\fbegcd cbd adcefb dageb afcb bc aefdc ecdab fgdeca fcdbega | efabcd cedba gadfec cb
\\aecbfdg fbg gf bafeg dbefa fcge gcbea fcaegb dgceab fcbdga | gecf egdcabf bgf bfgea
\\fgeab ca afcebg bdacfeg cfaedg gcfdb baec bfadeg bafgc acf | gebdcfa ecba ca fadegcb
\\dbcfg fgd bdegcaf fgec aegbdf ecdfab fbedc dacgb gdcebf gf | cefg dcbef fcge gbcadfe
\\bdfegc cbegaf gecbf dfcage bdacg ed bedf ced adcbefg gebcd | ed bcgafe cdgba cbgef
\\egadfb cdbfeg cegd fecab cgb gbdefca cg fgcdab egfdb bfceg | gbdfcae bgc cg cgb
\\gcafb gcf dcaebfg ecagb gf abcdeg gaef cafbge fdbac fegbdc | fgae cfgab fg bagce
\\
;
test "Part 1" {
var allocator = std.testing.allocator;
const lines = try parseInput(testInput, allocator);
defer {
for (lines) |line| {
line.deinit(allocator);
}
allocator.free(lines);
}
const part1 = partOne(lines);
const part1Total = part1.n1 + part1.n4 + part1.n7 + part1.n8;
try std.testing.expectEqual(@as(u32, 26), part1Total);
}
test "Parsing with failing allocator" {
var failNums: usize = 0;
while (failNums < 200) : (failNums += 4) {
var allocator = std.testing.FailingAllocator.init(std.testing.allocator, failNums).allocator();
const linesOrErr = parseInput(testInput, allocator);
if (linesOrErr) |lines| {
for (lines) |line| {
line.deinit(allocator);
}
allocator.free(lines);
} else |err| {
try std.testing.expectEqual(error.OutOfMemory, err);
}
}
}
test "part 2 single" {
const singleTestInput =
\\acedgfb cdfbe gcdfa fbcad dab cefabd cdfgeb eafb cagedb ab | cdfeb fcadb cdfeb cdbaf
\\
;
var allocator = std.testing.allocator;
const lines = try parseInput(singleTestInput, allocator);
defer {
for (lines) |line| {
line.deinit(allocator);
}
allocator.free(lines);
}
const part2 = partTwo(lines);
try std.testing.expectEqual(@as(u32, 5353), part2);
}
test "part 2" {
var allocator = std.testing.allocator;
const lines = try parseInput(testInput, allocator);
defer {
for (lines) |line| {
line.deinit(allocator);
}
allocator.free(lines);
}
const part2 = partTwo(lines);
try std.testing.expectEqual(@as(u32, 61229), part2);
} | src/day08.zig |
const Cpu = @import("cpu.zig").Cpu;
const Keypad = @import("keypad.zig").Keypad;
usingnamespace @import("std").testing;
fn testUpdateFrame(frame: []u1) void {}
test "Next opcode" {
var cpu = Cpu.init(.{});
cpu.memory[0x200] = 0xA2;
cpu.memory[0x201] = 0xF0;
expect(cpu.fetchOpcode() == 0xA2F0);
}
test "Load data" {
var cpu = Cpu.init(.{});
const data = [_]u8{ 0x01, 0x02 };
cpu.loadBytes(&data);
expect(cpu.fetchOpcode() == 0x0102);
expect(cpu.memory[0x200] == 0x01);
expect(cpu.memory[0x201] == 0x02);
}
test "Cycle" {
var cpu = Cpu.init(.{});
const data = [_]u8{ 0xA1, 0x00 };
cpu.loadBytes(&data);
const opcode = cpu.fetchOpcode();
try cpu.cycle();
// program counter moves 2 per cycle (except for some opcode conditions where it skips)
expect(cpu.pc == 0x202);
expect(opcode == 0xA100);
}
test "Expect Unknown Opcode" {
var cpu = Cpu.init(.{});
expectError(error.UnknownOpcode, cpu.dispatch(0x1));
}
test "Key (un)pressed" {
var keypad = Keypad{ .mutex = @import("std").Mutex.init() };
keypad.pressKey(.Two);
expect(keypad.keys[0x2] == 0x1);
keypad.releaseKey(.Two);
expect(keypad.keys[0x2] == 0x0);
}
test "All opcodes" {
// Not the cleanest tests but gets the job done for now
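    // Opcode mnemonics follow the usual CHIP-8 notation: nnn = 12-bit address,
    // kk = 8-bit constant, n = 4-bit nibble, x/y = register indices (Vx, Vy).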
// 2nnn - CALL addr
var cpu = Cpu.init(.{});
try cpu.dispatch(0x2100);
expectEqual(cpu.pc, 0x100);
expectEqual(cpu.sp, 0x1);
expectEqual(cpu.stack[0], 0x202);
// 3xkk - SE Vx, byte
cpu = Cpu.init(.{});
try cpu.dispatch(0x3123);
expectEqual(cpu.pc, 0x202);
cpu = Cpu.init(.{});
cpu.registers[1] = 0x03;
try cpu.dispatch(0x3103);
expectEqual(cpu.pc, 0x204);
// 4xkk - SNE Vx, byte
cpu = Cpu.init(.{});
try cpu.dispatch(0x4123);
expectEqual(cpu.pc, 0x204);
cpu = Cpu.init(.{});
cpu.registers[1] = 0x03;
try cpu.dispatch(0x4103);
expectEqual(cpu.pc, 0x202);
// 5xy0 - SE Vx, Vy
cpu = Cpu.init(.{});
cpu.registers[1] = 0x03;
cpu.registers[2] = 0x04;
try cpu.dispatch(0x5120);
expectEqual(cpu.pc, 0x202);
// 6xkk - LD Vx, byte
cpu = Cpu.init(.{});
try cpu.dispatch(0x6102);
expectEqual(cpu.registers[1], 0x02);
    // 7xkk - ADD Vx, byte
cpu = Cpu.init(.{});
try cpu.dispatch(0x7102);
expectEqual(cpu.registers[1], 0x02);
cpu = Cpu.init(.{});
cpu.registers[1] = 0x01;
try cpu.dispatch(0x7102);
expectEqual(cpu.registers[1], 0x03);
// 8xy0 - LD Vx, Vy
cpu = Cpu.init(.{});
cpu.registers[2] = 0x01;
try cpu.dispatch(0x8120);
expectEqual(cpu.registers[1], 0x01);
// 8xy1 - OR Vx, Vy
cpu = Cpu.init(.{});
cpu.registers[1] = 0x10;
cpu.registers[2] = 0x01;
try cpu.dispatch(0x8121);
expectEqual(cpu.registers[1], 0x11);
// 8xy2 - AND Vx, Vy
cpu = Cpu.init(.{});
cpu.registers[1] = 0x10;
cpu.registers[2] = 0x01;
try cpu.dispatch(0x8122);
expectEqual(cpu.registers[1], 0x00);
// 8xy3 - XOR Vx, Vy
cpu = Cpu.init(.{});
cpu.registers[1] = 0x01;
cpu.registers[2] = 0x01;
try cpu.dispatch(0x8123);
expectEqual(cpu.registers[1], 0x00);
// 8xy4 - ADD Vx, Vy
cpu = Cpu.init(.{});
cpu.registers[1] = 0x01;
cpu.registers[2] = 0x01;
try cpu.dispatch(0x8124);
expectEqual(cpu.registers[1], 0x2);
expectEqual(cpu.registers[0xF], 0x0);
cpu = Cpu.init(.{});
cpu.registers[1] = 0xFF;
cpu.registers[2] = 0x03;
try cpu.dispatch(0x8124);
expectEqual(cpu.registers[1], 0x2);
expectEqual(cpu.registers[0xF], 0x1);
// 8xy5 - SUB Vx, Vy
cpu = Cpu.init(.{});
cpu.registers[1] = 0xFF;
cpu.registers[2] = 0x03;
try cpu.dispatch(0x8125);
expectEqual(cpu.registers[1], 0xFC);
expectEqual(cpu.registers[0xF], 0x1);
cpu = Cpu.init(.{});
cpu.registers[1] = 0x02;
cpu.registers[2] = 0x03;
try cpu.dispatch(0x8125);
expectEqual(cpu.registers[1], 0xFF);
expectEqual(cpu.registers[0xF], 0x0);
// 8xy6 - SHR Vx {, Vy}
cpu = Cpu.init(.{});
cpu.registers[1] = 0x03;
try cpu.dispatch(0x8126);
expectEqual(cpu.registers[1], 0x1);
expectEqual(cpu.registers[0xF], 0x1);
cpu = Cpu.init(.{});
cpu.registers[1] = 0x02;
try cpu.dispatch(0x8126);
expectEqual(cpu.registers[1], 0x1);
expectEqual(cpu.registers[0xF], 0x0);
// 8xy7 - SUBN Vx, Vy
cpu = Cpu.init(.{});
cpu.registers[1] = 0x03;
cpu.registers[2] = 0xFF;
try cpu.dispatch(0x8127);
expectEqual(cpu.registers[1], 0xFC);
expectEqual(cpu.registers[0xF], 0x1);
cpu = Cpu.init(.{});
cpu.registers[1] = 0x03;
cpu.registers[2] = 0x02;
try cpu.dispatch(0x8127);
expectEqual(cpu.registers[1], 0xFF);
expectEqual(cpu.registers[0xF], 0x0);
// 8xyE - SHL Vx {, Vy}
cpu = Cpu.init(.{});
cpu.registers[1] = 0x01;
try cpu.dispatch(0x812E);
expectEqual(cpu.registers[1], 0x2);
expectEqual(cpu.registers[0xF], 0x0);
cpu = Cpu.init(.{});
cpu.registers[1] = 0x81;
try cpu.dispatch(0x812E);
expectEqual(cpu.registers[1], 0x2);
expectEqual(cpu.registers[0xF], 0x1);
// 9xy0 - SNE Vx, Vy
cpu = Cpu.init(.{});
cpu.registers[1] = 0x01;
cpu.registers[2] = 0x02;
try cpu.dispatch(0x9120);
expectEqual(cpu.pc, 0x204);
cpu = Cpu.init(.{});
cpu.registers[1] = 0x01;
cpu.registers[2] = 0x01;
try cpu.dispatch(0x9120);
expectEqual(cpu.pc, 0x202);
// Annn - LD I, addr
cpu = Cpu.init(.{});
try cpu.dispatch(0xA100);
expectEqual(cpu.index_register, 0x100);
// Bnnn - JP V0, addr
cpu = Cpu.init(.{});
try cpu.dispatch(0xB210);
expectEqual(cpu.pc, 0x210);
cpu = Cpu.init(.{});
cpu.registers[0] = 0x01;
try cpu.dispatch(0xB210);
expectEqual(cpu.pc, 0x211);
// Cxkk - RND Vx, byte
cpu = Cpu.init(.{});
try cpu.dispatch(0xC110);
expectEqual(cpu.pc, 0x202);
expect(cpu.registers[1] == 0 or cpu.registers[1] == 16);
// Dxyn - DRW Vx, Vy, nibble
cpu = Cpu.init(.{});
cpu.index_register = 0x200;
cpu.memory[0x200] = 0x01;
try cpu.dispatch(0xD001);
expectEqual(cpu.video.data[0], 0x00);
expectEqual(cpu.registers[0xF], 0x0);
cpu = Cpu.init(.{});
cpu.index_register = 0x200;
cpu.memory[0x200] = 0x01;
cpu.video.data[0x7] = 0x01;
try cpu.dispatch(0xD001);
expectEqual(cpu.video.data[0x7], 0x00);
expectEqual(cpu.registers[0xF], 0x1);
// Ex9E - SKP Vx
cpu = Cpu.init(.{});
cpu.registers[0x01] = 0x02;
cpu.keypad.keys[0x02] = 0x1;
try cpu.dispatch(0xE19E);
expectEqual(cpu.pc, 0x204);
// ExA1 - SKNP Vx
cpu = Cpu.init(.{});
cpu.registers[0x01] = 0x02;
cpu.keypad.keys[0x02] = 0x1;
try cpu.dispatch(0xE1A1);
expectEqual(cpu.pc, 0x202);
// Fx07 - LD Vx, DT
cpu = Cpu.init(.{ .delay_timer = 0x01 });
try cpu.dispatch(0xF107);
expectEqual(cpu.registers[1], 0x01);
// Fx15 - LD DT, Vx
cpu = Cpu.init(.{});
cpu.registers[1] = 0x05;
try cpu.dispatch(0xF115);
expectEqual(cpu.delay_timer, 0x05);
// Fx18 - LD ST, Vx
cpu = Cpu.init(.{});
cpu.registers[1] = 0x05;
try cpu.dispatch(0xF118);
expectEqual(cpu.sound_timer, 0x05);
// Fx29 - LD F, Vx
cpu = Cpu.init(.{});
try cpu.dispatch(0xF029);
expectEqual(cpu.index_register, 0x00);
cpu = Cpu.init(.{});
cpu.registers[0x01] = 0x01;
try cpu.dispatch(0xF129);
expectEqual(cpu.index_register, 0x05);
cpu = Cpu.init(.{});
cpu.registers[0x02] = 0x02;
try cpu.dispatch(0xF229);
expectEqual(cpu.index_register, 0x0A);
// Fx33 - LD B, Vx
cpu = Cpu.init(.{});
cpu.registers[0] = 0xFF;
cpu.index_register = 0x200;
try cpu.dispatch(0xF033);
expectEqual(cpu.memory[0x200], 0x02);
expectEqual(cpu.memory[0x201], 0x05);
expectEqual(cpu.memory[0x202], 0x05);
// Fx55 - LD [I], Vx
cpu = Cpu.init(.{});
cpu.registers[0] = 0x03;
cpu.registers[1] = 0x02;
cpu.registers[2] = 0x01;
cpu.index_register = 0x220;
try cpu.dispatch(0xF255);
expectEqual(cpu.memory[0x220], 0x03);
expectEqual(cpu.memory[0x221], 0x02);
expectEqual(cpu.memory[0x222], 0x01);
// Fx65 - LD Vx, [I]
cpu = Cpu.init(.{});
cpu.memory[0x200] = 0x01;
cpu.memory[0x201] = 0x02;
cpu.index_register = 0x200;
try cpu.dispatch(0xF165);
expectEqual(cpu.registers[0], 0x01);
expectEqual(cpu.registers[1], 0x02);
expectEqual(cpu.registers[2], 0x00);
cpu = Cpu.init(.{});
cpu.memory[0x200] = 0x01;
cpu.memory[0x201] = 0x02;
cpu.index_register = 0x200;
try cpu.dispatch(0xF265);
expectEqual(cpu.registers[0], 0x01);
expectEqual(cpu.registers[1], 0x02);
} | src/tests.zig |
const std = @import("std");
const utils = @import("utils.zig");
const AttributeInfo = @import("attributes.zig").AttributeInfo;
const ClassFile = @import("ClassFile.zig");
const ConstantPool = @import("ConstantPool.zig");
const FieldInfo = @This();
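// Field access flags as defined by the JVM class file format
// (e.g. ACC_PUBLIC = 0x0001, ACC_FINAL = 0x0010, ACC_ENUM = 0x4000);
// decode/encode below map them to and from the raw u16 bit mask.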
pub const AccessFlags = struct {
public: bool = false,
private: bool = false,
protected: bool = false,
static: bool = false,
final: bool = false,
@"volatile": bool = false,
transient: bool = false,
synthetic: bool = false,
enum_member: bool = false,
};
constant_pool: *ConstantPool,
access_flags: AccessFlags,
name_index: u16,
descriptor_index: u16,
attributes: std.ArrayList(AttributeInfo),
pub fn getName(self: FieldInfo) ConstantPool.Utf8Info {
return self.constant_pool.get(self.name_index).utf8;
}
pub fn getDescriptor(self: FieldInfo) ConstantPool.Utf8Info {
return self.constant_pool.get(self.descriptor_index).utf8;
}
pub fn decode(constant_pool: *ConstantPool, allocator: std.mem.Allocator, reader: anytype) !FieldInfo {
var access_flags_u = try reader.readIntBig(u16);
var name_index = try reader.readIntBig(u16);
var descriptor_index = try reader.readIntBig(u16);
// var att_count = try reader.readIntBig(u16);
// var att = try std.ArrayList(AttributeInfo).initCapacity(allocator, att_count);
// for (att.items) |*a| a.* = try AttributeInfo.decode(constant_pool, allocator, reader);
var attributes_length = try reader.readIntBig(u16);
var attributes_index: usize = 0;
var attributes = std.ArrayList(AttributeInfo).init(allocator);
    // Read exactly `attributes_length` attributes from the stream, keeping only
    // the ones we can decode; unknown attributes are consumed but not retained.
    while (attributes_index < attributes_length) : (attributes_index += 1) {
        var decoded = try AttributeInfo.decode(constant_pool, allocator, reader);
        if (decoded == .unknown) continue;
        try attributes.append(decoded);
    }
return FieldInfo{
.constant_pool = constant_pool,
.access_flags = .{
.public = utils.isPresent(u16, access_flags_u, 0x0001),
.private = utils.isPresent(u16, access_flags_u, 0x0002),
.protected = utils.isPresent(u16, access_flags_u, 0x0004),
.static = utils.isPresent(u16, access_flags_u, 0x0008),
.final = utils.isPresent(u16, access_flags_u, 0x0010),
.@"volatile" = utils.isPresent(u16, access_flags_u, 0x0040),
.transient = utils.isPresent(u16, access_flags_u, 0x0080),
.synthetic = utils.isPresent(u16, access_flags_u, 0x1000),
.enum_member = utils.isPresent(u16, access_flags_u, 0x4000),
},
.name_index = name_index,
.descriptor_index = descriptor_index,
.attributes = attributes,
};
}
pub fn encode(self: FieldInfo, writer: anytype) !void {
var access_flags_u: u16 = 0;
if (self.access_flags.public) utils.setPresent(u16, &access_flags_u, 0x0001);
if (self.access_flags.private) utils.setPresent(u16, &access_flags_u, 0x0002);
if (self.access_flags.protected) utils.setPresent(u16, &access_flags_u, 0x0004);
if (self.access_flags.static) utils.setPresent(u16, &access_flags_u, 0x0008);
if (self.access_flags.final) utils.setPresent(u16, &access_flags_u, 0x0010);
if (self.access_flags.@"volatile") utils.setPresent(u16, &access_flags_u, 0x0040);
if (self.access_flags.transient) utils.setPresent(u16, &access_flags_u, 0x0080);
if (self.access_flags.synthetic) utils.setPresent(u16, &access_flags_u, 0x1000);
if (self.access_flags.enum_member) utils.setPresent(u16, &access_flags_u, 0x4000);
try writer.writeIntBig(u16, access_flags_u);
try writer.writeIntBig(u16, self.name_index);
try writer.writeIntBig(u16, self.descriptor_index);
try writer.writeIntBig(u16, @intCast(u16, self.attributes.items.len));
for (self.attributes.items) |*att| try att.encode(writer);
}
pub fn deinit(self: FieldInfo) void {
for (self.attributes.items) |*att| att.deinit();
self.attributes.deinit();
} | src/FieldInfo.zig |
const NULL = @import("std").zig.c_translation.cast(?*anyopaque, @as(c_int, 0));
pub const wchar_t = c_int;
const xlib = @import("Xlib.zig");
const keys = @import("keysym.zig");
pub usingnamespace keys;
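// Bindings for the X11 XKB extension: the structs, events, and constants below
// mirror the C declarations in the XKB headers (XKBstr.h, XKB.h, XKBlib.h).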
pub const struct__XkbStateRec = extern struct {
group: u8,
locked_group: u8,
base_group: c_ushort,
latched_group: c_ushort,
mods: u8,
base_mods: u8,
latched_mods: u8,
locked_mods: u8,
compat_state: u8,
grab_mods: u8,
compat_grab_mods: u8,
lookup_mods: u8,
compat_lookup_mods: u8,
ptr_buttons: c_ushort,
};
pub const XkbStateRec = struct__XkbStateRec;
pub const XkbStatePtr = [*c]struct__XkbStateRec;
pub const struct__XkbMods = extern struct {
mask: u8,
real_mods: u8,
vmods: c_ushort,
};
pub const XkbModsRec = struct__XkbMods;
pub const XkbModsPtr = [*c]struct__XkbMods;
pub const struct__XkbKTMapEntry = extern struct {
active: c_int,
level: u8,
mods: XkbModsRec,
};
pub const XkbKTMapEntryRec = struct__XkbKTMapEntry;
pub const XkbKTMapEntryPtr = [*c]struct__XkbKTMapEntry;
pub const struct__XkbKeyType = extern struct {
mods: XkbModsRec,
num_levels: u8,
map_count: u8,
map: XkbKTMapEntryPtr,
preserve: XkbModsPtr,
name: xlib.Atom,
level_names: [*c]xlib.Atom,
};
pub const XkbKeyTypeRec = struct__XkbKeyType;
pub const XkbKeyTypePtr = [*c]struct__XkbKeyType;
pub const struct__XkbBehavior = extern struct {
type: u8,
data: u8,
};
pub const XkbBehavior = struct__XkbBehavior;
pub const struct__XkbAnyAction = extern struct {
type: u8,
data: [7]u8,
};
pub const XkbAnyAction = struct__XkbAnyAction;
pub const struct__XkbModAction = extern struct {
type: u8,
flags: u8,
mask: u8,
real_mods: u8,
vmods1: u8,
vmods2: u8,
};
pub const XkbModAction = struct__XkbModAction;
pub const struct__XkbGroupAction = extern struct {
type: u8,
flags: u8,
group_XXX: u8,
};
pub const XkbGroupAction = struct__XkbGroupAction;
pub const struct__XkbISOAction = extern struct {
type: u8,
flags: u8,
mask: u8,
real_mods: u8,
group_XXX: u8,
affect: u8,
vmods1: u8,
vmods2: u8,
};
pub const XkbISOAction = struct__XkbISOAction;
pub const struct__XkbPtrAction = extern struct {
type: u8,
flags: u8,
high_XXX: u8,
low_XXX: u8,
high_YYY: u8,
low_YYY: u8,
};
pub const XkbPtrAction = struct__XkbPtrAction;
pub const struct__XkbPtrBtnAction = extern struct {
type: u8,
flags: u8,
count: u8,
button: u8,
};
pub const XkbPtrBtnAction = struct__XkbPtrBtnAction;
pub const struct__XkbPtrDfltAction = extern struct {
type: u8,
flags: u8,
affect: u8,
valueXXX: u8,
};
pub const XkbPtrDfltAction = struct__XkbPtrDfltAction;
pub const struct__XkbSwitchScreenAction = extern struct {
type: u8,
flags: u8,
screenXXX: u8,
};
pub const XkbSwitchScreenAction = struct__XkbSwitchScreenAction;
pub const struct__XkbCtrlsAction = extern struct {
type: u8,
flags: u8,
ctrls3: u8,
ctrls2: u8,
ctrls1: u8,
ctrls0: u8,
};
pub const XkbCtrlsAction = struct__XkbCtrlsAction;
pub const struct__XkbMessageAction = extern struct {
type: u8,
flags: u8,
message: [6]u8,
};
pub const XkbMessageAction = struct__XkbMessageAction;
pub const struct__XkbRedirectKeyAction = extern struct {
type: u8,
new_key: u8,
mods_mask: u8,
mods: u8,
vmods_mask0: u8,
vmods_mask1: u8,
vmods0: u8,
vmods1: u8,
};
pub const XkbRedirectKeyAction = struct__XkbRedirectKeyAction;
pub const struct__XkbDeviceBtnAction = extern struct {
type: u8,
flags: u8,
count: u8,
button: u8,
device: u8,
};
pub const XkbDeviceBtnAction = struct__XkbDeviceBtnAction;
pub const struct__XkbDeviceValuatorAction = extern struct {
type: u8,
device: u8,
v1_what: u8,
v1_ndx: u8,
v1_value: u8,
v2_what: u8,
v2_ndx: u8,
v2_value: u8,
};
pub const XkbDeviceValuatorAction = struct__XkbDeviceValuatorAction;
pub const union__XkbAction = extern union {
any: XkbAnyAction,
mods: XkbModAction,
group: XkbGroupAction,
iso: XkbISOAction,
ptr: XkbPtrAction,
btn: XkbPtrBtnAction,
dflt: XkbPtrDfltAction,
screen: XkbSwitchScreenAction,
ctrls: XkbCtrlsAction,
msg: XkbMessageAction,
redirect: XkbRedirectKeyAction,
devbtn: XkbDeviceBtnAction,
devval: XkbDeviceValuatorAction,
type: u8,
};
pub const XkbAction = union__XkbAction;
pub const struct__XkbControls = extern struct {
mk_dflt_btn: u8,
num_groups: u8,
groups_wrap: u8,
internal: XkbModsRec,
ignore_lock: XkbModsRec,
enabled_ctrls: c_uint,
repeat_delay: c_ushort,
repeat_interval: c_ushort,
slow_keys_delay: c_ushort,
debounce_delay: c_ushort,
mk_delay: c_ushort,
mk_interval: c_ushort,
mk_time_to_max: c_ushort,
mk_max_speed: c_ushort,
mk_curve: c_short,
ax_options: c_ushort,
ax_timeout: c_ushort,
axt_opts_mask: c_ushort,
axt_opts_values: c_ushort,
axt_ctrls_mask: c_uint,
axt_ctrls_values: c_uint,
per_key_repeat: [32]u8,
};
pub const XkbControlsRec = struct__XkbControls;
pub const XkbControlsPtr = [*c]struct__XkbControls;
pub const struct__XkbServerMapRec = extern struct {
num_acts: c_ushort,
size_acts: c_ushort,
acts: [*c]XkbAction,
behaviors: [*c]XkbBehavior,
key_acts: [*c]c_ushort,
explicit: [*c]u8,
vmods: [16]u8,
vmodmap: [*c]c_ushort,
};
pub const XkbServerMapRec = struct__XkbServerMapRec;
pub const XkbServerMapPtr = [*c]struct__XkbServerMapRec;
pub const struct__XkbSymMapRec = extern struct {
kt_index: [4]u8,
group_info: u8,
width: u8,
offset: c_ushort,
};
pub const XkbSymMapRec = struct__XkbSymMapRec;
pub const XkbSymMapPtr = [*c]struct__XkbSymMapRec;
pub const struct__XkbClientMapRec = extern struct {
size_types: u8,
num_types: u8,
types: XkbKeyTypePtr,
size_syms: c_ushort,
num_syms: c_ushort,
syms: [*c]xlib.KeySym,
key_sym_map: XkbSymMapPtr,
modmap: [*c]u8,
};
pub const XkbClientMapRec = struct__XkbClientMapRec;
pub const XkbClientMapPtr = [*c]struct__XkbClientMapRec;
pub const struct__XkbSymInterpretRec = extern struct {
sym: xlib.KeySym,
flags: u8,
match: u8,
mods: u8,
virtual_mod: u8,
act: XkbAnyAction,
};
pub const XkbSymInterpretRec = struct__XkbSymInterpretRec;
pub const XkbSymInterpretPtr = [*c]struct__XkbSymInterpretRec;
pub const struct__XkbCompatMapRec = extern struct {
sym_interpret: XkbSymInterpretPtr,
groups: [4]XkbModsRec,
num_si: c_ushort,
size_si: c_ushort,
};
pub const XkbCompatMapRec = struct__XkbCompatMapRec;
pub const XkbCompatMapPtr = [*c]struct__XkbCompatMapRec;
pub const struct__XkbIndicatorMapRec = extern struct {
flags: u8,
which_groups: u8,
groups: u8,
which_mods: u8,
mods: XkbModsRec,
ctrls: c_uint,
};
pub const XkbIndicatorMapRec = struct__XkbIndicatorMapRec;
pub const XkbIndicatorMapPtr = [*c]struct__XkbIndicatorMapRec;
pub const struct__XkbIndicatorRec = extern struct {
phys_indicators: c_ulong,
maps: [32]XkbIndicatorMapRec,
};
pub const XkbIndicatorRec = struct__XkbIndicatorRec;
pub const XkbIndicatorPtr = [*c]struct__XkbIndicatorRec;
pub const struct__XkbKeyNameRec = extern struct {
name: [4]u8,
};
pub const XkbKeyNameRec = struct__XkbKeyNameRec;
pub const XkbKeyNamePtr = [*c]struct__XkbKeyNameRec;
pub const struct__XkbKeyAliasRec = extern struct {
real: [4]u8,
alias: [4]u8,
};
pub const XkbKeyAliasRec = struct__XkbKeyAliasRec;
pub const XkbKeyAliasPtr = [*c]struct__XkbKeyAliasRec;
pub const struct__XkbNamesRec = extern struct {
keycodes: xlib.Atom,
geometry: xlib.Atom,
symbols: xlib.Atom,
types: xlib.Atom,
compat: xlib.Atom,
vmods: [16]xlib.Atom,
indicators: [32]xlib.Atom,
groups: [4]xlib.Atom,
keys: XkbKeyNamePtr,
key_aliases: XkbKeyAliasPtr,
radio_groups: [*c]xlib.Atom,
phys_symbols: xlib.Atom,
num_keys: u8,
num_key_aliases: u8,
num_rg: c_ushort,
};
pub const XkbNamesRec = struct__XkbNamesRec;
pub const XkbNamesPtr = [*c]struct__XkbNamesRec;
pub const struct__XkbGeometry = opaque {};
pub const XkbGeometryPtr = ?*struct__XkbGeometry;
pub const struct__XkbDesc = extern struct {
dpy: ?*xlib.Display,
flags: c_ushort,
device_spec: c_ushort,
min_key_code: xlib.KeyCode,
max_key_code: xlib.KeyCode,
ctrls: XkbControlsPtr,
server: XkbServerMapPtr,
map: XkbClientMapPtr,
indicators: XkbIndicatorPtr,
names: XkbNamesPtr,
compat: XkbCompatMapPtr,
geom: XkbGeometryPtr,
};
pub const XkbDescRec = struct__XkbDesc;
pub const XkbDescPtr = [*c]struct__XkbDesc;
pub const struct__XkbMapChanges = extern struct {
changed: c_ushort,
min_key_code: xlib.KeyCode,
max_key_code: xlib.KeyCode,
first_type: u8,
num_types: u8,
first_key_sym: xlib.KeyCode,
num_key_syms: u8,
first_key_act: xlib.KeyCode,
num_key_acts: u8,
first_key_behavior: xlib.KeyCode,
num_key_behaviors: u8,
first_key_explicit: xlib.KeyCode,
num_key_explicit: u8,
first_modmap_key: xlib.KeyCode,
num_modmap_keys: u8,
first_vmodmap_key: xlib.KeyCode,
num_vmodmap_keys: u8,
pad: u8,
vmods: c_ushort,
};
pub const XkbMapChangesRec = struct__XkbMapChanges;
pub const XkbMapChangesPtr = [*c]struct__XkbMapChanges;
pub const struct__XkbControlsChanges = extern struct {
changed_ctrls: c_uint,
enabled_ctrls_changes: c_uint,
num_groups_changed: c_int,
};
pub const XkbControlsChangesRec = struct__XkbControlsChanges;
pub const XkbControlsChangesPtr = [*c]struct__XkbControlsChanges;
pub const struct__XkbIndicatorChanges = extern struct {
state_changes: c_uint,
map_changes: c_uint,
};
pub const XkbIndicatorChangesRec = struct__XkbIndicatorChanges;
pub const XkbIndicatorChangesPtr = [*c]struct__XkbIndicatorChanges;
pub const struct__XkbNameChanges = extern struct {
changed: c_uint,
first_type: u8,
num_types: u8,
first_lvl: u8,
num_lvls: u8,
num_aliases: u8,
num_rg: u8,
first_key: u8,
num_keys: u8,
changed_vmods: c_ushort,
changed_indicators: c_ulong,
changed_groups: u8,
};
pub const XkbNameChangesRec = struct__XkbNameChanges;
pub const XkbNameChangesPtr = [*c]struct__XkbNameChanges;
pub const struct__XkbCompatChanges = extern struct {
changed_groups: u8,
first_si: c_ushort,
num_si: c_ushort,
};
pub const XkbCompatChangesRec = struct__XkbCompatChanges;
pub const XkbCompatChangesPtr = [*c]struct__XkbCompatChanges;
pub const struct__XkbChanges = extern struct {
device_spec: c_ushort,
state_changes: c_ushort,
map: XkbMapChangesRec,
ctrls: XkbControlsChangesRec,
indicators: XkbIndicatorChangesRec,
names: XkbNameChangesRec,
compat: XkbCompatChangesRec,
};
pub const XkbChangesRec = struct__XkbChanges;
pub const XkbChangesPtr = [*c]struct__XkbChanges;
pub const struct__XkbComponentNames = extern struct {
keymap: [*c]u8,
keycodes: [*c]u8,
types: [*c]u8,
compat: [*c]u8,
symbols: [*c]u8,
geometry: [*c]u8,
};
pub const XkbComponentNamesRec = struct__XkbComponentNames;
pub const XkbComponentNamesPtr = [*c]struct__XkbComponentNames;
pub const struct__XkbComponentName = extern struct {
flags: c_ushort,
name: [*c]u8,
};
pub const XkbComponentNameRec = struct__XkbComponentName;
pub const XkbComponentNamePtr = [*c]struct__XkbComponentName;
pub const struct__XkbComponentList = extern struct {
num_keymaps: c_int,
num_keycodes: c_int,
num_types: c_int,
num_compat: c_int,
num_symbols: c_int,
num_geometry: c_int,
keymaps: XkbComponentNamePtr,
keycodes: XkbComponentNamePtr,
types: XkbComponentNamePtr,
compat: XkbComponentNamePtr,
symbols: XkbComponentNamePtr,
geometry: XkbComponentNamePtr,
};
pub const XkbComponentListRec = struct__XkbComponentList;
pub const XkbComponentListPtr = [*c]struct__XkbComponentList;
pub const struct__XkbDeviceLedInfo = extern struct {
led_class: c_ushort,
led_id: c_ushort,
phys_indicators: c_uint,
maps_present: c_uint,
names_present: c_uint,
state: c_uint,
names: [32]xlib.Atom,
maps: [32]XkbIndicatorMapRec,
};
pub const XkbDeviceLedInfoRec = struct__XkbDeviceLedInfo;
pub const XkbDeviceLedInfoPtr = [*c]struct__XkbDeviceLedInfo;
pub const struct__XkbDeviceInfo = extern struct {
name: [*c]u8,
type: xlib.Atom,
device_spec: c_ushort,
has_own_state: c_int,
supported: c_ushort,
unsupported: c_ushort,
num_btns: c_ushort,
btn_acts: [*c]XkbAction,
sz_leds: c_ushort,
num_leds: c_ushort,
dflt_kbd_fb: c_ushort,
dflt_led_fb: c_ushort,
leds: XkbDeviceLedInfoPtr,
};
pub const XkbDeviceInfoRec = struct__XkbDeviceInfo;
pub const XkbDeviceInfoPtr = [*c]struct__XkbDeviceInfo;
pub const struct__XkbDeviceLedChanges = extern struct {
led_class: c_ushort,
led_id: c_ushort,
defined: c_uint,
next: [*c]struct__XkbDeviceLedChanges,
};
pub const XkbDeviceLedChangesRec = struct__XkbDeviceLedChanges;
pub const XkbDeviceLedChangesPtr = [*c]struct__XkbDeviceLedChanges;
pub const struct__XkbDeviceChanges = extern struct {
changed: c_uint,
first_btn: c_ushort,
num_btns: c_ushort,
leds: XkbDeviceLedChangesRec,
};
pub const XkbDeviceChangesRec = struct__XkbDeviceChanges;
pub const XkbDeviceChangesPtr = [*c]struct__XkbDeviceChanges;
pub const struct__XkbAnyEvent = extern struct {
type: c_int,
serial: c_ulong,
send_event: c_int,
display: ?*xlib.Display,
time: xlib.Time,
xkb_type: c_int,
device: c_uint,
};
pub const XkbAnyEvent = struct__XkbAnyEvent;
pub const struct__XkbNewKeyboardNotify = extern struct {
type: c_int,
serial: c_ulong,
send_event: c_int,
display: ?*xlib.Display,
time: xlib.Time,
xkb_type: c_int,
device: c_int,
old_device: c_int,
min_key_code: c_int,
max_key_code: c_int,
old_min_key_code: c_int,
old_max_key_code: c_int,
changed: c_uint,
req_major: u8,
req_minor: u8,
};
pub const XkbNewKeyboardNotifyEvent = struct__XkbNewKeyboardNotify;
pub const struct__XkbMapNotifyEvent = extern struct {
type: c_int,
serial: c_ulong,
send_event: c_int,
display: ?*xlib.Display,
time: xlib.Time,
xkb_type: c_int,
device: c_int,
changed: c_uint,
flags: c_uint,
first_type: c_int,
num_types: c_int,
min_key_code: xlib.KeyCode,
max_key_code: xlib.KeyCode,
first_key_sym: xlib.KeyCode,
first_key_act: xlib.KeyCode,
first_key_behavior: xlib.KeyCode,
first_key_explicit: xlib.KeyCode,
first_modmap_key: xlib.KeyCode,
first_vmodmap_key: xlib.KeyCode,
num_key_syms: c_int,
num_key_acts: c_int,
num_key_behaviors: c_int,
num_key_explicit: c_int,
num_modmap_keys: c_int,
num_vmodmap_keys: c_int,
vmods: c_uint,
};
pub const XkbMapNotifyEvent = struct__XkbMapNotifyEvent;
pub const struct__XkbStateNotifyEvent = extern struct {
type: c_int,
serial: c_ulong,
send_event: c_int,
display: ?*xlib.Display,
time: xlib.Time,
xkb_type: c_int,
device: c_int,
changed: c_uint,
group: c_int,
base_group: c_int,
latched_group: c_int,
locked_group: c_int,
mods: c_uint,
base_mods: c_uint,
latched_mods: c_uint,
locked_mods: c_uint,
compat_state: c_int,
grab_mods: u8,
compat_grab_mods: u8,
lookup_mods: u8,
compat_lookup_mods: u8,
ptr_buttons: c_int,
keycode: xlib.KeyCode,
event_type: u8,
req_major: u8,
req_minor: u8,
};
pub const XkbStateNotifyEvent = struct__XkbStateNotifyEvent;
pub const struct__XkbControlsNotify = extern struct {
type: c_int,
serial: c_ulong,
send_event: c_int,
display: ?*xlib.Display,
time: xlib.Time,
xkb_type: c_int,
device: c_int,
changed_ctrls: c_uint,
enabled_ctrls: c_uint,
enabled_ctrl_changes: c_uint,
num_groups: c_int,
keycode: xlib.KeyCode,
event_type: u8,
req_major: u8,
req_minor: u8,
};
pub const XkbControlsNotifyEvent = struct__XkbControlsNotify;
pub const struct__XkbIndicatorNotify = extern struct {
type: c_int,
serial: c_ulong,
send_event: c_int,
display: ?*xlib.Display,
time: xlib.Time,
xkb_type: c_int,
device: c_int,
changed: c_uint,
state: c_uint,
};
pub const XkbIndicatorNotifyEvent = struct__XkbIndicatorNotify;
pub const struct__XkbNamesNotify = extern struct {
type: c_int,
serial: c_ulong,
send_event: c_int,
display: ?*xlib.Display,
time: xlib.Time,
xkb_type: c_int,
device: c_int,
changed: c_uint,
first_type: c_int,
num_types: c_int,
first_lvl: c_int,
num_lvls: c_int,
num_aliases: c_int,
num_radio_groups: c_int,
changed_vmods: c_uint,
changed_groups: c_uint,
changed_indicators: c_uint,
first_key: c_int,
num_keys: c_int,
};
pub const XkbNamesNotifyEvent = struct__XkbNamesNotify;
pub const struct__XkbCompatMapNotify = extern struct {
type: c_int,
serial: c_ulong,
send_event: c_int,
display: ?*xlib.Display,
time: xlib.Time,
xkb_type: c_int,
device: c_int,
changed_groups: c_uint,
first_si: c_int,
num_si: c_int,
num_total_si: c_int,
};
pub const XkbCompatMapNotifyEvent = struct__XkbCompatMapNotify;
pub const struct__XkbBellNotify = extern struct {
type: c_int,
serial: c_ulong,
send_event: c_int,
display: ?*xlib.Display,
time: xlib.Time,
xkb_type: c_int,
device: c_int,
percent: c_int,
pitch: c_int,
duration: c_int,
bell_class: c_int,
bell_id: c_int,
name: xlib.Atom,
window: xlib.Window,
event_only: c_int,
};
pub const XkbBellNotifyEvent = struct__XkbBellNotify;
pub const struct__XkbActionMessage = extern struct {
type: c_int,
serial: c_ulong,
send_event: c_int,
display: ?*xlib.Display,
time: xlib.Time,
xkb_type: c_int,
device: c_int,
keycode: xlib.KeyCode,
press: c_int,
key_event_follows: c_int,
group: c_int,
mods: c_uint,
message: [7]u8,
};
pub const XkbActionMessageEvent = struct__XkbActionMessage;
pub const struct__XkbAccessXNotify = extern struct {
type: c_int,
serial: c_ulong,
send_event: c_int,
display: ?*xlib.Display,
time: xlib.Time,
xkb_type: c_int,
device: c_int,
detail: c_int,
keycode: c_int,
sk_delay: c_int,
debounce_delay: c_int,
};
pub const XkbAccessXNotifyEvent = struct__XkbAccessXNotify;
pub const struct__XkbExtensionDeviceNotify = extern struct {
type: c_int,
serial: c_ulong,
send_event: c_int,
display: ?*xlib.Display,
time: xlib.Time,
xkb_type: c_int,
device: c_int,
reason: c_uint,
supported: c_uint,
unsupported: c_uint,
first_btn: c_int,
num_btns: c_int,
leds_defined: c_uint,
led_state: c_uint,
led_class: c_int,
led_id: c_int,
};
pub const XkbExtensionDeviceNotifyEvent = struct__XkbExtensionDeviceNotify;
pub const union__XkbEvent = extern union {
type: c_int,
any: XkbAnyEvent,
new_kbd: XkbNewKeyboardNotifyEvent,
map: XkbMapNotifyEvent,
state: XkbStateNotifyEvent,
ctrls: XkbControlsNotifyEvent,
indicators: XkbIndicatorNotifyEvent,
names: XkbNamesNotifyEvent,
compat: XkbCompatMapNotifyEvent,
bell: XkbBellNotifyEvent,
message: XkbActionMessageEvent,
accessx: XkbAccessXNotifyEvent,
device: XkbExtensionDeviceNotifyEvent,
core: xlib.XEvent,
};
pub const XkbEvent = union__XkbEvent;
pub const struct__XkbKbdDpyState = opaque {};
pub const XkbKbdDpyStateRec = struct__XkbKbdDpyState;
pub const XkbKbdDpyStatePtr = ?*struct__XkbKbdDpyState;
pub extern fn XkbIgnoreExtension(c_int) c_int;
pub extern fn XkbOpenDisplay([*c]const u8, [*c]c_int, [*c]c_int, [*c]c_int, [*c]c_int, [*c]c_int) ?*xlib.Display;
pub extern fn XkbQueryExtension(?*xlib.Display, [*c]c_int, [*c]c_int, [*c]c_int, [*c]c_int, [*c]c_int) c_int;
pub extern fn XkbUseExtension(?*xlib.Display, [*c]c_int, [*c]c_int) c_int;
pub extern fn XkbLibraryVersion([*c]c_int, [*c]c_int) c_int;
pub extern fn XkbSetXlibControls(?*xlib.Display, c_uint, c_uint) c_uint;
pub extern fn XkbGetXlibControls(?*xlib.Display) c_uint;
pub extern fn XkbXlibControlsImplemented() c_uint;
pub const XkbInternAtomFunc = ?fn (?*xlib.Display, [*c]const u8, c_int) callconv(.C) xlib.Atom;
pub const XkbGetAtomNameFunc = ?fn (?*xlib.Display, xlib.Atom) callconv(.C) [*c]u8;
pub extern fn XkbSetAtomFuncs(XkbInternAtomFunc, XkbGetAtomNameFunc) void;
pub extern fn XkbKeycodeToKeysym(?*xlib.Display, xlib.KeyCode, c_int, c_int) xlib.KeySym;
pub extern fn XkbKeysymToModifiers(?*xlib.Display, xlib.KeySym) c_uint;
pub extern fn XkbLookupKeySym(?*xlib.Display, xlib.KeyCode, c_uint, [*c]c_uint, [*c]xlib.KeySym) c_int;
pub extern fn XkbLookupKeyBinding(?*xlib.Display, xlib.KeySym, c_uint, [*c]u8, c_int, [*c]c_int) c_int;
pub extern fn XkbTranslateKeyCode(XkbDescPtr, xlib.KeyCode, c_uint, [*c]c_uint, [*c]xlib.KeySym) c_int;
pub extern fn XkbTranslateKeySym(?*xlib.Display, [*c]xlib.KeySym, c_uint, [*c]u8, c_int, [*c]c_int) c_int;
pub extern fn XkbSetAutoRepeatRate(?*xlib.Display, c_uint, c_uint, c_uint) c_int;
pub extern fn XkbGetAutoRepeatRate(?*xlib.Display, c_uint, [*c]c_uint, [*c]c_uint) c_int;
pub extern fn XkbChangeEnabledControls(?*xlib.Display, c_uint, c_uint, c_uint) c_int;
pub extern fn XkbDeviceBell(?*xlib.Display, xlib.Window, c_int, c_int, c_int, c_int, xlib.Atom) c_int;
pub extern fn XkbForceDeviceBell(?*xlib.Display, c_int, c_int, c_int, c_int) c_int;
pub extern fn XkbDeviceBellEvent(?*xlib.Display, xlib.Window, c_int, c_int, c_int, c_int, xlib.Atom) c_int;
pub extern fn XkbBell(?*xlib.Display, xlib.Window, c_int, xlib.Atom) c_int;
pub extern fn XkbForceBell(?*xlib.Display, c_int) c_int;
pub extern fn XkbBellEvent(?*xlib.Display, xlib.Window, c_int, xlib.Atom) c_int;
pub extern fn XkbSelectEvents(?*xlib.Display, c_uint, c_uint, c_uint) c_int;
pub extern fn XkbSelectEventDetails(?*xlib.Display, c_uint, c_uint, c_ulong, c_ulong) c_int;
pub extern fn XkbNoteMapChanges(XkbMapChangesPtr, [*c]XkbMapNotifyEvent, c_uint) void;
pub extern fn XkbNoteNameChanges(XkbNameChangesPtr, [*c]XkbNamesNotifyEvent, c_uint) void;
pub extern fn XkbGetIndicatorState(?*xlib.Display, c_uint, [*c]c_uint) c_int;
pub extern fn XkbGetDeviceIndicatorState(?*xlib.Display, c_uint, c_uint, c_uint, [*c]c_uint) c_int;
pub extern fn XkbGetIndicatorMap(?*xlib.Display, c_ulong, XkbDescPtr) c_int;
pub extern fn XkbSetIndicatorMap(?*xlib.Display, c_ulong, XkbDescPtr) c_int;
pub extern fn XkbGetNamedIndicator(?*xlib.Display, xlib.Atom, [*c]c_int, [*c]c_int, XkbIndicatorMapPtr, [*c]c_int) c_int;
pub extern fn XkbGetNamedDeviceIndicator(?*xlib.Display, c_uint, c_uint, c_uint, xlib.Atom, [*c]c_int, [*c]c_int, XkbIndicatorMapPtr, [*c]c_int) c_int;
pub extern fn XkbSetNamedIndicator(?*xlib.Display, xlib.Atom, c_int, c_int, c_int, XkbIndicatorMapPtr) c_int;
pub extern fn XkbSetNamedDeviceIndicator(?*xlib.Display, c_uint, c_uint, c_uint, xlib.Atom, c_int, c_int, c_int, XkbIndicatorMapPtr) c_int;
pub extern fn XkbLockModifiers(?*xlib.Display, c_uint, c_uint, c_uint) c_int;
pub extern fn XkbLatchModifiers(?*xlib.Display, c_uint, c_uint, c_uint) c_int;
pub extern fn XkbLockGroup(?*xlib.Display, c_uint, c_uint) c_int;
pub extern fn XkbLatchGroup(?*xlib.Display, c_uint, c_uint) c_int;
pub extern fn XkbSetServerInternalMods(?*xlib.Display, c_uint, c_uint, c_uint, c_uint, c_uint) c_int;
pub extern fn XkbSetIgnoreLockMods(?*xlib.Display, c_uint, c_uint, c_uint, c_uint, c_uint) c_int;
pub extern fn XkbVirtualModsToReal(XkbDescPtr, c_uint, [*c]c_uint) c_int;
pub extern fn XkbComputeEffectiveMap(XkbDescPtr, XkbKeyTypePtr, [*c]u8) c_int;
pub extern fn XkbInitCanonicalKeyTypes(XkbDescPtr, c_uint, c_int) c_int;
pub extern fn XkbAllocKeyboard() XkbDescPtr;
pub extern fn XkbFreeKeyboard(XkbDescPtr, c_uint, c_int) void;
pub extern fn XkbAllocClientMap(XkbDescPtr, c_uint, c_uint) c_int;
pub extern fn XkbAllocServerMap(XkbDescPtr, c_uint, c_uint) c_int;
pub extern fn XkbFreeClientMap(XkbDescPtr, c_uint, c_int) void;
pub extern fn XkbFreeServerMap(XkbDescPtr, c_uint, c_int) void;
pub extern fn XkbAddKeyType(XkbDescPtr, xlib.Atom, c_int, c_int, c_int) XkbKeyTypePtr;
pub extern fn XkbAllocIndicatorMaps(XkbDescPtr) c_int;
pub extern fn XkbFreeIndicatorMaps(XkbDescPtr) void;
pub extern fn XkbGetMap(?*xlib.Display, c_uint, c_uint) XkbDescPtr;
pub extern fn XkbGetUpdatedMap(?*xlib.Display, c_uint, XkbDescPtr) c_int;
pub extern fn XkbGetMapChanges(?*xlib.Display, XkbDescPtr, XkbMapChangesPtr) c_int;
pub extern fn XkbRefreshKeyboardMapping([*c]XkbMapNotifyEvent) c_int;
pub extern fn XkbGetKeyTypes(?*xlib.Display, c_uint, c_uint, XkbDescPtr) c_int;
pub extern fn XkbGetKeySyms(?*xlib.Display, c_uint, c_uint, XkbDescPtr) c_int;
pub extern fn XkbGetKeyActions(?*xlib.Display, c_uint, c_uint, XkbDescPtr) c_int;
pub extern fn XkbGetKeyBehaviors(?*xlib.Display, c_uint, c_uint, XkbDescPtr) c_int;
pub extern fn XkbGetVirtualMods(?*xlib.Display, c_uint, XkbDescPtr) c_int;
pub extern fn XkbGetKeyExplicitComponents(?*xlib.Display, c_uint, c_uint, XkbDescPtr) c_int;
pub extern fn XkbGetKeyModifierMap(?*xlib.Display, c_uint, c_uint, XkbDescPtr) c_int;
pub extern fn XkbGetKeyVirtualModMap(?*xlib.Display, c_uint, c_uint, XkbDescPtr) c_int;
pub extern fn XkbAllocControls(XkbDescPtr, c_uint) c_int;
pub extern fn XkbFreeControls(XkbDescPtr, c_uint, c_int) void;
pub extern fn XkbGetControls(?*xlib.Display, c_ulong, XkbDescPtr) c_int;
pub extern fn XkbSetControls(?*xlib.Display, c_ulong, XkbDescPtr) c_int;
pub extern fn XkbNoteControlsChanges(XkbControlsChangesPtr, [*c]XkbControlsNotifyEvent, c_uint) void;
pub extern fn XkbAllocCompatMap(XkbDescPtr, c_uint, c_uint) c_int;
pub extern fn XkbFreeCompatMap(XkbDescPtr, c_uint, c_int) void;
pub extern fn XkbGetCompatMap(?*xlib.Display, c_uint, XkbDescPtr) c_int;
pub extern fn XkbSetCompatMap(?*xlib.Display, c_uint, XkbDescPtr, c_int) c_int;
pub extern fn XkbAddSymInterpret(XkbDescPtr, XkbSymInterpretPtr, c_int, XkbChangesPtr) XkbSymInterpretPtr;
pub extern fn XkbAllocNames(XkbDescPtr, c_uint, c_int, c_int) c_int;
pub extern fn XkbGetNames(?*xlib.Display, c_uint, XkbDescPtr) c_int;
pub extern fn XkbSetNames(?*xlib.Display, c_uint, c_uint, c_uint, XkbDescPtr) c_int;
pub extern fn XkbChangeNames(?*xlib.Display, XkbDescPtr, XkbNameChangesPtr) c_int;
pub extern fn XkbFreeNames(XkbDescPtr, c_uint, c_int) void;
pub extern fn XkbGetState(?*xlib.Display, c_uint, XkbStatePtr) c_int;
pub extern fn XkbSetMap(?*xlib.Display, c_uint, XkbDescPtr) c_int;
pub extern fn XkbChangeMap(?*xlib.Display, XkbDescPtr, XkbMapChangesPtr) c_int;
pub extern fn XkbSetDetectableAutoRepeat(?*xlib.Display, c_int, [*c]c_int) c_int;
pub extern fn XkbGetDetectableAutoRepeat(?*xlib.Display, [*c]c_int) c_int;
pub extern fn XkbSetAutoResetControls(?*xlib.Display, c_uint, [*c]c_uint, [*c]c_uint) c_int;
pub extern fn XkbGetAutoResetControls(?*xlib.Display, [*c]c_uint, [*c]c_uint) c_int;
pub extern fn XkbSetPerClientControls(?*xlib.Display, c_uint, [*c]c_uint) c_int;
pub extern fn XkbGetPerClientControls(?*xlib.Display, [*c]c_uint) c_int;
pub extern fn XkbCopyKeyType(XkbKeyTypePtr, XkbKeyTypePtr) c_int;
pub extern fn XkbCopyKeyTypes(XkbKeyTypePtr, XkbKeyTypePtr, c_int) c_int;
pub extern fn XkbResizeKeyType(XkbDescPtr, c_int, c_int, c_int, c_int) c_int;
pub extern fn XkbResizeKeySyms(XkbDescPtr, c_int, c_int) [*c]xlib.KeySym;
pub extern fn XkbResizeKeyActions(XkbDescPtr, c_int, c_int) [*c]XkbAction;
pub extern fn XkbChangeTypesOfKey(XkbDescPtr, c_int, c_int, c_uint, [*c]c_int, XkbMapChangesPtr) c_int;
pub extern fn XkbChangeKeycodeRange(XkbDescPtr, c_int, c_int, XkbChangesPtr) c_int;
pub extern fn XkbListComponents(?*xlib.Display, c_uint, XkbComponentNamesPtr, [*c]c_int) XkbComponentListPtr;
pub extern fn XkbFreeComponentList(XkbComponentListPtr) void;
pub extern fn XkbGetKeyboard(?*xlib.Display, c_uint, c_uint) XkbDescPtr;
pub extern fn XkbGetKeyboardByName(?*xlib.Display, c_uint, XkbComponentNamesPtr, c_uint, c_uint, c_int) XkbDescPtr;
pub extern fn XkbKeyTypesForCoreSymbols(XkbDescPtr, c_int, [*c]xlib.KeySym, c_uint, [*c]c_int, [*c]xlib.KeySym) c_int;
pub extern fn XkbApplyCompatMapToKey(XkbDescPtr, xlib.KeyCode, XkbChangesPtr) c_int;
pub extern fn XkbUpdateMapFromCore(XkbDescPtr, xlib.KeyCode, c_int, c_int, [*c]xlib.KeySym, XkbChangesPtr) c_int;
pub extern fn XkbAddDeviceLedInfo(XkbDeviceInfoPtr, c_uint, c_uint) XkbDeviceLedInfoPtr;
pub extern fn XkbResizeDeviceButtonActions(XkbDeviceInfoPtr, c_uint) c_int;
pub extern fn XkbAllocDeviceInfo(c_uint, c_uint, c_uint) XkbDeviceInfoPtr;
pub extern fn XkbFreeDeviceInfo(XkbDeviceInfoPtr, c_uint, c_int) void;
pub extern fn XkbNoteDeviceChanges(XkbDeviceChangesPtr, [*c]XkbExtensionDeviceNotifyEvent, c_uint) void;
pub extern fn XkbGetDeviceInfo(?*xlib.Display, c_uint, c_uint, c_uint, c_uint) XkbDeviceInfoPtr;
pub extern fn XkbGetDeviceInfoChanges(?*xlib.Display, XkbDeviceInfoPtr, XkbDeviceChangesPtr) c_int;
pub extern fn XkbGetDeviceButtonActions(?*xlib.Display, XkbDeviceInfoPtr, c_int, c_uint, c_uint) c_int;
pub extern fn XkbGetDeviceLedInfo(?*xlib.Display, XkbDeviceInfoPtr, c_uint, c_uint, c_uint) c_int;
pub extern fn XkbSetDeviceInfo(?*xlib.Display, c_uint, XkbDeviceInfoPtr) c_int;
pub extern fn XkbChangeDeviceInfo(?*xlib.Display, XkbDeviceInfoPtr, XkbDeviceChangesPtr) c_int;
pub extern fn XkbSetDeviceLedInfo(?*xlib.Display, XkbDeviceInfoPtr, c_uint, c_uint, c_uint) c_int;
pub extern fn XkbSetDeviceButtonActions(?*xlib.Display, XkbDeviceInfoPtr, c_uint, c_uint) c_int;
pub extern fn XkbToControl(u8) u8;
pub extern fn XkbSetDebuggingFlags(?*xlib.Display, c_uint, c_uint, [*c]u8, c_uint, c_uint, [*c]c_uint, [*c]c_uint) c_int;
pub extern fn XkbApplyVirtualModChanges(XkbDescPtr, c_uint, XkbChangesPtr) c_int;
pub extern fn XkbUpdateActionVirtualMods(XkbDescPtr, [*c]XkbAction, c_uint) c_int;
pub extern fn XkbUpdateKeyTypeVirtualMods(XkbDescPtr, XkbKeyTypePtr, c_uint, XkbChangesPtr) void;
pub const X_kbUseExtension = @as(c_int, 0);
pub const X_kbSelectEvents = @as(c_int, 1);
pub const X_kbBell = @as(c_int, 3);
pub const X_kbGetState = @as(c_int, 4);
pub const X_kbLatchLockState = @as(c_int, 5);
pub const X_kbGetControls = @as(c_int, 6);
pub const X_kbSetControls = @as(c_int, 7);
pub const X_kbGetMap = @as(c_int, 8);
pub const X_kbSetMap = @as(c_int, 9);
pub const X_kbGetCompatMap = @as(c_int, 10);
pub const X_kbSetCompatMap = @as(c_int, 11);
pub const X_kbGetIndicatorState = @as(c_int, 12);
pub const X_kbGetIndicatorMap = @as(c_int, 13);
pub const X_kbSetIndicatorMap = @as(c_int, 14);
pub const X_kbGetNamedIndicator = @as(c_int, 15);
pub const X_kbSetNamedIndicator = @as(c_int, 16);
pub const X_kbGetNames = @as(c_int, 17);
pub const X_kbSetNames = @as(c_int, 18);
pub const X_kbGetGeometry = @as(c_int, 19);
pub const X_kbSetGeometry = @as(c_int, 20);
pub const X_kbPerClientFlags = @as(c_int, 21);
pub const X_kbListComponents = @as(c_int, 22);
pub const X_kbGetKbdByName = @as(c_int, 23);
pub const X_kbGetDeviceInfo = @as(c_int, 24);
pub const X_kbSetDeviceInfo = @as(c_int, 25);
pub const X_kbSetDebuggingFlags = @as(c_int, 101);
pub const XkbEventCode = @as(c_int, 0);
pub const XkbNumberEvents = XkbEventCode + @as(c_int, 1);
pub const XkbNewKeyboardNotify = @as(c_int, 0);
pub const XkbMapNotify = @as(c_int, 1);
pub const XkbStateNotify = @as(c_int, 2);
pub const XkbControlsNotify = @as(c_int, 3);
pub const XkbIndicatorStateNotify = @as(c_int, 4);
pub const XkbIndicatorMapNotify = @as(c_int, 5);
pub const XkbNamesNotify = @as(c_int, 6);
pub const XkbCompatMapNotify = @as(c_int, 7);
pub const XkbBellNotify = @as(c_int, 8);
pub const XkbActionMessage = @as(c_int, 9);
pub const XkbAccessXNotify = @as(c_int, 10);
pub const XkbExtensionDeviceNotify = @as(c_int, 11);
pub const XkbNewKeyboardNotifyMask = @as(c_long, 1) << @as(c_int, 0);
pub const XkbMapNotifyMask = @as(c_long, 1) << @as(c_int, 1);
pub const XkbStateNotifyMask = @as(c_long, 1) << @as(c_int, 2);
pub const XkbControlsNotifyMask = @as(c_long, 1) << @as(c_int, 3);
pub const XkbIndicatorStateNotifyMask = @as(c_long, 1) << @as(c_int, 4);
pub const XkbIndicatorMapNotifyMask = @as(c_long, 1) << @as(c_int, 5);
pub const XkbNamesNotifyMask = @as(c_long, 1) << @as(c_int, 6);
pub const XkbCompatMapNotifyMask = @as(c_long, 1) << @as(c_int, 7);
pub const XkbBellNotifyMask = @as(c_long, 1) << @as(c_int, 8);
pub const XkbActionMessageMask = @as(c_long, 1) << @as(c_int, 9);
pub const XkbAccessXNotifyMask = @as(c_long, 1) << @as(c_int, 10);
pub const XkbExtensionDeviceNotifyMask = @as(c_long, 1) << @as(c_int, 11);
pub const XkbAllEventsMask = @as(c_int, 0xFFF);
pub const XkbNKN_KeycodesMask = @as(c_long, 1) << @as(c_int, 0);
pub const XkbNKN_GeometryMask = @as(c_long, 1) << @as(c_int, 1);
pub const XkbNKN_DeviceIDMask = @as(c_long, 1) << @as(c_int, 2);
pub const XkbAllNewKeyboardEventsMask = @as(c_int, 0x7);
pub const XkbAXN_SKPress = @as(c_int, 0);
pub const XkbAXN_SKAccept = @as(c_int, 1);
pub const XkbAXN_SKReject = @as(c_int, 2);
pub const XkbAXN_SKRelease = @as(c_int, 3);
pub const XkbAXN_BKAccept = @as(c_int, 4);
pub const XkbAXN_BKReject = @as(c_int, 5);
pub const XkbAXN_AXKWarning = @as(c_int, 6);
pub const XkbAXN_SKPressMask = @as(c_long, 1) << @as(c_int, 0);
pub const XkbAXN_SKAcceptMask = @as(c_long, 1) << @as(c_int, 1);
pub const XkbAXN_SKRejectMask = @as(c_long, 1) << @as(c_int, 2);
pub const XkbAXN_SKReleaseMask = @as(c_long, 1) << @as(c_int, 3);
pub const XkbAXN_BKAcceptMask = @as(c_long, 1) << @as(c_int, 4);
pub const XkbAXN_BKRejectMask = @as(c_long, 1) << @as(c_int, 5);
pub const XkbAXN_AXKWarningMask = @as(c_long, 1) << @as(c_int, 6);
pub const XkbAllAccessXEventsMask = @as(c_int, 0x7f);
pub const XkbAllStateEventsMask = XkbAllStateComponentsMask;
pub const XkbAllMapEventsMask = XkbAllMapComponentsMask;
pub const XkbAllControlEventsMask = XkbAllControlsMask;
pub const XkbAllIndicatorEventsMask = XkbAllIndicatorsMask;
pub const XkbAllNameEventsMask = XkbAllNamesMask;
pub const XkbAllCompatMapEventsMask = XkbAllCompatMask;
pub const XkbAllBellEventsMask = @as(c_long, 1) << @as(c_int, 0);
pub const XkbAllActionMessagesMask = @as(c_long, 1) << @as(c_int, 0);
pub const XkbKeyboard = @as(c_int, 0);
pub const XkbNumberErrors = @as(c_int, 1);
pub const XkbErr_BadDevice = @as(c_int, 0xff);
pub const XkbErr_BadClass = @as(c_int, 0xfe);
pub const XkbErr_BadId = @as(c_int, 0xfd);
pub const XkbClientMapMask = @as(c_long, 1) << @as(c_int, 0);
pub const XkbServerMapMask = @as(c_long, 1) << @as(c_int, 1);
pub const XkbCompatMapMask = @as(c_long, 1) << @as(c_int, 2);
pub const XkbIndicatorMapMask = @as(c_long, 1) << @as(c_int, 3);
pub const XkbNamesMask = @as(c_long, 1) << @as(c_int, 4);
pub const XkbGeometryMask = @as(c_long, 1) << @as(c_int, 5);
pub const XkbControlsMask = @as(c_long, 1) << @as(c_int, 6);
pub const XkbAllComponentsMask = @as(c_int, 0x7f);
pub const XkbModifierStateMask = @as(c_long, 1) << @as(c_int, 0);
pub const XkbModifierBaseMask = @as(c_long, 1) << @as(c_int, 1);
pub const XkbModifierLatchMask = @as(c_long, 1) << @as(c_int, 2);
pub const XkbModifierLockMask = @as(c_long, 1) << @as(c_int, 3);
pub const XkbGroupStateMask = @as(c_long, 1) << @as(c_int, 4);
pub const XkbGroupBaseMask = @as(c_long, 1) << @as(c_int, 5);
pub const XkbGroupLatchMask = @as(c_long, 1) << @as(c_int, 6);
pub const XkbGroupLockMask = @as(c_long, 1) << @as(c_int, 7);
pub const XkbCompatStateMask = @as(c_long, 1) << @as(c_int, 8);
pub const XkbGrabModsMask = @as(c_long, 1) << @as(c_int, 9);
pub const XkbCompatGrabModsMask = @as(c_long, 1) << @as(c_int, 10);
pub const XkbLookupModsMask = @as(c_long, 1) << @as(c_int, 11);
pub const XkbCompatLookupModsMask = @as(c_long, 1) << @as(c_int, 12);
pub const XkbPointerButtonMask = @as(c_long, 1) << @as(c_int, 13);
pub const XkbAllStateComponentsMask = @as(c_int, 0x3fff);
pub const XkbRepeatKeysMask = @as(c_long, 1) << @as(c_int, 0);
pub const XkbSlowKeysMask = @as(c_long, 1) << @as(c_int, 1);
pub const XkbBounceKeysMask = @as(c_long, 1) << @as(c_int, 2);
pub const XkbStickyKeysMask = @as(c_long, 1) << @as(c_int, 3);
pub const XkbMouseKeysMask = @as(c_long, 1) << @as(c_int, 4);
pub const XkbMouseKeysAccelMask = @as(c_long, 1) << @as(c_int, 5);
pub const XkbAccessXKeysMask = @as(c_long, 1) << @as(c_int, 6);
pub const XkbAccessXTimeoutMask = @as(c_long, 1) << @as(c_int, 7);
pub const XkbAccessXFeedbackMask = @as(c_long, 1) << @as(c_int, 8);
pub const XkbAudibleBellMask = @as(c_long, 1) << @as(c_int, 9);
pub const XkbOverlay1Mask = @as(c_long, 1) << @as(c_int, 10);
pub const XkbOverlay2Mask = @as(c_long, 1) << @as(c_int, 11);
pub const XkbIgnoreGroupLockMask = @as(c_long, 1) << @as(c_int, 12);
pub const XkbGroupsWrapMask = @as(c_long, 1) << @as(c_int, 27);
pub const XkbInternalModsMask = @as(c_long, 1) << @as(c_int, 28);
pub const XkbIgnoreLockModsMask = @as(c_long, 1) << @as(c_int, 29);
pub const XkbPerKeyRepeatMask = @as(c_long, 1) << @as(c_int, 30);
pub const XkbControlsEnabledMask = @as(c_long, 1) << @as(c_int, 31);
pub const XkbAccessXOptionsMask = XkbStickyKeysMask | XkbAccessXFeedbackMask;
pub const XkbAllBooleanCtrlsMask = @as(c_int, 0x00001FFF);
pub const XkbAllControlsMask = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xF8001FFF, .hexadecimal);
pub const XkbAX_SKPressFBMask = @as(c_long, 1) << @as(c_int, 0);
pub const XkbAX_SKAcceptFBMask = @as(c_long, 1) << @as(c_int, 1);
pub const XkbAX_FeatureFBMask = @as(c_long, 1) << @as(c_int, 2);
pub const XkbAX_SlowWarnFBMask = @as(c_long, 1) << @as(c_int, 3);
pub const XkbAX_IndicatorFBMask = @as(c_long, 1) << @as(c_int, 4);
pub const XkbAX_StickyKeysFBMask = @as(c_long, 1) << @as(c_int, 5);
pub const XkbAX_TwoKeysMask = @as(c_long, 1) << @as(c_int, 6);
pub const XkbAX_LatchToLockMask = @as(c_long, 1) << @as(c_int, 7);
pub const XkbAX_SKReleaseFBMask = @as(c_long, 1) << @as(c_int, 8);
pub const XkbAX_SKRejectFBMask = @as(c_long, 1) << @as(c_int, 9);
pub const XkbAX_BKRejectFBMask = @as(c_long, 1) << @as(c_int, 10);
pub const XkbAX_DumbBellFBMask = @as(c_long, 1) << @as(c_int, 11);
pub const XkbAX_FBOptionsMask = @as(c_int, 0xF3F);
pub const XkbAX_SKOptionsMask = @as(c_int, 0x0C0);
pub const XkbAX_AllOptionsMask = @as(c_int, 0xFFF);
pub const XkbUseCoreKbd = @as(c_int, 0x0100);
pub const XkbUseCorePtr = @as(c_int, 0x0200);
pub const XkbDfltXIClass = @as(c_int, 0x0300);
pub const XkbDfltXIId = @as(c_int, 0x0400);
pub const XkbAllXIClasses = @as(c_int, 0x0500);
pub const XkbAllXIIds = @as(c_int, 0x0600);
pub const XkbXINone = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xff00, .hexadecimal);
pub inline fn XkbExplicitXIDevice(c: anytype) @TypeOf((c & ~@as(c_int, 0xff)) == @as(c_int, 0)) {
return (c & ~@as(c_int, 0xff)) == @as(c_int, 0);
}
pub inline fn XkbExplicitXIClass(c: anytype) @TypeOf((c & ~@as(c_int, 0xff)) == @as(c_int, 0)) {
return (c & ~@as(c_int, 0xff)) == @as(c_int, 0);
}
pub inline fn XkbExplicitXIId(c: anytype) @TypeOf((c & ~@as(c_int, 0xff)) == @as(c_int, 0)) {
return (c & ~@as(c_int, 0xff)) == @as(c_int, 0);
}
pub inline fn XkbSingleXIClass(c: anytype) @TypeOf(((c & ~@as(c_int, 0xff)) == @as(c_int, 0)) or (c == XkbDfltXIClass)) {
return ((c & ~@as(c_int, 0xff)) == @as(c_int, 0)) or (c == XkbDfltXIClass);
}
pub inline fn XkbSingleXIId(c: anytype) @TypeOf(((c & ~@as(c_int, 0xff)) == @as(c_int, 0)) or (c == XkbDfltXIId)) {
return ((c & ~@as(c_int, 0xff)) == @as(c_int, 0)) or (c == XkbDfltXIId);
}
pub const XkbNoModifier = @as(c_int, 0xff);
pub const XkbNoShiftLevel = @as(c_int, 0xff);
pub const XkbNoShape = @as(c_int, 0xff);
pub const XkbNoIndicator = @as(c_int, 0xff);
pub const XkbNoModifierMask = @as(c_int, 0);
pub const XkbAllModifiersMask = @as(c_int, 0xff);
pub const XkbAllVirtualModsMask = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xffff, .hexadecimal);
pub const XkbNumKbdGroups = @as(c_int, 4);
pub const XkbMaxKbdGroup = XkbNumKbdGroups - @as(c_int, 1);
pub const XkbMaxMouseKeysBtn = @as(c_int, 4);
pub const XkbGroup1Index = @as(c_int, 0);
pub const XkbGroup2Index = @as(c_int, 1);
pub const XkbGroup3Index = @as(c_int, 2);
pub const XkbGroup4Index = @as(c_int, 3);
pub const XkbAnyGroup = @as(c_int, 254);
pub const XkbAllGroups = @as(c_int, 255);
pub const XkbGroup1Mask = @as(c_int, 1) << @as(c_int, 0);
pub const XkbGroup2Mask = @as(c_int, 1) << @as(c_int, 1);
pub const XkbGroup3Mask = @as(c_int, 1) << @as(c_int, 2);
pub const XkbGroup4Mask = @as(c_int, 1) << @as(c_int, 3);
pub const XkbAnyGroupMask = @as(c_int, 1) << @as(c_int, 7);
pub const XkbAllGroupsMask = @as(c_int, 0xf);
pub inline fn XkbBuildCoreState(m: anytype, g: anytype) @TypeOf(((g & @as(c_int, 0x3)) << @as(c_int, 13)) | (m & @as(c_int, 0xff))) {
return ((g & @as(c_int, 0x3)) << @as(c_int, 13)) | (m & @as(c_int, 0xff));
}
pub inline fn XkbGroupForCoreState(s: anytype) @TypeOf((s >> @as(c_int, 13)) & @as(c_int, 0x3)) {
return (s >> @as(c_int, 13)) & @as(c_int, 0x3);
}
pub inline fn XkbIsLegalGroup(g: anytype) @TypeOf((g >= @as(c_int, 0)) and (g < XkbNumKbdGroups)) {
return (g >= @as(c_int, 0)) and (g < XkbNumKbdGroups);
}
pub const XkbWrapIntoRange = @as(c_int, 0x00);
pub const XkbClampIntoRange = @as(c_int, 0x40);
pub const XkbRedirectIntoRange = @as(c_int, 0x80);
pub const XkbSA_ClearLocks = @as(c_long, 1) << @as(c_int, 0);
pub const XkbSA_LatchToLock = @as(c_long, 1) << @as(c_int, 1);
pub const XkbSA_LockNoLock = @as(c_long, 1) << @as(c_int, 0);
pub const XkbSA_LockNoUnlock = @as(c_long, 1) << @as(c_int, 1);
pub const XkbSA_UseModMapMods = @as(c_long, 1) << @as(c_int, 2);
pub const XkbSA_GroupAbsolute = @as(c_long, 1) << @as(c_int, 2);
pub const XkbSA_UseDfltButton = @as(c_int, 0);
pub const XkbSA_NoAcceleration = @as(c_long, 1) << @as(c_int, 0);
pub const XkbSA_MoveAbsoluteX = @as(c_long, 1) << @as(c_int, 1);
pub const XkbSA_MoveAbsoluteY = @as(c_long, 1) << @as(c_int, 2);
pub const XkbSA_ISODfltIsGroup = @as(c_long, 1) << @as(c_int, 7);
pub const XkbSA_ISONoAffectMods = @as(c_long, 1) << @as(c_int, 6);
pub const XkbSA_ISONoAffectGroup = @as(c_long, 1) << @as(c_int, 5);
pub const XkbSA_ISONoAffectPtr = @as(c_long, 1) << @as(c_int, 4);
pub const XkbSA_ISONoAffectCtrls = @as(c_long, 1) << @as(c_int, 3);
pub const XkbSA_ISOAffectMask = @as(c_int, 0x78);
pub const XkbSA_MessageOnPress = @as(c_long, 1) << @as(c_int, 0);
pub const XkbSA_MessageOnRelease = @as(c_long, 1) << @as(c_int, 1);
pub const XkbSA_MessageGenKeyEvent = @as(c_long, 1) << @as(c_int, 2);
pub const XkbSA_AffectDfltBtn = @as(c_int, 1);
pub const XkbSA_DfltBtnAbsolute = @as(c_long, 1) << @as(c_int, 2);
pub const XkbSA_SwitchApplication = @as(c_long, 1) << @as(c_int, 0);
pub const XkbSA_SwitchAbsolute = @as(c_long, 1) << @as(c_int, 2);
pub const XkbSA_IgnoreVal = @as(c_int, 0x00);
pub const XkbSA_SetValMin = @as(c_int, 0x10);
pub const XkbSA_SetValCenter = @as(c_int, 0x20);
pub const XkbSA_SetValMax = @as(c_int, 0x30);
pub const XkbSA_SetValRelative = @as(c_int, 0x40);
pub const XkbSA_SetValAbsolute = @as(c_int, 0x50);
pub const XkbSA_ValOpMask = @as(c_int, 0x70);
pub const XkbSA_ValScaleMask = @as(c_int, 0x07);
pub inline fn XkbSA_ValOp(a: anytype) @TypeOf(a & XkbSA_ValOpMask) {
return a & XkbSA_ValOpMask;
}
pub inline fn XkbSA_ValScale(a: anytype) @TypeOf(a & XkbSA_ValScaleMask) {
return a & XkbSA_ValScaleMask;
}
pub const XkbSA_NoAction = @as(c_int, 0x00);
pub const XkbSA_SetMods = @as(c_int, 0x01);
pub const XkbSA_LatchMods = @as(c_int, 0x02);
pub const XkbSA_LockMods = @as(c_int, 0x03);
pub const XkbSA_SetGroup = @as(c_int, 0x04);
pub const XkbSA_LatchGroup = @as(c_int, 0x05);
pub const XkbSA_LockGroup = @as(c_int, 0x06);
pub const XkbSA_MovePtr = @as(c_int, 0x07);
pub const XkbSA_PtrBtn = @as(c_int, 0x08);
pub const XkbSA_LockPtrBtn = @as(c_int, 0x09);
pub const XkbSA_SetPtrDflt = @as(c_int, 0x0a);
pub const XkbSA_ISOLock = @as(c_int, 0x0b);
pub const XkbSA_Terminate = @as(c_int, 0x0c);
pub const XkbSA_SwitchScreen = @as(c_int, 0x0d);
pub const XkbSA_SetControls = @as(c_int, 0x0e);
pub const XkbSA_LockControls = @as(c_int, 0x0f);
pub const XkbSA_ActionMessage = @as(c_int, 0x10);
pub const XkbSA_RedirectKey = @as(c_int, 0x11);
pub const XkbSA_DeviceBtn = @as(c_int, 0x12);
pub const XkbSA_LockDeviceBtn = @as(c_int, 0x13);
pub const XkbSA_DeviceValuator = @as(c_int, 0x14);
pub const XkbSA_LastAction = XkbSA_DeviceValuator;
pub const XkbSA_NumActions = XkbSA_LastAction + @as(c_int, 1);
pub const XkbSA_XFree86Private = @as(c_int, 0x86);
pub const XkbSA_BreakLatch = ((((((((((@as(c_int, 1) << XkbSA_NoAction) | (@as(c_int, 1) << XkbSA_PtrBtn)) | (@as(c_int, 1) << XkbSA_LockPtrBtn)) | (@as(c_int, 1) << XkbSA_Terminate)) | (@as(c_int, 1) << XkbSA_SwitchScreen)) | (@as(c_int, 1) << XkbSA_SetControls)) | (@as(c_int, 1) << XkbSA_LockControls)) | (@as(c_int, 1) << XkbSA_ActionMessage)) | (@as(c_int, 1) << XkbSA_RedirectKey)) | (@as(c_int, 1) << XkbSA_DeviceBtn)) | (@as(c_int, 1) << XkbSA_LockDeviceBtn);
pub inline fn XkbIsGroupAction(a: anytype) @TypeOf((a.*.type >= XkbSA_SetGroup) and (a.*.type <= XkbSA_LockGroup)) {
return (a.*.type >= XkbSA_SetGroup) and (a.*.type <= XkbSA_LockGroup);
}
pub inline fn XkbIsPtrAction(a: anytype) @TypeOf((a.*.type >= XkbSA_MovePtr) and (a.*.type <= XkbSA_SetPtrDflt)) {
return (a.*.type >= XkbSA_MovePtr) and (a.*.type <= XkbSA_SetPtrDflt);
}
pub const XkbKB_Permanent = @as(c_int, 0x80);
pub const XkbKB_OpMask = @as(c_int, 0x7f);
pub const XkbKB_Default = @as(c_int, 0x00);
pub const XkbKB_Lock = @as(c_int, 0x01);
pub const XkbKB_RadioGroup = @as(c_int, 0x02);
pub const XkbKB_Overlay1 = @as(c_int, 0x03);
pub const XkbKB_Overlay2 = @as(c_int, 0x04);
pub const XkbKB_RGAllowNone = @as(c_int, 0x80);
pub const XkbMinLegalKeyCode = @as(c_int, 8);
pub const XkbMaxLegalKeyCode = @as(c_int, 255);
pub const XkbMaxKeyCount = (XkbMaxLegalKeyCode - XkbMinLegalKeyCode) + @as(c_int, 1);
pub const XkbPerKeyBitArraySize = (XkbMaxLegalKeyCode + @as(c_int, 1)) / @as(c_int, 8);
pub inline fn XkbIsLegalKeycode(k: anytype) @TypeOf(k >= XkbMinLegalKeyCode) {
return k >= XkbMinLegalKeyCode;
}
pub const XkbNumModifiers = @as(c_int, 8);
pub const XkbNumVirtualMods = @as(c_int, 16);
pub const XkbNumIndicators = @as(c_int, 32);
pub const XkbAllIndicatorsMask = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xffffffff, .hexadecimal);
pub const XkbMaxRadioGroups = @as(c_int, 32);
pub const XkbAllRadioGroupsMask = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xffffffff, .hexadecimal);
pub const XkbMaxShiftLevel = @as(c_int, 63);
pub const XkbMaxSymsPerKey = XkbMaxShiftLevel * XkbNumKbdGroups;
pub const XkbRGMaxMembers = @as(c_int, 12);
pub const XkbActionMessageLength = @as(c_int, 6);
pub const XkbKeyNameLength = @as(c_int, 4);
pub const XkbMaxRedirectCount = @as(c_int, 8);
pub const XkbGeomPtsPerMM = @as(c_int, 10);
pub const XkbGeomMaxColors = @as(c_int, 32);
pub const XkbGeomMaxLabelColors = @as(c_int, 3);
pub const XkbGeomMaxPriority = @as(c_int, 255);
pub const XkbOneLevelIndex = @as(c_int, 0);
pub const XkbTwoLevelIndex = @as(c_int, 1);
pub const XkbAlphabeticIndex = @as(c_int, 2);
pub const XkbKeypadIndex = @as(c_int, 3);
pub const XkbLastRequiredType = XkbKeypadIndex;
pub const XkbNumRequiredTypes = XkbLastRequiredType + @as(c_int, 1);
pub const XkbMaxKeyTypes = @as(c_int, 255);
pub const XkbOneLevelMask = @as(c_int, 1) << @as(c_int, 0);
pub const XkbTwoLevelMask = @as(c_int, 1) << @as(c_int, 1);
pub const XkbAlphabeticMask = @as(c_int, 1) << @as(c_int, 2);
pub const XkbKeypadMask = @as(c_int, 1) << @as(c_int, 3);
pub const XkbAllRequiredTypes = @as(c_int, 0xf);
pub inline fn XkbShiftLevel(n: anytype) @TypeOf(n - @as(c_int, 1)) {
return n - @as(c_int, 1);
}
pub inline fn XkbShiftLevelMask(n: anytype) @TypeOf(@as(c_int, 1) << (n - @as(c_int, 1))) {
return @as(c_int, 1) << (n - @as(c_int, 1));
}
pub const XkbName = "XKEYBOARD";
pub const XkbMajorVersion = @as(c_int, 1);
pub const XkbMinorVersion = @as(c_int, 0);
pub const XkbExplicitKeyTypesMask = @as(c_int, 0x0f);
pub const XkbExplicitKeyType1Mask = @as(c_int, 1) << @as(c_int, 0);
pub const XkbExplicitKeyType2Mask = @as(c_int, 1) << @as(c_int, 1);
pub const XkbExplicitKeyType3Mask = @as(c_int, 1) << @as(c_int, 2);
pub const XkbExplicitKeyType4Mask = @as(c_int, 1) << @as(c_int, 3);
pub const XkbExplicitInterpretMask = @as(c_int, 1) << @as(c_int, 4);
pub const XkbExplicitAutoRepeatMask = @as(c_int, 1) << @as(c_int, 5);
pub const XkbExplicitBehaviorMask = @as(c_int, 1) << @as(c_int, 6);
pub const XkbExplicitVModMapMask = @as(c_int, 1) << @as(c_int, 7);
pub const XkbAllExplicitMask = @as(c_int, 0xff);
pub const XkbKeyTypesMask = @as(c_int, 1) << @as(c_int, 0);
pub const XkbKeySymsMask = @as(c_int, 1) << @as(c_int, 1);
pub const XkbModifierMapMask = @as(c_int, 1) << @as(c_int, 2);
pub const XkbExplicitComponentsMask = @as(c_int, 1) << @as(c_int, 3);
pub const XkbKeyActionsMask = @as(c_int, 1) << @as(c_int, 4);
pub const XkbKeyBehaviorsMask = @as(c_int, 1) << @as(c_int, 5);
pub const XkbVirtualModsMask = @as(c_int, 1) << @as(c_int, 6);
pub const XkbVirtualModMapMask = @as(c_int, 1) << @as(c_int, 7);
pub const XkbAllClientInfoMask = (XkbKeyTypesMask | XkbKeySymsMask) | XkbModifierMapMask;
pub const XkbAllServerInfoMask = (((XkbExplicitComponentsMask | XkbKeyActionsMask) | XkbKeyBehaviorsMask) | XkbVirtualModsMask) | XkbVirtualModMapMask;
pub const XkbAllMapComponentsMask = XkbAllClientInfoMask | XkbAllServerInfoMask;
pub const XkbSI_AutoRepeat = @as(c_int, 1) << @as(c_int, 0);
pub const XkbSI_LockingKey = @as(c_int, 1) << @as(c_int, 1);
pub const XkbSI_LevelOneOnly = @as(c_int, 0x80);
pub const XkbSI_OpMask = @as(c_int, 0x7f);
pub const XkbSI_NoneOf = @as(c_int, 0);
pub const XkbSI_AnyOfOrNone = @as(c_int, 1);
pub const XkbSI_AnyOf = @as(c_int, 2);
pub const XkbSI_AllOf = @as(c_int, 3);
pub const XkbSI_Exactly = @as(c_int, 4);
pub const XkbIM_NoExplicit = @as(c_long, 1) << @as(c_int, 7);
pub const XkbIM_NoAutomatic = @as(c_long, 1) << @as(c_int, 6);
pub const XkbIM_LEDDrivesKB = @as(c_long, 1) << @as(c_int, 5);
pub const XkbIM_UseBase = @as(c_long, 1) << @as(c_int, 0);
pub const XkbIM_UseLatched = @as(c_long, 1) << @as(c_int, 1);
pub const XkbIM_UseLocked = @as(c_long, 1) << @as(c_int, 2);
pub const XkbIM_UseEffective = @as(c_long, 1) << @as(c_int, 3);
pub const XkbIM_UseCompat = @as(c_long, 1) << @as(c_int, 4);
pub const XkbIM_UseNone = @as(c_int, 0);
pub const XkbIM_UseAnyGroup = ((XkbIM_UseBase | XkbIM_UseLatched) | XkbIM_UseLocked) | XkbIM_UseEffective;
pub const XkbIM_UseAnyMods = XkbIM_UseAnyGroup | XkbIM_UseCompat;
pub const XkbSymInterpMask = @as(c_int, 1) << @as(c_int, 0);
pub const XkbGroupCompatMask = @as(c_int, 1) << @as(c_int, 1);
pub const XkbAllCompatMask = @as(c_int, 0x3);
pub const XkbKeycodesNameMask = @as(c_int, 1) << @as(c_int, 0);
pub const XkbGeometryNameMask = @as(c_int, 1) << @as(c_int, 1);
pub const XkbSymbolsNameMask = @as(c_int, 1) << @as(c_int, 2);
pub const XkbPhysSymbolsNameMask = @as(c_int, 1) << @as(c_int, 3);
pub const XkbTypesNameMask = @as(c_int, 1) << @as(c_int, 4);
pub const XkbCompatNameMask = @as(c_int, 1) << @as(c_int, 5);
pub const XkbKeyTypeNamesMask = @as(c_int, 1) << @as(c_int, 6);
pub const XkbKTLevelNamesMask = @as(c_int, 1) << @as(c_int, 7);
pub const XkbIndicatorNamesMask = @as(c_int, 1) << @as(c_int, 8);
pub const XkbKeyNamesMask = @as(c_int, 1) << @as(c_int, 9);
pub const XkbKeyAliasesMask = @as(c_int, 1) << @as(c_int, 10);
pub const XkbVirtualModNamesMask = @as(c_int, 1) << @as(c_int, 11);
pub const XkbGroupNamesMask = @as(c_int, 1) << @as(c_int, 12);
pub const XkbRGNamesMask = @as(c_int, 1) << @as(c_int, 13);
pub const XkbComponentNamesMask = @as(c_int, 0x3f);
pub const XkbAllNamesMask = @as(c_int, 0x3fff);
pub const XkbGBN_TypesMask = @as(c_long, 1) << @as(c_int, 0);
pub const XkbGBN_CompatMapMask = @as(c_long, 1) << @as(c_int, 1);
pub const XkbGBN_ClientSymbolsMask = @as(c_long, 1) << @as(c_int, 2);
pub const XkbGBN_ServerSymbolsMask = @as(c_long, 1) << @as(c_int, 3);
pub const XkbGBN_SymbolsMask = XkbGBN_ClientSymbolsMask | XkbGBN_ServerSymbolsMask;
pub const XkbGBN_IndicatorMapMask = @as(c_long, 1) << @as(c_int, 4);
pub const XkbGBN_KeyNamesMask = @as(c_long, 1) << @as(c_int, 5);
pub const XkbGBN_GeometryMask = @as(c_long, 1) << @as(c_int, 6);
pub const XkbGBN_OtherNamesMask = @as(c_long, 1) << @as(c_int, 7);
pub const XkbGBN_AllComponentsMask = @as(c_int, 0xff);
pub const XkbLC_Hidden = @as(c_long, 1) << @as(c_int, 0);
pub const XkbLC_Default = @as(c_long, 1) << @as(c_int, 1);
pub const XkbLC_Partial = @as(c_long, 1) << @as(c_int, 2);
pub const XkbLC_AlphanumericKeys = @as(c_long, 1) << @as(c_int, 8);
pub const XkbLC_ModifierKeys = @as(c_long, 1) << @as(c_int, 9);
pub const XkbLC_KeypadKeys = @as(c_long, 1) << @as(c_int, 10);
pub const XkbLC_FunctionKeys = @as(c_long, 1) << @as(c_int, 11);
pub const XkbLC_AlternateGroup = @as(c_long, 1) << @as(c_int, 12);
pub const XkbXI_KeyboardsMask = @as(c_long, 1) << @as(c_int, 0);
pub const XkbXI_ButtonActionsMask = @as(c_long, 1) << @as(c_int, 1);
pub const XkbXI_IndicatorNamesMask = @as(c_long, 1) << @as(c_int, 2);
pub const XkbXI_IndicatorMapsMask = @as(c_long, 1) << @as(c_int, 3);
pub const XkbXI_IndicatorStateMask = @as(c_long, 1) << @as(c_int, 4);
pub const XkbXI_UnsupportedFeatureMask = @as(c_long, 1) << @as(c_int, 15);
pub const XkbXI_AllFeaturesMask = @as(c_int, 0x001f);
pub const XkbXI_AllDeviceFeaturesMask = @as(c_int, 0x001e);
pub const XkbXI_IndicatorsMask = @as(c_int, 0x001c);
pub const XkbAllExtensionDeviceEventsMask = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0x801f, .hexadecimal);
pub const XkbPCF_DetectableAutoRepeatMask = @as(c_long, 1) << @as(c_int, 0);
pub const XkbPCF_GrabsUseXKBStateMask = @as(c_long, 1) << @as(c_int, 1);
pub const XkbPCF_AutoResetControlsMask = @as(c_long, 1) << @as(c_int, 2);
pub const XkbPCF_LookupStateWhenGrabbed = @as(c_long, 1) << @as(c_int, 3);
pub const XkbPCF_SendEventUsesXKBState = @as(c_long, 1) << @as(c_int, 4);
pub const XkbPCF_AllFlagsMask = @as(c_int, 0x1F);
pub const XkbDF_DisableLocks = @as(c_int, 1) << @as(c_int, 0);
pub inline fn XkbCharToInt(v: anytype) @TypeOf(if (v & @as(c_int, 0x80)) @import("std").zig.c_translation.cast(c_int, v | ~@as(c_int, 0xff)) else @import("std").zig.c_translation.cast(c_int, v & @as(c_int, 0x7f))) {
return if (v & @as(c_int, 0x80)) @import("std").zig.c_translation.cast(c_int, v | ~@as(c_int, 0xff)) else @import("std").zig.c_translation.cast(c_int, v & @as(c_int, 0x7f));
}
pub inline fn Xkb2CharsToInt(h: anytype, l: anytype) c_short {
return @import("std").zig.c_translation.cast(c_short, (h << @as(c_int, 8)) | l);
}
pub inline fn XkbIntTo2Chars(i: anytype, h: anytype, l: anytype) c_short {
    h.* = ((i >> 8) & @as(c_int, 0xff));
    l.* = ((i) & @as(c_int, 0xff));
    return @import("std").zig.c_translation.cast(c_short, l.*);
}
pub inline fn XkbModLocks(s: anytype) @TypeOf(s.*.locked_mods) {
return s.*.locked_mods;
}
pub inline fn XkbStateMods(s: anytype) @TypeOf((s.*.base_mods | s.*.latched_mods) | XkbModLocks(s)) {
return (s.*.base_mods | s.*.latched_mods) | XkbModLocks(s);
}
pub inline fn XkbGroupLock(s: anytype) @TypeOf(s.*.locked_group) {
return s.*.locked_group;
}
pub inline fn XkbStateGroup(s: anytype) @TypeOf((s.*.base_group + s.*.latched_group) + XkbGroupLock(s)) {
return (s.*.base_group + s.*.latched_group) + XkbGroupLock(s);
}
pub inline fn XkbStateFieldFromRec(s: anytype) @TypeOf(XkbBuildCoreState(s.*.lookup_mods, s.*.group)) {
return XkbBuildCoreState(s.*.lookup_mods, s.*.group);
}
pub inline fn XkbGrabStateFromRec(s: anytype) @TypeOf(XkbBuildCoreState(s.*.grab_mods, s.*.group)) {
return XkbBuildCoreState(s.*.grab_mods, s.*.group);
}
pub inline fn XkbNumGroups(g: anytype) @TypeOf(g & @as(c_int, 0x0f)) {
return g & @as(c_int, 0x0f);
}
pub inline fn XkbOutOfRangeGroupInfo(g: anytype) @TypeOf(g & @as(c_int, 0xf0)) {
return g & @as(c_int, 0xf0);
}
pub inline fn XkbOutOfRangeGroupAction(g: anytype) @TypeOf(g & @as(c_int, 0xc0)) {
return g & @as(c_int, 0xc0);
}
pub inline fn XkbOutOfRangeGroupNumber(g: anytype) @TypeOf((g & @as(c_int, 0x30)) >> @as(c_int, 4)) {
return (g & @as(c_int, 0x30)) >> @as(c_int, 4);
}
pub inline fn XkbSetGroupInfo(g: anytype, w: anytype, n: anytype) @TypeOf(((w & @as(c_int, 0xc0)) | ((n & @as(c_int, 3)) << @as(c_int, 4))) | (g & @as(c_int, 0x0f))) {
return ((w & @as(c_int, 0xc0)) | ((n & @as(c_int, 3)) << @as(c_int, 4))) | (g & @as(c_int, 0x0f));
}
pub inline fn XkbSetNumGroups(g: anytype, n: anytype) @TypeOf((g & @as(c_int, 0xf0)) | (n & @as(c_int, 0x0f))) {
return (g & @as(c_int, 0xf0)) | (n & @as(c_int, 0x0f));
}
pub const XkbAnyActionDataSize = @as(c_int, 7);
pub inline fn XkbModActionVMods(a: anytype) c_short {
return @import("std").zig.c_translation.cast(c_short, (a.*.vmods1 << @as(c_int, 8)) | a.*.vmods2);
}
pub inline fn XkbSAGroup(a: anytype) @TypeOf(XkbCharToInt(a.*.group_XXX)) {
return XkbCharToInt(a.*.group_XXX);
}
pub inline fn XkbPtrActionX(a: anytype) @TypeOf(Xkb2CharsToInt(a.*.high_XXX, a.*.low_XXX)) {
return Xkb2CharsToInt(a.*.high_XXX, a.*.low_XXX);
}
pub inline fn XkbPtrActionY(a: anytype) @TypeOf(Xkb2CharsToInt(a.*.high_YYY, a.*.low_YYY)) {
return Xkb2CharsToInt(a.*.high_YYY, a.*.low_YYY);
}
pub inline fn XkbSetPtrActionX(a: anytype, x: anytype) @TypeOf(XkbIntTo2Chars(x, a.*.high_XXX, a.*.low_XXX)) {
return XkbIntTo2Chars(x, a.*.high_XXX, a.*.low_XXX);
}
pub inline fn XkbSetPtrActionY(a: anytype, y: anytype) @TypeOf(XkbIntTo2Chars(y, a.*.high_YYY, a.*.low_YYY)) {
return XkbIntTo2Chars(y, a.*.high_YYY, a.*.low_YYY);
}
pub inline fn XkbSAPtrDfltValue(a: anytype) @TypeOf(XkbCharToInt(a.*.valueXXX)) {
return XkbCharToInt(a.*.valueXXX);
}
pub inline fn XkbSAScreen(a: anytype) @TypeOf(XkbCharToInt(a.*.screenXXX)) {
return XkbCharToInt(a.*.screenXXX);
}
pub inline fn XkbActionCtrls(a: anytype) @TypeOf((((@import("std").zig.c_translation.cast(c_uint, a.*.ctrls3) << @as(c_int, 24)) | (@import("std").zig.c_translation.cast(c_uint, a.*.ctrls2) << @as(c_int, 16))) | (@import("std").zig.c_translation.cast(c_uint, a.*.ctrls1) << @as(c_int, 8))) | @import("std").zig.c_translation.cast(c_uint, a.*.ctrls0)) {
return (((@import("std").zig.c_translation.cast(c_uint, a.*.ctrls3) << @as(c_int, 24)) | (@import("std").zig.c_translation.cast(c_uint, a.*.ctrls2) << @as(c_int, 16))) | (@import("std").zig.c_translation.cast(c_uint, a.*.ctrls1) << @as(c_int, 8))) | @import("std").zig.c_translation.cast(c_uint, a.*.ctrls0);
}
pub inline fn XkbSARedirectVMods(a: anytype) @TypeOf((@import("std").zig.c_translation.cast(c_uint, a.*.vmods1) << @as(c_int, 8)) | @import("std").zig.c_translation.cast(c_uint, a.*.vmods0)) {
return (@import("std").zig.c_translation.cast(c_uint, a.*.vmods1) << @as(c_int, 8)) | @import("std").zig.c_translation.cast(c_uint, a.*.vmods0);
}
pub inline fn XkbSARedirectVModsMask(a: anytype) @TypeOf((@import("std").zig.c_translation.cast(c_uint, a.*.vmods_mask1) << @as(c_int, 8)) | @import("std").zig.c_translation.cast(c_uint, a.*.vmods_mask0)) {
return (@import("std").zig.c_translation.cast(c_uint, a.*.vmods_mask1) << @as(c_int, 8)) | @import("std").zig.c_translation.cast(c_uint, a.*.vmods_mask0);
}
pub inline fn XkbAX_AnyFeedback(c: anytype) @TypeOf(c.*.enabled_ctrls & XkbAccessXFeedbackMask) {
return c.*.enabled_ctrls & XkbAccessXFeedbackMask;
}
pub inline fn XkbAX_NeedOption(c: anytype, w: anytype) @TypeOf(c.*.ax_options & w) {
return c.*.ax_options & w;
}
pub inline fn XkbAX_NeedFeedback(c: anytype, w: anytype) @TypeOf((XkbAX_AnyFeedback(c) != 0) and (XkbAX_NeedOption(c, w) != 0)) {
return (XkbAX_AnyFeedback(c) != 0) and (XkbAX_NeedOption(c, w) != 0);
}
pub inline fn XkbSMKeyActionsPtr(m: anytype, k: anytype) @TypeOf(&m.*.acts[m.*.key_acts[k]]) {
return &m.*.acts[m.*.key_acts[k]];
}
pub inline fn XkbCMKeyGroupInfo(m: anytype, k: anytype) @TypeOf(m.*.key_sym_map[k].group_info) {
return m.*.key_sym_map[k].group_info;
}
pub inline fn XkbCMKeyNumGroups(m: anytype, k: anytype) @TypeOf(XkbNumGroups(m.*.key_sym_map[k].group_info)) {
return XkbNumGroups(m.*.key_sym_map[k].group_info);
}
pub inline fn XkbCMKeyGroupWidth(m: anytype, k: anytype, g: anytype) @TypeOf(XkbCMKeyType(m, k, g).*.num_levels) {
return XkbCMKeyType(m, k, g).*.num_levels;
}
pub inline fn XkbCMKeyGroupsWidth(m: anytype, k: anytype) @TypeOf(m.*.key_sym_map[k].width) {
return m.*.key_sym_map[k].width;
}
pub inline fn XkbCMKeyTypeIndex(m: anytype, k: anytype, g: anytype) @TypeOf(m.*.key_sym_map[k].kt_index[g & @as(c_int, 0x3)]) {
return m.*.key_sym_map[k].kt_index[g & @as(c_int, 0x3)];
}
pub inline fn XkbCMKeyType(m: anytype, k: anytype, g: anytype) @TypeOf(&m.*.types[XkbCMKeyTypeIndex(m, k, g)]) {
return &m.*.types[XkbCMKeyTypeIndex(m, k, g)];
}
pub inline fn XkbCMKeyNumSyms(m: anytype, k: anytype) @TypeOf(XkbCMKeyGroupsWidth(m, k) * XkbCMKeyNumGroups(m, k)) {
return XkbCMKeyGroupsWidth(m, k) * XkbCMKeyNumGroups(m, k);
}
pub inline fn XkbCMKeySymsOffset(m: anytype, k: anytype) @TypeOf(m.*.key_sym_map[k].offset) {
return m.*.key_sym_map[k].offset;
}
pub inline fn XkbCMKeySymsPtr(m: anytype, k: anytype) @TypeOf(&m.*.syms[XkbCMKeySymsOffset(m, k)]) {
return &m.*.syms[XkbCMKeySymsOffset(m, k)];
}
pub inline fn XkbIM_IsAuto(i: anytype) @TypeOf(((i.*.flags & XkbIM_NoAutomatic) == @as(c_int, 0)) and ((((i.*.which_groups != 0) and (i.*.groups != 0)) or ((i.*.which_mods != 0) and (i.*.mods.mask != 0))) or (i.*.ctrls != 0))) {
return ((i.*.flags & XkbIM_NoAutomatic) == @as(c_int, 0)) and ((((i.*.which_groups != 0) and (i.*.groups != 0)) or ((i.*.which_mods != 0) and (i.*.mods.mask != 0))) or (i.*.ctrls != 0));
}
pub inline fn XkbIM_InUse(i: anytype) @TypeOf((((i.*.flags != 0) or (i.*.which_groups != 0)) or (i.*.which_mods != 0)) or (i.*.ctrls != 0)) {
return (((i.*.flags != 0) or (i.*.which_groups != 0)) or (i.*.which_mods != 0)) or (i.*.ctrls != 0);
}
pub inline fn XkbKeyKeyTypeIndex(d: anytype, k: anytype, g: anytype) @TypeOf(XkbCMKeyTypeIndex(d.*.map, k, g)) {
return XkbCMKeyTypeIndex(d.*.map, k, g);
}
pub inline fn XkbKeyKeyType(d: anytype, k: anytype, g: anytype) @TypeOf(XkbCMKeyType(d.*.map, k, g)) {
return XkbCMKeyType(d.*.map, k, g);
}
pub inline fn XkbKeyGroupWidth(d: anytype, k: anytype, g: anytype) @TypeOf(XkbCMKeyGroupWidth(d.*.map, k, g)) {
return XkbCMKeyGroupWidth(d.*.map, k, g);
}
pub inline fn XkbKeyGroupsWidth(d: anytype, k: anytype) @TypeOf(XkbCMKeyGroupsWidth(d.*.map, k)) {
return XkbCMKeyGroupsWidth(d.*.map, k);
}
pub inline fn XkbKeyGroupInfo(d: anytype, k: anytype) @TypeOf(XkbCMKeyGroupInfo(d.*.map, k)) {
return XkbCMKeyGroupInfo(d.*.map, k);
}
pub inline fn XkbKeyNumGroups(d: anytype, k: anytype) @TypeOf(XkbCMKeyNumGroups(d.*.map, k)) {
return XkbCMKeyNumGroups(d.*.map, k);
}
pub inline fn XkbKeyNumSyms(d: anytype, k: anytype) @TypeOf(XkbCMKeyNumSyms(d.*.map, k)) {
return XkbCMKeyNumSyms(d.*.map, k);
}
pub inline fn XkbKeySymsPtr(d: anytype, k: anytype) @TypeOf(XkbCMKeySymsPtr(d.*.map, k)) {
return XkbCMKeySymsPtr(d.*.map, k);
}
pub inline fn XkbKeySym(d: anytype, k: anytype, n: anytype) @TypeOf(XkbKeySymsPtr(d, k)[n]) {
return XkbKeySymsPtr(d, k)[n];
}
pub inline fn XkbKeySymEntry(d: anytype, k: anytype, sl: anytype, g: anytype) @TypeOf(XkbKeySym(d, k, (XkbKeyGroupsWidth(d, k) * g) + sl)) {
return XkbKeySym(d, k, (XkbKeyGroupsWidth(d, k) * g) + sl);
}
pub inline fn XkbKeyAction(d: anytype, k: anytype, n: anytype) @TypeOf(if (XkbKeyHasActions(d, k)) &XkbKeyActionsPtr(d, k)[n] else NULL) {
return if (XkbKeyHasActions(d, k)) &XkbKeyActionsPtr(d, k)[n] else NULL;
}
pub inline fn XkbKeyActionEntry(d: anytype, k: anytype, sl: anytype, g: anytype) @TypeOf(if (XkbKeyHasActions(d, k)) XkbKeyAction(d, k, (XkbKeyGroupsWidth(d, k) * g) + sl) else NULL) {
return if (XkbKeyHasActions(d, k)) XkbKeyAction(d, k, (XkbKeyGroupsWidth(d, k) * g) + sl) else NULL;
}
pub inline fn XkbKeyHasActions(d: anytype, k: anytype) @TypeOf(d.*.server.*.key_acts[k] != @as(c_int, 0)) {
return d.*.server.*.key_acts[k] != @as(c_int, 0);
}
pub inline fn XkbKeyNumActions(d: anytype, k: anytype) @TypeOf(if (XkbKeyHasActions(d, k)) XkbKeyNumSyms(d, k) else @as(c_int, 1)) {
return if (XkbKeyHasActions(d, k)) XkbKeyNumSyms(d, k) else @as(c_int, 1);
}
pub inline fn XkbKeyActionsPtr(d: anytype, k: anytype) @TypeOf(XkbSMKeyActionsPtr(d.*.server, k)) {
return XkbSMKeyActionsPtr(d.*.server, k);
}
pub inline fn XkbKeycodeInRange(d: anytype, k: anytype) @TypeOf((k >= d.*.min_key_code) and (k <= d.*.max_key_code)) {
return (k >= d.*.min_key_code) and (k <= d.*.max_key_code);
}
pub inline fn XkbNumKeys(d: anytype) @TypeOf((d.*.max_key_code - d.*.min_key_code) + @as(c_int, 1)) {
return (d.*.max_key_code - d.*.min_key_code) + @as(c_int, 1);
}
pub inline fn XkbXI_DevHasBtnActs(d: anytype) @TypeOf((d.*.num_btns > @as(c_int, 0)) and (d.*.btn_acts != NULL)) {
return (d.*.num_btns > @as(c_int, 0)) and (d.*.btn_acts != NULL);
}
pub inline fn XkbXI_LegalDevBtn(d: anytype, b: anytype) @TypeOf((XkbXI_DevHasBtnActs(d) != 0) and (b < d.*.num_btns)) {
return (XkbXI_DevHasBtnActs(d) != 0) and (b < d.*.num_btns);
}
pub inline fn XkbXI_DevHasLeds(d: anytype) @TypeOf((d.*.num_leds > @as(c_int, 0)) and (d.*.leds != NULL)) {
return (d.*.num_leds > @as(c_int, 0)) and (d.*.leds != NULL);
}
pub const XkbOD_Success = @as(c_int, 0);
pub const XkbOD_BadLibraryVersion = @as(c_int, 1);
pub const XkbOD_ConnectionRefused = @as(c_int, 2);
pub const XkbOD_NonXkbServer = @as(c_int, 3);
pub const XkbOD_BadServerVersion = @as(c_int, 4);
pub const XkbLC_ForceLatin1Lookup = @as(c_int, 1) << @as(c_int, 0);
pub const XkbLC_ConsumeLookupMods = @as(c_int, 1) << @as(c_int, 1);
pub const XkbLC_AlwaysConsumeShiftAndLock = @as(c_int, 1) << @as(c_int, 2);
pub const XkbLC_IgnoreNewKeyboards = @as(c_int, 1) << @as(c_int, 3);
pub const XkbLC_ControlFallback = @as(c_int, 1) << @as(c_int, 4);
pub const XkbLC_ConsumeKeysOnComposeFail = @as(c_int, 1) << @as(c_int, 29);
pub const XkbLC_ComposeLED = @as(c_int, 1) << @as(c_int, 30);
pub const XkbLC_BeepOnComposeFail = @as(c_int, 1) << @as(c_int, 31);
pub const XkbLC_AllComposeControls = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xc0000000, .hexadecimal);
pub const XkbLC_AllControls = @import("std").zig.c_translation.promoteIntLiteral(c_int, 0xc000001f, .hexadecimal);
pub inline fn XkbGetIndicatorMapChanges(d: anytype, x: anytype, c: anytype) @TypeOf(XkbGetIndicatorMap(d, c.*.map_changes, x)) {
return XkbGetIndicatorMap(d, c.*.map_changes, x);
}
pub inline fn XkbChangeIndicatorMaps(d: anytype, x: anytype, c: anytype) @TypeOf(XkbSetIndicatorMap(d, c.*.map_changes, x)) {
return XkbSetIndicatorMap(d, c.*.map_changes, x);
}
pub inline fn XkbGetControlsChanges(d: anytype, x: anytype, c: anytype) @TypeOf(XkbGetControls(d, c.*.changed_ctrls, x)) {
return XkbGetControls(d, c.*.changed_ctrls, x);
}
pub inline fn XkbChangeControls(d: anytype, x: anytype, c: anytype) @TypeOf(XkbSetControls(d, c.*.changed_ctrls, x)) {
return XkbSetControls(d, c.*.changed_ctrls, x);
}
pub const _XkbStateRec = struct__XkbStateRec;
pub const _XkbMods = struct__XkbMods;
pub const _XkbKTMapEntry = struct__XkbKTMapEntry;
pub const _XkbKeyType = struct__XkbKeyType;
pub const _XkbBehavior = struct__XkbBehavior;
pub const _XkbAnyAction = struct__XkbAnyAction;
pub const _XkbModAction = struct__XkbModAction;
pub const _XkbGroupAction = struct__XkbGroupAction;
pub const _XkbISOAction = struct__XkbISOAction;
pub const _XkbPtrAction = struct__XkbPtrAction;
pub const _XkbPtrBtnAction = struct__XkbPtrBtnAction;
pub const _XkbPtrDfltAction = struct__XkbPtrDfltAction;
pub const _XkbSwitchScreenAction = struct__XkbSwitchScreenAction;
pub const _XkbCtrlsAction = struct__XkbCtrlsAction;
pub const _XkbMessageAction = struct__XkbMessageAction;
pub const _XkbRedirectKeyAction = struct__XkbRedirectKeyAction;
pub const _XkbDeviceBtnAction = struct__XkbDeviceBtnAction;
pub const _XkbDeviceValuatorAction = struct__XkbDeviceValuatorAction;
pub const _XkbAction = union__XkbAction;
pub const _XkbControls = struct__XkbControls;
pub const _XkbServerMapRec = struct__XkbServerMapRec;
pub const _XkbSymMapRec = struct__XkbSymMapRec;
pub const _XkbClientMapRec = struct__XkbClientMapRec;
pub const _XkbSymInterpretRec = struct__XkbSymInterpretRec;
pub const _XkbCompatMapRec = struct__XkbCompatMapRec;
pub const _XkbIndicatorMapRec = struct__XkbIndicatorMapRec;
pub const _XkbIndicatorRec = struct__XkbIndicatorRec;
pub const _XkbKeyNameRec = struct__XkbKeyNameRec;
pub const _XkbKeyAliasRec = struct__XkbKeyAliasRec;
pub const _XkbNamesRec = struct__XkbNamesRec;
pub const _XkbGeometry = struct__XkbGeometry;
pub const _XkbDesc = struct__XkbDesc;
pub const _XkbMapChanges = struct__XkbMapChanges;
pub const _XkbControlsChanges = struct__XkbControlsChanges;
pub const _XkbIndicatorChanges = struct__XkbIndicatorChanges;
pub const _XkbNameChanges = struct__XkbNameChanges;
pub const _XkbCompatChanges = struct__XkbCompatChanges;
pub const _XkbChanges = struct__XkbChanges;
pub const _XkbComponentNames = struct__XkbComponentNames;
pub const _XkbComponentName = struct__XkbComponentName;
pub const _XkbComponentList = struct__XkbComponentList;
pub const _XkbDeviceLedInfo = struct__XkbDeviceLedInfo;
pub const _XkbDeviceInfo = struct__XkbDeviceInfo;
pub const _XkbDeviceLedChanges = struct__XkbDeviceLedChanges;
pub const _XkbDeviceChanges = struct__XkbDeviceChanges;
pub const _XkbAnyEvent = struct__XkbAnyEvent;
pub const _XkbNewKeyboardNotify = struct__XkbNewKeyboardNotify;
pub const _XkbMapNotifyEvent = struct__XkbMapNotifyEvent;
pub const _XkbStateNotifyEvent = struct__XkbStateNotifyEvent;
pub const _XkbControlsNotify = struct__XkbControlsNotify;
pub const _XkbIndicatorNotify = struct__XkbIndicatorNotify;
pub const _XkbNamesNotify = struct__XkbNamesNotify;
pub const _XkbCompatMapNotify = struct__XkbCompatMapNotify;
pub const _XkbBellNotify = struct__XkbBellNotify;
pub const _XkbActionMessage = struct__XkbActionMessage;
pub const _XkbAccessXNotify = struct__XkbAccessXNotify;
pub const _XkbExtensionDeviceNotify = struct__XkbExtensionDeviceNotify;
pub const _XkbEvent = union__XkbEvent;
pub const _XkbKbdDpyState = struct__XkbKbdDpyState; | modules/platform/src/linux/X11/XKBlib.zig |
const std = @import("std");
const builtin = @import("builtin");
const compiler_rt = @import("../compiler_rt.zig");
pub fn __multf3(a: f128, b: f128) callconv(.C) f128 {
return mulXf3(f128, a, b);
}
pub fn __muldf3(a: f64, b: f64) callconv(.C) f64 {
return mulXf3(f64, a, b);
}
pub fn __mulsf3(a: f32, b: f32) callconv(.C) f32 {
return mulXf3(f32, a, b);
}
pub fn __aeabi_fmul(a: f32, b: f32) callconv(.C) f32 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __mulsf3, .{ a, b });
}
pub fn __aeabi_dmul(a: f64, b: f64) callconv(.C) f64 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __muldf3, .{ a, b });
}
fn mulXf3(comptime T: type, a: T, b: T) T {
@setRuntimeSafety(builtin.is_test);
const typeWidth = @typeInfo(T).Float.bits;
const Z = std.meta.Int(.unsigned, typeWidth);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
const signBit = (@as(Z, 1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);
const exponentBias = (maxExponent >> 1);
const implicitBit = (@as(Z, 1) << significandBits);
const quietBit = implicitBit >> 1;
const significandMask = implicitBit - 1;
const absMask = signBit - 1;
const exponentMask = absMask ^ significandMask;
const qnanRep = exponentMask | quietBit;
const infRep = @bitCast(Z, std.math.inf(T));
const aExponent = @truncate(u32, (@bitCast(Z, a) >> significandBits) & maxExponent);
const bExponent = @truncate(u32, (@bitCast(Z, b) >> significandBits) & maxExponent);
const productSign: Z = (@bitCast(Z, a) ^ @bitCast(Z, b)) & signBit;
var aSignificand: Z = @bitCast(Z, a) & significandMask;
var bSignificand: Z = @bitCast(Z, b) & significandMask;
var scale: i32 = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
if (aExponent -% 1 >= maxExponent -% 1 or bExponent -% 1 >= maxExponent -% 1) {
const aAbs: Z = @bitCast(Z, a) & absMask;
const bAbs: Z = @bitCast(Z, b) & absMask;
// NaN * anything = qNaN
if (aAbs > infRep) return @bitCast(T, @bitCast(Z, a) | quietBit);
// anything * NaN = qNaN
if (bAbs > infRep) return @bitCast(T, @bitCast(Z, b) | quietBit);
if (aAbs == infRep) {
// infinity * non-zero = +/- infinity
if (bAbs != 0) {
return @bitCast(T, aAbs | productSign);
} else {
// infinity * zero = NaN
return @bitCast(T, qnanRep);
}
}
if (bAbs == infRep) {
// non-zero * infinity = +/- infinity
if (aAbs != 0) {
return @bitCast(T, bAbs | productSign);
} else {
// zero * infinity = NaN
return @bitCast(T, qnanRep);
}
}
// zero * anything = +/- zero
if (aAbs == 0) return @bitCast(T, productSign);
// anything * zero = +/- zero
if (bAbs == 0) return @bitCast(T, productSign);
// one or both of a or b is denormal, the other (if applicable) is a
// normal number. Renormalize one or both of a and b, and set scale to
// include the necessary exponent adjustment.
if (aAbs < implicitBit) scale +%= normalize(T, &aSignificand);
if (bAbs < implicitBit) scale +%= normalize(T, &bSignificand);
}
// Or in the implicit significand bit. (If we fell through from the
// denormal path it was already set by normalize( ), but setting it twice
// won't hurt anything.)
aSignificand |= implicitBit;
bSignificand |= implicitBit;
// Get the significand of a*b. Before multiplying the significands, shift
// one of them left to left-align it in the field. Thus, the product will
// have (exponentBits + 2) integral digits, all but two of which must be
// zero. Normalizing this result is just a conditional left-shift by one
// and bumping the exponent accordingly.
var productHi: Z = undefined;
var productLo: Z = undefined;
wideMultiply(Z, aSignificand, bSignificand << exponentBits, &productHi, &productLo);
var productExponent: i32 = @bitCast(i32, aExponent +% bExponent) -% exponentBias +% scale;
// Normalize the significand, adjust exponent if needed.
if ((productHi & implicitBit) != 0) {
productExponent +%= 1;
} else {
productHi = (productHi << 1) | (productLo >> (typeWidth - 1));
productLo = productLo << 1;
}
// If we have overflowed the type, return +/- infinity.
if (productExponent >= maxExponent) return @bitCast(T, infRep | productSign);
if (productExponent <= 0) {
// Result is denormal before rounding
//
// If the result is so small that it just underflows to zero, return
// a zero of the appropriate sign. Mathematically there is no need to
// handle this case separately, but we make it a special case to
// simplify the shift logic.
const shift: u32 = @truncate(u32, @as(Z, 1) -% @bitCast(u32, productExponent));
if (shift >= typeWidth) return @bitCast(T, productSign);
// Otherwise, shift the significand of the result so that the round
// bit is the high bit of productLo.
wideRightShiftWithSticky(Z, &productHi, &productLo, shift);
} else {
// Result is normal before rounding; insert the exponent.
productHi &= significandMask;
productHi |= @as(Z, @bitCast(u32, productExponent)) << significandBits;
}
// Insert the sign of the result:
productHi |= productSign;
// Final rounding. The final result may overflow to infinity, or underflow
// to zero, but those are the correct results in those cases. We use the
// default IEEE-754 round-to-nearest, ties-to-even rounding mode.
if (productLo > signBit) productHi +%= 1;
if (productLo == signBit) productHi +%= productHi & 1;
return @bitCast(T, productHi);
}
fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
@setRuntimeSafety(builtin.is_test);
switch (Z) {
u32 => {
// 32x32 --> 64 bit multiply
const product = @as(u64, a) * @as(u64, b);
hi.* = @truncate(u32, product >> 32);
lo.* = @truncate(u32, product);
},
u64 => {
const S = struct {
fn loWord(x: u64) u64 {
return @truncate(u32, x);
}
fn hiWord(x: u64) u64 {
return @truncate(u32, x >> 32);
}
};
// 64x64 -> 128 wide multiply for platforms that don't have such an operation;
// many 64-bit platforms have this operation, but they tend to have hardware
// floating-point, so we don't bother with a special case for them here.
// Each of the component 32x32 -> 64 products
const plolo: u64 = S.loWord(a) * S.loWord(b);
const plohi: u64 = S.loWord(a) * S.hiWord(b);
const philo: u64 = S.hiWord(a) * S.loWord(b);
const phihi: u64 = S.hiWord(a) * S.hiWord(b);
// Sum terms that contribute to lo in a way that allows us to get the carry
const r0: u64 = S.loWord(plolo);
const r1: u64 = S.hiWord(plolo) +% S.loWord(plohi) +% S.loWord(philo);
lo.* = r0 +% (r1 << 32);
// Sum terms contributing to hi with the carry from lo
hi.* = S.hiWord(plohi) +% S.hiWord(philo) +% S.hiWord(r1) +% phihi;
},
u128 => {
const Word_LoMask = @as(u64, 0x00000000ffffffff);
const Word_HiMask = @as(u64, 0xffffffff00000000);
const Word_FullMask = @as(u64, 0xffffffffffffffff);
const S = struct {
fn Word_1(x: u128) u64 {
return @truncate(u32, x >> 96);
}
fn Word_2(x: u128) u64 {
return @truncate(u32, x >> 64);
}
fn Word_3(x: u128) u64 {
return @truncate(u32, x >> 32);
}
fn Word_4(x: u128) u64 {
return @truncate(u32, x);
}
};
// 128x128 -> 256 wide multiply for platforms that don't have such an operation;
// many 64-bit platforms have this operation, but they tend to have hardware
// floating-point, so we don't bother with a special case for them here.
const product11: u64 = S.Word_1(a) * S.Word_1(b);
const product12: u64 = S.Word_1(a) * S.Word_2(b);
const product13: u64 = S.Word_1(a) * S.Word_3(b);
const product14: u64 = S.Word_1(a) * S.Word_4(b);
const product21: u64 = S.Word_2(a) * S.Word_1(b);
const product22: u64 = S.Word_2(a) * S.Word_2(b);
const product23: u64 = S.Word_2(a) * S.Word_3(b);
const product24: u64 = S.Word_2(a) * S.Word_4(b);
const product31: u64 = S.Word_3(a) * S.Word_1(b);
const product32: u64 = S.Word_3(a) * S.Word_2(b);
const product33: u64 = S.Word_3(a) * S.Word_3(b);
const product34: u64 = S.Word_3(a) * S.Word_4(b);
const product41: u64 = S.Word_4(a) * S.Word_1(b);
const product42: u64 = S.Word_4(a) * S.Word_2(b);
const product43: u64 = S.Word_4(a) * S.Word_3(b);
const product44: u64 = S.Word_4(a) * S.Word_4(b);
const sum0: u128 = @as(u128, product44);
const sum1: u128 = @as(u128, product34) +%
@as(u128, product43);
const sum2: u128 = @as(u128, product24) +%
@as(u128, product33) +%
@as(u128, product42);
const sum3: u128 = @as(u128, product14) +%
@as(u128, product23) +%
@as(u128, product32) +%
@as(u128, product41);
const sum4: u128 = @as(u128, product13) +%
@as(u128, product22) +%
@as(u128, product31);
const sum5: u128 = @as(u128, product12) +%
@as(u128, product21);
const sum6: u128 = @as(u128, product11);
const r0: u128 = (sum0 & Word_FullMask) +%
((sum1 & Word_LoMask) << 32);
const r1: u128 = (sum0 >> 64) +%
((sum1 >> 32) & Word_FullMask) +%
(sum2 & Word_FullMask) +%
((sum3 << 32) & Word_HiMask);
lo.* = r0 +% (r1 << 64);
hi.* = (r1 >> 64) +%
(sum1 >> 96) +%
(sum2 >> 64) +%
(sum3 >> 32) +%
sum4 +%
(sum5 << 32) +%
(sum6 << 64);
},
else => @compileError("unsupported"),
}
}
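// A small sanity check for the helper above (not part of the original file):
// the 64x64 -> 128 decomposition should agree with a native u128 product.
// The operand values are arbitrary examples.
test "wideMultiply u64 agrees with native u128 multiplication" {
    var hi: u64 = undefined;
    var lo: u64 = undefined;
    wideMultiply(u64, 0xdeadbeefcafebabe, 0x0123456789abcdef, &hi, &lo);
    const expected = @as(u128, 0xdeadbeefcafebabe) * @as(u128, 0x0123456789abcdef);
    std.debug.assert(hi == @truncate(u64, expected >> 64));
    std.debug.assert(lo == @truncate(u64, expected));
}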
fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
const shift = @clz(Z, significand.*) - @clz(Z, implicitBit);
significand.* <<= @intCast(std.math.Log2Int(Z), shift);
return 1 - shift;
}
fn wideRightShiftWithSticky(comptime Z: type, hi: *Z, lo: *Z, count: u32) void {
@setRuntimeSafety(builtin.is_test);
const typeWidth = @typeInfo(Z).Int.bits;
const S = std.math.Log2Int(Z);
if (count < typeWidth) {
const sticky = @truncate(u8, lo.* << @intCast(S, typeWidth -% count));
lo.* = (hi.* << @intCast(S, typeWidth -% count)) | (lo.* >> @intCast(S, count)) | sticky;
hi.* = hi.* >> @intCast(S, count);
} else if (count < 2 * typeWidth) {
const sticky = @truncate(u8, hi.* << @intCast(S, 2 * typeWidth -% count) | lo.*);
lo.* = hi.* >> @intCast(S, count -% typeWidth) | sticky;
hi.* = 0;
} else {
const sticky = @truncate(u8, hi.* | lo.*);
lo.* = sticky;
hi.* = 0;
}
}
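// A minimal end-to-end sanity check (the full suite is imported below):
// exactly representable operands multiply without rounding, so the
// soft-float result can be compared directly. Values are arbitrary examples.
test "mulXf3 simple exact products" {
    std.debug.assert(__mulsf3(2.0, 3.0) == 6.0);
    std.debug.assert(__muldf3(0.5, -1.5) == -0.75);
    std.debug.assert(__multf3(1.25, 4.0) == 5.0);
}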
test "import mulXf3" {
_ = @import("mulXf3_test.zig");
} | lib/std/special/compiler_rt/mulXf3.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const ines = @import("../ines.zig");
const console_ = @import("../console.zig");
const Config = console_.Config;
const Console = console_.Console;
const Cpu = @import("../cpu.zig").Cpu;
const GenericMapper = @import("../mapper.zig").GenericMapper;
const common = @import("common.zig");
const flags = @import("../flags.zig");
pub fn Mapper(comptime config: Config) type {
const G = GenericMapper(config);
return struct {
const Self = @This();
cpu: *Cpu(config),
prg_ram: common.PrgRam,
prgs: common.Prgs,
chrs: common.Chrs,
mirroring: enum(u2) {
one_screen_lower = 0,
one_screen_upper = 1,
vertical = 2,
horizontal = 3,
},
last_write_cycle: usize = 0,
shift_register: u4 = 0,
write_count: u3 = 0,
prg_bank_mode: enum {
prg_switch_both,
prg_fix_first,
prg_fix_last,
} = .prg_fix_last,
chr_bank_mode: enum(u1) {
chr_switch_both = 0,
chr_switch_separate = 1,
} = .chr_switch_both,
prg_bank: u4 = 0,
chr_bank0: u5 = 0,
chr_bank1: u5 = 0,
pub fn initMem(
self: *Self,
allocator: *Allocator,
console: *Console(config),
info: *ines.RomInfo,
) Allocator.Error!void {
self.* = Self{
.cpu = &console.cpu,
.prg_ram = try common.PrgRam.init(allocator, true, info.has_sram),
.prgs = try common.Prgs.init(allocator, info.prg_rom),
.chrs = try common.Chrs.init(allocator, info.chr_rom),
.mirroring = @intToEnum(@TypeOf(self.mirroring), @enumToInt(info.mirroring)),
};
self.updatePrg();
self.updateChr();
}
pub fn deinitMem(generic: G, allocator: *Allocator) void {
const self = common.fromGeneric(Self, config, generic);
self.prg_ram.deinit(allocator);
self.prgs.deinit(allocator);
self.chrs.deinit(allocator);
allocator.destroy(self);
}
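/// Maps a nametable address to an offset into nametable memory
/// according to the currently selected mirroring mode.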
pub fn mirrorNametable(generic: G, addr: u16) u12 {
const self = common.fromGeneric(Self, config, generic);
return switch (self.mirroring) {
.one_screen_lower => @truncate(u12, addr & 0x3ff),
.one_screen_upper => @truncate(u12, 0x400 | (addr & 0x3ff)),
.vertical => @truncate(u12, addr & 0x7ff),
.horizontal => @truncate(u12, addr & 0xbff),
};
}
pub fn readPrg(generic: G, addr: u16) ?u8 {
const self = common.fromGeneric(Self, config, generic);
return switch (addr) {
0x4020...0x5fff => null,
0x6000...0x7fff => self.prg_ram.read(addr),
0x8000...0xffff => self.prgs.read(addr),
else => unreachable,
};
}
pub fn readChr(generic: G, addr: u16) u8 {
const self = common.fromGeneric(Self, config, generic);
return self.chrs.read(addr);
}
pub fn writePrg(generic: *G, addr: u16, val: u8) void {
const self = common.fromGeneric(Self, config, generic.*);
switch (addr) {
0x4020...0x5fff => {},
0x6000...0x7fff => self.prg_ram.write(addr, val),
0x8000...0xffff => self.writeRom(addr, val),
else => unreachable,
}
}
fn updatePrg(self: *Self) void {
switch (self.prg_bank_mode) {
.prg_switch_both => self.prgs.setConsecutiveBanks(0, 2, self.prg_bank & 0xe),
.prg_fix_first => {
self.prgs.setBank(0, 0);
self.prgs.setBank(1, self.prg_bank);
},
.prg_fix_last => {
self.prgs.setBank(0, self.prg_bank);
self.prgs.setBank(1, self.prgs.bankCount() - 1);
},
}
}
fn updateChr(self: *Self) void {
switch (self.chr_bank_mode) {
.chr_switch_both => {
self.chrs.setConsecutiveBanks(0, 2, self.chr_bank0 & 0x1e);
},
.chr_switch_separate => {
self.chrs.setBank(0, self.chr_bank0);
self.chrs.setBank(1, self.chr_bank1);
},
}
}
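/// MMC1 is programmed through a serial port: each write to $8000-$FFFF
/// shifts bit 0 into a 5-bit shift register (LSB first), and the fifth
/// write commits the accumulated value to the register selected by the
/// address range. A write with bit 7 set resets the shift register and
/// forces the fix-last PRG bank mode, and writes on consecutive CPU
/// cycles are ignored.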
fn writeRom(self: *Self, addr: u16, val: u8) void {
const temp = self.last_write_cycle;
self.last_write_cycle = self.cpu.cycles;
if (self.cpu.cycles == temp + 1) {
return;
}
if (flags.getMaskBool(u8, val, 0x80)) {
self.shift_register = 0;
self.write_count = 0;
self.prg_bank_mode = .prg_fix_last;
self.updatePrg();
return;
}
if (self.write_count != 4) {
self.shift_register = (self.shift_register >> 1) | (@as(u4, @truncate(u1, val)) << 3);
self.write_count += 1;
} else {
const final_val = @as(u5, self.shift_register) | (@as(u5, @truncate(u1, val)) << 4);
switch (addr) {
0x8000...0x9fff => {
self.mirroring = @intToEnum(@TypeOf(self.mirroring), @truncate(u2, final_val));
self.prg_bank_mode = switch (@truncate(u2, final_val >> 2)) {
0, 1 => .prg_switch_both,
2 => .prg_fix_first,
3 => .prg_fix_last,
};
self.chr_bank_mode = @intToEnum(@TypeOf(self.chr_bank_mode), @truncate(u1, final_val >> 4));
self.updatePrg();
self.updateChr();
},
0xa000...0xbfff => {
self.chr_bank0 = final_val;
self.updateChr();
},
0xc000...0xdfff => {
self.chr_bank1 = final_val;
self.updateChr();
},
0xe000...0xffff => {
self.prg_ram.enabled = final_val & 0x10 == 0;
self.prg_bank = @truncate(u4, final_val);
self.updatePrg();
},
else => unreachable,
}
self.shift_register = 0;
self.write_count = 0;
}
}
pub fn writeChr(generic: *G, addr: u16, val: u8) void {
const self = common.fromGeneric(Self, config, generic.*);
self.chrs.write(addr, val);
}
};
} | src/mapper/mmc1.zig |
const ser = @import("../../../lib.zig").ser;
/// Struct serialization interface.
///
/// Getty structs are only partially serialized by `getty.Serializer`
/// implementations due to the fact that there are many different ways to
/// iterate over and access the fields of a struct. As such, this interface is
/// provided so that serialization may be driven and completed by the user of a
/// serializer.
///
/// The interface specifies the following:
///
/// - How to serialize a field of a struct.
/// - How to finish serialization for a struct.
///
/// Parameters
/// ==========
///
/// Context
/// -------
///
/// This is the type that implements `getty.ser.Structure` (or a pointer to it).
///
/// Ok
/// --
///
/// The successful return type for all of `getty.ser.Structure`'s methods.
///
/// Error
/// -----
///
/// The error set used by all of `getty.ser.Structure`'s methods upon failure.
///
/// serializeField
/// --------------
///
/// A method that serializes a field of a struct.
///
/// end
/// ---
///
/// A method that ends the serialization of a struct.
///
/// Examples
/// ========
///
/// ```zig
/// const struct_sb = struct {
/// pub fn is(comptime T: type) bool {
/// return T == struct { x: i32, y: i32 };
/// }
///
/// pub fn serialize(value: anytype, serializer: anytype) !@TypeOf(serializer).Ok {
/// // Begin struct serialization.
/// const structure = (try serializer.serializeStruct("struct name", 2)).structure();
///
/// // Serialize struct fields.
/// try structure.serializeField("x", value.x);
/// try structure.serializeField("y", value.y);
///
/// // End struct serialization.
/// return try structure.end();
/// }
/// };
/// ```
pub fn Structure(
comptime Context: type,
comptime Ok: type,
comptime Error: type,
comptime serializeField: @TypeOf(struct {
fn f(self: Context, comptime key: []const u8, value: anytype) Error!void {
_ = self;
_ = key;
_ = value;
unreachable;
}
}.f),
comptime end: fn (Context) Error!Ok,
) type {
return struct {
pub const @"getty.ser.Structure" = struct {
context: Context,
const Self = @This();
/// Successful return type.
pub const Ok = Ok;
/// The error set used upon failure.
pub const Error = Error;
/// Serialize a struct field.
pub fn serializeField(self: Self, comptime key: []const u8, value: anytype) Error!void {
try serializeField(self.context, key, value);
}
/// Finish serializing a struct.
pub fn end(self: Self) Error!Ok {
return try end(self.context);
}
};
pub fn structure(self: Context) @"getty.ser.Structure" {
return .{ .context = self };
}
};
} | src/ser/interface/structure.zig |
const std = @import("std");
const utils = @import("utils");
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const print = utils.print;
const Fold = union(enum) {
Horizontal: i32,
Vertical: i32,
};
const Folding = struct {
points: [][2]i32,
folds: []Fold,
};
fn readInput(arena: *ArenaAllocator) anyerror!Folding {
var lines_it = try utils.iterateLinesInFile(&arena.allocator, "input.txt");
defer lines_it.deinit();
var points = try std.ArrayList([2]i32).initCapacity(&arena.allocator, 4096);
while (lines_it.next()) |line| {
if (line.len == 0) {
break;
}
var point = std.mem.tokenize(u8, line, ",");
try points.append(.{ try std.fmt.parseInt(i32, point.next().?, 10), try std.fmt.parseInt(i32, point.next().?, 10) });
}
var folds = try std.ArrayList(Fold).initCapacity(&arena.allocator, 4096);
while (lines_it.next()) |line| {
if (line.len == 0) {
break;
}
const fold_part = line["fold along ".len..];
var point = std.mem.tokenize(u8, fold_part, "=");
const axis = point.next().?;
const position = try std.fmt.parseInt(i32, point.next().?, 10);
const fold = switch (axis[0]) {
'x' => Fold{ .Vertical = position },
'y' => Fold{ .Horizontal = position },
else => unreachable,
};
try folds.append(fold);
}
return Folding{
.points = points.items,
.folds = folds.items,
};
}
fn pointLessThan(_: void, lhs: [2]i32, rhs: [2]i32) bool {
const dx = lhs[0] - rhs[0];
const dy = lhs[1] - rhs[1];
if (dy < 0) {
return true;
} else if (dy > 0) {
return false;
} else if (dx < 0) {
return true;
} else {
return false;
}
}
fn foldPoints(points: [][2]i32, fold: Fold) void {
for (points) |*point| {
var px = point[0];
var py = point[1];
switch (fold) {
Fold.Horizontal => |y| {
if (py > y) {
py = y - (py - y);
}
},
Fold.Vertical => |x| {
if (px > x) {
px = x - (px - x);
}
},
}
point[0] = px;
point[1] = py;
}
}
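// A small illustrative test (not part of the original solution): folding
// across the horizontal line y=7 mirrors points below the line upward and
// leaves points above it untouched.
test "foldPoints mirrors across a horizontal fold" {
    var points = [_][2]i32{ .{ 3, 10 }, .{ 1, 4 } };
    foldPoints(points[0..], Fold{ .Horizontal = 7 });
    std.debug.assert(points[0][1] == 4); // 7 - (10 - 7)
    std.debug.assert(points[1][1] == 4); // already above the fold line
}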
fn part1(folding: Folding) i32 {
foldPoints(folding.points, folding.folds[0]);
std.sort.sort([2]i32, folding.points, {}, pointLessThan);
var sum: i32 = 1;
var prev = folding.points[0];
for (folding.points) |p| {
if (!std.mem.eql(i32, &prev, &p)) {
sum += 1;
prev = p;
}
}
return sum;
}
fn part2(folding: Folding) void {
const max_row_length = 80;
for (folding.folds) |fold| {
foldPoints(folding.points, fold);
}
std.sort.sort([2]i32, folding.points, {}, pointLessThan);
const points = folding.points;
var minx = points[0][0];
var maxx = points[0][0];
var miny = points[0][1];
var maxy = points[0][1];
for (points) |p| {
minx = std.math.min(minx, p[0]);
maxx = std.math.max(maxx, p[0]);
miny = std.math.min(miny, p[1]);
maxy = std.math.max(maxy, p[1]);
}
if (maxx - minx > max_row_length) {
@panic("The paper is too wide");
}
print("Part 2:", .{});
var y: i32 = miny;
var row = std.mem.zeroes([max_row_length]u8);
var offset: usize = 0;
while (y <= maxy) : (y += 1) {
var x: i32 = minx;
var row_offset: usize = 0;
while (x <= maxx) : (x += 1) {
row[row_offset] = ' ';
for (points[offset..]) |p| {
if (p[0] > x or p[1] > y) {
break;
}
if (p[0] == x and p[1] == y) {
row[row_offset] = '@';
}
offset += 1;
}
row_offset += 1;
}
print("{s}", .{row[0..row_offset]});
}
}
pub fn main() anyerror!void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const part1_result = part1(try readInput(&arena));
print("Part 1: {d}", .{part1_result});
part2(try readInput(&arena));
} | day13/src/main.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const ValueTree = std.json.ValueTree;
const Value = std.json.Value;
const stdout = std.io.getStdOut().outStream();
const args = @import("args");
const BUFFSIZE = 2048;
var arena = ArenaAllocator.init(std.heap.page_allocator);
/// A JSON document that is either an array or an object.
const DocumentTag = enum {
array, object
};
const Document = union(DocumentTag) {
const Self = @This();
array: Value,
object: Value,
/// General initializer.
/// Initialize the Document with `Document.init(.array)` or
/// `Document.init(.object)`.
pub fn init(x: DocumentTag) Self {
switch (x) {
DocumentTag.array => return array_init(),
DocumentTag.object => return object_init(),
}
}
/// initialize as array document
fn array_init() Self {
var value = Value{ .Array = std.json.Array.init(&arena.allocator) };
return Document{ .array = value };
}
/// Initialize as object document
fn object_init() Self {
var value = Value{ .Object = std.json.ObjectMap.init(&arena.allocator) };
return Self{ .object = value };
}
/// Adds a new element to the document.
pub fn push_element(self: *Self, string: []const u8) !void {
    switch (self.*) {
        .array => try self.appendToArray(string),
        .object => try self.appendToObject(string),
    }
}
/// Prints the Document to the given outstream.
fn print(self: Self, outstream: anytype) !void {
    const outputOptions = std.json.StringifyOptions{ .whitespace = null };
    switch (self) {
        Self.array => |array| {
            _ = try std.json.stringify(array, outputOptions, outstream);
        },
        Self.object => |object| {
            _ = try std.json.stringify(object, outputOptions, outstream);
        },
    }
}
/// prints Document to stdout!
fn printStdOut(self: Self) !void {
try self.print(std.io.getStdOut().outStream());
}
/// String representation of the document
/// caller owns the string!
fn stringify(self: Self) ![]u8 {
var x = std.ArrayList(u8).init(&arena.allocator);
var stream = x.outStream();
_ = try self.print(stream);
return x.toOwnedSlice();
}
/// Adds a new element to the array document.
fn appendToArray(self: *Self, string: []const u8) !void {
var value = std.json.Value{ .String = string };
_ = try self.array.Array.append(value);
}
/// Adds a new element to the object document, parsed as a key=value (or key:value) pair.
fn appendToObject(self: *Self, keyValue: []const u8) !void {
var segments: std.mem.TokenIterator = std.mem.tokenize(keyValue, "=:");
var key: []const u8 = undefined;
var value: Value = undefined;
if (segments.next()) |k| {
key = k;
}
if (segments.next()) |v| {
const tree = try parseValue(v);
value = tree.root;
} else {
value = @as(Value, .Null);
}
_ = try self.object.Object.put(key, value);
return;
}
};
/// Parses the value part of a key/value pair. The value is first
/// interpreted as JSON; if that fails, it is treated as a plain
/// string value.
fn parseValue(value: []const u8) !ValueTree {
var p = std.json.Parser.init(&arena.allocator, false);
defer p.deinit();
if (p.parse(value)) |result| {
return result;
} else |err| switch (err) {
// parsing as a JSON top-level value failed,
// so fall back to treating the text as a string
error.InvalidTopLevel, error.InvalidLiteral => {
return ValueTree{
.arena = arena,
.root = Value{ .String = value },
};
},
// any other error should abort the parser.
else => {
std.debug.warn("\nError in parsing json value '{}': {}\n", .{ value, err });
return ValueTree{
.arena = arena,
.root = @as(Value, .Null),
};
},
}
}
fn readLines(allocator: *Allocator) !std.ArrayList([]u8) {
    const stdin = std.io.getStdIn().inStream();
    var array = std.ArrayList([]u8).init(allocator);
    var buffer: [BUFFSIZE]u8 = undefined;
    while (try stdin.readUntilDelimiterOrEof(buffer[0..], '\n')) |line| {
var copy = try std.mem.dupe(allocator, u8, line);
_ = try array.append(copy);
}
return array;
}
pub fn main() anyerror!void {
var cli = try args.parseForCurrentProcess(struct {
@"object": bool = false,
@"array": bool = false,
help: bool = false,
pub const shorthands = .{
.a = "array",
.o = "object",
.h = "help",
};
}, &arena.allocator);
defer cli.deinit();
if (cli.options.help) {
try stdout.print(
"{} [--help] [--object] [--array] [ARG]...\n",
.{std.fs.path.basename(cli.executable_name.?)},
);
try stdout.writeAll(@embedFile("cli.help.txt"));
return;
}
var document = if (cli.options.array) Document.init(.array) else Document.init(.object);
for (cli.positionals) |arg| {
_ = try document.push_element(arg);
}
try document.printStdOut();
}
const testing = std.testing;
const assert = std.debug.assert;
test "parse json" {
var p = std.json.Parser.init(&arena.allocator, false);
{
defer p.reset();
const s = "null";
var j = try p.parse(s);
assert(j.root == .Null);
}
{
defer p.reset();
const s =
\\""
;
var j = try p.parse(s);
// std.debug.warn("\n...{}\n", .{ @tagName(j.root) });
}
{
defer p.reset();
const s =
\\ "quoted=string"
;
var j = try p.parse(s);
assert(j.root == .String);
}
{
defer p.reset();
const s = "1231233";
var j = try p.parse(s);
assert(j.root == .Integer);
}
{
defer p.reset();
const s =
\\ "123"
;
var j = try p.parse(s);
assert(j.root == .String);
}
{
defer p.reset();
const s = "einfach nur text";
var j: ValueTree = undefined;
if (p.parse(s)) |result| {
j = result;
} else |err| switch (err) {
error.InvalidTopLevel => {
j = ValueTree{
.arena = arena,
.root = @as(Value, .Null),
};
}, // ok
else => return err,
}
assert(j.root == .Null);
}
}
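// A small illustrative test (not in the original file): exercises the
// Document wrapper itself rather than the low-level value parser.
test "array document collects pushed elements" {
    var doc = Document.init(.array);
    try doc.push_element("hello");
    try doc.push_element("world");
    const out = try doc.stringify();
    assert(out.len >= 2 and out[0] == '[' and out[out.len - 1] == ']');
}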
test "parse value part of object" {
{
const value =
\\ "ein einfacher String"
;
const result = try parseValue(value);
std.debug.warn("\n\nresult: {}\n", .{result.root});
}
{
const value =
\\ ohne quoates
;
const result = try parseValue(value);
std.debug.warn("\n\nresult: {}\n", .{result.root});
}
{
const value =
\\ 12343
;
const result = try parseValue(value);
std.debug.warn("\n\nresult: {}\n", .{result.root});
}
{
const value =
\\ { "key1": "value1",
\\ "key2": 123123 }
;
const result = try parseValue(value);
std.debug.warn("\n\nresult: {}\n", .{result.root});
}
{
const value =
\\ { "key1": "value1",
\\ "key2": 123123, }
;
const result = try parseValue(value);
std.debug.warn("\n\nresult: {}\n", .{result.root});
}
} | src/main.zig |
const std = @import("std");
const mem = std.mem;
const OtherSymbol = @This();
allocator: *mem.Allocator,
array: []bool,
lo: u21 = 166,
hi: u21 = 129994,
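/// Builds the lookup table: `array[i]` records whether codepoint `lo + i`
/// belongs to the Unicode Other_Symbol (So) category, with `lo` and `hi`
/// bounding the populated range.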
pub fn init(allocator: *mem.Allocator) !OtherSymbol {
var instance = OtherSymbol{
.allocator = allocator,
.array = try allocator.alloc(bool, 129829),
};
mem.set(bool, instance.array, false);
var index: u21 = 0;
instance.array[0] = true;
instance.array[3] = true;
instance.array[8] = true;
instance.array[10] = true;
instance.array[988] = true;
index = 1255;
while (index <= 1256) : (index += 1) {
instance.array[index] = true;
}
index = 1384;
while (index <= 1385) : (index += 1) {
instance.array[index] = true;
}
instance.array[1592] = true;
instance.array[1603] = true;
index = 1623;
while (index <= 1624) : (index += 1) {
instance.array[index] = true;
}
instance.array[1872] = true;
instance.array[2388] = true;
instance.array[2762] = true;
index = 2893;
while (index <= 2898) : (index += 1) {
instance.array[index] = true;
}
instance.array[2900] = true;
instance.array[3033] = true;
instance.array[3241] = true;
instance.array[3283] = true;
index = 3675;
while (index <= 3677) : (index += 1) {
instance.array[index] = true;
}
instance.array[3693] = true;
index = 3695;
while (index <= 3697) : (index += 1) {
instance.array[index] = true;
}
index = 3700;
while (index <= 3705) : (index += 1) {
instance.array[index] = true;
}
instance.array[3726] = true;
instance.array[3728] = true;
instance.array[3730] = true;
index = 3864;
while (index <= 3871) : (index += 1) {
instance.array[index] = true;
}
index = 3873;
while (index <= 3878) : (index += 1) {
instance.array[index] = true;
}
index = 3880;
while (index <= 3881) : (index += 1) {
instance.array[index] = true;
}
index = 3887;
while (index <= 3890) : (index += 1) {
instance.array[index] = true;
}
index = 4088;
while (index <= 4089) : (index += 1) {
instance.array[index] = true;
}
index = 4842;
while (index <= 4851) : (index += 1) {
instance.array[index] = true;
}
instance.array[5575] = true;
instance.array[6298] = true;
index = 6456;
while (index <= 6489) : (index += 1) {
instance.array[index] = true;
}
index = 6843;
while (index <= 6852) : (index += 1) {
instance.array[index] = true;
}
index = 6862;
while (index <= 6870) : (index += 1) {
instance.array[index] = true;
}
index = 8282;
while (index <= 8283) : (index += 1) {
instance.array[index] = true;
}
index = 8285;
while (index <= 8288) : (index += 1) {
instance.array[index] = true;
}
index = 8290;
while (index <= 8291) : (index += 1) {
instance.array[index] = true;
}
instance.array[8302] = true;
index = 8304;
while (index <= 8305) : (index += 1) {
instance.array[index] = true;
}
index = 8312;
while (index <= 8317) : (index += 1) {
instance.array[index] = true;
}
instance.array[8319] = true;
instance.array[8321] = true;
instance.array[8323] = true;
instance.array[8328] = true;
index = 8340;
while (index <= 8341) : (index += 1) {
instance.array[index] = true;
}
instance.array[8356] = true;
index = 8358;
while (index <= 8359) : (index += 1) {
instance.array[index] = true;
}
instance.array[8361] = true;
index = 8420;
while (index <= 8421) : (index += 1) {
instance.array[index] = true;
}
index = 8431;
while (index <= 8435) : (index += 1) {
instance.array[index] = true;
}
index = 8438;
while (index <= 8441) : (index += 1) {
instance.array[index] = true;
}
index = 8443;
while (index <= 8444) : (index += 1) {
instance.array[index] = true;
}
index = 8446;
while (index <= 8447) : (index += 1) {
instance.array[index] = true;
}
index = 8449;
while (index <= 8455) : (index += 1) {
instance.array[index] = true;
}
index = 8457;
while (index <= 8487) : (index += 1) {
instance.array[index] = true;
}
index = 8490;
while (index <= 8491) : (index += 1) {
instance.array[index] = true;
}
instance.array[8493] = true;
index = 8495;
while (index <= 8525) : (index += 1) {
instance.array[index] = true;
}
index = 8794;
while (index <= 8801) : (index += 1) {
instance.array[index] = true;
}
index = 8806;
while (index <= 8825) : (index += 1) {
instance.array[index] = true;
}
index = 8828;
while (index <= 8834) : (index += 1) {
instance.array[index] = true;
}
index = 8837;
while (index <= 8917) : (index += 1) {
instance.array[index] = true;
}
index = 8919;
while (index <= 8948) : (index += 1) {
instance.array[index] = true;
}
index = 8974;
while (index <= 9013) : (index += 1) {
instance.array[index] = true;
}
index = 9020;
while (index <= 9088) : (index += 1) {
instance.array[index] = true;
}
index = 9114;
while (index <= 9124) : (index += 1) {
instance.array[index] = true;
}
index = 9206;
while (index <= 9283) : (index += 1) {
instance.array[index] = true;
}
index = 9306;
while (index <= 9488) : (index += 1) {
instance.array[index] = true;
}
index = 9490;
while (index <= 9498) : (index += 1) {
instance.array[index] = true;
}
index = 9500;
while (index <= 9553) : (index += 1) {
instance.array[index] = true;
}
index = 9562;
while (index <= 9672) : (index += 1) {
instance.array[index] = true;
}
index = 9674;
while (index <= 9921) : (index += 1) {
instance.array[index] = true;
}
index = 9966;
while (index <= 10009) : (index += 1) {
instance.array[index] = true;
}
index = 10074;
while (index <= 10329) : (index += 1) {
instance.array[index] = true;
}
index = 10842;
while (index <= 10889) : (index += 1) {
instance.array[index] = true;
}
index = 10911;
while (index <= 10912) : (index += 1) {
instance.array[index] = true;
}
index = 10919;
while (index <= 10957) : (index += 1) {
instance.array[index] = true;
}
index = 10960;
while (index <= 10991) : (index += 1) {
instance.array[index] = true;
}
index = 10993;
while (index <= 11097) : (index += 1) {
instance.array[index] = true;
}
index = 11327;
while (index <= 11332) : (index += 1) {
instance.array[index] = true;
}
index = 11690;
while (index <= 11691) : (index += 1) {
instance.array[index] = true;
}
index = 11738;
while (index <= 11763) : (index += 1) {
instance.array[index] = true;
}
index = 11765;
while (index <= 11853) : (index += 1) {
instance.array[index] = true;
}
index = 11866;
while (index <= 12079) : (index += 1) {
instance.array[index] = true;
}
index = 12106;
while (index <= 12117) : (index += 1) {
instance.array[index] = true;
}
instance.array[12126] = true;
index = 12140;
while (index <= 12141) : (index += 1) {
instance.array[index] = true;
}
instance.array[12154] = true;
index = 12176;
while (index <= 12177) : (index += 1) {
instance.array[index] = true;
}
index = 12184;
while (index <= 12185) : (index += 1) {
instance.array[index] = true;
}
index = 12522;
while (index <= 12523) : (index += 1) {
instance.array[index] = true;
}
index = 12528;
while (index <= 12537) : (index += 1) {
instance.array[index] = true;
}
index = 12570;
while (index <= 12605) : (index += 1) {
instance.array[index] = true;
}
index = 12634;
while (index <= 12664) : (index += 1) {
instance.array[index] = true;
}
index = 12676;
while (index <= 12705) : (index += 1) {
instance.array[index] = true;
}
instance.array[12714] = true;
index = 12730;
while (index <= 12761) : (index += 1) {
instance.array[index] = true;
}
index = 12772;
while (index <= 12810) : (index += 1) {
instance.array[index] = true;
}
index = 12826;
while (index <= 13145) : (index += 1) {
instance.array[index] = true;
}
index = 19738;
while (index <= 19801) : (index += 1) {
instance.array[index] = true;
}
index = 41962;
while (index <= 42016) : (index += 1) {
instance.array[index] = true;
}
index = 42882;
while (index <= 42885) : (index += 1) {
instance.array[index] = true;
}
index = 42896;
while (index <= 42897) : (index += 1) {
instance.array[index] = true;
}
instance.array[42899] = true;
index = 43473;
while (index <= 43475) : (index += 1) {
instance.array[index] = true;
}
instance.array[64855] = true;
instance.array[65342] = true;
instance.array[65346] = true;
index = 65351;
while (index <= 65352) : (index += 1) {
instance.array[index] = true;
}
index = 65366;
while (index <= 65367) : (index += 1) {
instance.array[index] = true;
}
index = 65681;
while (index <= 65689) : (index += 1) {
instance.array[index] = true;
}
index = 65747;
while (index <= 65763) : (index += 1) {
instance.array[index] = true;
}
index = 65766;
while (index <= 65768) : (index += 1) {
instance.array[index] = true;
}
index = 65770;
while (index <= 65782) : (index += 1) {
instance.array[index] = true;
}
instance.array[65786] = true;
index = 65834;
while (index <= 65878) : (index += 1) {
instance.array[index] = true;
}
index = 67537;
while (index <= 67538) : (index += 1) {
instance.array[index] = true;
}
instance.array[68130] = true;
instance.array[71321] = true;
index = 73519;
while (index <= 73526) : (index += 1) {
instance.array[index] = true;
}
index = 73531;
while (index <= 73547) : (index += 1) {
instance.array[index] = true;
}
index = 92822;
while (index <= 92825) : (index += 1) {
instance.array[index] = true;
}
instance.array[92831] = true;
instance.array[113654] = true;
index = 118618;
while (index <= 118863) : (index += 1) {
instance.array[index] = true;
}
index = 118874;
while (index <= 118912) : (index += 1) {
instance.array[index] = true;
}
index = 118915;
while (index <= 118974) : (index += 1) {
instance.array[index] = true;
}
index = 118980;
while (index <= 118982) : (index += 1) {
instance.array[index] = true;
}
index = 119005;
while (index <= 119006) : (index += 1) {
instance.array[index] = true;
}
index = 119014;
while (index <= 119043) : (index += 1) {
instance.array[index] = true;
}
index = 119048;
while (index <= 119106) : (index += 1) {
instance.array[index] = true;
}
index = 119130;
while (index <= 119195) : (index += 1) {
instance.array[index] = true;
}
instance.array[119199] = true;
index = 119386;
while (index <= 119472) : (index += 1) {
instance.array[index] = true;
}
index = 120666;
while (index <= 121177) : (index += 1) {
instance.array[index] = true;
}
index = 121233;
while (index <= 121236) : (index += 1) {
instance.array[index] = true;
}
index = 121287;
while (index <= 121294) : (index += 1) {
instance.array[index] = true;
}
index = 121296;
while (index <= 121309) : (index += 1) {
instance.array[index] = true;
}
index = 121311;
while (index <= 121312) : (index += 1) {
instance.array[index] = true;
}
instance.array[123049] = true;
instance.array[125958] = true;
instance.array[126088] = true;
index = 126810;
while (index <= 126853) : (index += 1) {
instance.array[index] = true;
}
index = 126858;
while (index <= 126957) : (index += 1) {
instance.array[index] = true;
}
index = 126970;
while (index <= 126984) : (index += 1) {
instance.array[index] = true;
}
index = 126987;
while (index <= 127001) : (index += 1) {
instance.array[index] = true;
}
index = 127003;
while (index <= 127017) : (index += 1) {
instance.array[index] = true;
}
index = 127019;
while (index <= 127055) : (index += 1) {
instance.array[index] = true;
}
index = 127079;
while (index <= 127239) : (index += 1) {
instance.array[index] = true;
}
index = 127296;
while (index <= 127324) : (index += 1) {
instance.array[index] = true;
}
index = 127338;
while (index <= 127381) : (index += 1) {
instance.array[index] = true;
}
index = 127386;
while (index <= 127394) : (index += 1) {
instance.array[index] = true;
}
index = 127402;
while (index <= 127403) : (index += 1) {
instance.array[index] = true;
}
index = 127418;
while (index <= 127423) : (index += 1) {
instance.array[index] = true;
}
index = 127578;
while (index <= 127828) : (index += 1) {
instance.array[index] = true;
}
index = 127834;
while (index <= 128561) : (index += 1) {
instance.array[index] = true;
}
index = 128570;
while (index <= 128582) : (index += 1) {
instance.array[index] = true;
}
index = 128586;
while (index <= 128598) : (index += 1) {
instance.array[index] = true;
}
index = 128602;
while (index <= 128717) : (index += 1) {
instance.array[index] = true;
}
index = 128730;
while (index <= 128818) : (index += 1) {
instance.array[index] = true;
}
index = 128826;
while (index <= 128837) : (index += 1) {
instance.array[index] = true;
}
index = 128858;
while (index <= 128869) : (index += 1) {
instance.array[index] = true;
}
index = 128874;
while (index <= 128929) : (index += 1) {
instance.array[index] = true;
}
index = 128938;
while (index <= 128947) : (index += 1) {
instance.array[index] = true;
}
index = 128954;
while (index <= 128993) : (index += 1) {
instance.array[index] = true;
}
index = 129002;
while (index <= 129031) : (index += 1) {
instance.array[index] = true;
}
index = 129034;
while (index <= 129035) : (index += 1) {
instance.array[index] = true;
}
index = 129114;
while (index <= 129234) : (index += 1) {
instance.array[index] = true;
}
index = 129236;
while (index <= 129317) : (index += 1) {
instance.array[index] = true;
}
index = 129319;
while (index <= 129453) : (index += 1) {
instance.array[index] = true;
}
index = 129466;
while (index <= 129479) : (index += 1) {
instance.array[index] = true;
}
index = 129482;
while (index <= 129486) : (index += 1) {
instance.array[index] = true;
}
index = 129490;
while (index <= 129492) : (index += 1) {
instance.array[index] = true;
}
index = 129498;
while (index <= 129504) : (index += 1) {
instance.array[index] = true;
}
index = 129514;
while (index <= 129538) : (index += 1) {
instance.array[index] = true;
}
index = 129546;
while (index <= 129552) : (index += 1) {
instance.array[index] = true;
}
index = 129562;
while (index <= 129564) : (index += 1) {
instance.array[index] = true;
}
index = 129578;
while (index <= 129584) : (index += 1) {
instance.array[index] = true;
}
index = 129626;
while (index <= 129772) : (index += 1) {
instance.array[index] = true;
}
index = 129774;
while (index <= 129828) : (index += 1) {
instance.array[index] = true;
}
// Placeholder: 0. Struct name, 1. Code point kind
return instance;
}
pub fn deinit(self: *OtherSymbol) void {
self.allocator.free(self.array);
}
// isOtherSymbol checks if cp is of the kind Other_Symbol.
pub fn isOtherSymbol(self: OtherSymbol, cp: u21) bool {
if (cp < self.lo or cp > self.hi) return false;
const index = cp - self.lo;
return if (index >= self.array.len) false else self.array[index];
} | src/components/autogen/DerivedGeneralCategory/OtherSymbol.zig |
const std = @import("std");
const Request = @import("request.zig").Request;
const Response = @import("response.zig").Response;
const MimeType = @import("mime_type.zig").MimeType;
const url = @import("url.zig");
const Allocator = std.mem.Allocator;
const fs = std.fs;
pub const FileServer = @This();
var dir: fs.Dir = undefined;
var alloc: *std.mem.Allocator = undefined;
var initialized: bool = false;
var base_path: ?[]const u8 = null;
pub const Config = struct {
dir_path: []const u8,
base_path: ?[]const u8 = null,
};
/// Sets the directory of the file server to the given path
/// Note that this function must be called before passing the serve
/// function to the `Server`.
///
/// deinit() must be called to close the dir handler
pub fn init(allocator: *Allocator, config: Config) fs.Dir.OpenError!void {
dir = try fs.cwd().openDir(config.dir_path, .{});
alloc = allocator;
initialized = true;
base_path = config.base_path;
}
/// Closes the dir handler
pub fn deinit() void {
dir.close();
}
/// Serves a file based on the path of the request
pub fn serve(response: *Response, request: Request) (Response.Error || error{NotAFile} || std.os.SendFileError)!void {
std.debug.assert(initialized);
const index = "index.html";
var path = url.sanitize(request.url.path);
if (std.mem.endsWith(u8, path, index)) {
return localRedirect(response, request, "./", alloc);
}
if (base_path) |b_path| {
if (std.mem.startsWith(u8, path[1..], b_path)) {
path = path[b_path.len + 1 ..];
if (path.len > 0 and path[0] == '/') path = path[1..];
}
} else if (path[0] == '/') path = path[1..];
    // If the sanitized path still starts with '..' it would escape the served
    // root directory, so reject it.
if (std.mem.startsWith(u8, path, "..")) return response.notFound();
    var file = dir.openFile(path, .{}) catch {
return response.notFound();
};
defer file.close();
serveFile(response, request.url.path, file) catch |err| switch (err) {
error.NotAFile => return response.notFound(),
else => return err,
};
}
/// Notifies the client with a Moved Permanently header
/// The memory allocated for the new location path is freed before returning
fn localRedirect(
response: *Response,
request: Request,
path: []const u8,
allocator: *Allocator,
) (Response.Error)!void {
const new_path = try std.mem.concat(allocator, u8, &[_][]const u8{
path,
request.url.raw_query,
});
defer allocator.free(new_path);
try response.headers.put("Location", new_path);
try response.writeHeader(.moved_permanently);
}
/// Serves a file to the client
/// Opening and closing of the file must be handled by the user
///
/// NOTE: This is a low level implementation utilizing std.os.sendFile()
/// and accesses the response writer's internal socket handle. This does not allow for setting
/// any other headers and/or status codes. Use response.write() for that
pub fn serveFile(
response: *Response,
file_name: []const u8,
file: fs.File,
) (Response.Error || error{NotAFile} || std.os.SendFileError)!void {
var stat = try file.stat();
if (stat.kind != .File)
return error.NotAFile;
response.is_flushed = true;
var stream = response.writer();
const len = stat.size;
// write status line
try stream.writeAll("HTTP/1.1 200 OK\r\n");
//write headers
for (response.headers.items()) |header| {
try stream.print("{s}: {s}\r\n", .{ header.key, header.value });
}
try stream.print("Content-Length: {d}\r\n", .{len});
try stream.print("Content-Type: {s}\r\n", .{MimeType.fromFileName(file_name).toType()});
if (!std.io.is_async) {
try stream.writeAll("Connection: close\r\n");
}
    // Carriage return + line feed after the headers tells clients where the headers end and the body starts
try stream.writeAll("\r\n");
try response.flush();
const out = response.buffered_writer.unbuffered_writer.context.handle;
var remaining: u64 = len;
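    // sendfile may transfer fewer bytes than requested, so keep calling it
    // with the current offset until the whole file has been sent.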
while (remaining > 0) {
remaining -= try std.os.sendfile(out, file.handle, len - remaining, remaining, &.{}, &.{}, 0);
}
} | src/fs.zig |
const wasm4 = @import("wasm4.zig");
const w4 = @import("wrapper4.zig");
const std = @import("std");
const smiley_data = [8]u8{
0b11000011,
0b10000001,
0b00100100,
0b00100100,
0b00000000,
0b00100100,
0b10011001,
0b11000011,
};
var pitch: i16 = 0;
var duty_index: u2 = 0;
var channel_index: u2 = 0;
var last_gamepad = w4.Gamepad.none_pressed;
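// Computes the buttons that changed from released to pressed between `previous`
// and `now`, i.e. a rising-edge ("new press") mask.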
fn gamepad_delta(now: w4.Gamepad, previous: w4.Gamepad) w4.Gamepad {
return .{
.single_buttons = .{
now.single_buttons[0] and !previous.single_buttons[0],
now.single_buttons[1] and !previous.single_buttons[1],
},
.direction_buttons = .{
.left = now.direction_buttons.left and !previous.direction_buttons.left,
.right = now.direction_buttons.right and !previous.direction_buttons.right,
.up = now.direction_buttons.up and !previous.direction_buttons.up,
.down = now.direction_buttons.down and !previous.direction_buttons.down,
},
};
}
pub fn TextScratch(comptime size_bytes: usize) type {
return struct {
data: [size_bytes]u8,
pub fn init() @This() {
return .{ .data = undefined };
}
pub fn bufPrint(self: *@This(), comptime format_string: []const u8, argsTuple: anytype) ![]u8 {
return try std.fmt.bufPrint(&self.data, format_string, argsTuple);
}
pub fn render(self: *@This(), top_left: w4.Coordinate, color: w4.DrawColor, comptime format_string: []const u8, argsTuple: anytype) !void {
const text = try self.bufPrint(format_string, argsTuple);
w4.textUtf8(text, top_left, color);
}
pub fn retainedColorRender(self: *@This(), top_left: w4.Coordinate, comptime format_string: []const u8, argsTuple: anytype) !void {
const text = try self.bufPrint(format_string, argsTuple);
w4.retained_colors.textUtf8(text, top_left);
}
};
}
fn pr_test_update(gamepad: w4.Gamepad) void {
const new_presses: w4.Gamepad = gamepad_delta(gamepad, last_gamepad);
const vdelta = @as(i2, @boolToInt(new_presses.direction_buttons.up)) -
@as(i2, @boolToInt(new_presses.direction_buttons.down));
pitch += vdelta;
const hdelta = @as(i2, @boolToInt(new_presses.direction_buttons.right)) -
@as(i2, @boolToInt(new_presses.direction_buttons.left and !last_gamepad.direction_buttons.left));
duty_index +%= @bitCast(u2, hdelta);
channel_index +%= @boolToInt(new_presses.single_buttons[1]);
var text_scratch = TextScratch(1 << 8).init();
text_scratch.render(.{ .x = 10, .y = 40 }, 3, "pitch (^/v): {d}", .{pitch}) catch unreachable;
text_scratch.render(.{ .x = 10, .y = 50 }, 3, "mode (</>): {d}", .{duty_index}) catch unreachable;
text_scratch.render(.{ .x = 10, .y = 60 }, 3, "channel (Z/Y): {d}", .{channel_index}) catch unreachable;
if (gamepad.single_buttons[0]) {
w4.draw_colors.set(0, 2);
const base_freq = @floatToInt(u16, 256.0 * (std.math.pow(f32, 2.0, @intToFloat(f32, pitch) / 12)));
const instrument = switch (channel_index) {
0, 1 => instrument: {
const duty = @intToEnum(w4.Instrument.PulseDuty, duty_index);
break :instrument switch (channel_index) {
else => unreachable,
0 => break :instrument w4.Instrument{ .pulse_0 = duty },
1 => break :instrument w4.Instrument{ .pulse_1 = duty },
};
},
2 => w4.Instrument{ .triangle = {} },
3 => w4.Instrument{ .noise = {} },
};
w4.tone(.{ .start = base_freq, .end = base_freq }, w4.Adsr.init(0, 0, 2, 0), @as(u32, 40), instrument) catch unreachable;
}
}
const Harmony = enum {
diminished,
minor_b5,
minor_6,
minor,
major7,
major,
augmented,
augmented7,
pub fn next(self: Harmony) Harmony {
switch (self) {
.diminished => return .minor_b5,
.minor_b5 => return .minor_6,
.minor_6 => return .minor,
.minor => return .major7,
.major7 => return .major,
.major => return .augmented,
.augmented => return .augmented7,
.augmented7 => return .diminished,
}
}
pub fn previous(self: Harmony) Harmony {
switch (self) {
.diminished => return .augmented7,
.minor_b5 => return .diminished,
.minor_6 => return .minor_b5,
.minor => return .minor_6,
.major7 => return .minor,
.major => return .major7,
.augmented => return .major,
.augmented7 => return .augmented,
}
}
pub fn shortName(self: Harmony) []const u8 {
switch (self) {
.diminished => return "dim",
.minor_b5 => return "m_b5",
.minor_6 => return "m_6",
.minor => return "m",
.major7 => return "maj7",
.major => return "maj",
.augmented => return "aug",
.augmented7 => return "aug7",
}
}
};
var harmony = Harmony.minor;
const Positioning = enum {
spread,
narrow,
pub fn next(self: Positioning) Positioning {
switch (self) {
.spread => return .narrow,
.narrow => return .spread,
}
}
};
var positioning: Positioning = .spread;
var moved_b: bool = false;
var frame_counter: usize = 0;
var from_minor_harmony: bool = false;
fn cadences_update(gamepad: w4.Gamepad) void {
const new_presses: w4.Gamepad = gamepad_delta(gamepad, last_gamepad);
const vdelta = @as(i2, @boolToInt(new_presses.direction_buttons.up)) -
@as(i2, @boolToInt(new_presses.direction_buttons.down));
const hdelta = @as(i2, @boolToInt(new_presses.direction_buttons.right)) -
@as(i2, @boolToInt(new_presses.direction_buttons.left and !last_gamepad.direction_buttons.left));
const prev_harmony = harmony.previous();
const next_harmony = harmony.next();
switch (hdelta) {
else => unreachable,
0 => {},
1 => {
from_minor_harmony = true;
harmony = harmony.next();
},
-1 => {
from_minor_harmony = false;
harmony = harmony.previous();
},
}
const cadence_mode = gamepad.single_buttons[1];
if (!gamepad.single_buttons[1]) {
pitch += vdelta;
if (last_gamepad.single_buttons[1] and !moved_b) {
positioning = positioning.next();
moved_b = true;
}
} else {
if (vdelta != 0) {
positioning = positioning.next();
}
moved_b = moved_b or (vdelta != 0);
if (new_presses.single_buttons[1]) {
moved_b = false;
}
switch (vdelta) {
else => unreachable,
0 => {},
-1 => pitch -= @as(i16, if (positioning == .spread) 7 else 5),
1 => pitch += @as(i16, if (positioning == .spread) 7 else 5),
}
}
w4.draw_colors.set(0, 3);
var text_scratch = TextScratch(1 << 8).init();
// note: inline-if in tuples (f.e. `.{if(a) "asdf" else "jkl;"}`) are currently broken, see https://github.com/ziglang/zig/issues/4491
text_scratch.retainedColorRender(.{ .x = 10, .y = 40 }, "pitch (^/v): {d}", .{pitch}) catch unreachable;
const cadence_mode_text = if (cadence_mode) @as([]const u8, "on") else @as([]const u8, "off");
text_scratch.retainedColorRender(.{ .x = 10, .y = 30 }, "cadence jump: {s}", .{cadence_mode_text}) catch unreachable;
const pos_text = switch (positioning) {
.narrow => @as([]const u8, "narrow"),
.spread => @as([]const u8, "spread"),
};
text_scratch.retainedColorRender(.{ .x = 10, .y = 50 }, "pos: {s}", .{pos_text}) catch unreachable;
text_scratch.retainedColorRender(.{ .x = 10, .y = 65 }, " harmony ", .{}) catch unreachable;
text_scratch.retainedColorRender(.{ .x = 5, .y = 75 }, "{s:4} < {s:4} > {s:4}", .{ prev_harmony.shortName(), harmony.shortName(), next_harmony.shortName() }) catch unreachable;
const suggestion_text: ?[]const u8 = suggestion_text: {
switch (harmony) {
else => break :suggestion_text null, // no suggestion
.minor, .major7, .major => break :suggestion_text @as([]const u8, switch (positioning) { // suggest the closest cadence
.spread => switch (harmony) {
else => unreachable,
.minor => "cadence jump ^>", //suggest cadence
.major7 => {
if ((@mod(pitch, 6)) < 2) { //suggest resolution
break :suggestion_text @as([]const u8, if (from_minor_harmony) "normal step ^>" else "normal step ^<");
} else { //suggest cadence
break :suggestion_text @as([]const u8, if (from_minor_harmony) "cadence jump ^<" else "cadence jump ^>");
}
},
.major => "cadence jump ^<", //suggest cadence
},
.narrow => switch (harmony) {
else => unreachable,
.minor => "cadence jump v>", //suggest cadence
.major7 => {
if ((@mod(pitch, 6)) < 2) { //suggest resolution
break :suggestion_text @as([]const u8, if (from_minor_harmony) "normal step v>" else "normal step v<");
} else { //suggest cadence
break :suggestion_text @as([]const u8, if (from_minor_harmony) "cadence jump v<" else "cadence jump v>");
}
},
.major => "cadence jump v<",
},
}),
}
};
if (suggestion_text) |suggested| {
w4.draw_colors.set(0, 1);
text_scratch.retainedColorRender(.{ .x = 10, .y = 90 }, "suggestion:", .{}) catch unreachable;
text_scratch.retainedColorRender(.{ .x = 10, .y = 100 }, "{s}", .{suggested}) catch unreachable;
}
if (gamepad.single_buttons[0]) {
const freqFromPitch = struct {
fn freqFromPitch(input_pitch: i16) u16 {
return @floatToInt(u16, 230.0 * (std.math.pow(f32, 2.0, @intToFloat(f32, input_pitch) / 12)));
}
}.freqFromPitch;
const pitch_spread = 12 * @as(u4, @boolToInt(positioning == .spread));
const base_freq = freqFromPitch(pitch);
const support_pitch_1 = switch (harmony) {
.diminished, .minor_b5, .minor_6, .minor => pitch + 3,
.major, .major7, .augmented, .augmented7 => pitch + 4,
};
const support_freq_1 = freqFromPitch(support_pitch_1 + pitch_spread);
_ = support_freq_1;
const support_pitch_2 = switch (harmony) {
.diminished, .minor_b5 => pitch + 6,
.minor_6 => pitch + 8,
.minor, .major7, .major => pitch + 7,
.augmented, .augmented7 => pitch + 8,
};
const support_freq_2 = freqFromPitch(support_pitch_2 + pitch_spread);
_ = support_freq_2;
const support_pitch_3 = switch (harmony) {
.diminished => pitch + 9,
.minor_b5, .minor, .minor_6, .major7, .augmented7 => pitch + 10,
.major, .augmented => pitch + 11,
};
const support_freq_3 = freqFromPitch(support_pitch_3 + pitch_spread);
const support_pitch_4 = switch (harmony) {
.diminished => pitch + 14,
.minor_b5, .minor, .minor_6, .major7, .major, .augmented, .augmented7 => pitch + 14,
};
const support_freq_4 = freqFromPitch(support_pitch_4);
const volume = 40;
const support_freq_a = if (frame_counter % 4 < 2) support_freq_3 else support_freq_1;
const support_freq_b = if ((frame_counter +% 1) % 4 < 2) support_freq_4 else support_freq_2;
const support_volume = 22;
w4.tone(.{ .start = base_freq, .end = base_freq }, w4.Adsr.init(0, 0, 2, 0), volume, .{ .triangle = {} }) catch unreachable;
w4.tone(.{ .start = support_freq_a, .end = support_freq_a }, w4.Adsr.init(0, 0, 2, 0), support_volume, .{ .pulse_0 = .quarter }) catch unreachable;
w4.tone(.{ .start = support_freq_b, .end = support_freq_b }, w4.Adsr.init(0, 0, 2, 0), support_volume, .{ .pulse_1 = .half }) catch unreachable;
}
}
export fn update() void {
defer frame_counter += 1;
//w4.draw_colors.set(0, 1);
w4.text("Hello from Zig!", .{ .x = 10, .y = 10 }, 1);
const gamepad = w4.gamepads[0].get();
defer last_gamepad = gamepad;
//pr_test_update(gamepad);
cadences_update(gamepad);
w4.draw_colors.set(0, 2 + @as(u2, @boolToInt(gamepad.single_buttons[0])));
w4.retained_colors.blitFixed(.one, .{ .width = 8, .height = 8 }, smiley_data, .{ .x = 76, .y = 116 }, w4.BlitEffectFlags.none);
w4.retained_colors.text("Press X to bleep!", .{ .x = 10, .y = 130 });
} | src/main.zig |
const std = @import("std");
const states = @import("state.zig");
const State = states.State;
const ScoreMap = std.StringHashMap(f32);
fn doCount(haystack: []u8, needle: []u8) usize {
    // Count (possibly overlapping) occurrences of needle in haystack.
    var count: usize = 0;
    var idx: usize = 0;
    while (std.mem.indexOfPos(u8, haystack, idx, needle)) |found| {
        count += 1;
        idx = found + 1;
    }
    return count;
}
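// Small sanity check for doCount, added for illustration; it uses the
// non-error-returning std.testing.expect found elsewhere in this codebase.
test "doCount counts occurrences" {
    var haystack = "zig is zig".*;
    var needle = "zig".*;
    std.testing.expect(doCount(&haystack, &needle) == 2);
}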
fn contains(haystack: []const u8, needle: []const u8) bool {
var idx = std.mem.indexOf(u8, haystack, needle);
return idx != null;
}
fn toLower(allocator: *std.mem.Allocator, data: []const u8) ![]u8 {
var out = try allocator.alloc(u8, data.len);
for (data) |byte, idx| {
out[idx] = std.ascii.toLower(byte);
}
return out;
}
fn compareFunc(kv1: ScoreMap.KV, kv2: ScoreMap.KV) bool {
var kv1_score = (kv1.value + @intToFloat(f32, kv1.key.len));
var kv2_score = (kv2.value + @intToFloat(f32, kv2.key.len));
return kv1_score < kv2_score;
}
pub fn doSearch(state: *State, unprep_term: []u8) !void {
// first step is lowercasing the given search term
var res = ScoreMap.init(state.allocator);
defer res.deinit();
var kvs = std.ArrayList(ScoreMap.KV).init(state.allocator);
defer kvs.deinit();
var search_term = try toLower(state.allocator, unprep_term);
defer state.allocator.free(search_term);
// port of https://github.com/lnyaa/elixir-docsearch/blob/master/server.py#L17
var it = state.map.iterator();
while (it.next()) |kv| {
var score: f32 = 0.0;
var count = doCount(kv.value, search_term);
score = @intToFloat(f32, count) / @as(f32, 50);
var key_lower = try toLower(state.allocator, kv.key);
defer state.allocator.free(key_lower);
var idx_opt = std.mem.indexOf(u8, key_lower, search_term);
if (idx_opt) |idx| {
score += std.math.min(
@intToFloat(f32, key_lower.len) / @intToFloat(f32, idx),
10,
);
}
score = std.math.min(score, 1);
score = std.math.floor(score * 100) / 100;
if (score > 0.05) {
var kv_score = try res.getOrPutValue(kv.key, score);
try kvs.append(kv_score.*);
}
}
var kvs_slice = kvs.toSlice();
std.sort.sort(ScoreMap.KV, kvs_slice, compareFunc);
var stdout_file = std.io.getStdOut();
const stdout = stdout_file.outStream();
    if (kvs_slice.len > 15) kvs_slice = kvs_slice[0..15];
for (kvs_slice) |kv| {
var kv_opt = state.map.get(kv.key);
if (kv_opt) |kv_state| {
if (kv_state.value.len > 0) {
try stdout.print("{}:\n\t{}\n---\n", .{ kv.key, kv_state.value });
} else {
try stdout.print("{}\n---\n", .{kv.key});
}
}
}
} | src/search.zig |
const std = @import("std");
pub const Type = enum {
@"void",
number,
string,
boolean,
array,
object,
pub fn format(value: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
        try writer.writeAll(@tagName(value));
}
};
pub const TypeSet = struct {
const Self = @This();
pub const empty = Self{
.@"void" = false,
.number = false,
.string = false,
.boolean = false,
.array = false,
.object = false,
};
pub const any = Self{
.@"void" = true,
.number = true,
.string = true,
.boolean = true,
.array = true,
.object = true,
};
@"void": bool,
number: bool,
string: bool,
boolean: bool,
array: bool,
object: bool,
pub fn from(value_type: Type) Self {
return Self{
.@"void" = (value_type == .@"void"),
.number = (value_type == .number),
.string = (value_type == .string),
.boolean = (value_type == .boolean),
.array = (value_type == .array),
.object = (value_type == .object),
};
}
pub fn init(list: anytype) Self {
var set = TypeSet.empty;
inline for (list) |item| {
set = set.@"union"(from(item));
}
return set;
}
pub fn contains(self: Self, item: Type) bool {
return switch (item) {
.@"void" => self.@"void",
.number => self.number,
.string => self.string,
.boolean => self.boolean,
.array => self.array,
.object => self.object,
};
}
/// Returns a type set that only contains all types that are contained in both parameters.
pub fn intersection(a: Self, b: Self) Self {
var result: Self = undefined;
inline for (std.meta.fields(Self)) |fld| {
@field(result, fld.name) = @field(a, fld.name) and @field(b, fld.name);
}
return result;
}
/// Returns a type set that contains all types that are contained in any of the parameters.
pub fn @"union"(a: Self, b: Self) Self {
var result: Self = undefined;
inline for (std.meta.fields(Self)) |fld| {
@field(result, fld.name) = @field(a, fld.name) or @field(b, fld.name);
}
return result;
}
pub fn isEmpty(self: Self) bool {
inline for (std.meta.fields(Self)) |fld| {
if (@field(self, fld.name))
return false;
}
return true;
}
pub fn isAny(self: Self) bool {
inline for (std.meta.fields(Self)) |fld| {
if (!@field(self, fld.name))
return false;
}
return true;
}
/// Tests if the type set contains at least one common type.
pub fn areCompatible(a: Self, b: Self) bool {
return !intersection(a, b).isEmpty();
}
pub fn format(value: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
if (value.isEmpty()) {
try writer.writeAll("none");
} else if (value.isAny()) {
try writer.writeAll("any");
} else {
var separate = false;
inline for (std.meta.fields(Self)) |fld| {
if (@field(value, fld.name)) {
if (separate) {
try writer.writeAll("|");
}
separate = true;
try writer.writeAll(fld.name);
}
}
}
}
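    // A short usage sketch added for illustration. It follows the
    // non-error-returning std.testing.expect used elsewhere in this codebase.
    test "TypeSet composition sketch" {
        const scalars = init(.{ Type.number, Type.boolean });
        std.testing.expect(scalars.contains(.number));
        std.testing.expect(!scalars.contains(.array));
        std.testing.expect(scalars.areCompatible(from(.boolean)));
        std.testing.expect(intersection(scalars, from(.string)).isEmpty());
    }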
}; | src/library/compiler/typeset.zig |
const assert = std.debug.assert;
const builtin = @import("builtin");
const expect = std.testing.expect;
/// Many reader, many writer, non-allocating, thread-safe
/// Uses a spinlock to protect push() and pop()
/// When building in single threaded mode, this is a simple linked list.
pub fn Stack(comptime T: type) type {
return struct {
root: ?*Node,
lock: @TypeOf(lock_init),
const lock_init = if (builtin.single_threaded) {} else false;
pub const Self = @This();
pub const Node = struct {
next: ?*Node,
data: T,
};
pub fn init() Self {
return Self{
.root = null,
.lock = lock_init,
};
}
        /// Push operation that only succeeds if the stack is currently empty.
        /// Returns null on success; otherwise returns the node that already
        /// occupies the top of the stack and leaves the stack unchanged.
pub fn pushFirst(self: *Self, node: *Node) ?*Node {
node.next = null;
return @cmpxchgStrong(?*Node, &self.root, null, node, .SeqCst, .SeqCst);
}
pub fn push(self: *Self, node: *Node) void {
if (builtin.single_threaded) {
node.next = self.root;
self.root = node;
} else {
while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {}
defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst));
node.next = self.root;
self.root = node;
}
}
pub fn pop(self: *Self) ?*Node {
if (builtin.single_threaded) {
const root = self.root orelse return null;
self.root = root.next;
return root;
} else {
while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {}
defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst));
const root = self.root orelse return null;
self.root = root.next;
return root;
}
}
pub fn isEmpty(self: *Self) bool {
return @atomicLoad(?*Node, &self.root, .SeqCst) == null;
}
};
}
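// Minimal usage sketch (illustrative only; node storage is managed by the
// caller, as in the test below):
//
//     var stack = Stack(i32).init();
//     var node = Stack(i32).Node{ .next = undefined, .data = 42 };
//     stack.push(&node);
//     const popped = stack.pop().?; // popped.data == 42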
const std = @import("../std.zig");
const Context = struct {
allocator: *std.mem.Allocator,
stack: *Stack(i32),
put_sum: isize,
get_sum: isize,
get_count: usize,
puts_done: bool,
};
// TODO add lazy evaluated build options and then put puts_per_thread behind
// some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor
// CI we would use a less aggressive setting since at 1 core, while we still
// want this test to pass, we need a smaller value since there is so much thrashing
// we would also use a less aggressive setting when running in valgrind
const puts_per_thread = 500;
const put_thread_count = 3;
test "std.atomic.stack" {
var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
defer std.heap.page_allocator.free(plenty_of_memory);
var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
var a = &fixed_buffer_allocator.allocator;
var stack = Stack(i32).init();
var context = Context{
.allocator = a,
.stack = &stack,
.put_sum = 0,
.get_sum = 0,
.puts_done = false,
.get_count = 0,
};
if (builtin.single_threaded) {
{
var i: usize = 0;
while (i < put_thread_count) : (i += 1) {
expect(startPuts(&context) == 0);
}
}
context.puts_done = true;
{
var i: usize = 0;
while (i < put_thread_count) : (i += 1) {
expect(startGets(&context) == 0);
}
}
} else {
var putters: [put_thread_count]*std.Thread = undefined;
for (putters) |*t| {
t.* = try std.Thread.spawn(&context, startPuts);
}
var getters: [put_thread_count]*std.Thread = undefined;
for (getters) |*t| {
t.* = try std.Thread.spawn(&context, startGets);
}
for (putters) |t|
t.wait();
@atomicStore(bool, &context.puts_done, true, .SeqCst);
for (getters) |t|
t.wait();
}
if (context.put_sum != context.get_sum) {
std.debug.panic("failure\nput_sum:{} != get_sum:{}", .{ context.put_sum, context.get_sum });
}
if (context.get_count != puts_per_thread * put_thread_count) {
std.debug.panic("failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}", .{
context.get_count,
@as(u32, puts_per_thread),
@as(u32, put_thread_count),
});
}
}
fn startPuts(ctx: *Context) u8 {
var put_count: usize = puts_per_thread;
var r = std.rand.DefaultPrng.init(0xdeadbeef);
while (put_count != 0) : (put_count -= 1) {
std.time.sleep(1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.int(u32));
const node = ctx.allocator.create(Stack(i32).Node) catch unreachable;
node.* = Stack(i32).Node{
.next = undefined,
.data = x,
};
ctx.stack.push(node);
_ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst);
}
return 0;
}
fn startGets(ctx: *Context) u8 {
while (true) {
const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst);
while (ctx.stack.pop()) |node| {
std.time.sleep(1); // let the os scheduler be our fuzz
_ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst);
_ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst);
}
if (last) return 0;
}
} | lib/std/atomic/stack.zig |
const std = @import("std");
const c = @import("c.zig");
pub const Error = error{
OutOfMemory,
InvalidMode,
InvalidChannelIndex,
NoChange,
Unknown,
};
fn translateErr(ret: c_int) Error!void {
return switch (ret) {
0 => {},
c.EBUR128_ERROR_NOMEM => error.OutOfMemory,
c.EBUR128_ERROR_INVALID_MODE => error.InvalidMode,
c.EBUR128_ERROR_INVALID_CHANNEL_INDEX => error.InvalidChannelIndex,
c.EBUR128_ERROR_NO_CHANGE => error.NoChange,
else => error.Unknown,
};
}
pub const Mode = enum(c_int) {
m = c.EBUR128_MODE_M,
s = c.EBUR128_MODE_S,
i = c.EBUR128_MODE_I,
lra = c.EBUR128_MODE_LRA,
sample_peak = c.EBUR128_MODE_SAMPLE_PEAK,
true_peak = c.EBUR128_MODE_TRUE_PEAK,
};
pub fn getVersion() std.SemanticVersion {
var ret: std.SemanticVersion = undefined;
c.ebur128_get_version(&ret.major, &ret.minor, &ret.patch);
return ret;
}
/// Get global integrated loudness in LUFS.
pub fn loudnessGlobalMultiple(states: []*State) Error!f64 {
var ret: f64 = undefined;
try translateErr(c.ebur128_loudness_global_multiple(@ptrCast([*c][*c]c.ebur128_state, states.ptr), states.len, &ret));
return ret;
}
/// Get loudness range (LRA) in LU across multiple instances.
///
/// Calculates loudness range according to EBU 3342.
pub fn loudnessGlobalRange(states: []*State) Error!f64 {
var ret: f64 = undefined;
try translateErr(c.ebur128_loudness_global_range(@ptrCast([*c][*c]c.ebur128_state, states.ptr), states.len, &ret));
return ret;
}
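// Illustrative usage sketch (comment only; `samples` stands for a hypothetical
// buffer of interleaved audio frames):
//
//     var state = try State.create(2, 44_100, .i);
//     defer State.destroy(&state);
//     try state.addFrames(f32, samples);
//     const integrated_lufs = try state.loudnessGlobal();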
pub const State = opaque {
const Self = @This();
// the library uses a non opaque struct with an opaque member for private
// members. I'm just going to treat it as all opaque and add getter functions
fn cast(state: *State) *c.ebur128_state {
return @ptrCast(*c.ebur128_state, @alignCast(@alignOf([*c]c.ebur128_state), state));
}
/// Initialize library state.
///
/// channels the number of channels.
/// sample_rate the sample rate.
/// mode see the mode enum for possible values.
pub fn create(channels: u32, sample_rate: u64, mode: Mode) Error!*Self {
return if (c.ebur128_init(channels, sample_rate, @enumToInt(mode))) |ptr|
@ptrCast(*Self, ptr)
else
error.Unknown;
}
pub fn destroy(self: **Self) void {
c.ebur128_destroy(@ptrCast([*c][*c]c.ebur128_state, self));
}
pub fn getSampleRate(self: *Self) u64 {
return self.cast().samplerate;
}
pub fn getChannels(self: *Self) u32 {
return self.cast().channels;
}
pub fn getMode(self: *Self) Mode {
return @intToEnum(Mode, self.cast().mode);
}
/// Set channel type.
///
/// The default is:
/// - 0 -> EBUR128_LEFT
/// - 1 -> EBUR128_RIGHT
/// - 2 -> EBUR128_CENTER
/// - 3 -> EBUR128_UNUSED
/// - 4 -> EBUR128_LEFT_SURROUND
/// - 5 -> EBUR128_RIGHT_SURROUND
///
/// channel_number zero based channel index.
/// channel_type channel type from the "channel" enum.
pub fn setChannel(self: *Self, channel_num: u32, channel_type: Channel) Error!void {
return translateErr(c.ebur128_set_channel(self.cast(), channel_num, @enumToInt(channel_type)));
}
/// Change library parameters.
///
/// Note that the channel map will be reset when setting a different number of
/// channels. The current unfinished block will be lost.
///
/// channels: new number of channels.
/// sample_rate: new sample rate.
pub fn changeParameters(self: *Self, channels: u32, sample_rate: u64) Error!void {
return translateErr(c.ebur128_change_parameters(self.cast(), channels, sample_rate));
}
/// Set the maximum window duration.
///
/// Set the maximum duration that will be used for ebur128_loudness_window().
/// Note that this destroys the current content of the audio buffer.
///
/// window: duration of the window in ms.
pub fn setMaxWindow(self: *Self, window: u64) Error!void {
return translateErr(c.ebur128_set_max_window(self.cast(), window));
}
/// Set the maximum history.
///
/// Set the maximum history that will be stored for loudness integration.
/// More history provides more accurate results, but requires more resources.
///
/// Applies to ebur128_loudness_range() and ebur128_loudness_global() when
/// EBUR128_MODE_HISTOGRAM is not set.
///
/// Default is ULONG_MAX (at least ~50 days).
/// Minimum is 3000ms for EBUR128_MODE_LRA and 400ms for EBUR128_MODE_M.
///
/// history: duration of history in ms.
pub fn setMaxHistory(self: *Self, history: u64) Error!void {
return translateErr(c.ebur128_set_max_history(self.cast(), history));
}
/// Add frames to be processed.
///
    /// frames: source frames with channels interleaved; the slice length is
    /// passed to libebur128 as the frame count (not the sample count).
pub fn addFrames(self: *Self, comptime T: type, frames: []T) Error!void {
return translateErr(switch (T) {
i16 => c.ebur128_add_frames_short(self.cast(), frames.ptr, frames.len),
i32 => c.ebur128_add_frames_int(self.cast(), frames.ptr, frames.len),
f32 => c.ebur128_add_frames_float(self.cast(), frames.ptr, frames.len),
f64 => c.ebur128_add_frames_double(self.cast(), frames.ptr, frames.len),
else => @compileError("type " ++ @typeName(T) ++ " not supported"),
});
}
/// Get global integrated loudness in LUFS.
pub fn loudnessGlobal(self: *Self) Error!f64 {
var ret: f64 = undefined;
try translateErr(c.ebur128_loudness_global(self.cast(), &ret));
return ret;
}
/// Get momentary loudness (last 400ms) in LUFS.
pub fn loudnessMomentary(self: *Self) Error!f64 {
var ret: f64 = undefined;
try translateErr(c.ebur128_loudness_momentary(self.cast(), &ret));
return ret;
}
/// Get short term loudness (last 3s) in LUFS.
pub fn loudnessShortTerm(self: *Self) Error!f64 {
var ret: f64 = undefined;
try translateErr(c.ebur128_loudness_shortterm(self.cast(), &ret));
return ret;
}
/// Get loudness of the specified window in LUFS.
///
/// window must not be larger than the current window set in st.
/// The current window can be changed by calling ebur128_set_max_window().
///
/// window: window in ms to calculate loudness.
pub fn loudnessWindow(self: *Self, window: u64) Error!f64 {
var ret: f64 = undefined;
try translateErr(c.ebur128_loudness_window(self.cast(), window, &ret));
return ret;
}
/// Get loudness range (LRA) of programme in LU.
///
/// Calculates loudness range according to EBU 3342.
    pub fn loudnessRange(self: *Self) Error!f64 {
var ret: f64 = undefined;
try translateErr(c.ebur128_loudness_range(self.cast(), &ret));
return ret;
}
/// Get maximum sample peak from all frames that have been processed.
///
/// The equation to convert to dBFS is: 20 * log10(out)
///
/// channel_num: channel to analyse
    pub fn samplePeak(self: *Self, channel_num: u32) Error!f64 {
var ret: f64 = undefined;
try translateErr(c.ebur128_sample_peak(self.cast(), channel_num, &ret));
return ret;
}
/// Get maximum sample peak from the last call to add_frames().
///
/// The equation to convert to dBFS is: 20 * log10(out)
///
/// channel_num: channel to analyse
    pub fn prevSamplePeak(self: *Self, channel_num: u32) Error!f64 {
var ret: f64 = undefined;
try translateErr(c.ebur128_prev_sample_peak(self.cast(), channel_num, &ret));
return ret;
}
/// Get maximum true peak from all frames that have been processed.
///
/// Uses an implementation defined algorithm to calculate the true peak. Do not
/// try to compare resulting values across different versions of the library,
/// as the algorithm may change.
///
/// The current implementation uses a custom polyphase FIR interpolator to
/// calculate true peak. Will oversample 4x for sample rates < 96000 Hz, 2x for
/// sample rates < 192000 Hz and leave the signal unchanged for 192000 Hz.
///
/// The equation to convert to dBTP is: 20 * log10(out)
///
/// channel_num: channel to analyse
    pub fn truePeak(self: *Self, channel_num: u32) Error!f64 {
var ret: f64 = undefined;
try translateErr(c.ebur128_true_peak(self.cast(), channel_num, &ret));
return ret;
}
/// Get maximum true peak from the last call to add_frames().
///
/// Uses an implementation defined algorithm to calculate the true peak. Do not
/// try to compare resulting values across different versions of the library,
/// as the algorithm may change.
///
/// The current implementation uses a custom polyphase FIR interpolator to
/// calculate true peak. Will oversample 4x for sample rates < 96000 Hz, 2x for
/// sample rates < 192000 Hz and leave the signal unchanged for 192000 Hz.
///
/// The equation to convert to dBTP is: 20 * log10(out)
///
/// channel_number: channel to analyse
/// returns maximum true peak in float format (1.0 is 0 dBTP)
pub fn prevTruePeak(self: *Self, channel_number: u32) Error!f64 {
var ret: f64 = undefined;
        try translateErr(c.ebur128_prev_true_peak(self.cast(), channel_number, &ret));
return ret;
}
/// Get relative threshold in LUFS.
    pub fn relativeThreshold(self: *Self) Error!f64 {
var ret: f64 = undefined;
try translateErr(c.ebur128_relative_threshold(self.cast(), &ret));
return ret;
}
}; | src/main.zig |
const std = @import("../std.zig");
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;
pub const Feature = enum {
duplex,
hvx,
hvx_length128b,
hvx_length64b,
hvxv60,
hvxv62,
hvxv65,
hvxv66,
long_calls,
mem_noshuf,
memops,
noreturn_stack_elim,
nvj,
nvs,
packets,
reserved_r19,
small_data,
v5,
v55,
v60,
v62,
v65,
v66,
zreg,
};
pub usingnamespace CpuFeature.feature_set_fns(Feature);
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@enumToInt(Feature.duplex)] = .{
.llvm_name = "duplex",
.description = "Enable generation of duplex instruction",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.hvx)] = .{
.llvm_name = "hvx",
.description = "Hexagon HVX instructions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.hvx_length128b)] = .{
.llvm_name = "hvx-length128b",
.description = "Hexagon HVX 128B instructions",
.dependencies = featureSet(&[_]Feature{
.hvx,
}),
};
result[@enumToInt(Feature.hvx_length64b)] = .{
.llvm_name = "hvx-length64b",
.description = "Hexagon HVX 64B instructions",
.dependencies = featureSet(&[_]Feature{
.hvx,
}),
};
result[@enumToInt(Feature.hvxv60)] = .{
.llvm_name = "hvxv60",
.description = "Hexagon HVX instructions",
.dependencies = featureSet(&[_]Feature{
.hvx,
}),
};
result[@enumToInt(Feature.hvxv62)] = .{
.llvm_name = "hvxv62",
.description = "Hexagon HVX instructions",
.dependencies = featureSet(&[_]Feature{
.hvx,
.hvxv60,
}),
};
result[@enumToInt(Feature.hvxv65)] = .{
.llvm_name = "hvxv65",
.description = "Hexagon HVX instructions",
.dependencies = featureSet(&[_]Feature{
.hvx,
.hvxv60,
.hvxv62,
}),
};
result[@enumToInt(Feature.hvxv66)] = .{
.llvm_name = "hvxv66",
.description = "Hexagon HVX instructions",
.dependencies = featureSet(&[_]Feature{
.hvx,
.hvxv60,
.hvxv62,
.hvxv65,
.zreg,
}),
};
result[@enumToInt(Feature.long_calls)] = .{
.llvm_name = "long-calls",
.description = "Use constant-extended calls",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.mem_noshuf)] = .{
.llvm_name = "mem_noshuf",
.description = "Supports mem_noshuf feature",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.memops)] = .{
.llvm_name = "memops",
.description = "Use memop instructions",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.noreturn_stack_elim)] = .{
.llvm_name = "noreturn-stack-elim",
.description = "Eliminate stack allocation in a noreturn function when possible",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.nvj)] = .{
.llvm_name = "nvj",
.description = "Support for new-value jumps",
.dependencies = featureSet(&[_]Feature{
.packets,
}),
};
result[@enumToInt(Feature.nvs)] = .{
.llvm_name = "nvs",
.description = "Support for new-value stores",
.dependencies = featureSet(&[_]Feature{
.packets,
}),
};
result[@enumToInt(Feature.packets)] = .{
.llvm_name = "packets",
.description = "Support for instruction packets",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.reserved_r19)] = .{
.llvm_name = "reserved-r19",
.description = "Reserve register R19",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.small_data)] = .{
.llvm_name = "small-data",
.description = "Allow GP-relative addressing of global variables",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.v5)] = .{
.llvm_name = "v5",
.description = "Enable Hexagon V5 architecture",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.v55)] = .{
.llvm_name = "v55",
.description = "Enable Hexagon V55 architecture",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.v60)] = .{
.llvm_name = "v60",
.description = "Enable Hexagon V60 architecture",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.v62)] = .{
.llvm_name = "v62",
.description = "Enable Hexagon V62 architecture",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.v65)] = .{
.llvm_name = "v65",
.description = "Enable Hexagon V65 architecture",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.v66)] = .{
.llvm_name = "v66",
.description = "Enable Hexagon V66 architecture",
.dependencies = featureSet(&[_]Feature{}),
};
result[@enumToInt(Feature.zreg)] = .{
.llvm_name = "zreg",
.description = "Hexagon ZReg extension instructions",
.dependencies = featureSet(&[_]Feature{}),
};
const ti = @typeInfo(Feature);
for (result) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
}
break :blk result;
};
pub const cpu = struct {
pub const generic = CpuModel{
.name = "generic",
.llvm_name = "generic",
.features = featureSet(&[_]Feature{
.duplex,
.memops,
.nvj,
.nvs,
.packets,
.small_data,
.v5,
.v55,
.v60,
}),
};
pub const hexagonv5 = CpuModel{
.name = "hexagonv5",
.llvm_name = "hexagonv5",
.features = featureSet(&[_]Feature{
.duplex,
.memops,
.nvj,
.nvs,
.packets,
.small_data,
.v5,
}),
};
pub const hexagonv55 = CpuModel{
.name = "hexagonv55",
.llvm_name = "hexagonv55",
.features = featureSet(&[_]Feature{
.duplex,
.memops,
.nvj,
.nvs,
.packets,
.small_data,
.v5,
.v55,
}),
};
pub const hexagonv60 = CpuModel{
.name = "hexagonv60",
.llvm_name = "hexagonv60",
.features = featureSet(&[_]Feature{
.duplex,
.memops,
.nvj,
.nvs,
.packets,
.small_data,
.v5,
.v55,
.v60,
}),
};
pub const hexagonv62 = CpuModel{
.name = "hexagonv62",
.llvm_name = "hexagonv62",
.features = featureSet(&[_]Feature{
.duplex,
.memops,
.nvj,
.nvs,
.packets,
.small_data,
.v5,
.v55,
.v60,
.v62,
}),
};
pub const hexagonv65 = CpuModel{
.name = "hexagonv65",
.llvm_name = "hexagonv65",
.features = featureSet(&[_]Feature{
.duplex,
.mem_noshuf,
.memops,
.nvj,
.nvs,
.packets,
.small_data,
.v5,
.v55,
.v60,
.v62,
.v65,
}),
};
pub const hexagonv66 = CpuModel{
.name = "hexagonv66",
.llvm_name = "hexagonv66",
.features = featureSet(&[_]Feature{
.duplex,
.mem_noshuf,
.memops,
.nvj,
.nvs,
.packets,
.small_data,
.v5,
.v55,
.v60,
.v62,
.v65,
.v66,
}),
};
};
/// All hexagon CPUs, sorted alphabetically by name.
/// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1
/// compiler has inefficient memory and CPU usage, affecting build times.
pub const all_cpus = &[_]*const CpuModel{
&cpu.generic,
&cpu.hexagonv5,
&cpu.hexagonv55,
&cpu.hexagonv60,
&cpu.hexagonv62,
&cpu.hexagonv65,
&cpu.hexagonv66,
}; | lib/std/target/hexagon.zig |
const std = @import("std");
const server = &@import("../main.zig").server;
const Error = @import("../command.zig").Error;
const Direction = @import("../command.zig").Direction;
const Seat = @import("../Seat.zig");
const View = @import("../View.zig");
const ViewStack = @import("../view_stack.zig").ViewStack;
/// Swap the currently focused view with either the view higher or lower in the visible stack
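/// (exposed to users as the `swap` command with a direction argument, normally
/// bound to keys via riverctl in the user's configuration)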
pub fn swap(
allocator: *std.mem.Allocator,
seat: *Seat,
args: []const [:0]const u8,
out: *?[]const u8,
) Error!void {
if (args.len < 2) return Error.NotEnoughArguments;
if (args.len > 2) return Error.TooManyArguments;
if (seat.focused != .view)
return;
// Filter out everything that is not part of the current layout
if (seat.focused.view.pending.float or seat.focused.view.pending.fullscreen) return;
const direction = std.meta.stringToEnum(Direction, args[1]) orelse return Error.InvalidDirection;
const focused_node = @fieldParentPtr(ViewStack(View).Node, "view", seat.focused.view);
const output = seat.focused_output;
var it = ViewStack(View).iter(
focused_node,
if (direction == .next) .forward else .reverse,
output.pending.tags,
filter,
);
var it_wrap = ViewStack(View).iter(
if (direction == .next) output.views.first else output.views.last,
if (direction == .next) .forward else .reverse,
output.pending.tags,
filter,
);
// skip the first node which is focused_node
_ = it.next().?;
const to_swap = @fieldParentPtr(
ViewStack(View).Node,
"view",
// Wrap around if needed
if (it.next()) |next| next else it_wrap.next().?,
);
    // Don't swap when only the focused view is part of the layout
if (focused_node == to_swap) {
return;
}
output.views.swap(focused_node, to_swap);
output.arrangeViews();
server.root.startTransaction();
}
fn filter(view: *View, filter_tags: u32) bool {
return view.surface != null and !view.pending.float and
!view.pending.fullscreen and view.pending.tags & filter_tags != 0;
} | source/river-0.1.0/river/command/swap.zig |
const std = @import("std");
const mem = std.mem;
const io = std.io;
const fs = std.fs;
const fmt = std.fmt;
const testing = std.testing;
const Target = std.Target;
const CrossTarget = std.zig.CrossTarget;
const assert = std.debug.assert;
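// Each *CpuinfoImpl below supplies two hooks: line_hook(key, value), which
// consumes one "key : value" line of /proc/cpuinfo and returns whether parsing
// should continue, and finalize(arch), which turns the collected fields into a
// Target.Cpu (or null when detection failed). CpuinfoParser(Impl), defined
// elsewhere in this file, drives the parse through these hooks.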
const SparcCpuinfoImpl = struct {
model: ?*const Target.Cpu.Model = null,
is_64bit: bool = false,
const cpu_names = .{
.{ "SuperSparc", &Target.sparc.cpu.supersparc },
.{ "HyperSparc", &Target.sparc.cpu.hypersparc },
.{ "SpitFire", &Target.sparc.cpu.ultrasparc },
.{ "BlackBird", &Target.sparc.cpu.ultrasparc },
.{ "Sabre", &Target.sparc.cpu.ultrasparc },
.{ "Hummingbird", &Target.sparc.cpu.ultrasparc },
.{ "Cheetah", &Target.sparc.cpu.ultrasparc3 },
.{ "Jalapeno", &Target.sparc.cpu.ultrasparc3 },
.{ "Jaguar", &Target.sparc.cpu.ultrasparc3 },
.{ "Panther", &Target.sparc.cpu.ultrasparc3 },
.{ "Serrano", &Target.sparc.cpu.ultrasparc3 },
.{ "UltraSparc T1", &Target.sparc.cpu.niagara },
.{ "UltraSparc T2", &Target.sparc.cpu.niagara2 },
.{ "UltraSparc T3", &Target.sparc.cpu.niagara3 },
.{ "UltraSparc T4", &Target.sparc.cpu.niagara4 },
.{ "UltraSparc T5", &Target.sparc.cpu.niagara4 },
.{ "LEON", &Target.sparc.cpu.leon3 },
};
fn line_hook(self: *SparcCpuinfoImpl, key: []const u8, value: []const u8) !bool {
if (mem.eql(u8, key, "cpu")) {
inline for (cpu_names) |pair| {
if (mem.indexOfPos(u8, value, 0, pair[0]) != null) {
self.model = pair[1];
break;
}
}
} else if (mem.eql(u8, key, "type")) {
self.is_64bit = mem.eql(u8, value, "sun4u") or mem.eql(u8, value, "sun4v");
}
return true;
}
fn finalize(self: *const SparcCpuinfoImpl, arch: Target.Cpu.Arch) ?Target.Cpu {
// At the moment we only support 64bit SPARC systems.
assert(self.is_64bit);
const model = self.model orelse return null;
return Target.Cpu{
.arch = arch,
.model = model,
.features = model.features,
};
}
};
const SparcCpuinfoParser = CpuinfoParser(SparcCpuinfoImpl);
test "cpuinfo: SPARC" {
try testParser(SparcCpuinfoParser, .sparcv9, &Target.sparc.cpu.niagara2,
\\cpu : UltraSparc T2 (Niagara2)
\\fpu : UltraSparc T2 integrated FPU
\\pmu : niagara2
\\type : sun4v
);
}
const PowerpcCpuinfoImpl = struct {
model: ?*const Target.Cpu.Model = null,
const cpu_names = .{
.{ "604e", &Target.powerpc.cpu.@"604e" },
.{ "604", &Target.powerpc.cpu.@"604" },
.{ "7400", &Target.powerpc.cpu.@"7400" },
.{ "7410", &Target.powerpc.cpu.@"7400" },
.{ "7447", &Target.powerpc.cpu.@"7400" },
.{ "7455", &Target.powerpc.cpu.@"7450" },
.{ "G4", &Target.powerpc.cpu.@"g4" },
.{ "POWER4", &Target.powerpc.cpu.@"970" },
.{ "PPC970FX", &Target.powerpc.cpu.@"970" },
.{ "PPC970MP", &Target.powerpc.cpu.@"970" },
.{ "G5", &Target.powerpc.cpu.@"g5" },
.{ "POWER5", &Target.powerpc.cpu.@"g5" },
.{ "A2", &Target.powerpc.cpu.@"a2" },
.{ "POWER6", &Target.powerpc.cpu.@"pwr6" },
.{ "POWER7", &Target.powerpc.cpu.@"pwr7" },
.{ "POWER8", &Target.powerpc.cpu.@"pwr8" },
.{ "POWER8E", &Target.powerpc.cpu.@"pwr8" },
.{ "POWER8NVL", &Target.powerpc.cpu.@"pwr8" },
.{ "POWER9", &Target.powerpc.cpu.@"pwr9" },
.{ "POWER10", &Target.powerpc.cpu.@"pwr10" },
};
fn line_hook(self: *PowerpcCpuinfoImpl, key: []const u8, value: []const u8) !bool {
if (mem.eql(u8, key, "cpu")) {
// The model name is often followed by a comma or space and extra
// info.
inline for (cpu_names) |pair| {
const end_index = mem.indexOfAny(u8, value, ", ") orelse value.len;
if (mem.eql(u8, value[0..end_index], pair[0])) {
self.model = pair[1];
break;
}
}
// Stop the detection once we've seen the first core.
return false;
}
return true;
}
fn finalize(self: *const PowerpcCpuinfoImpl, arch: Target.Cpu.Arch) ?Target.Cpu {
const model = self.model orelse return null;
return Target.Cpu{
.arch = arch,
.model = model,
.features = model.features,
};
}
};
const PowerpcCpuinfoParser = CpuinfoParser(PowerpcCpuinfoImpl);
test "cpuinfo: PowerPC" {
try testParser(PowerpcCpuinfoParser, .powerpc, &Target.powerpc.cpu.@"970",
\\processor : 0
\\cpu : PPC970MP, altivec supported
\\clock : 1250.000000MHz
\\revision : 1.1 (pvr 0044 0101)
);
try testParser(PowerpcCpuinfoParser, .powerpc64le, &Target.powerpc.cpu.pwr8,
\\processor : 0
\\cpu : POWER8 (raw), altivec supported
\\clock : 2926.000000MHz
\\revision : 2.0 (pvr 004d 0200)
);
}
const ArmCpuinfoImpl = struct {
cores: [4]CoreInfo = undefined,
core_no: usize = 0,
have_fields: usize = 0,
const CoreInfo = struct {
architecture: u8 = 0,
implementer: u8 = 0,
variant: u8 = 0,
part: u16 = 0,
is_really_v6: bool = false,
};
const cpu_models = struct {
// Shorthands to simplify the tables below.
const A32 = Target.arm.cpu;
const A64 = Target.aarch64.cpu;
const E = struct {
part: u16,
variant: ?u8 = null, // null if matches any variant
m32: ?*const Target.Cpu.Model = null,
m64: ?*const Target.Cpu.Model = null,
};
// implementer = 0x41
const ARM = [_]E{
E{ .part = 0x926, .m32 = &A32.arm926ej_s, .m64 = null },
E{ .part = 0xb02, .m32 = &A32.mpcore, .m64 = null },
E{ .part = 0xb36, .m32 = &A32.arm1136j_s, .m64 = null },
E{ .part = 0xb56, .m32 = &A32.arm1156t2_s, .m64 = null },
E{ .part = 0xb76, .m32 = &A32.arm1176jz_s, .m64 = null },
E{ .part = 0xc05, .m32 = &A32.cortex_a5, .m64 = null },
E{ .part = 0xc07, .m32 = &A32.cortex_a7, .m64 = null },
E{ .part = 0xc08, .m32 = &A32.cortex_a8, .m64 = null },
E{ .part = 0xc09, .m32 = &A32.cortex_a9, .m64 = null },
E{ .part = 0xc0d, .m32 = &A32.cortex_a17, .m64 = null },
E{ .part = 0xc0f, .m32 = &A32.cortex_a15, .m64 = null },
E{ .part = 0xc0e, .m32 = &A32.cortex_a17, .m64 = null },
E{ .part = 0xc14, .m32 = &A32.cortex_r4, .m64 = null },
E{ .part = 0xc15, .m32 = &A32.cortex_r5, .m64 = null },
E{ .part = 0xc17, .m32 = &A32.cortex_r7, .m64 = null },
E{ .part = 0xc18, .m32 = &A32.cortex_r8, .m64 = null },
E{ .part = 0xc20, .m32 = &A32.cortex_m0, .m64 = null },
E{ .part = 0xc21, .m32 = &A32.cortex_m1, .m64 = null },
E{ .part = 0xc23, .m32 = &A32.cortex_m3, .m64 = null },
E{ .part = 0xc24, .m32 = &A32.cortex_m4, .m64 = null },
E{ .part = 0xc27, .m32 = &A32.cortex_m7, .m64 = null },
E{ .part = 0xc60, .m32 = &A32.cortex_m0plus, .m64 = null },
E{ .part = 0xd01, .m32 = &A32.cortex_a32, .m64 = null },
E{ .part = 0xd03, .m32 = &A32.cortex_a53, .m64 = &A64.cortex_a53 },
E{ .part = 0xd04, .m32 = &A32.cortex_a35, .m64 = &A64.cortex_a35 },
E{ .part = 0xd05, .m32 = &A32.cortex_a55, .m64 = &A64.cortex_a55 },
E{ .part = 0xd07, .m32 = &A32.cortex_a57, .m64 = &A64.cortex_a57 },
E{ .part = 0xd08, .m32 = &A32.cortex_a72, .m64 = &A64.cortex_a72 },
E{ .part = 0xd09, .m32 = &A32.cortex_a73, .m64 = &A64.cortex_a73 },
E{ .part = 0xd0a, .m32 = &A32.cortex_a75, .m64 = &A64.cortex_a75 },
E{ .part = 0xd0b, .m32 = &A32.cortex_a76, .m64 = &A64.cortex_a76 },
E{ .part = 0xd0c, .m32 = &A32.neoverse_n1, .m64 = null },
E{ .part = 0xd0d, .m32 = &A32.cortex_a77, .m64 = &A64.cortex_a77 },
E{ .part = 0xd13, .m32 = &A32.cortex_r52, .m64 = null },
E{ .part = 0xd20, .m32 = &A32.cortex_m23, .m64 = null },
E{ .part = 0xd21, .m32 = &A32.cortex_m33, .m64 = null },
E{ .part = 0xd41, .m32 = &A32.cortex_a78, .m64 = &A64.cortex_a78 },
E{ .part = 0xd4b, .m32 = &A32.cortex_a78c, .m64 = &A64.cortex_a78c },
E{ .part = 0xd44, .m32 = &A32.cortex_x1, .m64 = &A64.cortex_x1 },
E{ .part = 0xd02, .m64 = &A64.cortex_a34 },
E{ .part = 0xd06, .m64 = &A64.cortex_a65 },
E{ .part = 0xd43, .m64 = &A64.cortex_a65ae },
};
// implementer = 0x42
const Broadcom = [_]E{
E{ .part = 0x516, .m64 = &A64.thunderx2t99 },
};
// implementer = 0x43
const Cavium = [_]E{
E{ .part = 0x0a0, .m64 = &A64.thunderx },
E{ .part = 0x0a2, .m64 = &A64.thunderxt81 },
E{ .part = 0x0a3, .m64 = &A64.thunderxt83 },
E{ .part = 0x0a1, .m64 = &A64.thunderxt88 },
E{ .part = 0x0af, .m64 = &A64.thunderx2t99 },
};
// implementer = 0x46
const Fujitsu = [_]E{
E{ .part = 0x001, .m64 = &A64.a64fx },
};
// implementer = 0x48
const HiSilicon = [_]E{
E{ .part = 0xd01, .m64 = &A64.tsv110 },
};
// implementer = 0x4e
const Nvidia = [_]E{
E{ .part = 0x004, .m64 = &A64.carmel },
};
// implementer = 0x50
const Ampere = [_]E{
E{ .part = 0x000, .variant = 3, .m64 = &A64.emag },
E{ .part = 0x000, .m64 = &A64.xgene1 },
};
// implementer = 0x51
const Qualcomm = [_]E{
E{ .part = 0x06f, .m32 = &A32.krait },
E{ .part = 0x201, .m64 = &A64.kryo, .m32 = &A64.kryo },
E{ .part = 0x205, .m64 = &A64.kryo, .m32 = &A64.kryo },
E{ .part = 0x211, .m64 = &A64.kryo, .m32 = &A64.kryo },
E{ .part = 0x800, .m64 = &A64.cortex_a73, .m32 = &A64.cortex_a73 },
E{ .part = 0x801, .m64 = &A64.cortex_a73, .m32 = &A64.cortex_a73 },
E{ .part = 0x802, .m64 = &A64.cortex_a75, .m32 = &A64.cortex_a75 },
E{ .part = 0x803, .m64 = &A64.cortex_a75, .m32 = &A64.cortex_a75 },
E{ .part = 0x804, .m64 = &A64.cortex_a76, .m32 = &A64.cortex_a76 },
E{ .part = 0x805, .m64 = &A64.cortex_a76, .m32 = &A64.cortex_a76 },
E{ .part = 0xc00, .m64 = &A64.falkor },
E{ .part = 0xc01, .m64 = &A64.saphira },
};
fn isKnown(core: CoreInfo, is_64bit: bool) ?*const Target.Cpu.Model {
const models = switch (core.implementer) {
0x41 => &ARM,
0x42 => &Broadcom,
0x43 => &Cavium,
0x46 => &Fujitsu,
0x48 => &HiSilicon,
0x50 => &Ampere,
0x51 => &Qualcomm,
else => return null,
};
for (models) |model| {
if (model.part == core.part and
(model.variant == null or model.variant.? == core.variant))
return if (is_64bit) model.m64 else model.m32;
}
return null;
}
};
fn addOne(self: *ArmCpuinfoImpl) void {
if (self.have_fields == 4 and self.core_no < self.cores.len) {
if (self.core_no > 0) {
// Deduplicate the core info.
for (self.cores[0..self.core_no]) |it| {
if (std.meta.eql(it, self.cores[self.core_no]))
return;
}
}
self.core_no += 1;
}
}
fn line_hook(self: *ArmCpuinfoImpl, key: []const u8, value: []const u8) !bool {
const info = &self.cores[self.core_no];
if (mem.eql(u8, key, "processor")) {
        // Handle both old-style and new-style cpuinfo formats.
        // The former prints a sequence of "processor: N" lines for each
        // core and then the info for the core that's executing this code(!),
        // while the latter prints the info for each core right after its
        // "processor" key.
self.have_fields = 0;
self.cores[self.core_no] = .{};
} else if (mem.eql(u8, key, "CPU implementer")) {
info.implementer = try fmt.parseInt(u8, value, 0);
self.have_fields += 1;
} else if (mem.eql(u8, key, "CPU architecture")) {
// "AArch64" on older kernels.
info.architecture = if (mem.startsWith(u8, value, "AArch64"))
8
else
try fmt.parseInt(u8, value, 0);
self.have_fields += 1;
} else if (mem.eql(u8, key, "CPU variant")) {
info.variant = try fmt.parseInt(u8, value, 0);
self.have_fields += 1;
} else if (mem.eql(u8, key, "CPU part")) {
info.part = try fmt.parseInt(u16, value, 0);
self.have_fields += 1;
} else if (mem.eql(u8, key, "model name")) {
// ARMv6 cores report "CPU architecture" equal to 7.
if (mem.indexOf(u8, value, "(v6l)")) |_| {
info.is_really_v6 = true;
}
} else if (mem.eql(u8, key, "CPU revision")) {
// This field is always the last one for each CPU section.
_ = self.addOne();
}
return true;
}
fn finalize(self: *ArmCpuinfoImpl, arch: Target.Cpu.Arch) ?Target.Cpu {
if (self.core_no == 0) return null;
const is_64bit = switch (arch) {
.aarch64, .aarch64_be, .aarch64_32 => true,
else => false,
};
var known_models: [self.cores.len]?*const Target.Cpu.Model = undefined;
for (self.cores[0..self.core_no]) |core, i| {
known_models[i] = cpu_models.isKnown(core, is_64bit);
}
// XXX We pick the first core on big.LITTLE systems, hopefully the
// LITTLE one.
const model = known_models[0] orelse return null;
return Target.Cpu{
.arch = arch,
.model = model,
.features = model.features,
};
}
};
const ArmCpuinfoParser = CpuinfoParser(ArmCpuinfoImpl);
test "cpuinfo: ARM" {
try testParser(ArmCpuinfoParser, .arm, &Target.arm.cpu.arm1176jz_s,
\\processor : 0
\\model name : ARMv6-compatible processor rev 7 (v6l)
\\BogoMIPS : 997.08
\\Features : half thumb fastmult vfp edsp java tls
\\CPU implementer : 0x41
\\CPU architecture: 7
\\CPU variant : 0x0
\\CPU part : 0xb76
\\CPU revision : 7
);
try testParser(ArmCpuinfoParser, .arm, &Target.arm.cpu.cortex_a7,
\\processor : 0
\\model name : ARMv7 Processor rev 3 (v7l)
\\BogoMIPS : 18.00
\\Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae
\\CPU implementer : 0x41
\\CPU architecture: 7
\\CPU variant : 0x0
\\CPU part : 0xc07
\\CPU revision : 3
\\
\\processor : 4
\\model name : ARMv7 Processor rev 3 (v7l)
\\BogoMIPS : 90.00
\\Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae
\\CPU implementer : 0x41
\\CPU architecture: 7
\\CPU variant : 0x2
\\CPU part : 0xc0f
\\CPU revision : 3
);
try testParser(ArmCpuinfoParser, .aarch64, &Target.aarch64.cpu.cortex_a72,
\\processor : 0
\\BogoMIPS : 108.00
\\Features : fp asimd evtstrm crc32 cpuid
\\CPU implementer : 0x41
\\CPU architecture: 8
\\CPU variant : 0x0
\\CPU part : 0xd08
\\CPU revision : 3
);
}
fn testParser(
parser: anytype,
arch: Target.Cpu.Arch,
expected_model: *const Target.Cpu.Model,
input: []const u8,
) !void {
var fbs = io.fixedBufferStream(input);
const result = try parser.parse(arch, fbs.reader());
try testing.expectEqual(expected_model, result.?.model);
try testing.expect(expected_model.features.eql(result.?.features));
}
// The generic implementation of a /proc/cpuinfo parser.
// For every line it invokes the line_hook method with the key and value strings
// as first and second parameters. Returning false from the hook function stops
// the iteration without raising an error.
// When all the lines have been analyzed the finalize method is called.
fn CpuinfoParser(comptime impl: anytype) type {
return struct {
fn parse(arch: Target.Cpu.Arch, reader: anytype) anyerror!?Target.Cpu {
var line_buf: [1024]u8 = undefined;
var obj: impl = .{};
while (true) {
const line = (try reader.readUntilDelimiterOrEof(&line_buf, '\n')) orelse break;
const colon_pos = mem.indexOfScalar(u8, line, ':') orelse continue;
const key = mem.trimRight(u8, line[0..colon_pos], " \t");
const value = mem.trimLeft(u8, line[colon_pos + 1 ..], " \t");
if (!try obj.line_hook(key, value))
break;
}
return obj.finalize(arch);
}
};
}
pub fn detectNativeCpuAndFeatures() ?Target.Cpu {
var f = fs.openFileAbsolute("/proc/cpuinfo", .{ .intended_io_mode = .blocking }) catch |err| switch (err) {
else => return null,
};
defer f.close();
const current_arch = std.Target.current.cpu.arch;
switch (current_arch) {
.arm, .armeb, .thumb, .thumbeb, .aarch64, .aarch64_be, .aarch64_32 => {
return ArmCpuinfoParser.parse(current_arch, f.reader()) catch null;
},
.sparcv9 => {
return SparcCpuinfoParser.parse(current_arch, f.reader()) catch null;
},
.powerpc, .powerpcle, .powerpc64, .powerpc64le => {
return PowerpcCpuinfoParser.parse(current_arch, f.reader()) catch null;
},
else => {},
}
return null;
} | lib/std/zig/system/linux.zig |
const std = @import("std");
const assert = std.debug.assert;
const log = std.log;
const cuda = @import("cudaz");
const cu = cuda.cu;
const png = @import("png.zig");
const Image = png.Image;
const builtin = @import("builtin");
const CallingConvention = @import("std").builtin.CallingConvention;
pub const is_nvptx = builtin.cpu.arch == .nvptx64;
pub const kernel: CallingConvention = if (is_nvptx) .PtxKernel else .Unspecified;
pub fn validate_output(alloc: std.mem.Allocator, comptime dir: []const u8, threshold: f32) !void {
const output = try Image.fromFilePath(alloc, dir ++ "output.png");
const reference = try Image.fromFilePath(alloc, dir ++ "reference.png");
log.info("Loaded output image and reference image for comparison", .{});
assert(output.width == reference.width);
assert(output.height == reference.height);
// assert(output.image_format == reference.image_format);
assert(output.raw().len == reference.raw().len);
const avg_diff = try eq_and_show_diff(alloc, dir, output, reference);
if (avg_diff < threshold) {
log.info("*** The image matches, Congrats ! ***", .{});
}
}
pub fn eq_and_show_diff(alloc: std.mem.Allocator, comptime dir: []const u8, output: Image, reference: Image) !f32 {
var diff = try png.Image.init(alloc, reference.width, reference.height, .gray8);
var out_pxls = output.iterator();
var ref_pxls = reference.iterator();
const num_pixels = reference.width * reference.height;
var i: usize = 0;
var min_val: f32 = 255;
var max_val: f32 = -255;
var total: f32 = 0;
while (true) {
var ref_pxl = ref_pxls.next();
if (ref_pxl == null) break;
var out_pxl = out_pxls.next();
var d = ref_pxl.?.r - out_pxl.?.r;
d = std.math.absFloat(d);
min_val = std.math.min(min_val, d);
max_val = std.math.max(max_val, d);
i += 1;
total += d;
}
    var avg_diff = 255.0 * total / @intToFloat(f32, num_pixels);
    // The first pass consumed both pixel iterators; restart them for the diff-image pass.
    out_pxls = output.iterator();
    ref_pxls = reference.iterator();
    // Guard against a zero range when the two images are identical.
    const range = if (max_val > min_val) max_val - min_val else 1.0;
    i = 0;
    var diff_pxls = diff.px.gray8;
    while (true) {
        var ref_pxl = ref_pxls.next();
        if (ref_pxl == null) break;
        var out_pxl = out_pxls.next();
        var d = ref_pxl.?.r - out_pxl.?.r;
        d = std.math.absFloat(d);
        const centered_d = 255.0 * (d - min_val) / range;
        diff_pxls[i] = @floatToInt(u8, centered_d);
        i += 1;
    }
try diff.writeToFilePath(dir ++ "output_diff.png");
if (min_val != 0 or max_val != 0) {
std.log.err("Found diffs between two images, avg: {d:.3}, ranging from {d:.1} to {d:.1} pixel value.", .{ avg_diff, 255 * min_val, 255 * max_val });
}
return avg_diff;
}
pub fn asUchar3(img: Image) []cu.uchar3 {
var ptr: [*]cu.uchar3 = @ptrCast([*]cu.uchar3, img.px.rgb24);
const num_pixels = img.width * img.height;
return ptr[0..num_pixels];
}
pub fn expectEqualDeviceSlices(
comptime DType: type,
h_expected: []const DType,
d_values: []const DType,
) !void {
const allocator = std.testing.allocator;
const h_values = try cuda.allocAndCopyResult(DType, allocator, d_values);
defer allocator.free(h_values);
std.testing.expectEqualSlices(DType, h_expected, h_values) catch |err| {
if (h_expected.len < 80) {
log.err("Expected: {any}, got: {any}", .{ h_expected, h_values });
}
return err;
};
} | CS344/src/utils.zig |
const std = @import("std");
/// DCT computes discrete cosine transform.
pub const DCT = struct {
const Self = @This();
allocator: *std.mem.Allocator,
size: usize,
workspace: []f32,
dft_sin: []f32,
dft_cos: []f32,
pub fn init(allocator: *std.mem.Allocator, input_size: usize) !Self {
var workspace = try allocator.alloc(f32, input_size * 10);
var w_real = workspace[0..input_size];
var w_imag = workspace[input_size .. 2 * input_size];
const input_size_f32 = @intToFloat(f32, input_size);
var k: usize = 0;
while (k < input_size) : (k += 1) {
const k_f32 = @intToFloat(f32, k);
w_real[k] = std.math.cos(k_f32 * std.math.pi / (2 * input_size_f32)) / std.math.sqrt(2.0 * input_size_f32);
w_imag[k] = -std.math.sin(k_f32 * std.math.pi / (2 * input_size_f32)) / std.math.sqrt(2.0 * input_size_f32);
}
w_real[0] /= std.math.sqrt(2.0);
w_imag[0] /= std.math.sqrt(2.0);
var dft_sin = try allocator.alloc(f32, 4 * input_size * input_size);
var dft_cos = try allocator.alloc(f32, 4 * input_size * input_size);
k = 0;
const dft_size = 2 * input_size;
const dft_size_f32 = @intToFloat(f32, dft_size);
while (k < dft_size) : (k += 1) {
const k_f32 = @intToFloat(f32, k);
var n: usize = 0;
while (n < dft_size) : (n += 1) {
const n_f32 = @intToFloat(f32, n);
dft_sin[k * dft_size + n] = std.math.sin(2.0 * std.math.pi * n_f32 * k_f32 / (dft_size_f32));
dft_cos[k * dft_size + n] = std.math.cos(2.0 * std.math.pi * n_f32 * k_f32 / (dft_size_f32));
}
}
return Self{
.allocator = allocator,
.size = input_size,
.workspace = workspace,
.dft_sin = dft_sin,
.dft_cos = dft_cos,
};
}
pub fn deinit(self: *Self) void {
self.allocator.free(self.workspace);
self.allocator.free(self.dft_sin);
self.allocator.free(self.dft_cos);
}
/// Update data with its DCT. This function is not thread-safe. First half
/// of data is the real values, next half is imaginary values.
pub fn apply(self: *Self, data: []f32) !void {
if (data.len != self.size * 2) return error.InvalidSize;
var w_real = self.workspace[0..self.size];
var w_imag = self.workspace[self.size .. 2 * self.size];
var local_real = self.workspace[2 * self.size .. 4 * self.size];
var local_imag = self.workspace[4 * self.size .. 6 * self.size];
var tmp_real = self.workspace[6 * self.size .. 8 * self.size];
var tmp_imag = self.workspace[8 * self.size .. 10 * self.size];
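        // Build the even-symmetric 2N-point extension of the input; its DFT, scaled by
        // the w twiddle factors prepared in init(), yields the DCT (DCT-via-DFT trick).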
var n: usize = 0;
while (n < self.size) : (n += 1) {
local_real[n] = data[n];
local_imag[n] = data[n + self.size];
local_real[n + self.size] = data[self.size - 1 - n];
local_imag[n + self.size] = data[2 * self.size - 1 - n];
}
// DFT
const dft_size = 2 * self.size;
const dft_size_f32 = @intToFloat(f32, dft_size);
var k: usize = 0;
while (k < dft_size) : (k += 1) {
const k_f32 = @intToFloat(f32, k);
var sum_real: f32 = 0;
var sum_imag: f32 = 0;
n = 0;
while (n < dft_size) : (n += 1) {
const n_f32 = @intToFloat(f32, n);
const sinval = self.dft_sin[k * dft_size + n];
const cosval = self.dft_cos[k * dft_size + n];
sum_real += local_real[n] * cosval + local_imag[n] * sinval;
sum_imag += -local_real[n] * sinval + local_imag[n] * cosval;
}
tmp_real[k] = sum_real;
tmp_imag[k] = sum_imag;
}
k = 0;
while (k < self.size) : (k += 1) {
data[k] = tmp_real[k] * w_real[k] - tmp_imag[k] * w_imag[k];
data[k + self.size] = tmp_real[k] * w_imag[k] + tmp_imag[k] * w_real[k];
}
}
};
test "dct" {
var dct = try DCT.init(std.testing.allocator, 16);
defer dct.deinit();
var data = [32]f32{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
var truth = [32]f32{ 30, -18.3115, 4.99114e-16, -2.00753, -3.81742e-15, -0.701587, -1.19145e-14, -0.339542, -2.34055e-14, -0.187678, -1.25645e-14, -0.10714, -2.9214e-15, -0.0560376, 2.18904e-15, -0.0174952, 0, 0, 1.20141e-15, 3.21965e-15, 2.38847e-15, -7.93809e-15, 1.13128e-14, -1.04083e-14, 1.87829e-14, -1.8735e-14, -3.91056e-15, -7.00134e-15, -8.45103e-16, 1.18048e-14, 2.67662e-15, 1.63524e-14 };
try dct.apply(data[0..]);
var err: f32 = 0;
for (data) |got, idx| {
var want = truth[idx];
err += (got - want) * (got - want);
}
err = std.math.sqrt(err / @intToFloat(f32, data.len));
try std.testing.expect(err < 0.0001);
}
test "dct file" {
var dct = try DCT.init(std.testing.allocator, 256);
defer dct.deinit();
var d = std.io.fixedBufferStream(@embedFile("testdata/test_dct.256.in"));
var data: [512]f32 = undefined;
for (data) |*x| {
x.* = 0;
}
var data_u8 = std.mem.sliceAsBytes(data[0..]);
const n = try d.reader().readAll(data_u8);
var t = std.io.fixedBufferStream(@embedFile("testdata/test_dct.256.out"));
var truth: [512]f32 = undefined;
var truth_u8 = std.mem.sliceAsBytes(truth[0..]);
try t.reader().readNoEof(truth_u8);
try dct.apply(data[0..]);
var err: f32 = 0;
for (data) |got, idx| {
const want = truth[idx];
err += (got - want) * (got - want);
}
err = std.math.sqrt(err / @intToFloat(f32, data.len));
try std.testing.expect(err < 0.005);
} | src/dsp/dct.zig |
const std = @import("std");
const Vec = std.meta.Vector(16, u8);
fn reverse_mask(n: u8) Vec {
    // a global constant is not used here, to work around a compiler bug
var v = Vec { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
var i: u8 = 0;
while (i < n) : (i += 1) v[i] = n - i - 1;
return v;
}
fn rotate_mask(n: u8) Vec {
var v = Vec { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
var i: u8 = 0;
while (i < n) : (i += 1) v[i] = (i + 1) % n;
return v;
}
fn next_perm_mask(n: u8) Vec {
var v = Vec { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
var i: u8 = 2;
while (i <= n) : (i += 1) v = apply_mask(v, i, rotate_mask);
return v;
}
fn apply_mask(a: Vec, n: u8, comptime mask: anytype) Vec {
const len = @typeInfo(Vec).Vector.len;
comptime var i: u8 = 0;
inline while (i < len) : (i += 1) if (i == n) return @shuffle(u8, a, undefined, mask(i));
unreachable;
}
fn pfannkuchen(perm: Vec) u32 {
var flip_count: u32 = 0;
var a = perm;
while (true) {
const k = a[0];
if (k == 0) return flip_count;
a = apply_mask(a, k + 1, reverse_mask);
flip_count += 1;
}
}
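// Illustrative sanity check: apply_mask with reverse_mask reverses only the first n
// lanes, and a permutation whose first element is 1 needs exactly one flip. The
// concrete lane values below are chosen purely for illustration.
test "reverse_mask and pfannkuchen on small inputs" {
    const v = Vec { 3, 1, 4, 1, 5, 9, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0 };
    const r = apply_mask(v, 4, reverse_mask);
    std.debug.assert(r[0] == 1 and r[1] == 4 and r[2] == 1 and r[3] == 3);
    std.debug.assert(r[4] == 5); // lanes past n are copied through unchanged
    const perm = Vec { 1, 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
    std.debug.assert(pfannkuchen(perm) == 1);
}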
pub fn main() !void {
const n = try get_n();
var max_flip_count: u32 = 0;
var checksum: i32 = 0;
var perm = Vec { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
var count = [_]u8 { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
var parity: u1 = 0;
while (true) : (parity +%= 1) {
const flip_count = pfannkuchen(perm);
max_flip_count = std.math.max(max_flip_count, flip_count);
checksum += @intCast(i32, flip_count) * (1 - @intCast(i32, parity)*2);
const r = for (count[0..n]) |v, i| {
if (v != 1) break @intCast(u8, i);
} else break;
perm = apply_mask(perm, r + 1, next_perm_mask);
count[r] -= 1;
for (count[1..r]) |*v, i| v.* = @intCast(u8, i + 2);
}
const stdout = std.io.getStdOut().writer();
try stdout.print("{d}\nPfannkuchen({d}) = {d}\n", .{ checksum, n, max_flip_count });
}
fn get_n() !u8 {
var arg_it = std.process.args();
_ = arg_it.skip();
const arg = arg_it.nextPosix() orelse return 10;
return try std.fmt.parseInt(u8, arg, 10);
} | bench/algorithm/fannkuch-redux/2.zig |
const std = @import("std");
const flip_vertically_on_write = false;
// Expects a writer which implements std.io.OutStream.
pub fn writeToStream(writer: var, w: usize, h: usize, comp: usize, data: []const u8, quality: i32) !void {
try writeToStreamCore(writer, w, h, comp, data, quality);
}
pub fn writeToFile(filename: []const u8, w: usize, h: usize, comp: usize, data: []const u8, quality: i32) !void {
var file = try std.fs.File.openWrite(filename);
defer file.close();
var file_stream = file.outStream();
var buffered_writer = std.io.BufferedOutStream(std.fs.File.WriteError).init(&file_stream.stream);
try writeToStream(&buffered_writer.stream, w, h, comp, data, quality);
try buffered_writer.flush();
}
const zig_zag_table = [_]u8{
0, 1, 5, 6, 14, 15, 27, 28, 2, 4, 7, 13, 16, 26, 29, 42, 3, 8, 12, 17, 25, 30, 41, 43, 9, 11, 18,
24, 31, 40, 44, 53, 10, 19, 23, 32, 39, 45, 52, 54, 20, 22, 33, 38, 46, 51, 55, 60, 21, 34, 37, 47, 50, 56,
59, 61, 35, 36, 48, 49, 57, 58, 62, 63,
};
fn writeBits(writer: var, bitBufP: *u32, bitCntP: *u32, bs: [2]u16) !void {
var bitBuf = bitBufP.*;
var bitCnt = bitCntP.*;
bitCnt += bs[1];
bitBuf |= std.math.shl(u32, bs[0], 24 - bitCnt);
while (bitCnt >= 8) : ({
bitBuf <<= 8;
bitCnt -= 8;
}) {
const c = @truncate(u8, bitBuf >> 16);
try writer.write(([_]u8{c})[0..]);
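        // JPEG byte stuffing: a 0xFF data byte must be followed by 0x00 so that a
        // decoder does not mistake it for a marker.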
if (c == 255) {
try writer.write(([_]u8{0})[0..]);
}
}
bitBufP.* = bitBuf;
bitCntP.* = bitCnt;
}
fn computeDCT(
d0p: *f32,
d1p: *f32,
d2p: *f32,
d3p: *f32,
d4p: *f32,
d5p: *f32,
d6p: *f32,
d7p: *f32,
) void {
var d0 = d0p.*;
var d1 = d1p.*;
var d2 = d2p.*;
var d3 = d3p.*;
var d4 = d4p.*;
var d5 = d5p.*;
var d6 = d6p.*;
var d7 = d7p.*;
const tmp0 = d0 + d7;
const tmp7 = d0 - d7;
const tmp1 = d1 + d6;
const tmp6 = d1 - d6;
const tmp2 = d2 + d5;
const tmp5 = d2 - d5;
const tmp3 = d3 + d4;
const tmp4 = d3 - d4;
// Even part
var tmp10 = tmp0 + tmp3; // phase 2
var tmp13 = tmp0 - tmp3;
var tmp11 = tmp1 + tmp2;
var tmp12 = tmp1 - tmp2;
d0 = tmp10 + tmp11; // phase 3
d4 = tmp10 - tmp11;
const z1 = (tmp12 + tmp13) * 0.707106781; // c4
d2 = tmp13 + z1; // phase 5
d6 = tmp13 - z1;
// Odd part
tmp10 = tmp4 + tmp5; // phase 2
tmp11 = tmp5 + tmp6;
tmp12 = tmp6 + tmp7;
// The rotator is modified from fig 4-8 to avoid extra negations.
const z5 = (tmp10 - tmp12) * 0.382683433; // c6
const z2 = tmp10 * 0.541196100 + z5; // c2-c6
const z4 = tmp12 * 1.306562965 + z5; // c2+c6
const z3 = tmp11 * 0.707106781; // c4
const z11 = tmp7 + z3; // phase 5
const z13 = tmp7 - z3;
d5p.* = z13 + z2; // phase 6
d3p.* = z13 - z2;
d1p.* = z11 + z4;
d7p.* = z11 - z4;
d0p.* = d0;
d2p.* = d2;
d4p.* = d4;
d6p.* = d6;
}
fn calcBits(_val: i32, bits: []u16) void {
std.debug.assert(bits.len == 2);
var val = _val;
var tmp1 = if (val < 0) -val else val;
val = if (val < 0) val - 1 else val;
bits[1] = 1;
while (true) {
tmp1 >>= 1;
if (tmp1 == 0) break;
bits[1] += 1;
}
bits[0] = @truncate(u16, @bitCast(u32, val) & (std.math.shl(c_uint, 1, bits[1]) - 1));
}
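// Illustrative sketch of the magnitude coding done by calcBits: bits[1] holds the bit
// category (number of significant bits) and bits[0] the value bits, with negative values
// stored in the one's-complement form JPEG requires. Example values only.
test "calcBits magnitude categories" {
    var bits: [2]u16 = undefined;
    calcBits(3, bits[0..]);
    std.debug.assert(bits[1] == 2 and bits[0] == 3); // 3 -> category 2, value bits 0b11
    calcBits(-3, bits[0..]);
    std.debug.assert(bits[1] == 2 and bits[0] == 0); // -3 -> category 2, value bits 0b00
}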
fn processDU(writer: var, bitBuf: *u32, bitCnt: *u32, CDU: []f32, fdtbl: []f32, DC: i32, HTDC: [256][2]u16, HTAC: [256][2]u16) !i32 {
std.debug.assert(CDU.len == 64);
std.debug.assert(fdtbl.len == 64);
const EOB = [_]u16{ HTAC[0x00][0], HTAC[0x00][1] };
const M16zeroes = [_]u16{ HTAC[0xF0][0], HTAC[0xF0][1] };
var DU: [64]i32 = undefined;
// DCT rows
{
var i: usize = 0;
while (i < 64) : (i += 8) {
computeDCT(&CDU[i], &CDU[i + 1], &CDU[i + 2], &CDU[i + 3], &CDU[i + 4], &CDU[i + 5], &CDU[i + 6], &CDU[i + 7]);
}
}
// DCT columns
{
var i: usize = 0;
while (i < 8) : (i += 1) {
computeDCT(&CDU[i], &CDU[i + 8], &CDU[i + 16], &CDU[i + 24], &CDU[i + 32], &CDU[i + 40], &CDU[i + 48], &CDU[i + 56]);
}
}
// Quantize/descale/zigzag the coefficients
{
var i: usize = 0;
while (i < 64) : (i += 1) {
const v = CDU[i] * fdtbl[i];
// DU[zig_zag_table[i]] = (int)(v < 0 ? ceilf(v - 0.5f) : floorf(v + 0.5f));
// ceilf() and floorf() are C99, not C89, but I /think/ they're not needed here anyway?
DU[zig_zag_table[i]] = @floatToInt(i32, if (v < 0) v - 0.5 else v + 0.5);
}
}
// Encode DC
const diff = DU[0] - DC;
if (diff == 0) {
try writeBits(writer, bitBuf, bitCnt, HTDC[0]);
} else {
var bits: [2]u16 = undefined;
calcBits(diff, bits[0..]);
try writeBits(writer, bitBuf, bitCnt, HTDC[bits[1]]);
try writeBits(writer, bitBuf, bitCnt, bits);
}
// Encode ACs
var end0pos: usize = 63;
while (end0pos > 0 and DU[end0pos] == 0) {
end0pos -= 1;
}
    // end0pos = index of the last non-zero coefficient (0 if all ACs are zero)
if (end0pos == 0) {
try writeBits(writer, bitBuf, bitCnt, EOB);
return DU[0];
}
var i: usize = 1;
while (i <= end0pos) : (i += 1) {
const startpos = i;
var bits: [2]u16 = undefined;
while (DU[i] == 0 and i <= end0pos) {
i += 1;
}
var nrzeroes = i - startpos;
if (nrzeroes >= 16) {
const lng = nrzeroes >> 4;
var nrmarker: usize = 1;
while (nrmarker <= lng) : (nrmarker += 1) {
try writeBits(writer, bitBuf, bitCnt, M16zeroes);
}
nrzeroes &= 15;
}
calcBits(DU[i], bits[0..]);
try writeBits(writer, bitBuf, bitCnt, HTAC[(nrzeroes << 4) + bits[1]]);
try writeBits(writer, bitBuf, bitCnt, bits);
}
if (end0pos != 63) {
try writeBits(writer, bitBuf, bitCnt, EOB);
}
return DU[0];
}
const std_dc_luminance_nrcodes = [_]u8{ 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 };
const std_dc_luminance_values = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
const std_ac_luminance_nrcodes = [_]u8{ 0, 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d };
const std_ac_luminance_values = [_]u8{
0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa,
};
const std_dc_chrominance_nrcodes = [_]u8{ 0, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 };
const std_dc_chrominance_values = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
const std_ac_chrominance_nrcodes = [_]u8{ 0, 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77 };
const std_ac_chrominance_values = [_]u8{
0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0, 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34, 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa,
};
// Huffman tables
const YDC_HT = [_][2]u16{
[_]u16{ 0, 2 }, [_]u16{ 2, 3 }, [_]u16{ 3, 3 }, [_]u16{ 4, 3 }, [_]u16{ 5, 3 }, [_]u16{ 6, 3 },
[_]u16{ 14, 4 }, [_]u16{ 30, 5 }, [_]u16{ 62, 6 }, [_]u16{ 126, 7 }, [_]u16{ 254, 8 }, [_]u16{ 510, 9 },
} ++ ([_][2]u16{[_]u16{ 0, 0 }}) ** 244;
const UVDC_HT = [12][2]u16{
[_]u16{ 0, 2 }, [_]u16{ 1, 2 }, [_]u16{ 2, 2 }, [_]u16{ 6, 3 }, [_]u16{ 14, 4 }, [_]u16{ 30, 5 },
[_]u16{ 62, 6 }, [_]u16{ 126, 7 }, [_]u16{ 254, 8 }, [_]u16{ 510, 9 }, [_]u16{ 1022, 10 }, [_]u16{ 2046, 11 },
} ++ ([_][2]u16{[_]u16{ 0, 0 }}) ** 244;
const YAC_HT = [256][2]u16{
[_]u16{ 10, 4 }, [_]u16{ 0, 2 }, [_]u16{ 1, 2 }, [_]u16{ 4, 3 },
[_]u16{ 11, 4 }, [_]u16{ 26, 5 }, [_]u16{ 120, 7 }, [_]u16{ 248, 8 },
[_]u16{ 1014, 10 }, [_]u16{ 65410, 16 }, [_]u16{ 65411, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 12, 4 }, [_]u16{ 27, 5 }, [_]u16{ 121, 7 },
[_]u16{ 502, 9 }, [_]u16{ 2038, 11 }, [_]u16{ 65412, 16 }, [_]u16{ 65413, 16 },
[_]u16{ 65414, 16 }, [_]u16{ 65415, 16 }, [_]u16{ 65416, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 28, 5 }, [_]u16{ 249, 8 }, [_]u16{ 1015, 10 },
[_]u16{ 4084, 12 }, [_]u16{ 65417, 16 }, [_]u16{ 65418, 16 }, [_]u16{ 65419, 16 },
[_]u16{ 65420, 16 }, [_]u16{ 65421, 16 }, [_]u16{ 65422, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 58, 6 }, [_]u16{ 503, 9 }, [_]u16{ 4085, 12 },
[_]u16{ 65423, 16 }, [_]u16{ 65424, 16 }, [_]u16{ 65425, 16 }, [_]u16{ 65426, 16 },
[_]u16{ 65427, 16 }, [_]u16{ 65428, 16 }, [_]u16{ 65429, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 59, 6 }, [_]u16{ 1016, 10 }, [_]u16{ 65430, 16 },
[_]u16{ 65431, 16 }, [_]u16{ 65432, 16 }, [_]u16{ 65433, 16 }, [_]u16{ 65434, 16 },
[_]u16{ 65435, 16 }, [_]u16{ 65436, 16 }, [_]u16{ 65437, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 122, 7 }, [_]u16{ 2039, 11 }, [_]u16{ 65438, 16 },
[_]u16{ 65439, 16 }, [_]u16{ 65440, 16 }, [_]u16{ 65441, 16 }, [_]u16{ 65442, 16 },
[_]u16{ 65443, 16 }, [_]u16{ 65444, 16 }, [_]u16{ 65445, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 123, 7 }, [_]u16{ 4086, 12 }, [_]u16{ 65446, 16 },
[_]u16{ 65447, 16 }, [_]u16{ 65448, 16 }, [_]u16{ 65449, 16 }, [_]u16{ 65450, 16 },
[_]u16{ 65451, 16 }, [_]u16{ 65452, 16 }, [_]u16{ 65453, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 250, 8 }, [_]u16{ 4087, 12 }, [_]u16{ 65454, 16 },
[_]u16{ 65455, 16 }, [_]u16{ 65456, 16 }, [_]u16{ 65457, 16 }, [_]u16{ 65458, 16 },
[_]u16{ 65459, 16 }, [_]u16{ 65460, 16 }, [_]u16{ 65461, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 504, 9 }, [_]u16{ 32704, 15 }, [_]u16{ 65462, 16 },
[_]u16{ 65463, 16 }, [_]u16{ 65464, 16 }, [_]u16{ 65465, 16 }, [_]u16{ 65466, 16 },
[_]u16{ 65467, 16 }, [_]u16{ 65468, 16 }, [_]u16{ 65469, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 505, 9 }, [_]u16{ 65470, 16 }, [_]u16{ 65471, 16 },
[_]u16{ 65472, 16 }, [_]u16{ 65473, 16 }, [_]u16{ 65474, 16 }, [_]u16{ 65475, 16 },
[_]u16{ 65476, 16 }, [_]u16{ 65477, 16 }, [_]u16{ 65478, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 506, 9 }, [_]u16{ 65479, 16 }, [_]u16{ 65480, 16 },
[_]u16{ 65481, 16 }, [_]u16{ 65482, 16 }, [_]u16{ 65483, 16 }, [_]u16{ 65484, 16 },
[_]u16{ 65485, 16 }, [_]u16{ 65486, 16 }, [_]u16{ 65487, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 1017, 10 }, [_]u16{ 65488, 16 }, [_]u16{ 65489, 16 },
[_]u16{ 65490, 16 }, [_]u16{ 65491, 16 }, [_]u16{ 65492, 16 }, [_]u16{ 65493, 16 },
[_]u16{ 65494, 16 }, [_]u16{ 65495, 16 }, [_]u16{ 65496, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 1018, 10 }, [_]u16{ 65497, 16 }, [_]u16{ 65498, 16 },
[_]u16{ 65499, 16 }, [_]u16{ 65500, 16 }, [_]u16{ 65501, 16 }, [_]u16{ 65502, 16 },
[_]u16{ 65503, 16 }, [_]u16{ 65504, 16 }, [_]u16{ 65505, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 2040, 11 }, [_]u16{ 65506, 16 }, [_]u16{ 65507, 16 },
[_]u16{ 65508, 16 }, [_]u16{ 65509, 16 }, [_]u16{ 65510, 16 }, [_]u16{ 65511, 16 },
[_]u16{ 65512, 16 }, [_]u16{ 65513, 16 }, [_]u16{ 65514, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 65515, 16 }, [_]u16{ 65516, 16 }, [_]u16{ 65517, 16 },
[_]u16{ 65518, 16 }, [_]u16{ 65519, 16 }, [_]u16{ 65520, 16 }, [_]u16{ 65521, 16 },
[_]u16{ 65522, 16 }, [_]u16{ 65523, 16 }, [_]u16{ 65524, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 2041, 11 }, [_]u16{ 65525, 16 }, [_]u16{ 65526, 16 }, [_]u16{ 65527, 16 },
[_]u16{ 65528, 16 }, [_]u16{ 65529, 16 }, [_]u16{ 65530, 16 }, [_]u16{ 65531, 16 },
[_]u16{ 65532, 16 }, [_]u16{ 65533, 16 }, [_]u16{ 65534, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
};
const UVAC_HT = [256][2]u16{
[_]u16{ 0, 2 }, [_]u16{ 1, 2 }, [_]u16{ 4, 3 }, [_]u16{ 10, 4 },
[_]u16{ 24, 5 }, [_]u16{ 25, 5 }, [_]u16{ 56, 6 }, [_]u16{ 120, 7 },
[_]u16{ 500, 9 }, [_]u16{ 1014, 10 }, [_]u16{ 4084, 12 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 11, 4 }, [_]u16{ 57, 6 }, [_]u16{ 246, 8 },
[_]u16{ 501, 9 }, [_]u16{ 2038, 11 }, [_]u16{ 4085, 12 }, [_]u16{ 65416, 16 },
[_]u16{ 65417, 16 }, [_]u16{ 65418, 16 }, [_]u16{ 65419, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 26, 5 }, [_]u16{ 247, 8 }, [_]u16{ 1015, 10 },
[_]u16{ 4086, 12 }, [_]u16{ 32706, 15 }, [_]u16{ 65420, 16 }, [_]u16{ 65421, 16 },
[_]u16{ 65422, 16 }, [_]u16{ 65423, 16 }, [_]u16{ 65424, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 27, 5 }, [_]u16{ 248, 8 }, [_]u16{ 1016, 10 },
[_]u16{ 4087, 12 }, [_]u16{ 65425, 16 }, [_]u16{ 65426, 16 }, [_]u16{ 65427, 16 },
[_]u16{ 65428, 16 }, [_]u16{ 65429, 16 }, [_]u16{ 65430, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 58, 6 }, [_]u16{ 502, 9 }, [_]u16{ 65431, 16 },
[_]u16{ 65432, 16 }, [_]u16{ 65433, 16 }, [_]u16{ 65434, 16 }, [_]u16{ 65435, 16 },
[_]u16{ 65436, 16 }, [_]u16{ 65437, 16 }, [_]u16{ 65438, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 59, 6 }, [_]u16{ 1017, 10 }, [_]u16{ 65439, 16 },
[_]u16{ 65440, 16 }, [_]u16{ 65441, 16 }, [_]u16{ 65442, 16 }, [_]u16{ 65443, 16 },
[_]u16{ 65444, 16 }, [_]u16{ 65445, 16 }, [_]u16{ 65446, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 121, 7 }, [_]u16{ 2039, 11 }, [_]u16{ 65447, 16 },
[_]u16{ 65448, 16 }, [_]u16{ 65449, 16 }, [_]u16{ 65450, 16 }, [_]u16{ 65451, 16 },
[_]u16{ 65452, 16 }, [_]u16{ 65453, 16 }, [_]u16{ 65454, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 122, 7 }, [_]u16{ 2040, 11 }, [_]u16{ 65455, 16 },
[_]u16{ 65456, 16 }, [_]u16{ 65457, 16 }, [_]u16{ 65458, 16 }, [_]u16{ 65459, 16 },
[_]u16{ 65460, 16 }, [_]u16{ 65461, 16 }, [_]u16{ 65462, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 249, 8 }, [_]u16{ 65463, 16 }, [_]u16{ 65464, 16 },
[_]u16{ 65465, 16 }, [_]u16{ 65466, 16 }, [_]u16{ 65467, 16 }, [_]u16{ 65468, 16 },
[_]u16{ 65469, 16 }, [_]u16{ 65470, 16 }, [_]u16{ 65471, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 503, 9 }, [_]u16{ 65472, 16 }, [_]u16{ 65473, 16 },
[_]u16{ 65474, 16 }, [_]u16{ 65475, 16 }, [_]u16{ 65476, 16 }, [_]u16{ 65477, 16 },
[_]u16{ 65478, 16 }, [_]u16{ 65479, 16 }, [_]u16{ 65480, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 504, 9 }, [_]u16{ 65481, 16 }, [_]u16{ 65482, 16 },
[_]u16{ 65483, 16 }, [_]u16{ 65484, 16 }, [_]u16{ 65485, 16 }, [_]u16{ 65486, 16 },
[_]u16{ 65487, 16 }, [_]u16{ 65488, 16 }, [_]u16{ 65489, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 505, 9 }, [_]u16{ 65490, 16 }, [_]u16{ 65491, 16 },
[_]u16{ 65492, 16 }, [_]u16{ 65493, 16 }, [_]u16{ 65494, 16 }, [_]u16{ 65495, 16 },
[_]u16{ 65496, 16 }, [_]u16{ 65497, 16 }, [_]u16{ 65498, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 506, 9 }, [_]u16{ 65499, 16 }, [_]u16{ 65500, 16 },
[_]u16{ 65501, 16 }, [_]u16{ 65502, 16 }, [_]u16{ 65503, 16 }, [_]u16{ 65504, 16 },
[_]u16{ 65505, 16 }, [_]u16{ 65506, 16 }, [_]u16{ 65507, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 2041, 11 }, [_]u16{ 65508, 16 }, [_]u16{ 65509, 16 },
[_]u16{ 65510, 16 }, [_]u16{ 65511, 16 }, [_]u16{ 65512, 16 }, [_]u16{ 65513, 16 },
[_]u16{ 65514, 16 }, [_]u16{ 65515, 16 }, [_]u16{ 65516, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 16352, 14 }, [_]u16{ 65517, 16 }, [_]u16{ 65518, 16 },
[_]u16{ 65519, 16 }, [_]u16{ 65520, 16 }, [_]u16{ 65521, 16 }, [_]u16{ 65522, 16 },
[_]u16{ 65523, 16 }, [_]u16{ 65524, 16 }, [_]u16{ 65525, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
[_]u16{ 1018, 10 }, [_]u16{ 32707, 15 }, [_]u16{ 65526, 16 }, [_]u16{ 65527, 16 },
[_]u16{ 65528, 16 }, [_]u16{ 65529, 16 }, [_]u16{ 65530, 16 }, [_]u16{ 65531, 16 },
[_]u16{ 65532, 16 }, [_]u16{ 65533, 16 }, [_]u16{ 65534, 16 }, [_]u16{ 0, 0 },
[_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 }, [_]u16{ 0, 0 },
};
const YQT = [_]i32{
16, 11, 10, 16, 24, 40, 51, 61, 12, 12, 14, 19, 26, 58, 60, 55, 14, 13, 16, 24, 40, 57, 69, 56, 14, 17, 22, 29, 51, 87, 80, 62, 18, 22,
37, 56, 68, 109, 103, 77, 24, 35, 55, 64, 81, 104, 113, 92, 49, 64, 78, 87, 103, 121, 120, 101, 72, 92, 95, 98, 112, 100, 103, 99,
};
const UVQT = [_]i32{
17, 18, 24, 47, 99, 99, 99, 99, 18, 21, 26, 66, 99, 99, 99, 99, 24, 26, 56, 99, 99, 99, 99, 99, 47, 66, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
};
const aasf = [_]f32{
1.0 * 2.828427125, 1.387039845 * 2.828427125, 1.306562965 * 2.828427125, 1.175875602 * 2.828427125,
1.0 * 2.828427125, 0.785694958 * 2.828427125, 0.541196100 * 2.828427125, 0.275899379 * 2.828427125,
};
fn writeToStreamCore(writer: var, width: usize, height: usize, comp: usize, data: []const u8, _quality: i32) !void {
if (width == 0 or height == 0 or comp > 4 or comp < 1) {
return error.InvalidArguments;
}
var fdtbl_Y: [64]f32 = undefined;
var fdtbl_UV: [64]f32 = undefined;
var YTable: [64]u8 = undefined;
var UVTable: [64]u8 = undefined;
var quality = if (_quality != 0) _quality else 90;
quality = if (quality < 1) 1 else (if (quality > 100) 100 else quality);
quality = if (quality < 50) @divFloor(5000, quality) else 200 - quality * 2;
{
var i: usize = 0;
while (i < 64) : (i += 1) {
const yti = @divFloor(YQT[i] * quality + 50, 100);
YTable[zig_zag_table[i]] = @intCast(u8, if (yti < 1) 1 else (if (yti > 255) 255 else yti));
const uvti = @divFloor(UVQT[i] * quality + 50, 100);
UVTable[zig_zag_table[i]] = @intCast(u8, if (uvti < 1) 1 else (if (uvti > 255) 255 else uvti));
}
}
{
var row: usize = 0;
var k: usize = 0;
while (row < 8) : (row += 1) {
var col: usize = 0;
while (col < 8) : ({
col += 1;
k += 1;
}) {
fdtbl_Y[k] = 1 / (@intToFloat(f32, YTable[zig_zag_table[k]]) * aasf[row] * aasf[col]);
fdtbl_UV[k] = 1 / (@intToFloat(f32, UVTable[zig_zag_table[k]]) * aasf[row] * aasf[col]);
}
}
}
// Write Headers
{
const head0 = [_]u8{ 0xFF, 0xD8, 0xFF, 0xE0, 0, 0x10, 'J', 'F', 'I', 'F', 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0xFF, 0xDB, 0, 0x84, 0 };
const head2 = [_]u8{ 0xFF, 0xDA, 0, 0xC, 3, 1, 0, 2, 0x11, 3, 0x11, 0, 0x3F, 0 };
const head1 = [_]u8{
0xFF, 0xC0, 0, 0x11, 8, @truncate(u8, height >> 8), @truncate(u8, height), @truncate(u8, width >> 8), @truncate(u8, width),
3, 1, 0x11, 0, 2, 0x11, 1, 3, 0x11,
1, 0xFF, 0xC4, 0x01, 0xA2, 0,
};
try writer.write(head0);
try writer.write(YTable);
try writer.write([_]u8{1});
try writer.write(UVTable);
try writer.write(head1);
try writer.write(std_dc_luminance_nrcodes[1..]);
try writer.write(std_dc_luminance_values);
try writer.write(([_]u8{0x10})[0..]); // HTYACinfo
try writer.write(std_ac_luminance_nrcodes[1..]);
try writer.write(std_ac_luminance_values);
try writer.write(([_]u8{1})[0..]); //HTUDCinfo
try writer.write(std_dc_chrominance_nrcodes[1..]);
try writer.write(std_dc_chrominance_values);
try writer.write(([_]u8{0x11})[0..]); // HTUACinfo
try writer.write(std_ac_chrominance_nrcodes[1..]);
try writer.write(std_ac_chrominance_values);
try writer.write(head2);
}
// Encode 8x8 macroblocks
{
const fillBits = [_]u16{ 0x7F, 7 };
var DCY: i32 = 0;
var DCU: i32 = 0;
var DCV: i32 = 0;
var bitBuf: u32 = 0;
var bitCnt: u32 = 0;
// comp == 2 is grey+alpha (alpha is ignored)
const ofsG = if (comp > 2) usize(1) else 0;
const ofsB = if (comp > 2) usize(2) else 0;
var y: usize = 0;
while (y < height) : (y += 8) {
var x: usize = 0;
while (x < width) : (x += 8) {
var YDU: [64]f32 = undefined;
var UDU: [64]f32 = undefined;
var VDU: [64]f32 = undefined;
var row = y;
var pos: usize = 0;
while (row < y + 8) : (row += 1) {
var col = x;
while (col < x + 8) : ({
col += 1;
pos += 1;
}) {
var p = (if (flip_vertically_on_write) height - 1 - row else row) * width * comp + col * comp;
if (row >= height) {
p -= width * comp * (row + 1 - height);
}
if (col >= width) {
p -= comp * (col + 1 - width);
}
const r = @intToFloat(f32, data[p + 0]);
const g = @intToFloat(f32, data[p + ofsG]);
const b = @intToFloat(f32, data[p + ofsB]);
YDU[pos] = 0.29900 * r + 0.58700 * g + 0.11400 * b - 128;
UDU[pos] = -0.16874 * r - 0.33126 * g + 0.50000 * b;
VDU[pos] = 0.50000 * r - 0.41869 * g - 0.08131 * b;
}
}
DCY = try processDU(writer, &bitBuf, &bitCnt, YDU[0..], fdtbl_Y[0..], DCY, YDC_HT, YAC_HT);
DCU = try processDU(writer, &bitBuf, &bitCnt, UDU[0..], fdtbl_UV[0..], DCU, UVDC_HT, UVAC_HT);
DCV = try processDU(writer, &bitBuf, &bitCnt, VDU[0..], fdtbl_UV[0..], DCV, UVDC_HT, UVAC_HT);
}
}
// Do the bit alignment of the EOI marker
try writeBits(writer, &bitBuf, &bitCnt, fillBits);
}
// EOI
try writer.write(([_]u8{0xFF})[0..]);
try writer.write(([_]u8{0xD9})[0..]);
} | jpeg_writer.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.codegen);
const spec = @import("spirv/spec.zig");
const Module = @import("../Module.zig");
const Decl = Module.Decl;
const Type = @import("../type.zig").Type;
pub const TypeMap = std.HashMap(Type, u32, Type.hash, Type.eql, std.hash_map.default_max_load_percentage);
pub fn writeInstruction(code: *std.ArrayList(u32), instr: spec.Opcode, args: []const u32) !void {
const word_count = @intCast(u32, args.len + 1);
try code.append((word_count << 16) | @enumToInt(instr));
try code.appendSlice(args);
}
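// Minimal sketch of the SPIR-V word layout produced by writeInstruction: the high
// half-word of the first word is the total word count (including the opcode word),
// the low half-word is the opcode, and operands follow as separate words. The
// .OpTypeVoid opcode and the operand value 42 are used purely for illustration.
test "writeInstruction word layout" {
    var code = std.ArrayList(u32).init(std.testing.allocator);
    defer code.deinit();
    try writeInstruction(&code, .OpTypeVoid, &[_]u32{42});
    std.debug.assert(code.items.len == 2);
    std.debug.assert((code.items[0] >> 16) == 2);
    std.debug.assert(code.items[1] == 42);
}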
pub const SPIRVModule = struct {
next_result_id: u32 = 0,
target: std.Target,
types: TypeMap,
types_and_globals: std.ArrayList(u32),
fn_decls: std.ArrayList(u32),
pub fn init(target: std.Target, allocator: *Allocator) SPIRVModule {
return .{
.target = target,
.types = TypeMap.init(allocator),
.types_and_globals = std.ArrayList(u32).init(allocator),
.fn_decls = std.ArrayList(u32).init(allocator),
};
}
pub fn deinit(self: *SPIRVModule) void {
self.fn_decls.deinit();
self.types_and_globals.deinit();
self.types.deinit();
self.* = undefined;
}
pub fn allocResultId(self: *SPIRVModule) u32 {
defer self.next_result_id += 1;
return self.next_result_id;
}
pub fn resultIdBound(self: *SPIRVModule) u32 {
return self.next_result_id;
}
pub fn getOrGenType(self: *SPIRVModule, t: Type) !u32 {
// We can't use getOrPut here so we can recursively generate types.
if (self.types.get(t)) |already_generated| {
return already_generated;
}
const result = self.allocResultId();
switch (t.zigTypeTag()) {
.Void => try writeInstruction(&self.types_and_globals, .OpTypeVoid, &[_]u32{ result }),
.Bool => try writeInstruction(&self.types_and_globals, .OpTypeBool, &[_]u32{ result }),
.Int => {
const int_info = t.intInfo(self.target);
try writeInstruction(&self.types_and_globals, .OpTypeInt, &[_]u32{
result,
int_info.bits,
switch (int_info.signedness) {
.unsigned => 0,
.signed => 1,
},
});
},
// TODO: Verify that floatBits() will be correct.
.Float => try writeInstruction(&self.types_and_globals, .OpTypeFloat, &[_]u32{ result, t.floatBits(self.target) }),
.Null,
.Undefined,
.EnumLiteral,
.ComptimeFloat,
.ComptimeInt,
.Type,
=> unreachable, // Must be const or comptime.
.BoundFn => unreachable, // this type will be deleted from the language.
else => return error.TODO,
}
try self.types.put(t, result);
return result;
}
pub fn gen(self: *SPIRVModule, decl: *Decl) !void {
const typed_value = decl.typed_value.most_recent.typed_value;
switch (typed_value.ty.zigTypeTag()) {
.Fn => {
log.debug("Generating code for function '{s}'", .{ std.mem.spanZ(decl.name) });
_ = try self.getOrGenType(typed_value.ty.fnReturnType());
},
else => return error.TODO,
}
}
}; | src/codegen/spirv.zig |
const std = @import("std");
const aoc = @import("aoc-lib.zig");
fn part1(in: [][]const u8) u64 {
const dt = std.fmt.parseUnsigned(u64, in[0], 10) catch unreachable;
var min: u64 = std.math.maxInt(u64);
var mbus: u64 = undefined;
var bit = std.mem.split(u8, in[1], ",");
while (bit.next()) |ts| {
if (ts[0] == 'x') {
continue;
}
const t = std.fmt.parseUnsigned(u64, ts, 10) catch unreachable;
const m = t - (dt % t);
if (m < min) {
min = m;
mbus = t;
}
}
return min * mbus;
}
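// Extended Euclid: returns gcd(a, b) and sets x.*, y.* so that a*x.* + b*y.* == gcd(a, b).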
fn egcd(a: i64, b: i64, x: *i64, y: *i64) i64 {
if (a == 0) {
x.* = 0;
y.* = 1;
return b;
} else {
const g = egcd(@mod(b, a), a, x, y);
const t = x.*;
x.* = y.* - @divFloor(b, a) * t;
y.* = t;
return g;
}
}
fn chinese(la: std.ArrayList(i64), ln: std.ArrayList(i64)) i64 {
var p: i64 = 1;
for (ln.items) |n| {
p *= n;
}
var x: i64 = 0;
    var j: i64 = undefined; // placeholder for the egcd result we don't need
for (ln.items) |n, i| {
const a = la.items[i];
const q = @divExact(p, n);
var y: i64 = undefined;
const z = egcd(n, q, &j, &y);
//warn("q={} x={} y={} z={}\n", .{ q, j, y, z });
if (z != 1) {
std.debug.print("{} is not coprime\n", .{n});
return 0;
}
x += a * y * q;
x = @mod(x, p);
//warn("x={}\n", .{x});
}
return x;
}
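// Illustrative check with hand-picked moduli (not puzzle input): x = 2 (mod 3),
// x = 3 (mod 5), x = 2 (mod 7) has the unique solution 23 below the product 105.
test "chinese remainder theorem" {
    var a = std.ArrayList(i64).init(aoc.talloc);
    defer a.deinit();
    var n = std.ArrayList(i64).init(aoc.talloc);
    defer n.deinit();
    try a.appendSlice(&[_]i64{ 2, 3, 2 });
    try n.appendSlice(&[_]i64{ 3, 5, 7 });
    try aoc.assertEq(@as(i64, 23), chinese(a, n));
}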
fn part2(alloc: std.mem.Allocator, in: [][]const u8) i64 {
var bit = std.mem.split(u8, in[1], ",");
var a = std.ArrayList(i64).init(alloc);
defer a.deinit();
var n = std.ArrayList(i64).init(alloc);
defer n.deinit();
var i: i64 = 0;
while (bit.next()) |ts| {
defer {
i += 1;
}
if (ts[0] == 'x') {
continue;
}
const t = std.fmt.parseUnsigned(i64, ts, 10) catch unreachable;
//warn("Adding pair {} and {}\n", .{ t - i, t });
a.append(t - i) catch unreachable;
n.append(t) catch unreachable;
}
return chinese(a, n);
}
test "examples" {
const test1 = aoc.readLines(aoc.talloc, aoc.test1file);
defer aoc.talloc.free(test1);
const test2 = aoc.readLines(aoc.talloc, aoc.test2file);
defer aoc.talloc.free(test2);
const test3 = aoc.readLines(aoc.talloc, aoc.test3file);
defer aoc.talloc.free(test3);
const test4 = aoc.readLines(aoc.talloc, aoc.test4file);
defer aoc.talloc.free(test4);
const test5 = aoc.readLines(aoc.talloc, aoc.test5file);
defer aoc.talloc.free(test5);
const test6 = aoc.readLines(aoc.talloc, aoc.test6file);
defer aoc.talloc.free(test6);
const inp = aoc.readLines(aoc.talloc, aoc.inputfile);
defer aoc.talloc.free(inp);
try aoc.assertEq(@as(u64, 295), part1(test1));
try aoc.assertEq(@as(u64, 130), part1(test2));
try aoc.assertEq(@as(u64, 295), part1(test3));
try aoc.assertEq(@as(u64, 295), part1(test4));
try aoc.assertEq(@as(u64, 295), part1(test5));
try aoc.assertEq(@as(u64, 47), part1(test6));
try aoc.assertEq(@as(u64, 3035), part1(inp));
try aoc.assertEq(@as(i64, 1068781), part2(aoc.talloc, test1));
try aoc.assertEq(@as(i64, 3417), part2(aoc.talloc, test2));
try aoc.assertEq(@as(i64, 754018), part2(aoc.talloc, test3));
try aoc.assertEq(@as(i64, 779210), part2(aoc.talloc, test4));
try aoc.assertEq(@as(i64, 1261476), part2(aoc.talloc, test5));
try aoc.assertEq(@as(i64, 1202161486), part2(aoc.talloc, test6));
try aoc.assertEq(@as(i64, 725169163285238), part2(aoc.talloc, inp));
}
fn day13(inp: []const u8, bench: bool) anyerror!void {
const lines = aoc.readLines(aoc.halloc, inp);
defer aoc.halloc.free(lines);
var p1 = part1(lines);
var p2 = part2(aoc.halloc, lines);
if (!bench) {
try aoc.print("Part 1: {}\nPart 2: {}\n", .{ p1, p2 });
}
}
pub fn main() anyerror!void {
try aoc.benchme(aoc.input(), day13);
} | 2020/13/aoc.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const build_options = @import("build_options");
const console_ = @import("console.zig");
const Console = console_.Console;
const Precision = console_.Precision;
const IoMethod = console_.IoMethod;
const sdl_bindings = @import("sdl/bindings.zig");
const Sdl = sdl_bindings.Sdl;
const video = @import("video.zig");
const audio = @import("audio.zig");
pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.detectLeaks();
var allocator = &gpa.allocator;
try Sdl.init(.{sdl_bindings.c.SDL_INIT_VIDEO | sdl_bindings.c.SDL_INIT_AUDIO | sdl_bindings.c.SDL_INIT_EVENTS});
defer Sdl.quit();
var console = Console(.{
.precision = comptime std.meta.stringToEnum(Precision, @tagName(build_options.precision)).?,
.method = .sdl,
}).alloc();
const sdl_context = if (build_options.imgui) .sdl_imgui else .sdl_basic;
var video_context = try video.Context(sdl_context).init(allocator, &console, "Badnes");
defer video_context.deinit(allocator);
var audio_context = try audio.Context(.sdl).alloc(allocator);
    // TODO: need an errdefer too but lazy
try audio_context.init();
defer audio_context.deinit(allocator);
console.init(allocator, video_context.getGamePixelBuffer(), &audio_context);
defer console.deinit();
var args_iter = std.process.args();
_ = args_iter.skip();
if (args_iter.next(allocator)) |arg| {
const path = try arg;
defer allocator.free(path);
try console.loadRom(path);
}
var event: sdl_bindings.c.SDL_Event = undefined;
var total_time: i128 = 0;
var frames: usize = 0;
mloop: while (true) {
while (Sdl.pollEvent(.{&event}) == 1) {
if (!video_context.handleEvent(event)) {
break :mloop;
}
}
if (console.ppu.present_frame) {
frames += 1;
console.ppu.present_frame = false;
total_time += try video_context.draw(.{ .timing = .timed });
if (total_time > std.time.ns_per_s) {
//std.debug.print("FPS: {}\n", .{frames});
frames = 0;
total_time -= std.time.ns_per_s;
}
if (frames > 4) {
audio_context.unpause();
}
}
if (console.paused) {
_ = try video_context.draw(.{ .timing = .timed });
continue;
}
// Batch run instructions/cycles to not get bogged down by Sdl.pollEvent
var i: usize = 0;
switch (build_options.precision) {
.fast => {
while (i < 2000) : (i += 1) {
console.cpu.runStep();
}
},
.accurate => {
while (i < 5000) : (i += 1) {
console.cpu.runStep();
}
},
}
}
} | src/main.zig |
const std = @import("std");
const builtin = @import("builtin");
const runtime_safety = std.debug.runtime_safety;
const is_64: bool = switch (builtin.cpu.arch) {
.riscv64 => true,
.riscv32 => false,
else => |arch| @compileError("only riscv64 and riscv32 targets supported, found target: " ++ @tagName(arch)),
};
pub const Error = error{
FAILED,
NOT_SUPPORTED,
INVALID_PARAM,
DENIED,
INVALID_ADDRESS,
ALREADY_AVAILABLE,
ALREADY_STARTED,
ALREADY_STOPPED,
};
pub const EID = enum(i32) {
LEGACY_SET_TIMER = 0x0,
LEGACY_CONSOLE_PUTCHAR = 0x1,
LEGACY_CONSOLE_GETCHAR = 0x2,
LEGACY_CLEAR_IPI = 0x3,
LEGACY_SEND_IPI = 0x4,
LEGACY_REMOTE_FENCE_I = 0x5,
LEGACY_REMOTE_SFENCE_VMA = 0x6,
LEGACY_REMOTE_SFENCE_VMA_ASID = 0x7,
LEGACY_SHUTDOWN = 0x8,
BASE = 0x10,
TIME = 0x54494D45,
IPI = 0x735049,
RFENCE = 0x52464E43,
HSM = 0x48534D,
SRST = 0x53525354,
PMU = 0x504D55,
_,
};
/// The base extension is designed to be as small as possible.
/// As such, it only contains functionality for probing which SBI extensions are available and
/// for querying the version of the SBI.
/// All functions in the base extension must be supported by all SBI implementations, so there
/// are no error returns defined.
pub const base = struct {
/// Returns the current SBI specification version.
pub fn getSpecVersion() SpecVersion {
return @bitCast(SpecVersion, ecall.zeroArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.GET_SPEC_VERSION)));
}
/// Returns the current SBI implementation ID, which is different for every SBI implementation.
/// It is intended that this implementation ID allows software to probe for SBI implementation quirks
pub fn getImplementationId() ImplementationId {
return @intToEnum(ImplementationId, ecall.zeroArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.GET_IMP_ID)));
}
/// Returns the current SBI implementation version.
/// The encoding of this version number is specific to the SBI implementation.
pub fn getImplementationVersion() isize {
return ecall.zeroArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.GET_IMP_VERSION));
}
/// Returns false if the given SBI extension ID (EID) is not available, or true if it is available.
pub fn probeExtension(eid: EID) bool {
return ecall.oneArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.PROBE_EXT), @enumToInt(eid)) != 0;
}
/// Return a value that is legal for the `mvendorid` CSR and 0 is always a legal value for this CSR.
pub fn machineVendorId() isize {
return ecall.zeroArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.GET_MVENDORID));
}
/// Return a value that is legal for the `marchid` CSR and 0 is always a legal value for this CSR.
pub fn machineArchId() isize {
return ecall.zeroArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.GET_MARCHID));
}
/// Return a value that is legal for the `mimpid` CSR and 0 is always a legal value for this CSR.
pub fn machineImplementationId() isize {
return ecall.zeroArgsWithReturnNoError(.BASE, @enumToInt(BASE_FID.GET_MIMPID));
}
pub const ImplementationId = enum(isize) {
@"Berkeley Boot Loader (BBL)" = 0,
OpenSBI = 1,
Xvisor = 2,
KVM = 3,
RustSBI = 4,
Diosix = 5,
Coffer = 6,
_,
};
pub const SpecVersion = packed struct {
minor: u24,
major: u7,
_reserved: u1,
_: if (is_64) u32 else u0,
comptime {
std.debug.assert(@sizeOf(usize) == @sizeOf(SpecVersion));
std.debug.assert(@bitSizeOf(usize) == @bitSizeOf(SpecVersion));
}
comptime {
std.testing.refAllDecls(@This());
}
};
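    // Layout sketch: per the SBI spec the minor number sits in bits [23:0] and the
    // major number in bits [30:24] of the returned register, so a raw value of
    // 1 << 24 should decode as version 1.0. The raw value here is illustrative.
    test "SpecVersion bit layout" {
        const raw: isize = 1 << 24;
        const version = @bitCast(SpecVersion, raw);
        std.debug.assert(version.major == 1 and version.minor == 0 and version._reserved == 0);
    }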
const BASE_FID = enum(i32) {
GET_SPEC_VERSION = 0x0,
GET_IMP_ID = 0x1,
GET_IMP_VERSION = 0x2,
PROBE_EXT = 0x3,
GET_MVENDORID = 0x4,
GET_MARCHID = 0x5,
GET_MIMPID = 0x6,
};
comptime {
std.testing.refAllDecls(@This());
}
};
/// These legacy SBI extension are deprecated in favor of the other extensions.
/// Each function needs to be individually probed to check for support.
pub const legacy = struct {
pub fn setTimerAvailable() bool {
return base.probeExtension(.LEGACY_SET_TIMER);
}
/// Programs the clock for next event after time_value time.
/// This function also clears the pending timer interrupt bit.
///
/// If the supervisor wishes to clear the timer interrupt without scheduling the next timer event,
/// it can either request a timer interrupt infinitely far into the future
/// (i.e., `@bitCast(u64, @as(i64, -1))`), or it can instead mask the timer interrupt by clearing `sie.STIE` CSR bit.
///
/// This function returns `ImplementationDefinedError` as an implementation specific error is possible.
pub fn setTimer(time_value: u64) ImplementationDefinedError {
return ecall.legacyOneArgs64NoReturnWithRawError(.LEGACY_SET_TIMER, time_value);
}
pub fn consolePutCharAvailable() bool {
return base.probeExtension(.LEGACY_CONSOLE_PUTCHAR);
}
/// Write data present in char to debug console.
/// Unlike `consoleGetChar`, this SBI call will block if there remain any pending characters to be
/// transmitted or if the receiving terminal is not yet ready to receive the byte.
    /// However, if the console doesn’t exist at all, then the character is thrown away.
///
/// This function returns `ImplementationDefinedError` as an implementation specific error is possible.
pub fn consolePutChar(char: u8) ImplementationDefinedError {
return ecall.legacyOneArgsNoReturnWithRawError(.LEGACY_CONSOLE_PUTCHAR, char);
}
pub fn consoleGetCharAvailable() bool {
return base.probeExtension(.LEGACY_CONSOLE_GETCHAR);
}
/// Read a byte from debug console.
pub fn consoleGetChar() error{FAILED}!u8 {
if (runtime_safety) {
return @intCast(
u8,
ecall.legacyZeroArgsWithReturnWithError(
.LEGACY_CONSOLE_GETCHAR,
error{ NOT_SUPPORTED, FAILED },
) catch |err| switch (err) {
error.NOT_SUPPORTED => unreachable,
else => |e| return e,
},
);
}
return @intCast(
u8,
try ecall.legacyZeroArgsWithReturnWithError(.LEGACY_CONSOLE_GETCHAR, error{FAILED}),
);
}
pub fn clearIPIAvailable() bool {
return base.probeExtension(.LEGACY_CLEAR_IPI);
}
/// Clears the pending IPIs if any. The IPI is cleared only in the hart for which this SBI call is invoked.
/// `clearIPI` is deprecated because S-mode code can clear `sip.SSIP` CSR bit directly
pub fn clearIPI() void {
if (runtime_safety) {
ecall.legacyZeroArgsNoReturnWithError(.LEGACY_CLEAR_IPI, error{NOT_SUPPORTED}) catch unreachable;
return;
}
ecall.legacyZeroArgsNoReturnNoError(.LEGACY_CLEAR_IPI);
}
pub fn sendIPIAvailable() bool {
return base.probeExtension(.LEGACY_SEND_IPI);
}
/// Send an inter-processor interrupt to all the harts defined in hart_mask.
/// Interprocessor interrupts manifest at the receiving harts as Supervisor Software Interrupts.
/// `hart_mask` is a virtual address that points to a bit-vector of harts. The bit vector is represented as a
/// sequence of `usize` whose length equals the number of harts in the system divided by the number of bits in a `usize`,
/// rounded up to the next integer.
///
/// This function returns `ImplementationDefinedError` as an implementation specific error is possible.
pub fn sendIPI(hart_mask: [*]const usize) ImplementationDefinedError {
return ecall.legacyOneArgsNoReturnWithRawError(.LEGACY_SEND_IPI, @bitCast(isize, @ptrToInt(hart_mask)));
}
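    // Illustrative call (hart numbering is platform specific): on a system with at most
    // @bitSizeOf(usize) harts the bit-vector is a single word, bit i selecting hart i:
    //
    //     var mask = [_]usize{0b101}; // harts 0 and 2
    //     _ = legacy.sendIPI(&mask);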
pub fn remoteFenceIAvailable() bool {
return base.probeExtension(.LEGACY_REMOTE_FENCE_I);
}
/// Instructs remote harts to execute FENCE.I instruction.
/// The `hart_mask` is the same as described in `sendIPI`.
///
/// This function returns `ImplementationDefinedError` as an implementation specific error is possible.
pub fn remoteFenceI(hart_mask: [*]const usize) ImplementationDefinedError {
return ecall.legacyOneArgsNoReturnWithRawError(.LEGACY_REMOTE_FENCE_I, @bitCast(isize, @ptrToInt(hart_mask)));
}
pub fn remoteSFenceVMAAvailable() bool {
return base.probeExtension(.LEGACY_REMOTE_SFENCE_VMA);
}
/// Instructs the remote harts to execute one or more SFENCE.VMA instructions, covering the range of
/// virtual addresses between `start` and `size`.
/// The `hart_mask` is the same as described in `sendIPI`.
pub fn remoteSFenceVMA(hart_mask: [*]const usize, start: usize, size: usize) void {
if (runtime_safety) {
ecall.legacyThreeArgsNoReturnWithError(
.LEGACY_REMOTE_SFENCE_VMA,
@bitCast(isize, @ptrToInt(hart_mask)),
@bitCast(isize, start),
@bitCast(isize, size),
error{NOT_SUPPORTED},
) catch unreachable;
return;
}
ecall.legacyThreeArgsNoReturnNoError(
.LEGACY_REMOTE_SFENCE_VMA,
@bitCast(isize, @ptrToInt(hart_mask)),
@bitCast(isize, start),
@bitCast(isize, size),
);
}
pub fn remoteSFenceVMAWithASIDAvailable() bool {
return base.probeExtension(.LEGACY_REMOTE_SFENCE_VMA_ASID);
}
/// Instruct the remote harts to execute one or more SFENCE.VMA instructions, covering the range of
/// virtual addresses between `start` and `size`. This covers only the given ASID.
/// The `hart_mask` is the same as described in `sendIPI`.
///
/// This function returns `ImplementationDefinedError` as an implementation specific error is possible.
pub fn remoteSFenceVMAWithASID(hart_mask: [*]const usize, start: usize, size: usize, asid: usize) ImplementationDefinedError {
return ecall.legacyFourArgsNoReturnWithRawError(
.LEGACY_REMOTE_SFENCE_VMA_ASID,
@bitCast(isize, @ptrToInt(hart_mask)),
@bitCast(isize, start),
@bitCast(isize, size),
@bitCast(isize, asid),
);
}
pub fn systemShutdownAvailable() bool {
return base.probeExtension(.LEGACY_SHUTDOWN);
}
/// Puts all the harts to shutdown state from supervisor point of view.
///
/// This SBI call does not return, irrespective of whether it succeeds or fails.
pub fn systemShutdown() void {
if (runtime_safety) {
ecall.legacyZeroArgsNoReturnWithError(.LEGACY_SHUTDOWN, error{NOT_SUPPORTED}) catch unreachable;
} else {
ecall.legacyZeroArgsNoReturnNoError(.LEGACY_SHUTDOWN);
}
unreachable;
}
comptime {
std.testing.refAllDecls(@This());
}
};
pub const time = struct {
pub fn available() bool {
return base.probeExtension(.TIME);
}
/// Programs the clock for next event after time_value time.
/// This function also clears the pending timer interrupt bit.
///
/// If the supervisor wishes to clear the timer interrupt without scheduling the next timer event,
/// it can either request a timer interrupt infinitely far into the future
/// (i.e., `@bitCast(u64, @as(i64, -1))`), or it can instead mask the timer interrupt by clearing `sie.STIE` CSR bit.
pub fn setTimer(time_value: u64) void {
if (runtime_safety) {
ecall.oneArgs64NoReturnWithError(
.TIME,
@enumToInt(TIME_FID.TIME_SET_TIMER),
time_value,
error{NOT_SUPPORTED},
) catch unreachable;
return;
}
ecall.oneArgs64NoReturnNoError(
.TIME,
@enumToInt(TIME_FID.TIME_SET_TIMER),
time_value,
);
}
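// Illustrative only, mirroring the note above: a caller can clear the pending timer
// interrupt by programming the next event "infinitely" far into the future.
//     time.setTimer(@bitCast(u64, @as(i64, -1)));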
const TIME_FID = enum(i32) {
TIME_SET_TIMER = 0x0,
};
comptime {
std.testing.refAllDecls(@This());
}
};
pub const HartMask = union(enum) {
/// all available ids must be considered
all,
mask: struct {
/// a scalar bit-vector containing ids
mask: usize,
/// the starting id from which bit-vector must be computed
base: usize,
},
};
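// Illustrative only: two ways a caller might build a `HartMask`. The first selects
// harts 1 and 3 (bits 1 and 3) relative to base hart id 0, the second targets every
// available hart.
//     const two_harts = HartMask{ .mask = .{ .mask = 0b1010, .base = 0 } };
//     const every_hart = HartMask{ .all = {} };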
pub const ipi = struct {
pub fn available() bool {
return base.probeExtension(.IPI);
}
/// Send an inter-processor interrupt to all the harts defined in `hart_mask`.
/// Interprocessor interrupts manifest at the receiving harts as the supervisor software interrupts.
pub fn sendIPI(hart_mask: HartMask) error{INVALID_PARAM}!void {
var bit_mask: isize = undefined;
var mask_base: isize = undefined;
switch (hart_mask) {
.all => {
bit_mask = 0;
mask_base = 0;
},
.mask => |mask| {
bit_mask = @bitCast(isize, mask.mask);
mask_base = @bitCast(isize, mask.base);
},
}
if (runtime_safety) {
ecall.twoArgsNoReturnWithError(
.IPI,
@enumToInt(IPI_FID.SEND_IPI),
bit_mask,
mask_base,
error{ NOT_SUPPORTED, INVALID_PARAM },
) catch |err| switch (err) {
error.NOT_SUPPORTED => unreachable,
else => |e| return e,
};
return;
}
return ecall.twoArgsNoReturnWithError(
.IPI,
@enumToInt(IPI_FID.SEND_IPI),
bit_mask,
mask_base,
error{INVALID_PARAM},
);
}
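// Illustrative sketch of caller-side usage: send an IPI to hart 1 only.
//     try ipi.sendIPI(HartMask{ .mask = .{ .mask = 0b10, .base = 0 } });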
const IPI_FID = enum(i32) {
SEND_IPI = 0x0,
};
comptime {
std.testing.refAllDecls(@This());
}
};
/// Any function that wishes to use a range of addresses (i.e. `start_addr` and `size`) has to abide by the
/// constraints on the range parameters described below.
///
/// The remote fence function acts as a full TLB flush if
/// • `start_addr` and `size` are both 0
/// • `size` is equal to 2^XLEN-1
pub const rfence = struct {
pub fn available() bool {
return base.probeExtension(.RFENCE);
}
/// Instructs remote harts to execute FENCE.I instruction.
pub fn remoteFenceI(hart_mask: HartMask) error{INVALID_PARAM}!void {
var bit_mask: isize = undefined;
var mask_base: isize = undefined;
switch (hart_mask) {
.all => {
bit_mask = 0;
mask_base = 0;
},
.mask => |mask| {
bit_mask = @bitCast(isize, mask.mask);
mask_base = @bitCast(isize, mask.base);
},
}
if (runtime_safety) {
ecall.twoArgsNoReturnWithError(
.RFENCE,
@enumToInt(RFENCE_FID.FENCE_I),
bit_mask,
mask_base,
error{ NOT_SUPPORTED, INVALID_PARAM },
) catch |err| switch (err) {
error.NOT_SUPPORTED => unreachable,
else => |e| return e,
};
return;
}
return ecall.twoArgsNoReturnWithError(
.RFENCE,
@enumToInt(RFENCE_FID.FENCE_I),
bit_mask,
mask_base,
error{INVALID_PARAM},
);
}
/// Instructs the remote harts to execute one or more SFENCE.VMA instructions, covering the range of
/// virtual addresses between `start_addr` and `size`.
pub fn remoteSFenceVMA(
hart_mask: HartMask,
start_addr: usize,
size: usize,
) error{ INVALID_PARAM, INVALID_ADDRESS }!void {
var bit_mask: isize = undefined;
var mask_base: isize = undefined;
switch (hart_mask) {
.all => {
bit_mask = 0;
mask_base = 0;
},
.mask => |mask| {
bit_mask = @bitCast(isize, mask.mask);
mask_base = @bitCast(isize, mask.base);
},
}
if (runtime_safety) {
ecall.fourArgsNoReturnWithError(
.RFENCE,
@enumToInt(RFENCE_FID.SFENCE_VMA),
bit_mask,
mask_base,
@bitCast(isize, start_addr),
@bitCast(isize, size),
error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS },
) catch |err| switch (err) {
error.NOT_SUPPORTED => unreachable,
else => |e| return e,
};
return;
}
return ecall.fourArgsNoReturnWithError(
.RFENCE,
@enumToInt(RFENCE_FID.SFENCE_VMA),
bit_mask,
mask_base,
@bitCast(isize, start_addr),
@bitCast(isize, size),
error{ INVALID_PARAM, INVALID_ADDRESS },
);
}
/// Instructs the remote harts to execute one or more SFENCE.VMA instructions, covering the range of
/// virtual addresses between `start_addr` and `size`.
/// This covers only the given ASID.
pub fn remoteSFenceVMAWithASID(
hart_mask: HartMask,
start_addr: usize,
size: usize,
asid: usize,
) error{ INVALID_PARAM, INVALID_ADDRESS }!void {
var bit_mask: isize = undefined;
var mask_base: isize = undefined;
switch (hart_mask) {
.all => {
bit_mask = 0;
mask_base = 0;
},
.mask => |mask| {
bit_mask = @bitCast(isize, mask.mask);
mask_base = @bitCast(isize, mask.base);
},
}
if (runtime_safety) {
ecall.fiveArgsNoReturnWithError(
.RFENCE,
@enumToInt(RFENCE_FID.SFENCE_VMA_ASID),
bit_mask,
mask_base,
@bitCast(isize, start_addr),
@bitCast(isize, size),
@bitCast(isize, asid),
error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS },
) catch |err| switch (err) {
error.NOT_SUPPORTED => unreachable,
else => |e| return e,
};
return;
}
return ecall.fiveArgsNoReturnWithError(
.RFENCE,
@enumToInt(RFENCE_FID.SFENCE_VMA_ASID),
bit_mask,
mask_base,
@bitCast(isize, start_addr),
@bitCast(isize, size),
@bitCast(isize, asid),
error{ INVALID_PARAM, INVALID_ADDRESS },
);
}
/// Instruct the remote harts to execute one or more HFENCE.GVMA instructions, covering the range of
/// guest physical addresses between start and size only for the given VMID.
/// This function call is only valid for harts implementing hypervisor extension.
pub fn remoteHFenceGVMAWithVMID(
hart_mask: HartMask,
start_addr: usize,
size: usize,
vmid: usize,
) error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS }!void {
var bit_mask: isize = undefined;
var mask_base: isize = undefined;
switch (hart_mask) {
.all => {
bit_mask = 0;
mask_base = 0;
},
.mask => |mask| {
bit_mask = @bitCast(isize, mask.mask);
mask_base = @bitCast(isize, mask.base);
},
}
return ecall.fiveArgsNoReturnWithError(
.RFENCE,
@enumToInt(RFENCE_FID.HFENCE_GVMA_VMID),
bit_mask,
mask_base,
@bitCast(isize, start_addr),
@bitCast(isize, size),
@bitCast(isize, vmid),
error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS },
);
}
/// Instruct the remote harts to execute one or more HFENCE.GVMA instructions, covering the range of
/// guest physical addresses between start and size only for all guests.
/// This function call is only valid for harts implementing hypervisor extension.
pub fn remoteHFenceGVMA(
hart_mask: HartMask,
start_addr: usize,
size: usize,
) error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS }!void {
var bit_mask: isize = undefined;
var mask_base: isize = undefined;
switch (hart_mask) {
.all => {
bit_mask = 0;
mask_base = 0;
},
.mask => |mask| {
bit_mask = @bitCast(isize, mask.mask);
mask_base = @bitCast(isize, mask.base);
},
}
return ecall.fourArgsNoReturnWithError(
.RFENCE,
@enumToInt(RFENCE_FID.HFENCE_GVMA),
bit_mask,
mask_base,
@bitCast(isize, start_addr),
@bitCast(isize, size),
error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS },
);
}
/// Instruct the remote harts to execute one or more HFENCE.VVMA instructions, covering the range of
/// guest virtual addresses between `start_addr` and `size` for the given ASID and current VMID (in hgatp CSR) of
/// calling hart.
/// This function call is only valid for harts implementing hypervisor extension.
pub fn remoteHFenceVVMAWithASID(
hart_mask: HartMask,
start_addr: usize,
size: usize,
asid: usize,
) error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS }!void {
var bit_mask: isize = undefined;
var mask_base: isize = undefined;
switch (hart_mask) {
.all => {
bit_mask = 0;
mask_base = 0;
},
.mask => |mask| {
bit_mask = @bitCast(isize, mask.mask);
mask_base = @bitCast(isize, mask.base);
},
}
return ecall.fiveArgsNoReturnWithError(
.RFENCE,
@enumToInt(RFENCE_FID.HFENCE_VVMA_ASID),
bit_mask,
mask_base,
@bitCast(isize, start_addr),
@bitCast(isize, size),
@bitCast(isize, asid),
error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS },
);
}
/// Instruct the remote harts to execute one or more HFENCE.VVMA instructions, covering the range of
/// guest virtual addresses between `start_addr` and `size` for current VMID (in hgatp CSR) of calling hart.
/// This function call is only valid for harts implementing hypervisor extension.
pub fn remoteHFenceVVMA(
hart_mask: HartMask,
start_addr: usize,
size: usize,
) error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS }!void {
var bit_mask: isize = undefined;
var mask_base: isize = undefined;
switch (hart_mask) {
.all => {
bit_mask = 0;
mask_base = 0;
},
.mask => |mask| {
bit_mask = @bitCast(isize, mask.mask);
mask_base = @bitCast(isize, mask.base);
},
}
return ecall.fourArgsNoReturnWithError(
.RFENCE,
@enumToInt(RFENCE_FID.HFENCE_VVMA),
bit_mask,
mask_base,
@bitCast(isize, start_addr),
@bitCast(isize, size),
error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS },
);
}
const RFENCE_FID = enum(i32) {
FENCE_I = 0x0,
SFENCE_VMA = 0x1,
SFENCE_VMA_ASID = 0x2,
HFENCE_GVMA_VMID = 0x3,
HFENCE_GVMA = 0x4,
HFENCE_VVMA_ASID = 0x5,
HFENCE_VVMA = 0x6,
};
comptime {
std.testing.refAllDecls(@This());
}
};
/// The Hart State Management (HSM) Extension introduces a set of hart states and a set of functions
/// which allow the supervisor-mode software to request a hart state change.
pub const hsm = struct {
pub fn available() bool {
return base.probeExtension(.HSM);
}
/// Request the SBI implementation to start executing the target hart in supervisor-mode at address specified
/// by `start_addr` parameter with specific registers values described in the SBI Specification.
///
/// This call is asynchronous — more specifically, `hartStart` may return before the target hart starts executing
/// as long as the SBI implementation is capable of ensuring the return code is accurate.
///
/// If the SBI implementation is a platform runtime firmware executing in machine-mode (M-mode) then it MUST
/// configure PMP and other M-mode state before transferring control to supervisor-mode software.
///
/// The `hartid` parameter specifies the target hart which is to be started.
///
/// The `start_addr` parameter points to a runtime-specified physical address, where the hart can start
/// executing in supervisor-mode.
///
/// The `value` parameter is a XLEN-bit value which will be set in the a1 register when the hart starts
/// executing at `start_addr`.
pub fn hartStart(
hartid: usize,
start_addr: usize,
value: usize,
) error{ INVALID_ADDRESS, INVALID_PARAM, ALREADY_AVAILABLE, FAILED }!void {
if (runtime_safety) {
ecall.threeArgsNoReturnWithError(
.HSM,
@enumToInt(HSM_FID.HART_START),
@bitCast(isize, hartid),
@bitCast(isize, start_addr),
@bitCast(isize, value),
error{ NOT_SUPPORTED, INVALID_ADDRESS, INVALID_PARAM, ALREADY_AVAILABLE, FAILED },
) catch |err| switch (err) {
error.NOT_SUPPORTED => unreachable,
else => |e| return e,
};
return;
}
return ecall.threeArgsNoReturnWithError(
.HSM,
@enumToInt(HSM_FID.HART_START),
@bitCast(isize, hartid),
@bitCast(isize, start_addr),
@bitCast(isize, value),
error{ INVALID_ADDRESS, INVALID_PARAM, ALREADY_AVAILABLE, FAILED },
);
}
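// Illustrative sketch (the names are placeholders, not part of this module): bring up
// hart 1 at the physical address of a supervisor-mode entry point, passing a pointer
// to boot information in a1.
//     try hsm.hartStart(1, @ptrToInt(secondaryEntry), @ptrToInt(&boot_info));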
/// Request the SBI implementation to stop executing the calling hart in supervisor-mode and return its
/// ownership to the SBI implementation.
/// This call is not expected to return under normal conditions.
/// `hartStop` must be called with the supervisor-mode interrupts disabled.
pub fn hartStop() error{FAILED}!void {
if (runtime_safety) {
ecall.zeroArgsNoReturnWithError(
.HSM,
@enumToInt(HSM_FID.HART_STOP),
error{ NOT_SUPPORTED, FAILED },
) catch |err| switch (err) {
error.NOT_SUPPORTED => unreachable,
else => |e| return e,
};
} else {
try ecall.zeroArgsNoReturnWithError(
.HSM,
@enumToInt(HSM_FID.HART_STOP),
error{FAILED},
);
}
unreachable;
}
/// Get the current status (or HSM state id) of the given hart.
///
/// Because harts may transition between HSM states at any time due to concurrent `hartStart`, `hartStop` or
/// `hartSuspend` calls, the returned value may not reflect the actual state of the hart by the time the caller inspects it.
pub fn hartStatus(hartid: usize) error{INVALID_PARAM}!State {
if (runtime_safety) {
return @intToEnum(State, ecall.oneArgsWithReturnWithError(
.HSM,
@enumToInt(HSM_FID.HART_GET_STATUS),
@bitCast(isize, hartid),
error{ NOT_SUPPORTED, INVALID_PARAM },
) catch |err| switch (err) {
error.NOT_SUPPORTED => unreachable,
else => |e| return e,
});
}
return @intToEnum(State, try ecall.oneArgsWithReturnWithError(
.HSM,
@enumToInt(HSM_FID.HART_GET_STATUS),
@bitCast(isize, hartid),
error{INVALID_PARAM},
));
}
/// Request the SBI implementation to put the calling hart in a platform specific suspend (or low power)
/// state specified by the `suspend_type` parameter.
///
/// The hart will automatically come out of suspended state and resume normal execution when it receives an interrupt
/// or platform specific hardware event.
///
/// The platform specific suspend states for a hart can be either retentive or non-retentive in nature. A retentive
/// suspend state will preserve hart register and CSR values for all privilege modes whereas a non-retentive suspend
/// state will not preserve hart register and CSR values.
///
/// Resuming from a retentive suspend state is straightforward and the supervisor-mode software will see
/// the SBI suspend call return without any failures.
///
/// The `resume_addr` parameter is unused during retentive suspend.
///
/// Resuming from a non-retentive suspend state is relatively more involved and requires software to restore various
/// hart registers and CSRs for all privilege modes. Upon resuming from non-retentive suspend state, the hart will
/// jump to supervisor-mode at address specified by `resume_addr` with specific registers values described
/// in the SBI Specification.
///
/// The `resume_addr` parameter points to a runtime-specified physical address, where the hart can resume execution in
/// supervisor-mode after a non-retentive suspend.
///
/// The `value` parameter is a XLEN-bit value which will be set in the a1 register when the hart resumes execution at
/// `resume_addr` after a non-retentive suspend.
pub fn hartSuspend(
suspend_type: SuspendType,
resume_addr: usize,
value: usize,
) error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS, FAILED }!void {
return ecall.threeArgsNoReturnWithError(
.HSM,
@enumToInt(HSM_FID.HART_SUSPEND),
@intCast(isize, @enumToInt(suspend_type)),
@bitCast(isize, resume_addr),
@bitCast(isize, value),
error{ NOT_SUPPORTED, INVALID_PARAM, INVALID_ADDRESS, FAILED },
);
}
pub const SuspendType = enum(u32) {
/// Default retentive suspend
RETENTIVE = 0,
/// Default non-retentive suspend
NON_RETENTIVE = 0x80000000,
_,
};
pub const State = enum(isize) {
/// The hart is physically powered-up and executing normally.
STARTED = 0x0,
/// The hart is not executing in supervisor-mode or any lower privilege mode. It is probably powered-down by the
/// SBI implementation if the underlying platform has a mechanism to physically power-down harts.
STOPPED = 0x1,
/// Some other hart has requested to start (or power-up) the hart from the `STOPPED` state and the SBI
/// implementation is still working to get the hart in the `STARTED` state.
START_PENDING = 0x2,
/// The hart has requested to stop (or power-down) itself from the `STARTED` state and the SBI implementation is
/// still working to get the hart in the `STOPPED` state.
STOP_PENDING = 0x3,
/// This hart is in a platform specific suspend (or low power) state.
SUSPENDED = 0x4,
/// The hart has requested to put itself in a platform specific low power state from the STARTED state and the SBI
/// implementation is still working to get the hart in the platform specific SUSPENDED state.
SUSPEND_PENDING = 0x5,
/// An interrupt or platform specific hardware event has caused the hart to resume normal execution from the
/// `SUSPENDED` state and the SBI implementation is still working to get the hart in the `STARTED` state.
RESUME_PENDING = 0x6,
};
const HSM_FID = enum(i32) {
HART_START = 0x0,
HART_STOP = 0x1,
HART_GET_STATUS = 0x2,
HART_SUSPEND = 0x3,
};
comptime {
std.testing.refAllDecls(@This());
}
};
/// The System Reset Extension provides a function that allows the supervisor software to request a system-level
/// reboot or shutdown.
/// The term "system" refers to the world-view of supervisor software and the underlying SBI implementation
/// could be machine mode firmware or hypervisor.
pub const reset = struct {
pub fn available() bool {
return base.probeExtension(.SRST);
}
/// Reset the system based on provided `reset_type` and `reset_reason`.
/// This is a synchronous call and does not return if it succeeds.
///
/// When supervisor software is running natively, the SBI implementation is machine mode firmware.
/// In this case, shutdown is equivalent to physical power down of the entire system and cold reboot is
/// equivalent to physical power cycle of the entire system. Further, warm reboot is equivalent to a power
/// cycle of main processor and parts of the system but not the entire system. For example, on a server
/// class system with a BMC (board management controller), a warm reboot will not power cycle the BMC
/// whereas a cold reboot will definitely power cycle the BMC.
///
/// When supervisor software is running inside a virtual machine, the SBI implementation is a hypervisor.
/// The shutdown, cold reboot and warm reboot will behave functionally the same as the native case but
/// might not result in any physical power changes.
pub fn systemReset(
reset_type: ResetType,
reset_reason: ResetReason,
) error{ NOT_SUPPORTED, INVALID_PARAM, FAILED }!void {
try ecall.twoArgsNoReturnWithError(
.SRST,
@enumToInt(SRST_FID.RESET),
@intCast(isize, @enumToInt(reset_type)),
@intCast(isize, @enumToInt(reset_reason)),
error{ NOT_SUPPORTED, INVALID_PARAM, FAILED },
);
unreachable;
}
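// Illustrative only: request a clean shutdown with no particular reason; on success
// this call never returns.
//     try reset.systemReset(.SHUTDOWN, .NONE);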
pub const ResetType = enum(u32) {
SHUTDOWN = 0x0,
COLD_REBOOT = 0x1,
WARM_REBOOT = 0x2,
_,
};
pub const ResetReason = enum(u32) {
NONE = 0x0,
SYSFAIL = 0x1,
_,
};
const SRST_FID = enum(i32) {
RESET = 0x0,
};
comptime {
std.testing.refAllDecls(@This());
}
};
pub const pmu = struct {
pub fn available() bool {
return base.probeExtension(.PMU);
}
/// Returns the number of counters (both hardware and firmware)
pub fn getNumberOfCounters() usize {
if (runtime_safety) {
return @bitCast(usize, ecall.zeroArgsWithReturnWithError(
.PMU,
@enumToInt(PMU_FID.NUM_COUNTERS),
error{NOT_SUPPORTED},
) catch unreachable);
}
return @bitCast(usize, ecall.zeroArgsWithReturnNoError(.PMU, @enumToInt(PMU_FID.NUM_COUNTERS)));
}
/// Get details about the specified counter such as underlying CSR number, width of the counter, type of
/// counter hardware/firmware, etc.
pub fn getCounterInfo(counter_index: usize) error{INVALID_PARAM}!CounterInfo {
if (runtime_safety) {
return @bitCast(CounterInfo, ecall.oneArgsWithReturnWithError(
.PMU,
@enumToInt(PMU_FID.COUNTER_GET_INFO),
@bitCast(isize, counter_index),
error{ NOT_SUPPORTED, INVALID_PARAM },
) catch |err| switch (err) {
error.NOT_SUPPORTED => unreachable,
else => |e| return e,
});
}
return @bitCast(CounterInfo, try ecall.oneArgsWithReturnWithError(
.PMU,
@enumToInt(PMU_FID.COUNTER_GET_INFO),
@bitCast(isize, counter_index),
error{INVALID_PARAM},
));
}
/// Find and configure a counter from a set of counters which is not started (or enabled) and can monitor
/// the specified event.
pub fn configureMatchingCounter(
counter_base: usize,
counter_mask: usize,
config_flags: ConfigFlags,
event: Event,
) error{ NOT_SUPPORTED, INVALID_PARAM }!usize {
const event_data = event.toEventData();
return @bitCast(usize, try ecall.fiveArgsLastArg64WithReturnWithError(
.PMU,
@enumToInt(PMU_FID.COUNTER_CFG_MATCH),
@bitCast(isize, counter_base),
@bitCast(isize, counter_mask),
@bitCast(isize, config_flags),
@bitCast(isize, event_data.event_index),
event_data.event_data,
error{ NOT_SUPPORTED, INVALID_PARAM },
));
}
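// Illustrative sketch: let the SBI implementation pick any idle counter that can count
// hardware CPU cycles and start it immediately. The base/mask values are placeholders.
//     const counter = try pmu.configureMatchingCounter(0, ~@as(usize, 0),
//         pmu.ConfigFlags{ .AUTO_START = true }, pmu.Event{ .HW = .CPU_CYCLES });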
/// Start or enable a set of counters on the calling HART with the specified initial value.
/// The `counter_mask` parameter represents the set of counters, whereas the `initial_value` parameter
/// specifies the initial value of the counter (if `start_flags.INIT_VALUE` is set).
pub fn startCounters(
counter_base: usize,
counter_mask: usize,
start_flags: StartFlags,
initial_value: u64,
) error{ INVALID_PARAM, ALREADY_STARTED }!void {
if (runtime_safety) {
ecall.fourArgsLastArg64NoReturnWithError(
.PMU,
@enumToInt(PMU_FID.COUNTER_START),
@bitCast(isize, counter_base),
@bitCast(isize, counter_mask),
@bitCast(isize, start_flags),
initial_value,
error{ NOT_SUPPORTED, INVALID_PARAM, ALREADY_STARTED },
) catch |err| switch (err) {
error.NOT_SUPPORTED => unreachable,
else => |e| return e,
};
return;
}
return ecall.fourArgsLastArg64NoReturnWithError(
.PMU,
@enumToInt(PMU_FID.COUNTER_START),
@bitCast(isize, counter_base),
@bitCast(isize, counter_mask),
@bitCast(isize, start_flags),
initial_value,
error{ INVALID_PARAM, ALREADY_STARTED },
);
}
/// Stop or disable a set of counters on the calling HART. The `counter_mask` parameter represents the set of counters.
pub fn stopCounters(
counter_base: usize,
counter_mask: usize,
stop_flags: StopFlags,
) error{ INVALID_PARAM, ALREADY_STOPPED }!void {
if (runtime_safety) {
ecall.threeArgsNoReturnWithError(
.PMU,
@enumToInt(PMU_FID.COUNTER_STOP),
@bitCast(isize, counter_base),
@bitCast(isize, counter_mask),
@bitCast(isize, stop_flags),
error{ NOT_SUPPORTED, INVALID_PARAM, ALREADY_STOPPED },
) catch |err| switch (err) {
error.NOT_SUPPORTED => unreachable,
else => |e| return e,
};
return;
}
return ecall.threeArgsNoReturnWithError(
.PMU,
@enumToInt(PMU_FID.COUNTER_STOP),
@bitCast(isize, counter_base),
@bitCast(isize, counter_mask),
@bitCast(isize, stop_flags),
error{ INVALID_PARAM, ALREADY_STOPPED },
);
}
/// Provide the current value of a firmware counter.
pub fn readFirmwareCounter(counter_index: usize) error{INVALID_PARAM}!usize {
if (runtime_safety) {
return @bitCast(usize, ecall.oneArgsWithReturnWithError(
.PMU,
@enumToInt(PMU_FID.COUNTER_FW_READ),
@bitCast(isize, counter_index),
error{ NOT_SUPPORTED, INVALID_PARAM },
) catch |err| switch (err) {
error.NOT_SUPPORTED => unreachable,
else => |e| return e,
});
}
return @bitCast(usize, try ecall.oneArgsWithReturnWithError(
.PMU,
@enumToInt(PMU_FID.COUNTER_FW_READ),
@bitCast(isize, counter_index),
error{INVALID_PARAM},
));
}
pub const Event = union(EventType) {
HW: HW_EVENT,
HW_CACHE: HW_CACHE_EVENT,
HW_RAW: if (is_64) u48 else u32,
FW: FW_EVENT,
pub const EventType = enum(u4) {
HW = 0x0,
HW_CACHE = 0x1,
HW_RAW = 0x2,
FW = 0xf,
};
pub const HW_EVENT = enum(u16) {
/// Event for each CPU cycle
CPU_CYCLES = 1,
/// Event for each completed instruction
INSTRUCTIONS = 2,
/// Event for cache hit
CACHE_REFERENCES = 3,
/// Event for cache miss
CACHE_MISSES = 4,
/// Event for a branch instruction
BRANCH_INSTRUCTIONS = 5,
/// Event for a branch misprediction
BRANCH_MISSES = 6,
/// Event for each BUS cycle
BUS_CYCLES = 7,
/// Event for a stalled cycle in microarchitecture frontend
STALLED_CYCLES_FRONTEND = 8,
/// Event for a stalled cycle in microarchitecture backend
STALLED_CYCLES_BACKEND = 9,
/// Event for each reference CPU cycle
REF_CPU_CYCLES = 10,
_,
};
pub const HW_CACHE_EVENT = packed struct {
result_id: ResultId,
op_id: OpId,
cache_id: CacheId,
pub const ResultId = enum(u1) {
ACCESS = 0,
MISS = 1,
};
pub const OpId = enum(u2) {
READ = 0,
WRITE = 1,
PREFETCH = 2,
};
pub const CacheId = enum(u13) {
/// Level1 data cache event
L1D = 0,
/// Level1 instruction cache event
L1I = 1,
/// Last level cache event
LL = 2,
/// Data TLB event
DTLB = 3,
/// Instruction TLB event
ITLB = 4,
/// Branch predictor unit event
BPU = 5,
/// NUMA node cache event
NODE = 6,
};
comptime {
std.debug.assert(@sizeOf(u16) == @sizeOf(HW_CACHE_EVENT));
std.debug.assert(@bitSizeOf(u16) == @bitSizeOf(HW_CACHE_EVENT));
}
comptime {
std.testing.refAllDecls(@This());
}
};
pub const FW_EVENT = enum(u16) {
MISALIGNED_LOAD = 0,
MISALIGNED_STORE = 1,
ACCESS_LOAD = 2,
ACCESS_STORE = 3,
ILLEGAL_INSN = 4,
SET_TIMER = 5,
IPI_SENT = 6,
IPI_RECVD = 7,
FENCE_I_SENT = 8,
FENCE_I_RECVD = 9,
SFENCE_VMA_SENT = 10,
SFENCE_VMA_RCVD = 11,
SFENCE_VMA_ASID_SENT = 12,
SFENCE_VMA_ASID_RCVD = 13,
HFENCE_GVMA_SENT = 14,
HFENCE_GVMA_RCVD = 15,
HFENCE_GVMA_VMID_SENT = 16,
HFENCE_GVMA_VMID_RCVD = 17,
HFENCE_VVMA_SENT = 18,
HFENCE_VVMA_RCVD = 19,
HFENCE_VVMA_ASID_SENT = 20,
HFENCE_VVMA_ASID_RCVD = 21,
_,
};
fn toEventData(self: Event) EventData {
return switch (self) {
.HW => |hw| EventData{
.event_index = @as(u20, @enumToInt(hw)) | (@as(u20, @enumToInt(EventType.HW)) << 16),
.event_data = 0,
},
.HW_CACHE => |hw_cache| EventData{
.event_index = @as(u20, @bitCast(u16, hw_cache)) | (@as(u20, @enumToInt(EventType.HW_CACHE)) << 16),
.event_data = 0,
},
.HW_RAW => |hw_raw| EventData{
.event_index = @as(u20, @enumToInt(EventType.HW_RAW)) << 16,
.event_data = hw_raw,
},
.FW => |fw| EventData{
.event_index = @as(u20, @enumToInt(fw)) | (@as(u20, @enumToInt(EventType.FW)) << 16),
.event_data = 0,
},
};
}
const EventData = struct {
event_index: usize,
event_data: u64,
};
comptime {
std.testing.refAllDecls(@This());
}
};
pub const ConfigFlags = packed struct {
/// Skip the counter matching
SKIP_MATCH: bool = false,
/// Clear (or zero) the counter value in counter configuration
CLEAR_VALUE: bool = false,
/// Start the counter after configuring a matching counter
AUTO_START: bool = false,
/// Event counting inhibited in VU-mode
SET_VUINH: bool = false,
/// Event counting inhibited in VS-mode
SET_VSINH: bool = false,
/// Event counting inhibited in U-mode
SET_UINH: bool = false,
/// Event counting inhibited in S-mode
SET_SINH: bool = false,
/// Event counting inhibited in M-mode
SET_MINH: bool = false,
// Packed structs in zig stage1 are so annoying
_reserved1: u8 = 0,
_reserved2: u16 = 0,
_reserved3: if (is_64) u32 else u0 = 0,
comptime {
std.debug.assert(@sizeOf(usize) == @sizeOf(ConfigFlags));
std.debug.assert(@bitSizeOf(usize) == @bitSizeOf(ConfigFlags));
}
comptime {
std.testing.refAllDecls(@This());
}
};
pub const StartFlags = packed struct {
/// Set the value of counters based on the `initial_value` parameter
INIT_VALUE: bool = false,
_reserved: if (is_64) u63 else u31 = 0,
comptime {
std.debug.assert(@sizeOf(usize) == @sizeOf(StartFlags));
std.debug.assert(@bitSizeOf(usize) == @bitSizeOf(StartFlags));
}
comptime {
std.testing.refAllDecls(@This());
}
};
pub const StopFlags = packed struct {
/// Reset the counter to event mapping.
RESET: bool = false,
_reserved: if (is_64) u63 else u31 = 0,
comptime {
std.debug.assert(@sizeOf(usize) == @sizeOf(StopFlags));
std.debug.assert(@bitSizeOf(usize) == @bitSizeOf(StopFlags));
}
comptime {
std.testing.refAllDecls(@This());
}
};
/// If `type` is `.firmware`, `csr` and `width` should be ignored.
pub const CounterInfo = packed struct {
csr: u12,
/// Width (One less than number of bits in CSR)
width: u6,
_reserved: if (is_64) u45 else u13,
type: CounterType,
pub const CounterType = enum(u1) {
hardware = 0,
firmware = 1,
};
comptime {
std.debug.assert(@sizeOf(usize) == @sizeOf(CounterInfo));
std.debug.assert(@bitSizeOf(usize) == @bitSizeOf(CounterInfo));
}
comptime {
std.testing.refAllDecls(@This());
}
};
const PMU_FID = enum(i32) {
NUM_COUNTERS = 0x0,
COUNTER_GET_INFO = 0x1,
COUNTER_CFG_MATCH = 0x2,
COUNTER_START = 0x3,
COUNTER_STOP = 0x4,
COUNTER_FW_READ = 0x5,
};
comptime {
std.testing.refAllDecls(@This());
}
};
const ecall = struct {
inline fn zeroArgsNoReturnWithError(eid: EID, fid: i32, comptime ErrorT: type) ErrorT!void {
var err: ErrorCode = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
: "x11"
);
if (err == .SUCCESS) return;
return err.toError(ErrorT);
}
inline fn zeroArgsWithReturnWithError(eid: EID, fid: i32, comptime ErrorT: type) ErrorT!isize {
var err: ErrorCode = undefined;
var value: isize = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
[value] "={x11}" (value),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
);
if (err == .SUCCESS) return value;
return err.toError(ErrorT);
}
inline fn zeroArgsWithReturnNoError(eid: EID, fid: i32) isize {
return asm volatile ("ecall"
: [value] "={x11}" (-> isize),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
: "x10"
);
}
inline fn oneArgsWithReturnWithError(eid: EID, fid: i32, a0: isize, comptime ErrorT: type) ErrorT!isize {
var err: ErrorCode = undefined;
var value: isize = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
[value] "={x11}" (value),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0] "{x10}" (a0),
);
if (err == .SUCCESS) return value;
return err.toError(ErrorT);
}
inline fn oneArgsWithReturnNoError(eid: EID, fid: i32, a0: isize) isize {
return asm volatile ("ecall"
: [value] "={x11}" (-> isize),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0] "{x10}" (a0),
: "x10"
);
}
inline fn oneArgs64NoReturnNoError(eid: EID, fid: i32, a0: u64) void {
if (is_64) {
asm volatile ("ecall"
:
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0] "{x10}" (a0),
: "x11", "x10"
);
} else {
asm volatile ("ecall"
:
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0_lo] "{x10}" (@truncate(u32, a0)),
[arg0_hi] "{x11}" (@truncate(u32, a0 >> 32)),
: "x11", "x10"
);
}
}
inline fn oneArgs64NoReturnWithError(eid: EID, fid: i32, a0: u64, comptime ErrorT: type) ErrorT!void {
var err: ErrorCode = undefined;
if (is_64) {
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0] "{x10}" (a0),
: "x11"
);
} else {
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0_lo] "{x10}" (@truncate(u32, a0)),
[arg0_hi] "{x11}" (@truncate(u32, a0 >> 32)),
: "x11"
);
}
if (err == .SUCCESS) return;
return err.toError(ErrorT);
}
inline fn legacyOneArgs64NoReturnNoError(eid: EID, a0: u64) void {
if (is_64) {
asm volatile ("ecall"
:
: [eid] "{x17}" (@enumToInt(eid)),
[arg0] "{x10}" (a0),
: "x10"
);
} else {
asm volatile ("ecall"
:
: [eid] "{x17}" (@enumToInt(eid)),
[arg0_lo] "{x10}" (@truncate(u32, a0)),
[arg0_hi] "{x11}" (@truncate(u32, a0 >> 32)),
: "x10"
);
}
}
inline fn legacyOneArgs64NoReturnWithRawError(eid: EID, a0: u64) ImplementationDefinedError {
var err: ImplementationDefinedError = undefined;
if (is_64) {
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[arg0] "{x10}" (a0),
);
} else {
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[arg0_lo] "{x10}" (@truncate(u32, a0)),
[arg0_hi] "{x11}" (@truncate(u32, a0 >> 32)),
);
}
return err;
}
inline fn legacyOneArgs64NoReturnWithError(eid: EID, a0: u64, comptime ErrorT: type) ErrorT!void {
var err: ErrorCode = undefined;
if (is_64) {
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[arg0] "{x10}" (a0),
);
} else {
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[arg0_lo] "{x10}" (@truncate(u32, a0)),
[arg0_hi] "{x11}" (@truncate(u32, a0 >> 32)),
);
}
if (err == .SUCCESS) return;
return err.toError(ErrorT);
}
inline fn legacyOneArgsNoReturnNoError(eid: EID, a0: isize) void {
asm volatile ("ecall"
:
: [eid] "{x17}" (@enumToInt(eid)),
[arg0] "{x10}" (a0),
: "x10"
);
}
inline fn legacyOneArgsNoReturnWithRawError(eid: EID, a0: isize) ImplementationDefinedError {
var err: ImplementationDefinedError = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[arg0] "{x10}" (a0),
);
return err;
}
inline fn legacyOneArgsNoReturnWithError(eid: EID, a0: isize, comptime ErrorT: type) ErrorT!void {
var err: ErrorCode = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[arg0] "{x10}" (a0),
);
if (err == .SUCCESS) return;
return err.toError(ErrorT);
}
inline fn legacyThreeArgsNoReturnNoError(eid: EID, a0: isize, a1: isize, a2: isize) void {
asm volatile ("ecall"
:
: [eid] "{x17}" (@enumToInt(eid)),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
[arg2] "{x12}" (a2),
: "x10"
);
}
inline fn legacyThreeArgsNoReturnWithError(eid: EID, a0: isize, a1: isize, a2: isize, comptime ErrorT: type) ErrorT!void {
var err: ErrorCode = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
[arg2] "{x12}" (a2),
);
if (err == .SUCCESS) return;
return err.toError(ErrorT);
}
inline fn legacyFourArgsNoReturnWithRawError(eid: EID, a0: isize, a1: isize, a2: isize, a3: isize) ImplementationDefinedError {
var err: ImplementationDefinedError = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
[arg2] "{x12}" (a2),
[arg3] "{x13}" (a3),
);
return err;
}
inline fn legacyFourArgsNoReturnWithError(eid: EID, a0: isize, a1: isize, a2: isize, a3: isize, comptime ErrorT: type) ErrorT!void {
var err: ErrorCode = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
[arg2] "{x12}" (a2),
[arg3] "{x13}" (a3),
);
if (err == .SUCCESS) return;
return err.toError(ErrorT);
}
inline fn legacyFourArgsNoReturnNoError(eid: EID, a0: isize, a1: isize, a2: isize, a3: isize) void {
asm volatile ("ecall"
:
: [eid] "{x17}" (@enumToInt(eid)),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
[arg2] "{x12}" (a2),
[arg3] "{x13}" (a3),
: "x10"
);
}
inline fn legacyZeroArgsWithReturnWithError(eid: EID, comptime ErrorT: type) ErrorT!isize {
var val: isize = undefined;
asm volatile ("ecall"
: [val] "={x10}" (val),
: [eid] "{x17}" (@enumToInt(eid)),
);
if (val >= 0) return val;
return @intToEnum(ErrorCode, val).toError(ErrorT);
}
inline fn legacyZeroArgsNoReturnWithError(eid: EID, comptime ErrorT: type) ErrorT!void {
var err: ErrorCode = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
);
if (err == .SUCCESS) return;
return err.toError(ErrorT);
}
inline fn legacyZeroArgsNoReturnNoError(eid: EID) void {
asm volatile ("ecall"
:
: [eid] "{x17}" (@enumToInt(eid)),
: "x10"
);
}
inline fn twoArgsNoReturnWithError(eid: EID, fid: i32, a0: isize, a1: isize, comptime ErrorT: type) ErrorT!void {
var err: ErrorCode = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
: "x11"
);
if (err == .SUCCESS) return;
return err.toError(ErrorT);
}
inline fn fourArgsLastArg64NoReturnWithError(
eid: EID,
fid: i32,
a0: isize,
a1: isize,
a2: isize,
a3: u64,
comptime ErrorT: type,
) ErrorT!void {
var err: ErrorCode = undefined;
if (is_64) {
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
[arg2] "{x12}" (a2),
[arg3] "{x13}" (a3),
: "x11"
);
} else {
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
[arg2] "{x12}" (a2),
[arg3_lo] "{x13}" (@truncate(u32, a3)),
[arg3_hi] "{x14}" (@truncate(u32, a3 >> 32)),
: "x11"
);
}
if (err == .SUCCESS) return;
return err.toError(ErrorT);
}
inline fn fourArgsNoReturnWithError(eid: EID, fid: i32, a0: isize, a1: isize, a2: isize, a3: isize, comptime ErrorT: type) ErrorT!void {
var err: ErrorCode = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
[arg2] "{x12}" (a2),
[arg3] "{x13}" (a3),
: "x11"
);
if (err == .SUCCESS) return;
return err.toError(ErrorT);
}
inline fn fiveArgsLastArg64WithReturnWithError(
eid: EID,
fid: i32,
a0: isize,
a1: isize,
a2: isize,
a3: isize,
a4: u64,
comptime ErrorT: type,
) ErrorT!isize {
var err: ErrorCode = undefined;
var value: isize = undefined;
if (is_64) {
asm volatile ("ecall"
: [err] "={x10}" (err),
[value] "={x11}" (value),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
[arg2] "{x12}" (a2),
[arg3] "{x13}" (a3),
[arg4] "{x14}" (a4),
);
} else {
asm volatile ("ecall"
: [err] "={x10}" (err),
[value] "={x11}" (value),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
[arg2] "{x12}" (a2),
[arg3] "{x13}" (a3),
[arg4_lo] "{x14}" (@truncate(u32, a4)),
[arg4_hi] "{x15}" (@truncate(u32, a4 >> 32)),
);
}
if (err == .SUCCESS) return value;
return err.toError(ErrorT);
}
inline fn fiveArgsNoReturnWithError(eid: EID, fid: i32, a0: isize, a1: isize, a2: isize, a3: isize, a4: isize, comptime ErrorT: type) ErrorT!void {
var err: ErrorCode = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
[arg2] "{x12}" (a2),
[arg3] "{x13}" (a3),
[arg4] "{x14}" (a4),
: "x11"
);
if (err == .SUCCESS) return;
return err.toError(ErrorT);
}
inline fn threeArgsNoReturnWithError(eid: EID, fid: i32, a0: isize, a1: isize, a2: isize, comptime ErrorT: type) ErrorT!void {
var err: ErrorCode = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
[arg2] "{x12}" (a2),
: "x11"
);
if (err == .SUCCESS) return;
return err.toError(ErrorT);
}
inline fn threeArgsWithReturnWithError(eid: EID, fid: i32, a0: isize, a1: isize, a2: isize, comptime ErrorT: type) ErrorT!isize {
var err: ErrorCode = undefined;
var value: isize = undefined;
asm volatile ("ecall"
: [err] "={x10}" (err),
[value] "={x11}" (value),
: [eid] "{x17}" (@enumToInt(eid)),
[fid] "{x16}" (fid),
[arg0] "{x10}" (a0),
[arg1] "{x11}" (a1),
[arg2] "{x12}" (a2),
);
if (err == .SUCCESS) return value;
return err.toError(ErrorT);
}
comptime {
std.testing.refAllDecls(@This());
}
};
pub const ImplementationDefinedError = enum(isize) {
SUCCESS = 0,
_,
};
const ErrorCode = enum(isize) {
SUCCESS = 0,
FAILED = -1,
NOT_SUPPORTED = -2,
INVALID_PARAM = -3,
DENIED = -4,
INVALID_ADDRESS = -5,
ALREADY_AVAILABLE = -6,
ALREADY_STARTED = -7,
ALREADY_STOPPED = -8,
fn toError(self: ErrorCode, comptime ErrorT: type) ErrorT {
const errors: []const std.builtin.TypeInfo.Error = @typeInfo(ErrorT).ErrorSet.?;
inline for (errors) |err| {
if (self == @field(ErrorCode, err.name)) return @field(ErrorT, err.name);
}
unreachable;
}
comptime {
std.testing.refAllDecls(@This());
}
};
comptime {
std.testing.refAllDecls(@This());
} | sbi.zig |
const std = @import("std");
const input = @embedFile("data/input18");
usingnamespace @import("util.zig");
pub fn main() !void {
var part1: u64 = 0;
var part2: u64 = 0;
var reader = lines(input);
while (reader.next()) |line| {
part1 += eval(line);
part2 += try evalV2(line);
}
print("[Part1] Sum: {}", .{part1});
print("[Part2] Sum: {}", .{part2});
}
fn eval(expr: []const u8) u64 {
var e = expr;
return evalRecursive(&e);
}
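// Part 1 semantics: '+' and '*' share the same precedence and are applied strictly left
// to right; '(' recurses into a sub-expression and ')' unwinds back to the caller.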
fn evalRecursive(expr: *[]const u8) u64 {
var result: u64 = 0;
var op: ?u8 = '+';
while (expr.len > 0) {
const c = expr.*[0];
const val = switch (c) {
'0'...'9' => @intCast(u64, c - '0'),
'(' => b: { expr.* = expr.*[1..]; break :b evalRecursive(expr); },
')' => return result,
else => null,
};
if (val) |v| {
if (op) |o| {
switch (o) {
'+' => result += v,
'*' => result *= v,
else => unreachable,
}
op = null;
}
}
switch (c) {
'+', '*' => op = c,
else => {},
}
expr.* = expr.*[1..];
}
return result;
}
fn evalV2(expr: []const u8) !u64 {
var allocator_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer allocator_state.deinit();
const allocator = &allocator_state.allocator;
// https://en.wikipedia.org/wiki/Shunting-yard_algorithm
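// Precedence for part 2: '+' binds tighter than '*', which is why only pending '+'
// operators are popped to the output before a new operator is pushed.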
var output = std.ArrayList(u8).init(allocator); // in reverse polish notation
var operator_stack = std.ArrayList(u8).init(allocator);
const top = struct { fn top(a: std.ArrayList(u8)) u8 {
return a.items[a.items.len - 1];
} }.top;
for (expr) |c| {
switch (c) {
'0'...'9' => try output.append(c),
'+', '*' => {
while (operator_stack.items.len > 0 and
top(operator_stack) == '+' and
top(operator_stack) != '(') {
try output.append(operator_stack.pop());
}
try operator_stack.append(c);
},
'(' => try operator_stack.append(c),
')' => {
while (top(operator_stack) != '(') {
try output.append(operator_stack.pop());
}
_ = operator_stack.pop(); // discard '('
},
else => {}
}
}
while (operator_stack.items.len > 0) {
try output.append(operator_stack.pop());
}
var eval_stack = std.ArrayList(u64).init(allocator);
for (output.items) |c| {
switch (c) {
'0'...'9' => try eval_stack.append(@intCast(u64, c - '0')),
'+', '*' => {
const a = eval_stack.pop();
const b = eval_stack.pop();
try eval_stack.append(if (c == '+') a + b else a * b);
},
else => unreachable,
}
}
return eval_stack.items[0];
}
const expectEqual = std.testing.expectEqual;
test "eval" {
expectEqual(@as(u64, 51), eval("1 + (2 * 3) + (4 * (5 + 6))"));
expectEqual(@as(u64, 26), eval("2 * 3 + (4 * 5)"));
expectEqual(@as(u64, 437), eval("5 + (8 * 3 + 9 + 3 * 4 * 3)"));
expectEqual(@as(u64, 12240), eval("5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))"));
expectEqual(@as(u64, 13632), eval("((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2"));
}
test "evalV2" {
expectEqual(@as(u64, 51), try evalV2("1 + (2 * 3) + (4 * (5 + 6))"));
expectEqual(@as(u64, 46), try evalV2("2 * 3 + (4 * 5)"));
expectEqual(@as(u64, 1445), try evalV2("5 + (8 * 3 + 9 + 3 * 4 * 3)"));
expectEqual(@as(u64, 669060), try evalV2("5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))"));
expectEqual(@as(u64, 23340), try evalV2("((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2"));
} | src/day18.zig |
const std = @import("std");
const print = std.debug.print;
pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
var allocator = &gpa.allocator;
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
var file = try std.fs.cwd().openFile(
"./inputs/day19.txt",
.{
.read = true,
},
);
var reader = std.io.bufferedReader(file.reader()).reader();
var line_buffer: [1024]u8 = undefined;
var done = false;
var start_molecule: []u8 = undefined;
var replacements = std.ArrayList(struct { key: []u8, value: []u8 }).init(allocator);
defer replacements.deinit();
var made_from = std.StringHashMap([]u8).init(allocator);
defer made_from.deinit();
while (try reader.readUntilDelimiterOrEof(&line_buffer, '\n')) |line| {
if (done) {
start_molecule = line;
break;
}
if (line.len == 0) {
done = true;
continue;
}
var tokens = std.mem.tokenize(u8, line, " ");
const key = try arena.allocator.dupe(u8, tokens.next().?);
_ = tokens.next();
const val = try arena.allocator.dupe(u8, tokens.next().?);
try replacements.append(.{ .key = key, .value = val });
try made_from.put(val, key);
}
var molecules = std.StringHashMap(usize).init(allocator);
defer molecules.deinit();
for (replacements.items) |r| {
const k = r.key;
const v = r.value;
for (start_molecule) |_, i| {
if (i + k.len <= start_molecule.len) {
if (std.mem.eql(u8, k, start_molecule[i .. i + k.len])) {
var new = std.ArrayList(u8).init(&arena.allocator);
if (i > 0) {
try new.appendSlice(start_molecule[0..i]);
}
try new.appendSlice(v);
try new.appendSlice(start_molecule[i + k.len ..]);
if (molecules.getPtr(new.items)) |p| {
p.* += 1;
} else {
try molecules.put(new.items, 1);
}
}
} else {
break;
}
}
}
print("Part 1: {d}\n", .{molecules.count()});
var step_count = (try min_steps(allocator, made_from, start_molecule, 0)).?;
print("Part 2: {d}\n", .{step_count});
}
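// Works backwards from the target molecule towards "e", applying the reverse replacements
// greedily from right to left; the early return below accepts the first complete reduction
// found, which is assumed to be good enough for this input (see the "Hack" note).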
fn min_steps(allocator: *std.mem.Allocator, made_from: std.StringHashMap([]u8), start_molecule: []u8, num_steps: usize) anyerror!?usize {
if (std.mem.eql(u8, "e", start_molecule)) {
return num_steps;
}
var min: ?usize = null;
for (start_molecule) |_, l| {
const i = start_molecule.len - 1 - l;
var it = made_from.iterator();
while (it.next()) |from| {
const k = from.key_ptr.*;
if (i + k.len <= start_molecule.len and std.mem.eql(u8, k, start_molecule[i .. i + k.len])) {
var molecule = std.ArrayList(u8).init(allocator);
defer molecule.deinit();
try molecule.appendSlice(start_molecule);
try molecule.replaceRange(i, k.len, from.value_ptr.*);
if (try min_steps(allocator, made_from, molecule.items, num_steps + 1)) |n| {
if (min) |m| {
min = std.math.min(m, n);
} else {
min = n;
}
return min; // Hack: it takes too long to find shorter ones
}
}
}
}
return min;
} | src/day19.zig |
const std = @import("std");
const builtin = @import("builtin");
const fun = @import("fun");
const debug = std.debug;
const mem = std.mem;
const compare = fun.generic.compare;
pub const Offset = struct {
start: usize,
end: usize,
};
pub fn findStructs(comptime Struct: type, comptime ignored_fields: []const []const u8, data: []const u8, start: []const Struct, end: []const Struct) ?[]const Struct {
const start_index = indexOfStructs(Struct, ignored_fields, data, 0, start) orelse return null;
const end_index = indexOfStructs(Struct, ignored_fields, data, start_index, end) orelse return null;
// TODO: This can fail
return @bytesToSlice(Struct, data[start_index .. end_index + end.len * @sizeOf(Struct)]);
}
fn indexOfStructs(comptime Struct: type, comptime ignored_fields: []const []const u8, data: []const u8, start_index: usize, structs: []const Struct) ?usize {
const structs_len_in_bytes = structs.len * @sizeOf(Struct);
if (data.len < structs_len_in_bytes) return null;
var i: usize = start_index;
var end = data.len - structs_len_in_bytes;
while (i <= end) : (i += 1) {
if (structsMatchesBytes(Struct, ignored_fields, data[i .. i + structs_len_in_bytes], structs)) {
return i;
}
}
return null;
}
fn structsMatchesBytes(comptime Struct: type, comptime ignored_fields: []const []const u8, data: []const u8, structs: []const Struct) bool {
const structs_len_in_bytes = structs.len * @sizeOf(Struct);
if (data.len != structs_len_in_bytes) return false;
for (structs) |s, s_i| {
const data_bytes = data[s_i * @sizeOf(Struct) ..][0..@sizeOf(Struct)];
const data_s = @bytesToSlice(Struct, data_bytes)[0];
switch (@typeInfo(Struct)) {
builtin.TypeId.Array => |arr| {
// The whole array is compared by recursing with its child type over the same bytes.
if (!structsMatchesBytes(arr.child, ignored_fields, data_bytes, s))
return false;
},
builtin.TypeId.Struct => |str| {
inline for (str.fields) |field, i| {
if (comptime contains([]const u8, ignored_fields, field.name, strEql))
continue;
if (!fieldsEql(field.name, Struct, s, data_s))
return false;
}
},
else => comptime unreachable,
}
}
return true;
}
fn fieldsEql(comptime field: []const u8, comptime T: type, a: T, b: T) bool {
const af = @field(a, field);
const bf = @field(b, field);
return compare.equal(@typeOf(af), af, bf);
}
fn strEql(a: []const u8, b: []const u8) bool {
return mem.eql(u8, a, b);
}
fn contains(comptime T: type, items: []const T, value: T, eql: fn (T, T) bool) bool {
for (items) |item| {
if (eql(item, value)) return true;
}
return false;
}
/// Finds the start and end index based on a start and end pattern.
pub fn findPattern(comptime T: type, data: []const T, start: []const ?T, end: []const ?T) ?[]const u8 {
const start_index = indexOfPattern(T, data, 0, start) orelse return null;
const end_index = indexOfPattern(T, data, start_index, end) orelse return null;
return data[start_index .. end_index + end.len];
}
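// Illustrative only (a sketch, not taken from this project's callers): locate a region
// whose start matches the byte pattern 0x12 ?? 0x34, where `null` acts as a wildcard,
// and whose end matches 0xFF 0xFF.
//     const region = findPattern(u8, data, [_]?u8{ 0x12, null, 0x34 }, [_]?u8{ 0xFF, 0xFF });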
/// Finds the start and end index based on a start and end.
pub fn findBytes(comptime T: type, data: []const T, start: []const T, end: []const T) ?[]const u8 {
const start_index = mem.indexOf(T, data, start) orelse return null;
const end_index = mem.indexOfPos(T, data, start_index, end) orelse return null;
return data[start_index .. end_index + end.len];
}
fn indexOfPattern(comptime T: type, data: []const T, start_index: usize, pattern: []const ?T) ?usize {
if (data.len < pattern.len) return null;
var i: usize = start_index;
var end = data.len - pattern.len;
while (i <= end) : (i += 1) {
if (matchesPattern(T, data[i .. i + pattern.len], pattern)) {
return i;
}
}
return null;
}
/// Given data and a "pattern", returns if the data matches the pattern.
/// For now, a pattern is just data that might contain wild card values, aka
/// values that always match.
fn matchesPattern(comptime T: type, data: []const T, pattern: []const ?T) bool {
if (data.len != pattern.len) return false;
for (pattern) |pat, i| {
if (pat) |value| {
if (data[i] != value) return false;
}
}
return true;
} | tools/offset-finder/search.zig |
const assert = @import("std").debug.assert;
const builtin = @import("std").builtin;
const math = @import("std").math;
const mem = @import("std").mem;
pub const block_size: u8 = 8;
const ip = [64]u8{
6, 14, 22, 30, 38, 46, 54, 62,
4, 12, 20, 28, 36, 44, 52, 60,
2, 10, 18, 26, 34, 42, 50, 58,
0, 8, 16, 24, 32, 40, 48, 56,
7, 15, 23, 31, 39, 47, 55, 63,
5, 13, 21, 29, 37, 45, 53, 61,
3, 11, 19, 27, 35, 43, 51, 59,
1, 9, 17, 25, 33, 41, 49, 57,
};
const fp = [64]u8{
31, 63, 23, 55, 15, 47, 7, 39,
30, 62, 22, 54, 14, 46, 6, 38,
29, 61, 21, 53, 13, 45, 5, 37,
28, 60, 20, 52, 12, 44, 4, 36,
27, 59, 19, 51, 11, 43, 3, 35,
26, 58, 18, 50, 10, 42, 2, 34,
25, 57, 17, 49, 9, 41, 1, 33,
24, 56, 16, 48, 8, 40, 0, 32,
};
const pc1 = [56]u8{
7, 15, 23, 31, 39, 47, 55, 63,
6, 14, 22, 30, 38, 46, 54, 62,
5, 13, 21, 29, 37, 45, 53, 61,
4, 12, 20, 28, 1, 9, 17, 25,
33, 41, 49, 57, 2, 10, 18, 26,
34, 42, 50, 58, 3, 11, 19, 27,
35, 43, 51, 59, 36, 44, 52, 60,
};
const pc2 = [48]u8{
13, 16, 10, 23, 0, 4, 2, 27,
14, 5, 20, 9, 22, 18, 11, 3,
25, 7, 15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54, 29, 39,
50, 44, 32, 47, 43, 48, 38, 55,
33, 52, 45, 41, 49, 35, 28, 31,
};
const s0 = [_]u32{
0x00410100, 0x00010000, 0x40400000, 0x40410100, 0x00400000, 0x40010100, 0x40010000, 0x40400000,
0x40010100, 0x00410100, 0x00410000, 0x40000100, 0x40400100, 0x00400000, 0x00000000, 0x40010000,
0x00010000, 0x40000000, 0x00400100, 0x00010100, 0x40410100, 0x00410000, 0x40000100, 0x00400100,
0x40000000, 0x00000100, 0x00010100, 0x40410000, 0x00000100, 0x40400100, 0x40410000, 0x00000000,
0x00000000, 0x40410100, 0x00400100, 0x40010000, 0x00410100, 0x00010000, 0x40000100, 0x00400100,
0x40410000, 0x00000100, 0x00010100, 0x40400000, 0x40010100, 0x40000000, 0x40400000, 0x00410000,
0x40410100, 0x00010100, 0x00410000, 0x40400100, 0x00400000, 0x40000100, 0x40010000, 0x00000000,
0x00010000, 0x00400000, 0x40400100, 0x00410100, 0x40000000, 0x40410000, 0x00000100, 0x40010100,
};
const s1 = [_]u32{
0x08021002, 0x00000000, 0x00021000, 0x08020000, 0x08000002, 0x00001002, 0x08001000, 0x00021000,
0x00001000, 0x08020002, 0x00000002, 0x08001000, 0x00020002, 0x08021000, 0x08020000, 0x00000002,
0x00020000, 0x08001002, 0x08020002, 0x00001000, 0x00021002, 0x08000000, 0x00000000, 0x00020002,
0x08001002, 0x00021002, 0x08021000, 0x08000002, 0x08000000, 0x00020000, 0x00001002, 0x08021002,
0x00020002, 0x08021000, 0x08001000, 0x00021002, 0x08021002, 0x00020002, 0x08000002, 0x00000000,
0x08000000, 0x00001002, 0x00020000, 0x08020002, 0x00001000, 0x08000000, 0x00021002, 0x08001002,
0x08021000, 0x00001000, 0x00000000, 0x08000002, 0x00000002, 0x08021002, 0x00021000, 0x08020000,
0x08020002, 0x00020000, 0x00001002, 0x08001000, 0x08001002, 0x00000002, 0x08020000, 0x00021000,
};
const s2 = [_]u32{
0x20800000, 0x00808020, 0x00000020, 0x20800020, 0x20008000, 0x00800000, 0x20800020, 0x00008020,
0x00800020, 0x00008000, 0x00808000, 0x20000000, 0x20808020, 0x20000020, 0x20000000, 0x20808000,
0x00000000, 0x20008000, 0x00808020, 0x00000020, 0x20000020, 0x20808020, 0x00008000, 0x20800000,
0x20808000, 0x00800020, 0x20008020, 0x00808000, 0x00008020, 0x00000000, 0x00800000, 0x20008020,
0x00808020, 0x00000020, 0x20000000, 0x00008000, 0x20000020, 0x20008000, 0x00808000, 0x20800020,
0x00000000, 0x00808020, 0x00008020, 0x20808000, 0x20008000, 0x00800000, 0x20808020, 0x20000000,
0x20008020, 0x20800000, 0x00800000, 0x20808020, 0x00008000, 0x00800020, 0x20800020, 0x00008020,
0x00800020, 0x00000000, 0x20808000, 0x20000020, 0x20800000, 0x20008020, 0x00000020, 0x00808000,
};
const s3 = [_]u32{
0x00080201, 0x02000200, 0x00000001, 0x02080201, 0x00000000, 0x02080000, 0x02000201, 0x00080001,
0x02080200, 0x02000001, 0x02000000, 0x00000201, 0x02000001, 0x00080201, 0x00080000, 0x02000000,
0x02080001, 0x00080200, 0x00000200, 0x00000001, 0x00080200, 0x02000201, 0x02080000, 0x00000200,
0x00000201, 0x00000000, 0x00080001, 0x02080200, 0x02000200, 0x02080001, 0x02080201, 0x00080000,
0x02080001, 0x00000201, 0x00080000, 0x02000001, 0x00080200, 0x02000200, 0x00000001, 0x02080000,
0x02000201, 0x00000000, 0x00000200, 0x00080001, 0x00000000, 0x02080001, 0x02080200, 0x00000200,
0x02000000, 0x02080201, 0x00080201, 0x00080000, 0x02080201, 0x00000001, 0x02000200, 0x00080201,
0x00080001, 0x00080200, 0x02080000, 0x02000201, 0x00000201, 0x02000000, 0x02000001, 0x02080200,
};
const s4 = [_]u32{
0x01000000, 0x00002000, 0x00000080, 0x01002084, 0x01002004, 0x01000080, 0x00002084, 0x01002000,
0x00002000, 0x00000004, 0x01000004, 0x00002080, 0x01000084, 0x01002004, 0x01002080, 0x00000000,
0x00002080, 0x01000000, 0x00002004, 0x00000084, 0x01000080, 0x00002084, 0x00000000, 0x01000004,
0x00000004, 0x01000084, 0x01002084, 0x00002004, 0x01002000, 0x00000080, 0x00000084, 0x01002080,
0x01002080, 0x01000084, 0x00002004, 0x01002000, 0x00002000, 0x00000004, 0x01000004, 0x01000080,
0x01000000, 0x00002080, 0x01002084, 0x00000000, 0x00002084, 0x01000000, 0x00000080, 0x00002004,
0x01000084, 0x00000080, 0x00000000, 0x01002084, 0x01002004, 0x01002080, 0x00000084, 0x00002000,
0x00002080, 0x01002004, 0x01000080, 0x00000084, 0x00000004, 0x00002084, 0x01002000, 0x01000004,
};
const s5 = [_]u32{
0x10000008, 0x00040008, 0x00000000, 0x10040400, 0x00040008, 0x00000400, 0x10000408, 0x00040000,
0x00000408, 0x10040408, 0x00040400, 0x10000000, 0x10000400, 0x10000008, 0x10040000, 0x00040408,
0x00040000, 0x10000408, 0x10040008, 0x00000000, 0x00000400, 0x00000008, 0x10040400, 0x10040008,
0x10040408, 0x10040000, 0x10000000, 0x00000408, 0x00000008, 0x00040400, 0x00040408, 0x10000400,
0x00000408, 0x10000000, 0x10000400, 0x00040408, 0x10040400, 0x00040008, 0x00000000, 0x10000400,
0x10000000, 0x00000400, 0x10040008, 0x00040000, 0x00040008, 0x10040408, 0x00040400, 0x00000008,
0x10040408, 0x00040400, 0x00040000, 0x10000408, 0x10000008, 0x10040000, 0x00040408, 0x00000000,
0x00000400, 0x10000008, 0x10000408, 0x10040400, 0x10040000, 0x00000408, 0x00000008, 0x10040008,
};
const s6 = [_]u32{
0x00000800, 0x00000040, 0x00200040, 0x80200000, 0x80200840, 0x80000800, 0x00000840, 0x00000000,
0x00200000, 0x80200040, 0x80000040, 0x00200800, 0x80000000, 0x00200840, 0x00200800, 0x80000040,
0x80200040, 0x00000800, 0x80000800, 0x80200840, 0x00000000, 0x00200040, 0x80200000, 0x00000840,
0x80200800, 0x80000840, 0x00200840, 0x80000000, 0x80000840, 0x80200800, 0x00000040, 0x00200000,
0x80000840, 0x00200800, 0x80200800, 0x80000040, 0x00000800, 0x00000040, 0x00200000, 0x80200800,
0x80200040, 0x80000840, 0x00000840, 0x00000000, 0x00000040, 0x80200000, 0x80000000, 0x00200040,
0x00000000, 0x80200040, 0x00200040, 0x00000840, 0x80000040, 0x00000800, 0x80200840, 0x00200000,
0x00200840, 0x80000000, 0x80000800, 0x80200840, 0x80200000, 0x00200840, 0x00200800, 0x80000800,
};
const s7 = [_]u32{
0x04100010, 0x04104000, 0x00004010, 0x00000000, 0x04004000, 0x00100010, 0x04100000, 0x04104010,
0x00000010, 0x04000000, 0x00104000, 0x00004010, 0x00104010, 0x04004010, 0x04000010, 0x04100000,
0x00004000, 0x00104010, 0x00100010, 0x04004000, 0x04104010, 0x04000010, 0x00000000, 0x00104000,
0x04000000, 0x00100000, 0x04004010, 0x04100010, 0x00100000, 0x00004000, 0x04104000, 0x00000010,
0x00100000, 0x00004000, 0x04000010, 0x04104010, 0x00004010, 0x04000000, 0x00000000, 0x00104000,
0x04100010, 0x04004010, 0x04004000, 0x00100010, 0x04104000, 0x00000010, 0x00100010, 0x04004000,
0x04104010, 0x00100000, 0x04100000, 0x04000010, 0x00104000, 0x00004010, 0x04004010, 0x04100000,
0x00000010, 0x04104000, 0x00104010, 0x00000000, 0x04000000, 0x04100010, 0x00004000, 0x00104010,
};
const sboxes = [8][64]u32{ s0, s1, s2, s3, s4, s5, s6, s7 };
pub const CryptMode = enum { Encrypt, Decrypt };
fn permuteBits(long: anytype, indices: []const u8) @TypeOf(long) {
comptime const T = @TypeOf(long);
comptime const TL = math.Log2Int(T);
var out: T = 0;
for (indices) |x, i| {
out ^= (((long >> @intCast(u6, x)) & 1) << @intCast(TL, i));
}
return out;
}
fn precomputePermutation(comptime permutation: []const u8) [8][256]u64 {
@setEvalBranchQuota(1000000);
comptime var i: u64 = 0;
comptime var out: [8][256]u64 = undefined;
inline while (i < 8) : (i += 1) {
comptime var j: u64 = 0;
inline while (j < 256) : (j += 1) {
var p: u64 = j << (i * 8);
out[i][j] = permuteBits(p, permutation);
}
}
return out;
}
fn permuteBitsPrecomputed(long: u64, comptime precomputedPerm: [8][256]u64) u64 {
var out: u64 = 0;
inline for (precomputedPerm) |p, i| {
out ^= p[@truncate(u8, long >> @intCast(u6, i * 8))];
}
return out;
}
fn initialPermutation(long: u64) u64 {
return if (builtin.mode == .ReleaseSmall)
permuteBits(long, &ip)
else
permuteBitsPrecomputed(long, comptime precomutePermutation(&ip));
}
fn finalPermutation(long: u64) u64 {
return if (builtin.mode == .ReleaseSmall)
permuteBits(long, &fp)
else
permuteBitsPrecomputed(long, comptime precomutePermutation(&fp));
}
fn permutePc1(long: u64) u64 {
if (builtin.mode == .ReleaseSmall) {
return permuteBits(long, &pc1);
} else {
comptime const prepc1 = precomutePermutation(&pc1);
return permuteBitsPrecomputed(long, prepc1);
}
}
fn permutePc2(long: u56) u56 {
if (builtin.mode == .ReleaseSmall) {
return permuteBits(long, &pc2);
} else {
comptime const prepc2 = precomutePermutation(&pc2);
return @intCast(u56, permuteBitsPrecomputed(@as(u64, long), prepc2));
}
}
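// Encrypts or decrypts a single 8-byte block: initial permutation, 16 Feistel
// rounds (subkeys applied in reverse order when decrypting), final permutation.
// The E expansion is performed implicitly by the overlapping 6-bit windows below.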
pub fn cryptBlock(comptime crypt_mode: CryptMode, keys: []const u48, dest: []u8, source: []const u8) void {
assert(source.len == block_size);
assert(dest.len >= block_size);
const dataLong = mem.readIntSliceBig(u64, source);
const perm = initialPermutation(dataLong);
var left = @truncate(u32, perm & 0xFFFFFFFF);
var right = @truncate(u32, perm >> 32);
comptime var i: u8 = 0;
inline while (i < 16) : (i += 1) {
const r = right;
const k = keys[if (crypt_mode == .Encrypt) i else (15 - i)];
var work: u32 = 0;
// zig fmt: off
work = s0[@truncate(u6, math.rotl(u32, r, 1)) ^ @truncate(u6, k)]
^ s1[@truncate(u6, r >> 3) ^ @truncate(u6, k >> 6)]
^ s2[@truncate(u6, r >> 7) ^ @truncate(u6, k >> 12)]
^ s3[@truncate(u6, r >> 11) ^ @truncate(u6, k >> 18)]
^ s4[@truncate(u6, r >> 15) ^ @truncate(u6, k >> 24)]
^ s5[@truncate(u6, r >> 19) ^ @truncate(u6, k >> 30)]
^ s6[@truncate(u6, r >> 23) ^ @truncate(u6, k >> 36)]
^ s7[@truncate(u6, math.rotr(u32, r, 1) >> 26) ^ @truncate(u6, k >> 42)];
// zig fmt: on
right = left ^ work;
left = r;
}
var out: u64 = left;
out <<= 32;
out ^= right;
out = finalPermutation(out);
const outBytes = mem.asBytes(&out);
mem.copy(u8, dest, outBytes);
}
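// Cumulative totals of the standard DES per-round key rotations
// (1,1,2,2,2,2,2,2,1,2,2,2,2,2,2,1): each round rotates the original key halves
// by the running total instead of rotating the previous round's result.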
const shifts = [_]u32{
1, 2, 4, 6, 8, 10, 12, 14, 15, 17, 19, 21, 23, 25, 27, 28,
};
pub fn subkeys(keyBytes: []const u8) [16]u48 {
assert(keyBytes.len == block_size);
const size: u6 = math.maxInt(u6);
const key = mem.readIntSliceBig(u64, keyBytes);
const perm = @truncate(u56, permutePc1(key));
var left: u28 = @truncate(u28, perm & 0xfffffff);
var right: u28 = @truncate(u28, (perm >> 28) & 0xfffffff);
var keys: [16]u48 = undefined;
inline for (shifts) |shift, i| {
var subkey: u56 = math.rotr(u28, right, shift);
subkey <<= 28;
subkey ^= math.rotr(u28, left, shift);
subkey = permutePc2(subkey);
keys[i] = @truncate(u48, subkey);
}
return keys;
}
pub const DES = struct {
const Self = @This();
subkeys: [16]u48,
pub fn init(key: [8]u8) Self {
return Self{ .subkeys = subkeys(&key) };
}
pub fn crypt(self: Self, crypt_mode: CryptMode, dest: []u8, source: []const u8) void {
return switch (crypt_mode) {
.Encrypt => cryptBlock(.Encrypt, &self.subkeys, dest, source),
.Decrypt => cryptBlock(.Decrypt, &self.subkeys, dest, source),
};
}
};
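// Illustrative round-trip check added for clarity (not part of the original test
// suite). It only relies on encrypt/decrypt being inverses, so the key and
// plaintext values are arbitrary; `mem` and `assert` are the imports already
// used elsewhere in this file.
test "DES block round trip" {
    const key = [8]u8{ 0x13, 0x34, 0x57, 0x79, 0x9b, 0xbc, 0xdf, 0xf1 };
    const plain = [8]u8{ 'r', 'o', 'u', 'n', 'd', 't', 'r', 'p' };
    var cipher: [8]u8 = undefined;
    var decrypted: [8]u8 = undefined;
    const des = DES.init(key);
    des.crypt(.Encrypt, &cipher, &plain);
    des.crypt(.Decrypt, &decrypted, &cipher);
    assert(mem.eql(u8, &plain, &decrypted));
}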
pub const TDES = struct {
const Self = @This();
subkeys: [3][16]u48,
pub fn init(key: [24]u8) Self {
return Self{ .subkeys = [_][16]u48{
subkeys(key[0..8]),
subkeys(key[8..16]),
subkeys(key[16..]),
} };
}
pub fn crypt(self: Self, crypt_mode: CryptMode, dest: []u8, source: []const u8) void {
var work: [8]u8 = undefined;
mem.copy(u8, &work, source);
switch (crypt_mode) {
.Encrypt => {
cryptBlock(.Encrypt, &self.subkeys[0], &work, &work);
cryptBlock(.Decrypt, &self.subkeys[1], &work, &work);
cryptBlock(.Encrypt, &self.subkeys[2], &work, &work);
},
.Decrypt => {
cryptBlock(.Decrypt, &self.subkeys[2], &work, &work);
cryptBlock(.Encrypt, &self.subkeys[1], &work, &work);
cryptBlock(.Decrypt, &self.subkeys[0], &work, &work);
},
}
mem.copy(u8, dest, &work);
}
}; | src/des.zig |
const std = @import("std");
const print = std.debug.print;
const List = std.ArrayList;
const util = @import("util.zig");
const gpa = util.gpa;
const data = @embedFile("../data/day17.txt");
const Target = struct {
x_min: i32,
x_max: i32,
y_min: i32,
y_max: i32,
};
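// Expects the usual AoC 2021 day 17 input, e.g. "target area: x=20..30, y=-10..-5".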
fn parseTarget(d: []const u8) !Target {
var it = std.mem.split(std.mem.trim(u8, d, "\n"), ": ");
_ = it.next().?;
it = std.mem.split(it.next().?, ", ");
var x_it = std.mem.split(it.next().?[2..], "..");
var y_it = std.mem.split(it.next().?[2..], "..");
return Target{
.x_min = try std.fmt.parseInt(i32, x_it.next().?, 10),
.x_max = try std.fmt.parseInt(i32, x_it.next().?, 10),
.y_min = try std.fmt.parseInt(i32, y_it.next().?, 10),
.y_max = try std.fmt.parseInt(i32, y_it.next().?, 10),
};
}
const Probe = struct {
x: i32,
y: i32,
vel_x: i32,
vel_y: i32,
max: i32 = std.math.minInt(i32),
x_at_max: i32 = 0,
const Self = @This();
fn new(x: i32, y: i32) Self {
return .{ .x = 0, .y = 0, .vel_x = x, .vel_y = y };
}
fn sim(self: *Self, target: Target) ?i32 {
if (self.y > self.max) {
self.max = self.y;
self.x_at_max = self.x;
}
while (true) {
// move
self.x += self.vel_x;
self.y += self.vel_y;
// set max
if (self.y > self.max) {
self.max = self.y;
self.x_at_max = self.x;
}
// in target?
if (self.x >= target.x_min and self.x <= target.x_max and
self.y >= target.y_min and self.y <= target.y_max)
{
return self.max;
}
// not worth moving again?
if (self.x >= target.x_max or self.y <= target.y_min) return null;
// update vel
if (self.vel_x != 0) self.vel_x += if (self.vel_x < 0) @as(i32, 1) else @as(i32, -1);
self.vel_y -= 1;
}
}
};
pub fn main() !void {
var target = try parseTarget(data);
// brute force it
var x: i32 = 1;
var y: i32 = -100;
var max_y: i32 = std.math.minInt(i32);
var valid = List(Probe).init(gpa);
while (true) {
var probe = Probe.new(x, y);
if (probe.sim(target)) |max| {
if (max > max_y) max_y = max;
try valid.append(probe);
}
x += 1;
if (x > target.x_max) {
x = 1;
y += 1;
}
// print("{} ({}, {}), {}\n", .{ max_y, x, y, probe.x_at_max });
if (probe.x_at_max > target.x_max and y > 1000) break;
}
print("{}\n", .{max_y});
print("{}\n", .{valid.items.len});
} | 2021/src/day17.zig |
const std = @import("std");
const zriscv = @import("zriscv");
const clap = @import("clap");
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const allocator = &gpa.allocator;
const stdin = std.io.getStdIn();
const stdin_reader = stdin.reader();
const stdout_writer = std.io.getStdOut().writer();
const stderr_writer = std.io.getStdErr().writer();
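// Two instantiations of the emulated CPU: one runs silently ('r'/'n' commands),
// the other traces each step to stdout ('e'/'s' commands).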
const NoOutputCpu = zriscv.Cpu(.{});
const OutputCpu = zriscv.Cpu(.{ .writer_type = @TypeOf(stdout_writer) });
pub fn main() !u8 {
defer _ = gpa.deinit();
const params = comptime [_]clap.Param(clap.Help){
clap.parseParam("-h, --help Display this help and exit.") catch unreachable,
clap.parseParam("<FILE>") catch unreachable,
};
var diag = clap.Diagnostic{};
var args = clap.parse(clap.Help, ¶ms, .{ .diagnostic = &diag }) catch |err| {
// Report useful error and exit
diag.report(stderr_writer, err) catch {};
return err;
};
defer args.deinit();
if (args.flag("--help")) {
try clap.help(stdout_writer, ¶ms);
return 0;
}
const file_path = blk: {
if (args.positionals().len < 1) {
try stderr_writer.writeAll("no file path provided\n");
return 1;
}
if (args.positionals().len > 1) {
try stderr_writer.writeAll("multiple files are not supported\n");
return 1;
}
break :blk args.positionals()[0];
};
const file_contents = blk: {
var file = std.fs.cwd().openFile(file_path, .{}) catch |err| switch (err) {
error.FileNotFound => {
try stderr_writer.print("file not found: {s}\n", .{file_path});
return 1;
},
else => |e| return e,
};
defer file.close();
break :blk try file.readToEndAlloc(allocator, std.math.maxInt(usize));
};
defer allocator.free(file_contents);
var memory = try allocator.dupe(u8, file_contents);
defer allocator.free(memory);
var cpu_state = zriscv.CpuState{ .memory = memory };
const previous_terminal_settings = try std.os.tcgetattr(stdin.handle);
try setRawMode(previous_terminal_settings);
defer {
std.os.tcsetattr(stdin.handle, .FLUSH, previous_terminal_settings) catch {};
stdout_writer.writeByte('\n') catch {};
}
var opt_break_point: ?u64 = null;
while (true) {
try stdout_writer.writeAll("> ");
const input = stdin_reader.readByte() catch return 0;
if (input == '?' or input == 'h' or input == '\n') {
try stdout_writer.writeAll(
\\help:
\\ ?|h|'\n' - this help menu
\\ r - run without output (this will not stop unless a breakpoint is hit, or an error)
\\ e - run with output (this will not stop unless a breakpoint is hit, or an error)
\\ b[addr] - set breakpoint, [addr] must be in hex, blank [addr] clears the breakpoint
\\ s - single step with output
\\ n - single step without output
\\ d - dump cpu state
\\ 0 - reset cpu
\\ q - quit
\\
);
continue;
}
if (input == '0') {
const new_memory = try allocator.dupe(u8, file_contents);
allocator.free(memory);
memory = new_memory;
cpu_state = zriscv.CpuState{ .memory = memory };
try stdout_writer.writeAll("\nstate reset\n");
continue;
}
if (input == 'b') {
            // disable raw mode so the user can type a hex string
std.os.tcsetattr(stdin.handle, .FLUSH, previous_terminal_settings) catch {};
defer setRawMode(previous_terminal_settings) catch {};
const hex_str = (try stdin_reader.readUntilDelimiterOrEofAlloc(allocator, '\n', std.math.maxInt(usize))) orelse return 1;
defer allocator.free(hex_str);
if (std.mem.eql(u8, hex_str, "")) {
opt_break_point = null;
try stdout_writer.writeAll("cleared breakpoint\n");
continue;
}
const addr = std.fmt.parseUnsigned(u64, hex_str, 16) catch |err| {
try stdout_writer.print("unable to parse '{s}' as hex: {s}\n", .{ hex_str, @errorName(err) });
continue;
};
if (addr >= cpu_state.memory.len) {
try stdout_writer.print("breakpoint address 0x{x} exceeds cpu memory\n", .{addr});
continue;
}
try stdout_writer.print("set breakpoint to 0x{x}\n", .{addr});
opt_break_point = addr;
continue;
}
if (input == 'r') {
try stdout_writer.writeByte('\n');
const timer = try std.time.Timer.start();
if (opt_break_point) |break_point| {
while (cpu_state.pc != break_point) {
NoOutputCpu.step(&cpu_state) catch |err| {
try stdout_writer.print("error: {s}\n", .{@errorName(err)});
break;
};
} else {
try stdout_writer.writeAll("hit breakpoint\n");
}
} else {
while (true) {
NoOutputCpu.run(&cpu_state) catch |err| {
try stdout_writer.print("error: {s}\n", .{@errorName(err)});
break;
};
}
}
const elapsed = timer.read();
try stdout_writer.print("execution took: {} ({} ns)\n", .{ std.fmt.fmtDuration(elapsed), elapsed });
continue;
}
if (input == 'e') {
try stdout_writer.writeByte('\n');
const timer = try std.time.Timer.start();
if (opt_break_point) |break_point| {
while (cpu_state.pc != break_point) {
OutputCpu.step(&cpu_state, stdout_writer) catch |err| {
try stdout_writer.print("error: {s}\n", .{@errorName(err)});
break;
};
} else {
try stdout_writer.writeAll("hit breakpoint\n");
}
} else {
while (true) {
OutputCpu.run(&cpu_state, stdout_writer) catch |err| {
try stdout_writer.print("error: {s}\n", .{@errorName(err)});
break;
};
}
}
const elapsed = timer.read();
try stdout_writer.print("execution took: {} ({} ns)\n", .{ std.fmt.fmtDuration(elapsed), elapsed });
continue;
}
if (input == 'n') {
try stdout_writer.writeByte('\n');
NoOutputCpu.step(&cpu_state) catch |err| {
try stdout_writer.print("error: {s}\n", .{@errorName(err)});
};
continue;
}
if (input == 's') {
try stdout_writer.writeByte('\n');
OutputCpu.step(&cpu_state, stdout_writer) catch |err| {
try stdout_writer.print("error: {s}\n", .{@errorName(err)});
};
continue;
}
if (input == 'd') {
try stdout_writer.writeByte('\n');
try cpu_state.dump(stdout_writer);
continue;
}
if (input == 'q') {
return 0;
}
try stdout_writer.writeAll("invalid option\n");
}
}
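// Puts stdin into non-canonical mode so single keypresses are delivered
// immediately instead of being line-buffered until Enter is pressed.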
fn setRawMode(previous: std.os.termios) !void {
var current_settings = previous;
current_settings.lflag &= ~@as(u32, std.os.linux.ICANON);
try std.os.tcsetattr(stdin.handle, .FLUSH, current_settings);
}
comptime {
std.testing.refAllDecls(@This());
} | runner/main.zig |
const std = @import("std");
usingnamespace (@import("../machine.zig"));
usingnamespace (@import("../util.zig"));
const imm = Operand.immediate;
const mem = Operand.memory;
const memRm = Operand.memoryRm;
const reg = Operand.register;
test "reserved opcodes" {
const m16 = Machine.init(.x86_16);
const m32 = Machine.init(.x86_32);
const m64 = Machine.init(.x64);
const rm8 = Operand.memoryRm(.DefaultSeg, .BYTE, .EAX, 0);
const rm16 = Operand.memoryRm(.DefaultSeg, .WORD, .EAX, 0);
const rm32 = Operand.memoryRm(.DefaultSeg, .DWORD, .EAX, 0);
const rm64 = Operand.memoryRm(.DefaultSeg, .QWORD, .EAX, 0);
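    // Note on the expected byte strings: 0x66 is the operand-size override prefix
    // (16-bit operands), 0x67 the address-size override (the memory operands use a
    // 32-bit base register in long mode), and 0x48 is REX.W for 64-bit operands.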
debugPrint(false);
{
testOp0(m16, .SALC, "D6");
testOp0(m32, .SALC, "D6");
testOp0(m64, .SALC, AsmError.InvalidOperand);
}
// Immediate Group 1
// Same behavior as corresponding instruction with Opcode Op1r(0x80, x)
{
testOp2(m32, .RESRV_ADD, rm8, imm(0), "82 00 00");
testOp2(m32, .RESRV_OR, rm8, imm(0), "82 08 00");
testOp2(m32, .RESRV_ADC, rm8, imm(0), "82 10 00");
testOp2(m32, .RESRV_SBB, rm8, imm(0), "82 18 00");
testOp2(m32, .RESRV_AND, rm8, imm(0), "82 20 00");
testOp2(m32, .RESRV_SUB, rm8, imm(0), "82 28 00");
testOp2(m32, .RESRV_XOR, rm8, imm(0), "82 30 00");
testOp2(m32, .RESRV_CMP, rm8, imm(0), "82 38 00");
//
testOp2(m64, .RESRV_ADD, rm8, imm(0), AsmError.InvalidOperand);
testOp2(m64, .RESRV_OR, rm8, imm(0), AsmError.InvalidOperand);
testOp2(m64, .RESRV_ADC, rm8, imm(0), AsmError.InvalidOperand);
testOp2(m64, .RESRV_SBB, rm8, imm(0), AsmError.InvalidOperand);
testOp2(m64, .RESRV_AND, rm8, imm(0), AsmError.InvalidOperand);
testOp2(m64, .RESRV_SUB, rm8, imm(0), AsmError.InvalidOperand);
testOp2(m64, .RESRV_XOR, rm8, imm(0), AsmError.InvalidOperand);
testOp2(m64, .RESRV_CMP, rm8, imm(0), AsmError.InvalidOperand);
}
// Shift Group 2 /6
// Same behavior as corresponding instruction with Opcode Op1r(x, 4)
{
testOp2(m64, .RESRV_SAL, rm8, imm(1), "67 d0 30");
testOp2(m64, .RESRV_SAL, rm8, reg(.CL), "67 d2 30");
testOp2(m64, .RESRV_SAL, rm8, imm(0), "67 c0 30 00");
testOp2(m64, .RESRV_SAL, rm16, imm(1), "66 67 d1 30");
testOp2(m64, .RESRV_SAL, rm32, imm(1), "67 d1 30");
testOp2(m64, .RESRV_SAL, rm64, imm(1), "67 48 d1 30");
testOp2(m64, .RESRV_SAL, rm16, imm(0), "66 67 c1 30 00");
testOp2(m64, .RESRV_SAL, rm32, imm(0), "67 c1 30 00");
testOp2(m64, .RESRV_SAL, rm64, imm(0), "67 48 c1 30 00");
testOp2(m64, .RESRV_SAL, rm16, reg(.CL), "66 67 d3 30");
testOp2(m64, .RESRV_SAL, rm32, reg(.CL), "67 d3 30");
testOp2(m64, .RESRV_SAL, rm64, reg(.CL), "67 48 d3 30");
//
testOp2(m64, .RESRV_SHL, rm8, imm(1), "67 d0 30");
testOp2(m64, .RESRV_SHL, rm8, reg(.CL), "67 d2 30");
testOp2(m64, .RESRV_SHL, rm8, imm(0), "67 c0 30 00");
testOp2(m64, .RESRV_SHL, rm16, imm(1), "66 67 d1 30");
testOp2(m64, .RESRV_SHL, rm32, imm(1), "67 d1 30");
testOp2(m64, .RESRV_SHL, rm64, imm(1), "67 48 d1 30");
testOp2(m64, .RESRV_SHL, rm16, imm(0), "66 67 c1 30 00");
testOp2(m64, .RESRV_SHL, rm32, imm(0), "67 c1 30 00");
testOp2(m64, .RESRV_SHL, rm64, imm(0), "67 48 c1 30 00");
testOp2(m64, .RESRV_SHL, rm16, reg(.CL), "66 67 d3 30");
testOp2(m64, .RESRV_SHL, rm32, reg(.CL), "67 d3 30");
testOp2(m64, .RESRV_SHL, rm64, reg(.CL), "67 48 d3 30");
}
// Unary Group 3 /1
{
testOp2(m64, .RESRV_TEST, rm8, imm(0), "67 f6 08 00");
testOp2(m64, .RESRV_TEST, rm16, imm(0x7FFF), "66 67 f7 08 ff 7f");
testOp2(m64, .RESRV_TEST, rm32, imm(0x7FFFFFFF), "67 f7 08 ff ff ff 7f");
testOp2(m64, .RESRV_TEST, rm64, imm(0x7FFFFFFF), "67 48 f7 08 ff ff ff 7f");
}
// x87
{
// DCD0 - DCD7 (same as FCOM D8D0-D8D7)
testOp2(m64, .RESRV_FCOM, reg(.ST0), reg(.ST7), "dcd7");
testOp1(m64, .RESRV_FCOM, reg(.ST0), "dcd0");
testOp1(m64, .RESRV_FCOM, reg(.ST7), "dcd7");
testOp0(m64, .RESRV_FCOM, "dcd1");
// DCD8 - DCDF (same as FCOMP D8D8-D8DF)
testOp2(m64, .RESRV_FCOMP, reg(.ST0), reg(.ST7), "dcdf");
testOp1(m64, .RESRV_FCOMP, reg(.ST0), "dcd8");
testOp1(m64, .RESRV_FCOMP, reg(.ST7), "dcdf");
testOp0(m64, .RESRV_FCOMP, "dcd9");
        // DED0 - DED7 (same as FCOMP D8D8-D8DF)
testOp2(m64, .RESRV_FCOMP2, reg(.ST0), reg(.ST7), "ded7");
testOp1(m64, .RESRV_FCOMP2, reg(.ST0), "ded0");
testOp1(m64, .RESRV_FCOMP2, reg(.ST7), "ded7");
testOp0(m64, .RESRV_FCOMP2, "ded1");
// D0C8 - D0CF (same as FXCH D9C8-D9CF)
testOp2(m64, .RESRV_FXCH, reg(.ST0), reg(.ST7), "d0cf");
testOp1(m64, .RESRV_FXCH, reg(.ST0), "d0c8");
testOp1(m64, .RESRV_FXCH, reg(.ST7), "d0cf");
testOp0(m64, .RESRV_FXCH, "d0c9");
// DFC8 - DFCF (same as FXCH D9C8-D9CF)
testOp1(m64, .RESRV_FXCH2, reg(.ST0), "dfc8");
testOp1(m64, .RESRV_FXCH2, reg(.ST7), "dfcf");
testOp2(m64, .RESRV_FXCH2, reg(.ST0), reg(.ST7), "dfcf");
testOp0(m64, .RESRV_FXCH2, "dfc9");
// DFD0 - DFD7 (same as FSTP DDD8-DDDF)
testOp1(m64, .RESRV_FSTP, reg(.ST0), "dfd0");
testOp1(m64, .RESRV_FSTP, reg(.ST7), "dfd7");
testOp2(m64, .RESRV_FSTP, reg(.ST0), reg(.ST7), "dfd7");
// DFD8 - DFDF (same as FSTP DDD8-DDDF)
testOp1(m64, .RESRV_FSTP2, reg(.ST0), "dfd8");
testOp1(m64, .RESRV_FSTP2, reg(.ST7), "dfdf");
testOp2(m64, .RESRV_FSTP2, reg(.ST0), reg(.ST7), "dfdf");
// D9D8 - D9DF (same as FFREE with addition of an x87 POP)
testOp1(m64, .FFREEP, reg(.ST7), "dfc7");
// DFC0 - DFC7 (same as FSTP DDD8-DDDF but won't cause a stack underflow exception)
testOp1(m64, .FSTPNOUFLOW, reg(.ST7), "dddf");
testOp2(m64, .FSTPNOUFLOW, reg(.ST0), reg(.ST7), "dddf");
}
{
// NOP - 0F 0D
testOp1(m64, .RESRV_NOP_0F0D_0, rm16, "66 67 0f 0d 00");
testOp1(m64, .RESRV_NOP_0F0D_0, rm32, "67 0f 0d 00");
testOp1(m64, .RESRV_NOP_0F0D_1, rm16, "66 67 0f 0d 08");
testOp1(m64, .RESRV_NOP_0F0D_1, rm32, "67 0f 0d 08");
testOp1(m64, .RESRV_NOP_0F0D_1, rm64, "67 48 0f 0d 08");
testOp1(m64, .RESRV_NOP_0F0D_2, rm16, "66 67 0f 0d 10");
testOp1(m64, .RESRV_NOP_0F0D_2, rm32, "67 0f 0d 10");
testOp1(m64, .RESRV_NOP_0F0D_2, rm64, "67 48 0f 0d 10");
testOp1(m64, .RESRV_NOP_0F0D_3, rm16, "66 67 0f 0d 18");
testOp1(m64, .RESRV_NOP_0F0D_3, rm32, "67 0f 0d 18");
testOp1(m64, .RESRV_NOP_0F0D_3, rm64, "67 48 0f 0d 18");
testOp1(m64, .RESRV_NOP_0F0D_4, rm16, "66 67 0f 0d 20");
testOp1(m64, .RESRV_NOP_0F0D_4, rm32, "67 0f 0d 20");
testOp1(m64, .RESRV_NOP_0F0D_4, rm64, "67 48 0f 0d 20");
testOp1(m64, .RESRV_NOP_0F0D_5, rm16, "66 67 0f 0d 28");
testOp1(m64, .RESRV_NOP_0F0D_5, rm32, "67 0f 0d 28");
testOp1(m64, .RESRV_NOP_0F0D_5, rm64, "67 48 0f 0d 28");
testOp1(m64, .RESRV_NOP_0F0D_6, rm16, "66 67 0f 0d 30");
testOp1(m64, .RESRV_NOP_0F0D_6, rm32, "67 0f 0d 30");
testOp1(m64, .RESRV_NOP_0F0D_6, rm64, "67 48 0f 0d 30");
testOp1(m64, .RESRV_NOP_0F0D_7, rm16, "66 67 0f 0d 38");
testOp1(m64, .RESRV_NOP_0F0D_7, rm32, "67 0f 0d 38");
testOp1(m64, .RESRV_NOP_0F0D_7, rm64, "67 48 0f 0d 38");
}
{
// NOP - 0F 18
testOp1(m64, .RESRV_NOP_0F18_0, rm16, "66 67 0f 18 00");
testOp1(m64, .RESRV_NOP_0F18_0, rm32, "67 0f 18 00");
testOp1(m64, .RESRV_NOP_0F18_0, rm64, "67 48 0f 18 00");
testOp1(m64, .RESRV_NOP_0F18_1, rm16, "66 67 0f 18 08");
testOp1(m64, .RESRV_NOP_0F18_1, rm32, "67 0f 18 08");
testOp1(m64, .RESRV_NOP_0F18_1, rm64, "67 48 0f 18 08");
testOp1(m64, .RESRV_NOP_0F18_2, rm16, "66 67 0f 18 10");
testOp1(m64, .RESRV_NOP_0F18_2, rm32, "67 0f 18 10");
testOp1(m64, .RESRV_NOP_0F18_2, rm64, "67 48 0f 18 10");
testOp1(m64, .RESRV_NOP_0F18_3, rm16, "66 67 0f 18 18");
testOp1(m64, .RESRV_NOP_0F18_3, rm32, "67 0f 18 18");
testOp1(m64, .RESRV_NOP_0F18_3, rm64, "67 48 0f 18 18");
testOp1(m64, .RESRV_NOP_0F18_4, rm16, "66 67 0f 18 20");
testOp1(m64, .RESRV_NOP_0F18_4, rm32, "67 0f 18 20");
testOp1(m64, .RESRV_NOP_0F18_4, rm64, "67 48 0f 18 20");
testOp1(m64, .RESRV_NOP_0F18_5, rm16, "66 67 0f 18 28");
testOp1(m64, .RESRV_NOP_0F18_5, rm32, "67 0f 18 28");
testOp1(m64, .RESRV_NOP_0F18_5, rm64, "67 48 0f 18 28");
testOp1(m64, .RESRV_NOP_0F18_6, rm16, "66 67 0f 18 30");
testOp1(m64, .RESRV_NOP_0F18_6, rm32, "67 0f 18 30");
testOp1(m64, .RESRV_NOP_0F18_6, rm64, "67 48 0f 18 30");
testOp1(m64, .RESRV_NOP_0F18_7, rm16, "66 67 0f 18 38");
testOp1(m64, .RESRV_NOP_0F18_7, rm32, "67 0f 18 38");
testOp1(m64, .RESRV_NOP_0F18_7, rm64, "67 48 0f 18 38");
}
{
// NOP - 0F 19
testOp1(m64, .RESRV_NOP_0F19_0, rm16, "66 67 0f 19 00");
testOp1(m64, .RESRV_NOP_0F19_0, rm32, "67 0f 19 00");
testOp1(m64, .RESRV_NOP_0F19_0, rm64, "67 48 0f 19 00");
testOp1(m64, .RESRV_NOP_0F19_1, rm16, "66 67 0f 19 08");
testOp1(m64, .RESRV_NOP_0F19_1, rm32, "67 0f 19 08");
testOp1(m64, .RESRV_NOP_0F19_1, rm64, "67 48 0f 19 08");
testOp1(m64, .RESRV_NOP_0F19_2, rm16, "66 67 0f 19 10");
testOp1(m64, .RESRV_NOP_0F19_2, rm32, "67 0f 19 10");
testOp1(m64, .RESRV_NOP_0F19_2, rm64, "67 48 0f 19 10");
testOp1(m64, .RESRV_NOP_0F19_3, rm16, "66 67 0f 19 18");
testOp1(m64, .RESRV_NOP_0F19_3, rm32, "67 0f 19 18");
testOp1(m64, .RESRV_NOP_0F19_3, rm64, "67 48 0f 19 18");
testOp1(m64, .RESRV_NOP_0F19_4, rm16, "66 67 0f 19 20");
testOp1(m64, .RESRV_NOP_0F19_4, rm32, "67 0f 19 20");
testOp1(m64, .RESRV_NOP_0F19_4, rm64, "67 48 0f 19 20");
testOp1(m64, .RESRV_NOP_0F19_5, rm16, "66 67 0f 19 28");
testOp1(m64, .RESRV_NOP_0F19_5, rm32, "67 0f 19 28");
testOp1(m64, .RESRV_NOP_0F19_5, rm64, "67 48 0f 19 28");
testOp1(m64, .RESRV_NOP_0F19_6, rm16, "66 67 0f 19 30");
testOp1(m64, .RESRV_NOP_0F19_6, rm32, "67 0f 19 30");
testOp1(m64, .RESRV_NOP_0F19_6, rm64, "67 48 0f 19 30");
testOp1(m64, .RESRV_NOP_0F19_7, rm16, "66 67 0f 19 38");
testOp1(m64, .RESRV_NOP_0F19_7, rm32, "67 0f 19 38");
testOp1(m64, .RESRV_NOP_0F19_7, rm64, "67 48 0f 19 38");
}
{
// NOP - 0F 1A
testOp1(m64, .RESRV_NOP_0F1A_0, rm16, "66 67 0f 1a 00");
testOp1(m64, .RESRV_NOP_0F1A_0, rm32, "67 0f 1a 00");
testOp1(m64, .RESRV_NOP_0F1A_0, rm64, "67 48 0f 1a 00");
testOp1(m64, .RESRV_NOP_0F1A_1, rm16, "66 67 0f 1a 08");
testOp1(m64, .RESRV_NOP_0F1A_1, rm32, "67 0f 1a 08");
testOp1(m64, .RESRV_NOP_0F1A_1, rm64, "67 48 0f 1a 08");
testOp1(m64, .RESRV_NOP_0F1A_2, rm16, "66 67 0f 1a 10");
testOp1(m64, .RESRV_NOP_0F1A_2, rm32, "67 0f 1a 10");
testOp1(m64, .RESRV_NOP_0F1A_2, rm64, "67 48 0f 1a 10");
testOp1(m64, .RESRV_NOP_0F1A_3, rm16, "66 67 0f 1a 18");
testOp1(m64, .RESRV_NOP_0F1A_3, rm32, "67 0f 1a 18");
testOp1(m64, .RESRV_NOP_0F1A_3, rm64, "67 48 0f 1a 18");
testOp1(m64, .RESRV_NOP_0F1A_4, rm16, "66 67 0f 1a 20");
testOp1(m64, .RESRV_NOP_0F1A_4, rm32, "67 0f 1a 20");
testOp1(m64, .RESRV_NOP_0F1A_4, rm64, "67 48 0f 1a 20");
testOp1(m64, .RESRV_NOP_0F1A_5, rm16, "66 67 0f 1a 28");
testOp1(m64, .RESRV_NOP_0F1A_5, rm32, "67 0f 1a 28");
testOp1(m64, .RESRV_NOP_0F1A_5, rm64, "67 48 0f 1a 28");
testOp1(m64, .RESRV_NOP_0F1A_6, rm16, "66 67 0f 1a 30");
testOp1(m64, .RESRV_NOP_0F1A_6, rm32, "67 0f 1a 30");
testOp1(m64, .RESRV_NOP_0F1A_6, rm64, "67 48 0f 1a 30");
testOp1(m64, .RESRV_NOP_0F1A_7, rm16, "66 67 0f 1a 38");
testOp1(m64, .RESRV_NOP_0F1A_7, rm32, "67 0f 1a 38");
testOp1(m64, .RESRV_NOP_0F1A_7, rm64, "67 48 0f 1a 38");
}
{
// NOP - 0F 1B
testOp1(m64, .RESRV_NOP_0F1B_0, rm16, "66 67 0f 1b 00");
testOp1(m64, .RESRV_NOP_0F1B_0, rm32, "67 0f 1b 00");
testOp1(m64, .RESRV_NOP_0F1B_0, rm64, "67 48 0f 1b 00");
testOp1(m64, .RESRV_NOP_0F1B_1, rm16, "66 67 0f 1b 08");
testOp1(m64, .RESRV_NOP_0F1B_1, rm32, "67 0f 1b 08");
testOp1(m64, .RESRV_NOP_0F1B_1, rm64, "67 48 0f 1b 08");
testOp1(m64, .RESRV_NOP_0F1B_2, rm16, "66 67 0f 1b 10");
testOp1(m64, .RESRV_NOP_0F1B_2, rm32, "67 0f 1b 10");
testOp1(m64, .RESRV_NOP_0F1B_2, rm64, "67 48 0f 1b 10");
testOp1(m64, .RESRV_NOP_0F1B_3, rm16, "66 67 0f 1b 18");
testOp1(m64, .RESRV_NOP_0F1B_3, rm32, "67 0f 1b 18");
testOp1(m64, .RESRV_NOP_0F1B_3, rm64, "67 48 0f 1b 18");
testOp1(m64, .RESRV_NOP_0F1B_4, rm16, "66 67 0f 1b 20");
testOp1(m64, .RESRV_NOP_0F1B_4, rm32, "67 0f 1b 20");
testOp1(m64, .RESRV_NOP_0F1B_4, rm64, "67 48 0f 1b 20");
testOp1(m64, .RESRV_NOP_0F1B_5, rm16, "66 67 0f 1b 28");
testOp1(m64, .RESRV_NOP_0F1B_5, rm32, "67 0f 1b 28");
testOp1(m64, .RESRV_NOP_0F1B_5, rm64, "67 48 0f 1b 28");
testOp1(m64, .RESRV_NOP_0F1B_6, rm16, "66 67 0f 1b 30");
testOp1(m64, .RESRV_NOP_0F1B_6, rm32, "67 0f 1b 30");
testOp1(m64, .RESRV_NOP_0F1B_6, rm64, "67 48 0f 1b 30");
testOp1(m64, .RESRV_NOP_0F1B_7, rm16, "66 67 0f 1b 38");
testOp1(m64, .RESRV_NOP_0F1B_7, rm32, "67 0f 1b 38");
testOp1(m64, .RESRV_NOP_0F1B_7, rm64, "67 48 0f 1b 38");
}
{
// NOP - 0F 1C
testOp1(m64, .RESRV_NOP_0F1C_0, rm16, "66 67 0f 1c 00");
testOp1(m64, .RESRV_NOP_0F1C_0, rm32, "67 0f 1c 00");
testOp1(m64, .RESRV_NOP_0F1C_0, rm64, "67 48 0f 1c 00");
testOp1(m64, .RESRV_NOP_0F1C_1, rm16, "66 67 0f 1c 08");
testOp1(m64, .RESRV_NOP_0F1C_1, rm32, "67 0f 1c 08");
testOp1(m64, .RESRV_NOP_0F1C_1, rm64, "67 48 0f 1c 08");
testOp1(m64, .RESRV_NOP_0F1C_2, rm16, "66 67 0f 1c 10");
testOp1(m64, .RESRV_NOP_0F1C_2, rm32, "67 0f 1c 10");
testOp1(m64, .RESRV_NOP_0F1C_2, rm64, "67 48 0f 1c 10");
testOp1(m64, .RESRV_NOP_0F1C_3, rm16, "66 67 0f 1c 18");
testOp1(m64, .RESRV_NOP_0F1C_3, rm32, "67 0f 1c 18");
testOp1(m64, .RESRV_NOP_0F1C_3, rm64, "67 48 0f 1c 18");
testOp1(m64, .RESRV_NOP_0F1C_4, rm16, "66 67 0f 1c 20");
testOp1(m64, .RESRV_NOP_0F1C_4, rm32, "67 0f 1c 20");
testOp1(m64, .RESRV_NOP_0F1C_4, rm64, "67 48 0f 1c 20");
testOp1(m64, .RESRV_NOP_0F1C_5, rm16, "66 67 0f 1c 28");
testOp1(m64, .RESRV_NOP_0F1C_5, rm32, "67 0f 1c 28");
testOp1(m64, .RESRV_NOP_0F1C_5, rm64, "67 48 0f 1c 28");
testOp1(m64, .RESRV_NOP_0F1C_6, rm16, "66 67 0f 1c 30");
testOp1(m64, .RESRV_NOP_0F1C_6, rm32, "67 0f 1c 30");
testOp1(m64, .RESRV_NOP_0F1C_6, rm64, "67 48 0f 1c 30");
testOp1(m64, .RESRV_NOP_0F1C_7, rm16, "66 67 0f 1c 38");
testOp1(m64, .RESRV_NOP_0F1C_7, rm32, "67 0f 1c 38");
testOp1(m64, .RESRV_NOP_0F1C_7, rm64, "67 48 0f 1c 38");
}
{
// NOP - 0F 1D
testOp1(m64, .RESRV_NOP_0F1D_0, rm16, "66 67 0f 1d 00");
testOp1(m64, .RESRV_NOP_0F1D_0, rm32, "67 0f 1d 00");
testOp1(m64, .RESRV_NOP_0F1D_0, rm64, "67 48 0f 1d 00");
testOp1(m64, .RESRV_NOP_0F1D_1, rm16, "66 67 0f 1d 08");
testOp1(m64, .RESRV_NOP_0F1D_1, rm32, "67 0f 1d 08");
testOp1(m64, .RESRV_NOP_0F1D_1, rm64, "67 48 0f 1d 08");
testOp1(m64, .RESRV_NOP_0F1D_2, rm16, "66 67 0f 1d 10");
testOp1(m64, .RESRV_NOP_0F1D_2, rm32, "67 0f 1d 10");
testOp1(m64, .RESRV_NOP_0F1D_2, rm64, "67 48 0f 1d 10");
testOp1(m64, .RESRV_NOP_0F1D_3, rm16, "66 67 0f 1d 18");
testOp1(m64, .RESRV_NOP_0F1D_3, rm32, "67 0f 1d 18");
testOp1(m64, .RESRV_NOP_0F1D_3, rm64, "67 48 0f 1d 18");
testOp1(m64, .RESRV_NOP_0F1D_4, rm16, "66 67 0f 1d 20");
testOp1(m64, .RESRV_NOP_0F1D_4, rm32, "67 0f 1d 20");
testOp1(m64, .RESRV_NOP_0F1D_4, rm64, "67 48 0f 1d 20");
testOp1(m64, .RESRV_NOP_0F1D_5, rm16, "66 67 0f 1d 28");
testOp1(m64, .RESRV_NOP_0F1D_5, rm32, "67 0f 1d 28");
testOp1(m64, .RESRV_NOP_0F1D_5, rm64, "67 48 0f 1d 28");
testOp1(m64, .RESRV_NOP_0F1D_6, rm16, "66 67 0f 1d 30");
testOp1(m64, .RESRV_NOP_0F1D_6, rm32, "67 0f 1d 30");
testOp1(m64, .RESRV_NOP_0F1D_6, rm64, "67 48 0f 1d 30");
testOp1(m64, .RESRV_NOP_0F1D_7, rm16, "66 67 0f 1d 38");
testOp1(m64, .RESRV_NOP_0F1D_7, rm32, "67 0f 1d 38");
testOp1(m64, .RESRV_NOP_0F1D_7, rm64, "67 48 0f 1d 38");
}
{
// NOP - 0F 1E
testOp1(m64, .RESRV_NOP_0F1E_0, rm16, "66 67 0f 1e 00");
testOp1(m64, .RESRV_NOP_0F1E_0, rm32, "67 0f 1e 00");
testOp1(m64, .RESRV_NOP_0F1E_0, rm64, "67 48 0f 1e 00");
testOp1(m64, .RESRV_NOP_0F1E_1, rm16, "66 67 0f 1e 08");
testOp1(m64, .RESRV_NOP_0F1E_1, rm32, "67 0f 1e 08");
testOp1(m64, .RESRV_NOP_0F1E_1, rm64, "67 48 0f 1e 08");
testOp1(m64, .RESRV_NOP_0F1E_2, rm16, "66 67 0f 1e 10");
testOp1(m64, .RESRV_NOP_0F1E_2, rm32, "67 0f 1e 10");
testOp1(m64, .RESRV_NOP_0F1E_2, rm64, "67 48 0f 1e 10");
testOp1(m64, .RESRV_NOP_0F1E_3, rm16, "66 67 0f 1e 18");
testOp1(m64, .RESRV_NOP_0F1E_3, rm32, "67 0f 1e 18");
testOp1(m64, .RESRV_NOP_0F1E_3, rm64, "67 48 0f 1e 18");
testOp1(m64, .RESRV_NOP_0F1E_4, rm16, "66 67 0f 1e 20");
testOp1(m64, .RESRV_NOP_0F1E_4, rm32, "67 0f 1e 20");
testOp1(m64, .RESRV_NOP_0F1E_4, rm64, "67 48 0f 1e 20");
testOp1(m64, .RESRV_NOP_0F1E_5, rm16, "66 67 0f 1e 28");
testOp1(m64, .RESRV_NOP_0F1E_5, rm32, "67 0f 1e 28");
testOp1(m64, .RESRV_NOP_0F1E_5, rm64, "67 48 0f 1e 28");
testOp1(m64, .RESRV_NOP_0F1E_6, rm16, "66 67 0f 1e 30");
testOp1(m64, .RESRV_NOP_0F1E_6, rm32, "67 0f 1e 30");
testOp1(m64, .RESRV_NOP_0F1E_6, rm64, "67 48 0f 1e 30");
testOp1(m64, .RESRV_NOP_0F1E_7, rm16, "66 67 0f 1e 38");
testOp1(m64, .RESRV_NOP_0F1E_7, rm32, "67 0f 1e 38");
testOp1(m64, .RESRV_NOP_0F1E_7, rm64, "67 48 0f 1e 38");
}
{
// NOP - 0F 1F
testOp1(m64, .RESRV_NOP_0F1F_0, rm16, "66 67 0f 1f 00");
testOp1(m64, .RESRV_NOP_0F1F_0, rm32, "67 0f 1f 00");
testOp1(m64, .RESRV_NOP_0F1F_0, rm64, "67 48 0f 1f 00");
testOp1(m64, .RESRV_NOP_0F1F_1, rm16, "66 67 0f 1f 08");
testOp1(m64, .RESRV_NOP_0F1F_1, rm32, "67 0f 1f 08");
testOp1(m64, .RESRV_NOP_0F1F_1, rm64, "67 48 0f 1f 08");
testOp1(m64, .RESRV_NOP_0F1F_2, rm16, "66 67 0f 1f 10");
testOp1(m64, .RESRV_NOP_0F1F_2, rm32, "67 0f 1f 10");
testOp1(m64, .RESRV_NOP_0F1F_2, rm64, "67 48 0f 1f 10");
testOp1(m64, .RESRV_NOP_0F1F_3, rm16, "66 67 0f 1f 18");
testOp1(m64, .RESRV_NOP_0F1F_3, rm32, "67 0f 1f 18");
testOp1(m64, .RESRV_NOP_0F1F_3, rm64, "67 48 0f 1f 18");
testOp1(m64, .RESRV_NOP_0F1F_4, rm16, "66 67 0f 1f 20");
testOp1(m64, .RESRV_NOP_0F1F_4, rm32, "67 0f 1f 20");
testOp1(m64, .RESRV_NOP_0F1F_4, rm64, "67 48 0f 1f 20");
testOp1(m64, .RESRV_NOP_0F1F_5, rm16, "66 67 0f 1f 28");
testOp1(m64, .RESRV_NOP_0F1F_5, rm32, "67 0f 1f 28");
testOp1(m64, .RESRV_NOP_0F1F_5, rm64, "67 48 0f 1f 28");
testOp1(m64, .RESRV_NOP_0F1F_6, rm16, "66 67 0f 1f 30");
testOp1(m64, .RESRV_NOP_0F1F_6, rm32, "67 0f 1f 30");
testOp1(m64, .RESRV_NOP_0F1F_6, rm64, "67 48 0f 1f 30");
testOp1(m64, .RESRV_NOP_0F1F_7, rm16, "66 67 0f 1f 38");
testOp1(m64, .RESRV_NOP_0F1F_7, rm32, "67 0f 1f 38");
testOp1(m64, .RESRV_NOP_0F1F_7, rm64, "67 48 0f 1f 38");
}
} | src/x86/tests/reserved.zig |
const std = @import("std");
const getty = @import("getty");
const Deserializer = @import("deserializer.zig").Deserializer;
const Token = @import("common/token.zig").Token;
const allocator = std.testing.allocator;
const assert = std.debug.assert;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectEqualSlices = std.testing.expectEqualSlices;
const expectEqualStrings = std.testing.expectEqualStrings;
test "array" {
try t([_]i32{}, &[_]Token{
.{ .Seq = .{ .len = 0 } },
.{ .SeqEnd = .{} },
});
try t([3]i32{ 1, 2, 3 }, &[_]Token{
        .{ .Seq = .{ .len = 3 } },
.{ .I32 = 1 },
.{ .I32 = 2 },
.{ .I32 = 3 },
.{ .SeqEnd = .{} },
});
try t([3][2]i32{ .{ 1, 2 }, .{ 3, 4 }, .{ 5, 6 } }, &[_]Token{
.{ .Seq = .{ .len = 3 } },
.{ .Seq = .{ .len = 2 } },
.{ .I32 = 1 },
.{ .I32 = 2 },
.{ .SeqEnd = .{} },
.{ .Seq = .{ .len = 2 } },
.{ .I32 = 3 },
.{ .I32 = 4 },
.{ .SeqEnd = .{} },
.{ .Seq = .{ .len = 2 } },
.{ .I32 = 5 },
.{ .I32 = 6 },
.{ .SeqEnd = .{} },
.{ .SeqEnd = .{} },
});
}
test "array list" {
{
var expected = std.ArrayList(void).init(allocator);
defer expected.deinit();
try t(expected, &[_]Token{
.{ .Seq = .{ .len = 0 } },
.{ .SeqEnd = .{} },
});
}
{
var expected = std.ArrayList(isize).init(allocator);
defer expected.deinit();
try expected.append(1);
try expected.append(2);
try expected.append(3);
try t(expected, &[_]Token{
.{ .Seq = .{ .len = 3 } },
.{ .I8 = 1 },
.{ .I32 = 2 },
.{ .I64 = 3 },
.{ .SeqEnd = .{} },
});
}
{
const Child = std.ArrayList(isize);
const Parent = std.ArrayList(Child);
var expected = Parent.init(allocator);
var a = Child.init(allocator);
var b = Child.init(allocator);
var c = Child.init(allocator);
defer {
expected.deinit();
a.deinit();
b.deinit();
c.deinit();
}
try b.append(1);
try c.append(2);
try c.append(3);
try expected.append(a);
try expected.append(b);
try expected.append(c);
const tokens = &[_]Token{
.{ .Seq = .{ .len = 3 } },
.{ .Seq = .{ .len = 0 } },
.{ .SeqEnd = .{} },
.{ .Seq = .{ .len = 1 } },
.{ .I32 = 1 },
.{ .SeqEnd = .{} },
.{ .Seq = .{ .len = 2 } },
.{ .I32 = 2 },
.{ .I32 = 3 },
.{ .SeqEnd = .{} },
.{ .SeqEnd = .{} },
};
// Test manually since the `t` function cannot recursively test
        // user-defined containers without ugly hacks.
var d = Deserializer.init(allocator, tokens);
const v = getty.deserialize(allocator, Parent, d.deserializer()) catch return error.TestUnexpectedError;
defer getty.de.free(allocator, v);
try expectEqual(expected.capacity, v.capacity);
for (v.items) |l, i| {
try expectEqual(expected.items[i].capacity, l.capacity);
try expectEqualSlices(isize, expected.items[i].items, l.items);
}
}
}
test "bool" {
try t(true, &[_]Token{.{ .Bool = true }});
try t(false, &[_]Token{.{ .Bool = false }});
}
test "float" {
try t(@as(f16, 0), &[_]Token{.{ .F16 = 0 }});
try t(@as(f32, 0), &[_]Token{.{ .F32 = 0 }});
try t(@as(f64, 0), &[_]Token{.{ .F64 = 0 }});
try t(@as(f128, 0), &[_]Token{.{ .F64 = 0 }});
}
test "integer" {
// signed
try t(@as(i8, 0), &[_]Token{.{ .I8 = 0 }});
try t(@as(i16, 0), &[_]Token{.{ .I16 = 0 }});
try t(@as(i32, 0), &[_]Token{.{ .I32 = 0 }});
try t(@as(i64, 0), &[_]Token{.{ .I64 = 0 }});
try t(@as(i128, 0), &[_]Token{.{ .I128 = 0 }});
try t(@as(isize, 0), &[_]Token{.{ .I128 = 0 }});
// unsigned
try t(@as(u8, 0), &[_]Token{.{ .U8 = 0 }});
try t(@as(u16, 0), &[_]Token{.{ .U16 = 0 }});
try t(@as(u32, 0), &[_]Token{.{ .U32 = 0 }});
try t(@as(u64, 0), &[_]Token{.{ .U64 = 0 }});
try t(@as(u128, 0), &[_]Token{.{ .U128 = 0 }});
try t(@as(usize, 0), &[_]Token{.{ .U128 = 0 }});
}
test "string" {
try t("abc", &[_]Token{.{ .String = "abc" }});
var arr = [_]u8{ 'a', 'b', 'c' };
try t(@as([]u8, &arr), &[_]Token{.{ .String = "abc" }});
try t(@as([]const u8, &arr), &[_]Token{.{ .String = "abc" }});
}
test "struct" {
try t(struct {}{}, &[_]Token{
.{ .Map = .{ .len = 0 } },
.{ .MapEnd = .{} },
});
const T = struct { a: i32, b: i32, c: i32 };
try t(T{ .a = 1, .b = 2, .c = 3 }, &[_]Token{
.{ .Struct = .{ .name = "T", .len = 3 } },
.{ .String = "a" },
.{ .I32 = 1 },
.{ .String = "b" },
.{ .I32 = 2 },
.{ .String = "c" },
.{ .I32 = 3 },
.{ .StructEnd = .{} },
});
try t(T{ .a = 1, .b = 2, .c = 3 }, &[_]Token{
.{ .Map = .{ .len = 3 } },
.{ .String = "a" },
.{ .I32 = 1 },
.{ .String = "b" },
.{ .I32 = 2 },
.{ .String = "c" },
.{ .I32 = 3 },
.{ .MapEnd = .{} },
});
}
test "tuple" {
try t(std.meta.Tuple(&[_]type{}){}, &[_]Token{
.{ .Tuple = .{ .len = 0 } },
.{ .TupleEnd = .{} },
});
try t(std.meta.Tuple(&[_]type{ i32, u32 }){ 1, 2 }, &[_]Token{
.{ .Tuple = .{ .len = 2 } },
.{ .I32 = 1 },
.{ .U32 = 2 },
.{ .TupleEnd = .{} },
});
try t(std.meta.Tuple(&[_]type{}){}, &[_]Token{
.{ .Seq = .{ .len = 0 } },
.{ .SeqEnd = .{} },
});
try t(std.meta.Tuple(&[_]type{ i32, u32 }){ 1, 2 }, &[_]Token{
.{ .Seq = .{ .len = 2 } },
.{ .I32 = 1 },
.{ .U32 = 2 },
.{ .SeqEnd = .{} },
});
try t(std.meta.Tuple(&[_]type{
std.meta.Tuple(&[_]type{ i32, i32 }),
std.meta.Tuple(&[_]type{ i32, i32 }),
std.meta.Tuple(&[_]type{ i32, i32 }),
}){ .{ 1, 2 }, .{ 3, 4 }, .{ 5, 6 } }, &[_]Token{
.{ .Tuple = .{ .len = 3 } },
.{ .Tuple = .{ .len = 2 } },
.{ .I32 = 1 },
.{ .I32 = 2 },
.{ .TupleEnd = .{} },
.{ .Tuple = .{ .len = 2 } },
.{ .I32 = 3 },
.{ .I32 = 4 },
.{ .TupleEnd = .{} },
.{ .Tuple = .{ .len = 2 } },
.{ .I32 = 5 },
.{ .I32 = 6 },
.{ .TupleEnd = .{} },
.{ .TupleEnd = .{} },
});
}
test "void" {
try t({}, &[_]Token{.{ .Void = {} }});
}
/// This test function does not support recursive, user-defined containers such
/// as `std.ArrayList(std.ArrayList(u8))`.
fn t(expected: anytype, tokens: []const Token) !void {
const T = @TypeOf(expected);
var d = Deserializer.init(allocator, tokens);
const v = getty.deserialize(allocator, T, d.deserializer()) catch return error.TestUnexpectedError;
defer getty.de.free(allocator, v);
switch (@typeInfo(T)) {
.Bool,
.Float,
.Int,
.Void,
//.Enum,
=> try expectEqual(expected, v),
.Array => |info| try expectEqualSlices(info.child, &expected, &v),
.Pointer => |info| switch (comptime std.meta.trait.isZigString(T)) {
true => try expectEqualStrings(expected, v),
false => switch (info.size) {
//.One => ,
.Slice => try expectEqualSlices(info.child, expected, v),
else => unreachable,
},
},
.Struct => |info| {
if (comptime std.mem.startsWith(u8, @typeName(T), "std.array_list")) {
try expectEqual(expected.capacity, v.capacity);
try expectEqualSlices(std.meta.Child(T.Slice), expected.items, v.items);
} else switch (info.is_tuple) {
true => {
const length = std.meta.fields(T).len;
comptime var i: usize = 0;
inline while (i < length) : (i += 1) {
try expectEqual(expected[i], v[i]);
}
},
false => try expectEqual(expected, v),
}
},
else => unreachable,
}
try expect(d.remaining() == 0);
} | src/tests/de/tests.zig |
const std = @import("std");
const os = std.os;
const mem = std.mem;
const net = std.net;
const testing = std.testing;
const assert = std.debug.assert;
// For testing slow and fast clients.
const delay_time = std.time.ns_per_ms * 0;
var bytes_sent: usize = 0;
var bytes_read: usize = 0;
test "socket sends small data amounts without blocking" {
const socket_path = try std.fmt.allocPrint(testing.allocator, "/tmp/poll.socket", .{});
defer testing.allocator.free(socket_path);
std.fs.deleteFileAbsolute(socket_path) catch |err| switch (err) {
error.FileNotFound => {},
else => return err,
};
const address = try testing.allocator.create(net.Address);
defer testing.allocator.destroy(address);
address.* = try net.Address.initUnix(socket_path);
const pid = try os.fork();
if (pid == 0) {
// Client
const message = try std.fmt.allocPrint(testing.allocator, "hello from client!", .{});
defer testing.allocator.free(message);
var buf: [6000]u8 = undefined;
std.time.sleep(delay_time);
const client_socket = try os.socket(
os.AF_UNIX,
os.SOCK_SEQPACKET | os.SOCK_CLOEXEC,
os.PF_UNIX,
);
defer os.closeSocket(client_socket);
try os.connect(
client_socket,
@ptrCast(*os.sockaddr, &address.un),
@sizeOf(@TypeOf(address.un)),
);
bytes_read = try os.recv(client_socket, &buf, 0);
std.debug.print("received on client: {d} bytes\n", .{bytes_read});
std.time.sleep(delay_time);
bytes_read = try os.recv(client_socket, &buf, 0);
std.debug.print("received on client: {d} bytes\n", .{bytes_read});
std.time.sleep(delay_time);
bytes_read = try os.recv(client_socket, &buf, 0);
std.debug.print("received on client: {d} bytes\n", .{bytes_read});
std.time.sleep(delay_time);
} else {
// Server
const a5000_const = [_]u8{'a'} ** 5000;
const a5000 = try std.fmt.allocPrint(testing.allocator, &a5000_const, .{});
defer testing.allocator.free(a5000);
const socket = try os.socket(
os.AF_UNIX,
os.SOCK_SEQPACKET | os.SOCK_CLOEXEC,
os.PF_UNIX,
);
try os.bind(socket, @ptrCast(*os.sockaddr, &address.un), @sizeOf(@TypeOf(address.un)));
try os.listen(socket, 10);
const client_socket = try os.accept(socket, null, null, os.SOCK_CLOEXEC);
defer os.closeSocket(client_socket);
const socket_abstr = std.x.os.Socket.from(client_socket);
std.debug.print(
"\nInitial socket write buffer size: {d}\n",
.{try socket_abstr.getWriteBufferSize()},
);
bytes_sent = try os.send(client_socket, a5000, os.MSG_EOR | os.MSG_DONTWAIT);
assert(a5000.len == bytes_sent);
try socket_abstr.setWriteBufferSize(5000);
bytes_sent = try os.send(client_socket, a5000, os.MSG_EOR | os.MSG_DONTWAIT);
assert(a5000.len == bytes_sent);
try socket_abstr.setWriteBufferSize(4000);
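        // With the send buffer now smaller than the 5000-byte message, the
        // non-blocking send cannot complete and should fail with WouldBlock.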
try std.testing.expectError(
error.WouldBlock,
os.send(client_socket, a5000, os.MSG_EOR | os.MSG_DONTWAIT),
);
}
} | poc/nonblocking_io.zig |
const std = @import("std");
const stdx = @import("stdx");
const Vec3 = stdx.math.Vec3;
const ma = @import("miniaudio");
const log = stdx.log.scoped(.audio);
pub const AudioEngine = struct {
const Self = @This();
engine: ma.ma_engine,
pub fn init(self: *Self) void {
const res = ma.ma_engine_init(null, &self.engine);
ma.assertNoError(res);
}
pub fn deinit(self: *Self) void {
ma.ma_engine_uninit(&self.engine);
}
    /// Data is duped so it can be managed.
pub fn createSound(self: *Self, alloc: std.mem.Allocator, encoding: Sound.Encoding, data: []const u8) !*Sound {
const new = alloc.create(Sound) catch unreachable;
new.* = .{
.sound = undefined,
.decoder = undefined,
.data = alloc.dupe(u8, data) catch unreachable,
};
errdefer {
alloc.free(new.data);
alloc.destroy(new);
}
var config = ma.ma_decoder_config_init_default();
config.encodingFormat = switch (encoding) {
.Wav => ma.ma_encoding_format_wav,
.Mp3 => ma.ma_encoding_format_mp3,
.Flac => ma.ma_encoding_format_flac,
.Ogg => ma.ma_encoding_format_vorbis,
// ma will try to detect the encoding.
.Unknown => ma.ma_encoding_format_unknown,
};
var res = ma.ma_decoder_init_memory(new.data.ptr, new.data.len, &config, &new.decoder);
if (res != ma.MA_SUCCESS) {
switch (res) {
ma.MA_NO_BACKEND => return error.InvalidFormat,
else => ma.assertNoError(res),
}
}
res = ma.ma_sound_init_from_data_source(&self.engine, &new.decoder, 0, null, &new.sound);
ma.assertNoError(res);
return new;
}
pub fn getNumListeners(self: Self) u32 {
return ma.ma_engine_get_listener_count(&self.engine);
}
pub fn isListenerEnabled(self: Self, idx: u32) bool {
return ma.ma_engine_listener_is_enabled(&self.engine, idx) == 1;
}
pub fn setListenerEnabled(self: *Self, idx: u32, enabled: bool) void {
ma.ma_engine_listener_set_enabled(&self.engine, idx, if (enabled) 1 else 0);
}
pub fn getListenerPosition(self: Self, idx: u32) Vec3 {
const res = ma.ma_engine_listener_get_position(&self.engine, idx);
return .{ .x = res.x, .y = res.y, .z = res.z };
}
pub fn setListenerPosition(self: *Self, idx: u32, pos: Vec3) void {
ma.ma_engine_listener_set_position(&self.engine, idx, pos.x, pos.y, pos.z);
}
pub fn getListenerDirection(self: Self, idx: u32) Vec3 {
const res = ma.ma_engine_listener_get_direction(&self.engine, idx);
return .{ .x = res.x, .y = res.y, .z = res.z };
}
pub fn setListenerDirection(self: *Self, idx: u32, dir: Vec3) void {
ma.ma_engine_listener_set_direction(&self.engine, idx, dir.x, dir.y, dir.z);
}
pub fn getListenerWorldUp(self: Self, idx: u32) Vec3 {
const res = ma.ma_engine_listener_get_world_up(&self.engine, idx);
return .{ .x = res.x, .y = res.y, .z = res.z };
}
pub fn setListenerWorldUp(self: *Self, idx: u32, dir: Vec3) void {
ma.ma_engine_listener_set_world_up(&self.engine, idx, dir.x, dir.y, dir.z);
}
pub fn getListenerVelocity(self: Self, idx: u32) Vec3 {
const res = ma.ma_engine_listener_get_velocity(&self.engine, idx);
return .{ .x = res.x, .y = res.y, .z = res.z };
}
pub fn setListenerVelocity(self: *Self, idx: u32, vel: Vec3) void {
ma.ma_engine_listener_set_velocity(&self.engine, idx, vel.x, vel.y, vel.z);
}
};
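// Minimal usage sketch (illustrative, not from the original file); `alloc` and
// `wav_bytes` are assumed to be an allocator and a complete WAV file in memory:
//
//   var engine: AudioEngine = undefined;
//   engine.init();
//   defer engine.deinit();
//   const snd = try engine.createSound(alloc, .Wav, wav_bytes);
//   defer {
//       snd.deinit(alloc);
//       alloc.destroy(snd);
//   }
//   snd.play();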
pub const Sound = struct {
const Self = @This();
pub const Encoding = enum {
Wav,
Mp3,
Flac,
Ogg,
Unknown,
};
sound: ma.ma_sound,
decoder: ma.ma_decoder,
data: []const u8,
pub fn deinit(self: *Self, alloc: std.mem.Allocator) void {
ma.ma_sound_uninit(&self.sound);
const res = ma.ma_decoder_uninit(&self.decoder);
ma.assertNoError(res);
alloc.free(self.data);
}
pub fn play(self: *Self) void {
ma.ma_sound_set_looping(&self.sound, 0);
const res = ma.ma_sound_start(&self.sound);
ma.assertNoError(res);
while (true) {
if (ma.ma_sound_is_playing(&self.sound) == 0) {
break;
}
std.time.sleep(30e6);
}
std.debug.assert(ma.ma_sound_at_end(&self.sound) == 1);
}
pub fn playBg(self: *Self) void {
ma.ma_sound_set_looping(&self.sound, 0);
if (self.isPlayingBg()) {
self.seekToPcmFrame(0);
}
const res = ma.ma_sound_start(&self.sound);
ma.assertNoError(res);
}
pub fn isPlayingBg(self: Self) bool {
return ma.ma_sound_is_playing(&self.sound) == 1;
}
pub fn loopBg(self: *Self) void {
ma.ma_sound_set_looping(&self.sound, 1);
if (self.isPlayingBg()) {
self.seekToPcmFrame(0);
}
const res = ma.ma_sound_start(&self.sound);
ma.assertNoError(res);
}
pub fn isLoopingBg(self: Self) bool {
return ma.ma_sound_is_looping(&self.sound) == 1;
}
pub fn pauseBg(self: *Self) void {
const res = ma.ma_sound_stop(&self.sound);
ma.assertNoError(res);
}
pub fn resumeBg(self: *Self) void {
const res = ma.ma_sound_start(&self.sound);
ma.assertNoError(res);
}
pub fn stopBg(self: *Self) void {
const res = ma.ma_sound_stop(&self.sound);
ma.assertNoError(res);
self.seekToPcmFrame(0);
}
pub fn setVolume(self: *Self, volume: f32) void {
ma.ma_sound_set_volume(&self.sound, volume);
}
pub fn getVolume(self: Self) f32 {
return ma.ma_sound_get_volume(&self.sound);
}
pub fn setGain(self: *Self, gain: f32) void {
const volume = ma.ma_volume_db_to_linear(gain);
ma.ma_sound_set_volume(&self.sound, volume);
}
pub fn getGain(self: Self) f32 {
const volume = ma.ma_sound_get_volume(&self.sound);
return ma.ma_volume_linear_to_db(volume);
}
/// Value must be greater than 0.
pub fn setPitch(self: *Self, pitch: f32) void {
ma.ma_sound_set_pitch(&self.sound, pitch);
}
pub fn getPitch(self: Self) f32 {
return ma.ma_sound_get_pitch(&self.sound);
}
/// -1 (stereo left) to 1 (stereo right). Middle is 0.
pub fn setPan(self: *Self, pan: f32) void {
ma.ma_sound_set_pan(&self.sound, pan);
}
pub fn getPan(self: Self) f32 {
return ma.ma_sound_get_pan(&self.sound);
}
pub fn getLengthInPcmFrames(self: *Self) !u64 {
var length: c_ulonglong = undefined;
const res = ma.ma_sound_get_length_in_pcm_frames(&self.sound, &length);
ma.assertNoError(res);
if (length == 0) {
return error.Unsupported;
}
return length;
}
pub fn getDataFormat(self: *Self) DataFormat {
var format: ma.ma_format = undefined;
var channels: u32 = undefined;
var sample_rate: u32 = undefined;
const res = ma.ma_sound_get_data_format(&self.sound, &format, &channels, &sample_rate, null, 0);
ma.assertNoError(res);
return .{
.format = @intToEnum(Format, format),
.channels = channels,
.sample_rate = sample_rate,
};
}
/// Returns length in milliseconds.
pub fn getLength(self: *Self) !u64 {
const format = self.getDataFormat();
const len = try self.getLengthInPcmFrames();
return @floatToInt(u64, @ceil(@intToFloat(f64, len) / @intToFloat(f64, format.sample_rate) * 1000));
}
pub fn getCursorPcmFrame(self: *Self) u64 {
var cursor: c_ulonglong = undefined;
const res = ma.ma_sound_get_cursor_in_pcm_frames(&self.sound, &cursor);
ma.assertNoError(res);
return cursor;
}
pub fn seekToPcmFrame(self: *Self, frame_index: u64) void {
// Since the data source is not managed by the ma_sound,
// we should invoke ma_data_source_seek_to_pcm_frame instead of ma_sound_seek_to_pcm_frame.
const res = ma.ma_data_source_seek_to_pcm_frame(&self.decoder, frame_index);
ma.assertNoError(res);
}
pub fn setPosition(self: *Self, x: f32, y: f32, z: f32) void {
ma.ma_sound_set_position(&self.sound, x, y, z);
}
pub fn getPosition(self: Self) Vec3 {
const res = ma.ma_sound_get_position(&self.sound);
return .{ .x = res.x, .y = res.y, .z = res.z };
}
pub fn setDirection(self: *Self, x: f32, y: f32, z: f32) void {
ma.ma_sound_set_direction(&self.sound, x, y, z);
}
pub fn getDirection(self: Self) Vec3 {
const res = ma.ma_sound_get_direction(&self.sound);
return .{ .x = res.x, .y = res.y, .z = res.z };
}
pub fn setVelocity(self: *Self, x: f32, y: f32, z: f32) void {
ma.ma_sound_set_velocity(&self.sound, x, y, z);
}
pub fn getVelocity(self: Self) Vec3 {
const res = ma.ma_sound_get_velocity(&self.sound);
return .{ .x = res.x, .y = res.y, .z = res.z };
}
};
const DataFormat = struct {
format: Format,
channels: u32,
sample_rate: u32,
};
const Format = enum(u32) {
Unknown = ma.ma_format_unknown,
U8 = ma.ma_format_u8,
S16 = ma.ma_format_s16,
S24 = ma.ma_format_s24,
S32 = ma.ma_format_s32,
F32 = ma.ma_format_f32,
}; | runtime/audio.zig |
const std = @import("std");
const uv = @import("uv");
const ssl = @import("openssl");
const c = @cImport({
@cDefine("H2O_USE_LIBUV", "1");
@cInclude("h2o.h");
@cInclude("h2o/http1.h");
@cInclude("h2o/http2.h");
});
// pub usingnamespace c;
const h2o_loop = uv.uv_loop_t;
pub extern fn h2o_config_init(config: *h2o_globalconf) void;
pub extern fn h2o_config_dispose(config: *h2o_globalconf) void;
pub extern fn h2o_config_register_host(config: *h2o_globalconf, host: h2o_iovec_t, port: u16) *h2o_hostconf;
pub extern fn h2o_config_register_path(hostconf: [*c]h2o_hostconf, path: [*c]const u8, flags: c_int) [*c]h2o_pathconf;
pub extern fn h2o_create_handler(conf: [*c]h2o_pathconf, sz: usize) ?*h2o_handler;
pub extern fn h2o_context_init(context: *h2o_context, loop: *h2o_loop, config: [*c]h2o_globalconf) void;
pub extern fn h2o_context_dispose(context: *h2o_context) void;
pub extern fn h2o_context_request_shutdown(context: *h2o_context) void;
pub extern fn h2o_accept(ctx: [*c]h2o_accept_ctx, sock: *h2o_socket) void;
pub extern fn h2o_add_header(pool: *c.h2o_mem_pool_t, headers: *h2o_headers, token: *const h2o_token, orig_name: [*c]const u8, value: [*c]const u8, value_len: usize) isize;
pub extern fn h2o_set_header(pool: *c.h2o_mem_pool_t, headers: *h2o_headers, token: *const h2o_token, value: [*c]const u8, value_len: usize, overwrite_if_exists: c_int) isize;
pub extern fn h2o_set_header_by_str(pool: *c.h2o_mem_pool_t, headers: *h2o_headers, lowercase_name: [*c]const u8, lowercase_name_len: usize, maybe_token: c_int, value: [*c]const u8, value_len: usize, overwrite_if_exists: c_int) isize;
pub extern fn h2o_start_response(req: ?*h2o_req, generator: [*c]c.h2o_generator_t) void;
pub extern fn h2o_strdup(pool: *c.h2o_mem_pool_t, s: [*c]const u8, len: usize) c.h2o_iovec_t;
pub extern fn h2o_send(req: ?*h2o_req, bufs: [*c]c.h2o_iovec_t, bufcnt: usize, state: c.h2o_send_state_t) void;
pub extern fn h2o_uv_socket_create(handle: *uv.uv_handle_t, close_cb: uv.uv_close_cb) ?*h2o_socket;
pub extern fn h2o_ssl_register_alpn_protocols(ctx: *ssl.SSL_CTX, protocols: [*c]const h2o_iovec_t) void;
pub extern fn h2o_access_log_open_handle(path: [*c]const u8, fmt: [*c]const u8, escape: c_int) ?*c.h2o_access_log_filehandle_t;
pub extern fn h2o_access_log_register(pathconf: [*c]h2o_pathconf, handle: ?*anyopaque) [*c]*anyopaque;
pub extern fn h2o_timer_unlink(timer: *c.h2o_timer_t) void;
pub const H2O_LOGCONF_ESCAPE_APACHE = c.H2O_LOGCONF_ESCAPE_APACHE;
pub const H2O_LOGCONF_ESCAPE_JSON = c.H2O_LOGCONF_ESCAPE_JSON;
// Includes just http2
pub extern fn h2o_get_http2_alpn_protocols() [*c]const h2o_iovec_t;
// Includes http2 and http1
pub extern fn h2o_get_alpn_protocols() [*c]const h2o_iovec_t;
pub extern fn h2o_globalconf_size() usize;
pub extern fn h2o_hostconf_size() usize;
pub extern fn h2o_context_size() usize;
pub extern fn h2o_accept_ctx_size() usize;
pub extern fn h2o_httpclient_ctx_size() usize;
pub extern fn h2o_socket_size() usize;
pub extern const h2o__tokens: [100]h2o_token;
pub var H2O_TOKEN_CONTENT_TYPE: *const h2o_token = undefined;
pub fn init() void {
// Initialize constants.
H2O_TOKEN_CONTENT_TYPE = &h2o__tokens[31];
// Verify struct sizes.
// std.debug.print("sizes {} {}\n", .{ h2o_httpclient_ctx_size(), @sizeOf(h2o_httpclient_ctx) });
std.debug.assert(h2o_globalconf_size() == @sizeOf(h2o_globalconf));
std.debug.assert(h2o_hostconf_size() == @sizeOf(h2o_hostconf));
std.debug.assert(h2o_httpclient_ctx_size() == @sizeOf(h2o_httpclient_ctx));
std.debug.assert(h2o_context_size() == @sizeOf(h2o_context));
std.debug.assert(h2o_accept_ctx_size() == @sizeOf(h2o_accept_ctx));
std.debug.assert(h2o_socket_size() == @sizeOf(h2o_socket));
}
// Send states.
pub const H2O_SEND_STATE_IN_PROGRESS = c.H2O_SEND_STATE_IN_PROGRESS;
pub const H2O_SEND_STATE_FINAL = c.H2O_SEND_STATE_FINAL; // Indicates eof.
pub const H2O_SEND_STATE_ERROR = c.H2O_SEND_STATE_ERROR;
pub const h2o_generator_t = c.h2o_generator_t;
pub fn h2o_iovec_init(slice: []const u8) h2o_iovec_t {
return .{
.base = @ptrToInt(slice.ptr),
.len = slice.len,
};
}
pub const h2o_iovec_t = c.h2o_iovec_t;
pub const h2o_req_t = c.h2o_req_t;
pub const uv_loop_t = c.uv_loop_t;
pub const uv_loop_init = c.uv_loop_init;
pub const uv_tcp_t = c.uv_tcp_t;
pub const uv_accept = c.uv_accept;
pub const uv_close_cb = c.uv_close_cb;
pub const free = c.free;
pub const uv_tcp_init = c.uv_tcp_init;
pub const sockaddr_in = c.sockaddr_in;
pub const sockaddr = c.sockaddr;
pub const uv_ip4_addr = c.uv_ip4_addr;
pub const uv_tcp_bind = c.uv_tcp_bind;
pub const uv_strerror = c.uv_strerror;
pub const uv_close = c.uv_close;
pub const uv_handle_t = c.uv_handle_t;
pub const uv_listen = c.uv_listen;
pub const uv_stream_t = c.uv_stream_t;
pub const uv_run = c.uv_run;
pub const UV_RUN_DEFAULT = c.UV_RUN_DEFAULT;
pub const h2o_socket_t = c.h2o_socket_t;
pub const h2o_mem_alloc = c.h2o_mem_alloc;
// https://github.com/ziglang/zig/issues/1499
// Declare structs as needed if they contain bit fields.
pub const h2o_globalconf = extern struct {
hosts: [*c][*c]h2o_hostconf,
fallback_host: [*c]h2o_hostconf,
configurators: c.h2o_linklist_t,
server_name: c.h2o_iovec_t,
max_request_entity_size: usize,
max_delegations: c_uint,
user: [*c]u8,
usdt_selective_tracing: c_int,
handshake_timeout: u64,
http1: struct_unnamed_189,
http2: struct_unnamed_190,
http3: struct_unnamed_191,
proxy: struct_unnamed_192,
send_informational_mode: c.h2o_send_informational_mode_t,
mimemap: ?*c.h2o_mimemap_t,
filecache: struct_unnamed_193,
statuses: h2o_status_callbacks,
_num_config_slots: usize,
};
pub const h2o_hostconf = extern struct {
global: [*c]h2o_globalconf,
authority: struct_unnamed_172,
strict_match: u8,
paths: struct_unnamed_173,
fallback_path: h2o_pathconf,
mimemap: ?*c.h2o_mimemap_t,
http2: struct_unnamed_188,
};
const struct_unnamed_188 = extern struct {
fields: packed struct {
/// whether if blocking assets being pulled should be given highest priority in case of clients that do not implement
/// dependency-based prioritization
reprioritize_blocking_assets: bool,
/// if server push should be used
push_preload: bool,
/// if cross origin pushes should be authorized
allow_cross_origin_push: bool,
},
/// casper settings
capser: c.h2o_casper_conf_t
};
const struct_unnamed_172 = extern struct {
hostport: c.h2o_iovec_t,
host: c.h2o_iovec_t,
port: u16,
};
const struct_unnamed_173 = extern struct {
entries: [*c][*c]h2o_pathconf,
size: usize,
capacity: usize,
};
const struct_unnamed_174 = extern struct {
entries: [*c]?*h2o_handler,
size: usize,
capacity: usize,
};
const struct_unnamed_185 = extern struct {
entries: [*c][*c]h2o_filter,
size: usize,
capacity: usize,
};
const struct_unnamed_186 = extern struct {
entries: [*c][*c]h2o_logger,
size: usize,
capacity: usize,
};
const struct_unnamed_187 = extern struct {
fields: packed struct {
/// if request-level errors should be emitted to stderr
emit_request_errors: bool,
},
};
pub const h2o_pathconf = extern struct {
global: [*c]h2o_globalconf,
path: c.h2o_iovec_t,
handlers: struct_unnamed_174,
_filters: struct_unnamed_185,
_loggers: struct_unnamed_186,
mimemap: ?*c.h2o_mimemap_t,
env: [*c]c.h2o_envconf_t,
error_log: struct_unnamed_187,
};
const h2o_filter = extern struct {
_config_slot: usize,
on_context_init: ?fn ([*c]h2o_filter, [*c]h2o_context) callconv(.C) void,
on_context_dispose: ?fn ([*c]h2o_filter, [*c]h2o_context) callconv(.C) void,
dispose: ?fn ([*c]h2o_filter) callconv(.C) void,
on_setup_ostream: ?fn ([*c]h2o_filter, ?*c.h2o_req_t, [*c][*c]c.h2o_ostream_t) callconv(.C) void,
on_informational: ?fn ([*c]h2o_filter, ?*c.h2o_req_t) callconv(.C) void,
};
pub const h2o_context = extern struct {
loop: *h2o_loop,
globalconf: *h2o_globalconf,
queue: ?*c.h2o_multithread_queue_t,
receivers: struct_unnamed_194,
filecache: ?*c.h2o_filecache_t,
storage: c.h2o_context_storage_t,
shutdown_requested: c_int,
http1: struct_unnamed_195,
http2: struct_unnamed_197,
http3: struct_unnamed_199,
proxy: struct_unnamed_201,
ssl: struct_unnamed_202,
quic: c.struct_st_h2o_quic_aggregated_stats_t,
_module_configs: [*c]?*anyopaque,
_timestamp_cache: struct_unnamed_206,
emitted_error_status: [10]u64,
_pathconfs_inited: struct_unnamed_207,
};
const struct_unnamed_194 = extern struct {
hostinfo_getaddr: c.h2o_multithread_receiver_t,
};
const struct_unnamed_196 = extern struct {
request_timeouts: u64,
request_io_timeouts: u64,
};
const struct_unnamed_195 = extern struct {
_conns: c.h2o_linklist_t,
events: struct_unnamed_196,
};
const struct_unnamed_197 = extern struct {
_conns: c.h2o_linklist_t,
_graceful_shutdown_timeout: c.h2o_timer_t,
events: struct_unnamed_198,
};
const struct_unnamed_198 = extern struct {
protocol_level_errors: [13]u64,
read_closed: u64,
write_closed: u64,
idle_timeouts: u64,
streaming_requests: u64,
};
const struct_unnamed_201 = extern struct {
client_ctx: h2o_httpclient_ctx,
connpool: c.h2o_httpclient_connection_pool_t,
};
const struct_unnamed_202 = extern struct {
errors: u64,
alpn_h1: u64,
alpn_h2: u64,
handshake_full: u64,
handshake_resume: u64,
handshake_accum_time_full: u64,
handshake_accum_time_resume: u64,
};
const struct_unnamed_206 = extern struct {
tv_at: c.struct_timeval,
value: [*c]c.h2o_timestamp_string_t,
};
const struct_unnamed_207 = extern struct {
entries: [*c][*c]h2o_pathconf,
size: usize,
capacity: usize,
};
const struct_unnamed_200 = extern struct {
packet_forwarded: u64,
forwarded_packet_received: u64,
};
const struct_unnamed_199 = extern struct {
_conns: c.h2o_linklist_t,
_graceful_shutdown_timeout: c.h2o_timer_t,
events: struct_unnamed_200,
};
const h2o_httpclient_ctx = extern struct {
loop: *c.h2o_loop_t,
getaddr_receiver: *c.h2o_multithread_receiver_t,
io_timeout: u64,
connect_timeout: u64,
first_byte_timeout: u64,
keepalive_timeout: u64, // only used for http2 for now
max_buffer_size: usize,
fields: packed struct {
tunnel_enabled: bool,
force_cleartext_http2: bool,
},
protocol_selector: extern struct {
ratio: c.h2o_httpclient_protocol_ratio_t,
/// Each deficit is initialized to zero, then incremented by the respective percentage, and the protocol corresponding to the
/// one with the highest value is chosen. Then, the chosen variable is decremented by 100. (A minimal sketch of this selection
/// step follows this struct.)
_deficits: [4]i16,
},
/// HTTP/2-specific settings
http2: extern struct {
latency_optimization: c.h2o_socket_latency_optimization_conditions_t,
max_concurrent_streams: u32,
},
/// HTTP/3-specific settings; 1-to(0|1) relationship, NULL when h3 is not used
http3: *h2o_http3client_ctx,
};
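// A minimal sketch (not part of the h2o bindings above) of the deficit-based
// protocol selection described for `protocol_selector`: each ratio percentage
// is added to its deficit, the protocol with the highest deficit is chosen,
// and the winner's deficit is then decremented by 100. The function and
// parameter names are illustrative assumptions, not h2o API.
fn pickProtocolByDeficit(deficits: *[4]i16, ratios: [4]i16) usize {
    var best: usize = 0;
    var i: usize = 0;
    while (i < ratios.len) : (i += 1) {
        deficits[i] += ratios[i];
        if (deficits[i] > deficits[best]) best = i;
    }
    deficits[best] -= 100;
    return best;
}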
const h2o_http3client_ctx = extern struct {
tls: ptls_context,
quic: quicly_context,
h3: h2o_quic_ctx,
load_session: ?fn (?*h2o_httpclient_ctx, [*c]c.struct_sockaddr, [*c]const u8, [*c]c.ptls_iovec_t, [*c]c.ptls_iovec_t, ?*quicly_transport_parameters) callconv(.C) c_int,
};
const ptls_context = extern struct {
/// PRNG to be used
random_bytes: ?fn (buf: ?*anyopaque, len: usize) callconv (.C) void,
get_time: *c.ptls_get_time_t,
/// list of supported key-exchange algorithms terminated by NULL
key_exchanges: **c.ptls_key_exchange_algorithm_t,
/// list of supported cipher-suites terminated by NULL
cipher_suites: **c.ptls_cipher_suite_t,
/// list of certificates
certificates: extern struct {
list: *c.ptls_iovec_t,
count: usize,
},
/// list of ESNI data terminated by NULL
esni: **c.ptls_esni_context_t,
on_client_hello: *c.ptls_on_client_hello_t,
emit_certificate: *c.ptls_emit_certificate_t,
sign_certificate: *c.ptls_sign_certificate_t,
verify_certificate: *c.ptls_verify_certificate_t,
/// lifetime of a session ticket (server-only)
ticket_lifetime: u32,
/// maximum permitted size of early data (server-only)
max_early_data_size: u32,
/// maximum size of the message buffer (default: 0 = unlimited = 3 + 2^24 bytes)
max_buffer_size: usize,
/// the field is obsolete; should be set to NULL for QUIC draft-17. Note also that even though everybody did, it was incorrect
/// to set the value to "quic " in the earlier versions of the draft.
hkdf_label_prefix__obsolete: [*c]const u8,
fields: packed struct {
/// if set, psk handshakes use (ec)dhe
require_dhe_on_psk: bool,
/// if exporter master secrets should be recorded
use_exporter: bool,
/// if ChangeCipherSpec record should be sent during handshake. If the client sends CCS, the server sends one in response
/// regardless of the value of this flag. See RFC 8446 Appendix D.3.
send_change_cipher_spec: bool,
/// if set, the server requests client certificates
/// to authenticate the client.
require_client_authentication: bool,
/// if set, EOED will not be emitted or accepted
omit_end_of_early_data: bool,
/// This option turns on support for Raw Public Keys (RFC 7250).
///
/// When running as a client, this option instructs the client to request the server to send raw public keys in place of X.509
/// certificate chain. The client should set its `certificate_verify` callback to one that is capable of validating the raw
/// public key that will be sent by the server.
///
/// When running as a server, this option instructs the server to only handle clients requesting the use of raw public keys. If
/// the client does not, the handshake is rejected. Note however that the rejection happens only after the `on_client_hello`
/// callback is being called. Therefore, applications can support both X.509 and raw public keys by swapping `ptls_context_t` to
/// the correct one when that callback is being called (like handling swapping the contexts based on the value of SNI).
use_raw_public_keys: bool,
/// boolean indicating if the cipher-suite should be chosen based on server's preference
server_cipher_preference: bool,
},
encrypt_ticket: *c.ptls_encrypt_ticket_t,
save_ticket: *c.ptls_save_ticket_t,
log_event: *c.ptls_log_event_t,
update_open_count: *c.ptls_update_open_count_t,
update_traffic_key: *c.ptls_update_traffic_key_t,
decompress_certificate: *c.ptls_decompress_certificate_t,
update_esni_key: *c.ptls_update_esni_key_t,
on_extension: *c.ptls_on_extension_t,
};
const quicly_context = extern struct {
/// tls context to use
tls: *ptls_context,
/// Maximum size of packets that we are willing to send when path-specific information is unavailable. As a path-specific
/// optimization, quicly acting as a server expands this value to `min(local.tp.max_udp_payload_size,
/// remote.tp.max_udp_payload_size, max_size_of_incoming_datagrams)` when it receives the Transport Parameters from the client.
initial_egress_max_udp_payload_size: u16,
/// loss detection parameters
loss: c.quicly_loss_conf_t,
/// transport parameters
transport_params: quicly_transport_parameters,
/// number of packets that can be sent without a key update
max_packets_per_key: u64,
/// maximum number of bytes that can be transmitted on a CRYPTO stream (per each epoch)
max_crypto_bytes: u64,
/// initial CWND in terms of packet numbers
initcwnd_packets: u32,
/// (client-only) Initial QUIC protocol version used by the client. Setting this to a greased version will enforce version
/// negotiation.
initial_version: u32,
/// (server-only) amplification limit before the peer address is validated
pre_validation_amplification_limit: u16,
/// How frequently the endpoint should induce ACKs from the peer, expressed as a fraction of RTT (or CWND) multiplied by 1024.
/// For example, 128 requests the peer to send one ACK every 1/8 RTT (or CWND); 0 disables the use of the delayed-ack extension.
/// (See the conversion sketch after this struct.)
ack_frequency: u16,
fields: packed struct {
/// expand client hello so that it does not fit into one datagram
expand_client_hello: bool,
},
cid_encryptor: *c.quicly_cid_encryptor_t,
/// callback called when a new stream is opened by remote peer
stream_open: *c.quicly_stream_open_t,
/// callbacks for scheduling stream data
stream_scheduler: *c.quicly_stream_scheduler_t,
/// callback for receiving datagram frame
receive_datagram_frame: *c.quicly_receive_datagram_frame_t,
/// callback called when a connection is closed by remote peer
closed_by_remote: *c.quicly_closed_by_remote_t,
/// returns current time in milliseconds
now: *c.quicly_now_t,
/// called when a NEW_TOKEN token is being received
save_resumption_token: *c.quicly_save_resumption_token_t,
generate_resumption_token: *quicly_generate_resumption_token_t,
/// crypto engine (offload API)
crypto_engine: *c.quicly_crypto_engine_t,
/// initializes a congestion controller for given connection.
init_cc: *quicly_init_cc,
/// optional refcount callback
update_open_count: *c.quicly_update_open_count_t,
};
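// Rough sketch (assumed helper, not part of quicly) of the fixed-point unit
// used by `ack_frequency` above: the configured value is a fraction of RTT
// (or CWND) multiplied by 1024, so a desired 1/8 RTT maps to 128, and 0
// disables the delayed-ack extension.
fn ackFrequencyFromFraction(fraction: f64) u16 {
    return @floatToInt(u16, fraction * 1024.0);
}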
const quicly_generate_resumption_token_t = extern struct {
cb: ?fn ([*c]struct_st_quicly_generate_resumption_token_t, ?*c.quicly_conn_t, [*c]c.ptls_buffer_t, [*c]quicly_address_token_plaintext_t) callconv(.C) c_int,
};
pub const struct_st_quicly_generate_resumption_token_t = extern struct {
cb: ?fn ([*c]struct_st_quicly_generate_resumption_token_t, ?*c.quicly_conn_t, [*c]c.ptls_buffer_t, [*c]quicly_address_token_plaintext_t) callconv(.C) c_int,
};
const quicly_address_token_plaintext_t = extern struct {
stub: c_int,
};
/// Transport Parameters; the struct contains "configuration parameters", ODCID is managed separately
const quicly_transport_parameters = extern struct {
/// in octets
max_stream_data: c.quicly_max_stream_data_t,
/// in octets
max_data: u64,
/// in milliseconds
max_idle_timeout: u64,
max_streams_bidi: u64,
max_streams_uni: u64,
max_udp_payload_size: u64,
/// quicly ignores the value set for quicly_context_t::transport_parameters
ack_delay_exponent: u8,
/// in milliseconds; quicly ignores the value set for quicly_context_t::transport_parameters
max_ack_delay: u16,
/// Delayed-ack extension. UINT64_MAX indicates that the extension is disabled or that the peer does not support it. Any local
/// value other than UINT64_MAX indicates that the use of the extension should be negotiated.
min_ack_delay_usec: u64,
fields: packed struct {
disable_active_migration: bool,
},
active_connection_id_limit: u64,
max_datagram_frame_size: u16,
};
const quicly_sent_packet = extern struct {
packet_number: u64,
sent_at: i64,
/// epoch to be acked in
ack_epoch: u8,
fields: packed struct {
ack_eliciting: bool,
/// if the frames being contained are considered inflight (becomes zero when deemed lost or when PTO fires)
frames_in_flight: bool,
},
/// number of bytes in-flight for the packet, from the context of CC (becomes zero when deemed lost, but not when PTO fires)
cc_bytes_in_flight: u16,
};
const union_unnamed_111 = extern union {
packet: quicly_sent_packet,
ack: struct_unnamed_112,
stream: struct_unnamed_116,
max_stream_data: struct_unnamed_117,
max_data: struct_unnamed_118,
max_streams: struct_unnamed_119,
data_blocked: struct_unnamed_120,
stream_data_blocked: struct_unnamed_121,
streams_blocked: struct_unnamed_122,
stream_state_sender: struct_unnamed_123,
new_token: struct_unnamed_124,
new_connection_id: struct_unnamed_125,
retire_connection_id: struct_unnamed_126,
};
const quicly_sent_acked_cb = fn (map: *quicly_sentmap, packet: *const quicly_sent_packet, acked: c_int, data: *quicly_sent) callconv(.C) c_int;
const quicly_sent = extern struct {
acked: quicly_sent_acked_cb,
data: union_unnamed_111,
};
const quicly_sent_block = extern struct {
next: [*c]quicly_sent_block,
num_entries: usize,
next_insert_at: usize,
entries: [16]quicly_sent,
};
const quicly_sentmap = extern struct {
head: [*c]quicly_sent_block,
tail: [*c]quicly_sent_block,
num_packets: usize,
bytes_in_flight: usize,
_pending_packet: [*c]quicly_sent,
};
const quicly_loss = extern struct {
conf: [*c]const c.quicly_loss_conf_t,
max_ack_delay: [*c]const u16,
ack_delay_exponent: [*c]const u8,
pto_count: i8,
time_of_last_packet_sent: i64,
largest_acked_packet_plus1: [4]u64,
total_bytes_sent: u64,
loss_time: i64,
alarm_at: i64,
rtt: c.quicly_rtt_t,
sentmap: quicly_sentmap,
};
const quicly_cc_type = extern struct {
name: [*c]const u8,
cc_init: [*c]quicly_init_cc,
cc_on_acked: ?fn ([*c]quicly_cc, [*c]const quicly_loss, u32, u64, u32, u64, i64, u32) callconv(.C) void,
cc_on_lost: ?fn ([*c]quicly_cc, [*c]const quicly_loss, u32, u64, u64, i64, u32) callconv(.C) void,
cc_on_persistent_congestion: ?fn ([*c]quicly_cc, [*c]const quicly_loss, i64) callconv(.C) void,
cc_on_sent: ?fn ([*c]quicly_cc, [*c]const quicly_loss, u32, i64) callconv(.C) void,
cc_switch: ?fn ([*c]quicly_cc) callconv(.C) c_int,
};
const quicly_cc = extern struct {
type: [*c]const quicly_cc_type,
cwnd: u32,
ssthresh: u32,
recovery_end: u64,
state: union_unnamed_127,
cwnd_initial: u32,
cwnd_exiting_slow_start: u32,
cwnd_minimum: u32,
cwnd_maximum: u32,
num_loss_episodes: u32,
};
const quicly_init_cc = extern struct {
cb: ?fn ([*c]quicly_init_cc, [*c]quicly_cc, u32, i64) callconv(.C) void,
};
const struct_unnamed_114 = extern struct {
start_length: u64,
additional: [4]c.struct_st_quicly_sent_ack_additional_t,
};
const struct_unnamed_115 = extern struct {
start_length: u8,
additional: [7]c.struct_st_quicly_sent_ack_additional_t,
};
const union_unnamed_113 = extern union {
ranges64: struct_unnamed_114,
ranges8: struct_unnamed_115,
};
const struct_unnamed_112 = extern struct {
start: u64,
unnamed_0: union_unnamed_113,
};
const struct_unnamed_116 = extern struct {
stream_id: c.quicly_stream_id_t,
args: c.quicly_sendstate_sent_t,
};
const struct_unnamed_128 = extern struct {
stash: u32,
};
const struct_unnamed_129 = extern struct {
stash: u32,
bytes_per_mtu_increase: u32,
};
const struct_unnamed_130 = extern struct {
k: f64,
w_max: u32,
w_last_max: u32,
avoidance_start: i64,
last_sent_time: i64,
};
const union_unnamed_127 = extern union {
reno: struct_unnamed_128,
pico: struct_unnamed_129,
cubic: struct_unnamed_130,
};
const struct_unnamed_117 = extern struct {
stream_id: c.quicly_stream_id_t,
args: quicly_maxsender_sent,
};
const struct_unnamed_118 = extern struct {
args: quicly_maxsender_sent,
};
const struct_unnamed_119 = extern struct {
uni: c_int,
args: quicly_maxsender_sent,
};
const struct_unnamed_120 = extern struct {
offset: u64,
};
const struct_unnamed_121 = extern struct {
stream_id: c.quicly_stream_id_t,
offset: u64,
};
const struct_unnamed_122 = extern struct {
uni: c_int,
args: quicly_maxsender_sent,
};
const struct_unnamed_123 = extern struct {
stream_id: c.quicly_stream_id_t,
};
const struct_unnamed_124 = extern struct {
is_inflight: c_int,
generation: u64,
};
const struct_unnamed_125 = extern struct {
sequence: u64,
};
const struct_unnamed_126 = extern struct {
sequence: u64,
};
const quicly_maxsender_sent = extern struct {
fields: packed struct {
inflight: u1,
value: u63,
},
};
const quicly_cid_plaintext = extern struct {
/// the internal "connection ID" unique to each connection (rather than QUIC's CID being unique to each path)
master_id: u32,
fields: packed struct {
/// path ID of the connection; we issue up to 255 CIDs per connection (see QUICLY_MAX_PATH_ID)
path_id: u8,
/// for intra-node routing
thread_id: u24,
},
/// for inter-node routing; available only when using a 16-byte cipher to encrypt CIDs, otherwise set to zero.
node_id: u64,
};
const h2o_quic_ctx = extern struct {
loop: [*c]c.h2o_loop_t,
sock: struct_unnamed_154,
quic: ?*quicly_context,
next_cid: quicly_cid_plaintext,
conns_by_id: ?*c.struct_kh_h2o_quic_idmap_s,
conns_accepting: ?*c.struct_kh_h2o_quic_acceptmap_s,
notify_conn_update: h2o_quic_notify_connection_update_cb,
acceptor: h2o_quic_accept_cb,
accept_thread_divisor: u32,
forward_packets: h2o_quic_forward_packets_cb,
default_ttl: u8,
use_gso: u8,
preprocess_packet: h2o_quic_preprocess_packet_cb,
};
const struct_unnamed_154 = extern struct {
sock: ?*c.h2o_socket_t,
addr: c.struct_sockaddr_storage,
addrlen: c.socklen_t,
port: [*c]c.in_port_t,
};
const h2o_quic_notify_connection_update_cb = fn (ctx: *h2o_quic_ctx, conn: *h2o_quic_conn) callconv(.C) void;
const h2o_quic_conn = extern struct {
ctx: [*c]h2o_quic_ctx,
quic: ?*c.quicly_conn_t,
callbacks: [*c]const h2o_quic_conn_callbacks,
_timeout: c.h2o_timer_t,
_accept_hashkey: u64,
};
const h2o_quic_conn_callbacks = extern struct {
destroy_connection: ?fn ([*c]h2o_quic_conn) callconv(.C) void,
};
const h2o_quic_accept_cb = ?fn ([*c]h2o_quic_ctx, [*c]quicly_address_t, [*c]quicly_address_t, [*c]quicly_decoded_packet) callconv(.C) [*c]h2o_quic_conn;
const quicly_address_t = extern struct {
stub: c_int,
};
const quicly_decoded_packet = extern struct {
octets: c.ptls_iovec_t,
cid: struct_unnamed_149,
version: u32,
token: c.ptls_iovec_t,
encrypted_off: usize,
datagram_size: usize,
decrypted: struct_unnamed_151,
_is_stateless_reset_cached: enum_unnamed_152,
};
const struct_unnamed_149 = extern struct {
/// destination CID
dest: extern struct {
/// CID visible on wire
encrypted: c.ptls_iovec_t,
/// The decrypted CID, or `quicly_cid_plaintext_invalid`. Assuming that `cid_encryptor` is non-NULL, this variable would
/// contain a valid value whenever `might_be_client_generated` is false. When `might_be_client_generated` is true, this
/// value might be set to `quicly_cid_plaintext_invalid`. Note however that, as the CID itself is not authenticated,
/// a packet might be bogus regardless of the value of the CID.
/// When `cid_encryptor` is NULL, the value is always set to `quicly_cid_plaintext_invalid`.
plaintext: quicly_cid_plaintext,
/// If destination CID might be one generated by a client. This flag would be set for Initial and 0-RTT packets.
fields: packed struct {
might_be_client_generated: bool,
},
},
/// source CID; {NULL, 0} if it is a short header packet
src: c.ptls_iovec_t,
};
const struct_unnamed_151 = extern struct {
pn: u64,
key_phase: u64,
};
const enum_unnamed_152 = c_uint;
const h2o_quic_forward_packets_cb = ?fn ([*c]h2o_quic_ctx, [*c]const u64, u32, [*c]quicly_address_t, [*c]quicly_address_t, u8, [*c]quicly_decoded_packet, usize) callconv(.C) c_int;
const h2o_quic_preprocess_packet_cb = ?fn ([*c]h2o_quic_ctx, [*c]c.struct_msghdr, [*c]quicly_address_t, [*c]quicly_address_t, [*c]u8) callconv(.C) c_int;
const h2o_logger = extern struct {
_config_slot: usize,
on_context_init: ?fn ([*c]h2o_logger, [*c]h2o_context) callconv(.C) void,
on_context_dispose: ?fn ([*c]h2o_logger, [*c]h2o_context) callconv(.C) void,
dispose: ?fn ([*c]h2o_logger) callconv(.C) void,
log_access: ?fn ([*c]h2o_logger, ?*c.h2o_req_t) callconv(.C) void,
};
const struct_unnamed_189 = extern struct {
req_timeout: u64,
req_io_timeout: u64,
upgrade_to_http2: c_int,
callbacks: h2o_protocol_callbacks,
};
const struct_unnamed_190 = extern struct {
/// idle timeout (in milliseconds)
idle_timeout: u64,
/// graceful shutdown timeout (in milliseconds)
graceful_shutdown_timeout: u64,
/// maximum number of HTTP2 requests (per connection) to be handled simultaneously internally.
/// H2O accepts at most 256 requests over HTTP/2, but internally limits the number of in-flight requests to the value
/// specified by this property in order to limit the resources allocated to a single connection.
max_concurrent_requests_per_connection: usize,
/// maximum number of HTTP2 streaming requests (per connection) to be handled simultaneously internally.
max_concurrent_streaming_requests_per_connection: usize,
/// maximum number of streams (per connection) to be allowed in IDLE / CLOSED state (used for tracking dependencies).
max_streams_for_priority: usize,
/// size of the stream-level flow control window (once it becomes active)
active_stream_window_size: u32,
/// conditions for latency optimization
latency_optimization: c.h2o_socket_latency_optimization_conditions_t,
/// list of callbacks
callbacks: h2o_protocol_callbacks,
origin_frame: c.h2o_iovec_t,
};
const struct_unnamed_191 = extern struct {
/// idle timeout (in milliseconds)
idle_timeout: u64,
/// graceful shutdown timeout (in milliseconds)
graceful_shutdown_timeout: u64,
/// receive window size of the unblocked request stream
active_stream_window_size: u32,
/// See quicly_context_t::ack_frequency
ack_frequency: u16,
fields: packed struct {
/// a boolean indicating if the delayed ack extension should be used (default true)
allow_delayed_ack: bool,
/// a boolean indicating if UDP GSO should be used when possible
use_gso: bool,
padding: u6,
},
/// the callbacks
callbacks: h2o_protocol_callbacks,
};
const struct_unnamed_192 = extern struct {
/// io timeout (in milliseconds)
io_timeout: u64,
/// connect timeout (in milliseconds)
connect_timeout: u64,
/// first-byte timeout (in milliseconds)
first_byte_timeout: u64,
/// keepalive timeout (in milliseconds)
keepalive_timeout: u64,
fields: packed struct {
/// a boolean flag; if set to true, instructs the proxy to close the frontend h1 connection on behalf of the upstream
forward_close_connection: bool,
/// a boolean flag; if set to true, instructs the proxy to preserve the x-forwarded-proto header passed by the client
preserve_x_forwarded_proto: bool,
/// a boolean flag; if set to true, instructs the proxy to preserve the server header passed by the origin
preserve_server_header: bool,
/// a boolean flag; if set to true, instructs the proxy to emit x-forwarded-proto and x-forwarded-for headers
emit_x_forwarded_headers: bool,
/// a boolean flag; if set to true, instructs the proxy to emit a via header
emit_via_header: bool,
/// a boolean flag; if set to true, instructs the proxy to emit a date header if it's missing from the upstream response
emit_missing_date_header: bool,
padding: u26,
},
/// maximum size to buffer for the response
max_buffer_size: usize,
http2: extern struct {
max_concurrent_streams: u32,
},
/// See the documentation of `h2o_httpclient_t::protocol_selector.ratio`.
protocol_ratio: extern struct {
http2: i8,
http3: i8,
},
/// global socketpool
global_socket_pool: c.h2o_socketpool_t,
};
const h2o_protocol_callbacks = extern struct {
request_shutdown: ?fn ([*c]h2o_context) callconv(.C) void,
foreach_request: ?fn ([*c]h2o_context, ?fn (?*c.h2o_req_t, ?*anyopaque) callconv(.C) c_int, ?*anyopaque) callconv(.C) c_int,
};
const struct_unnamed_193 = extern struct {
capacity: usize,
};
const h2o_status_callbacks = extern struct {
entries: [*c][*c]const h2o_status_handler,
size: usize,
capacity: usize,
};
const h2o_status_handler = extern struct {
name: c.h2o_iovec_t,
final: ?fn (?*anyopaque, [*c]h2o_globalconf, ?*c.h2o_req_t) callconv(.C) c.h2o_iovec_t,
init: ?fn () callconv(.C) ?*anyopaque,
per_thread: ?fn (?*anyopaque, [*c]h2o_context) callconv(.C) void,
};
pub const h2o_accept_ctx = extern struct {
ctx: [*c]h2o_context,
hosts: [*c][*c]h2o_hostconf,
ssl_ctx: ?*ssl.SSL_CTX,
http2_origin_frame: [*c]c.h2o_iovec_t,
expect_proxy_line: c_int,
libmemcached_receiver: [*c]c.h2o_multithread_receiver_t,
};
/// basic structure of a handler (an object that MAY generate a response)
/// The handlers should register themselves to h2o_context_t::handlers.
pub const h2o_handler = extern struct {
_config_slot: usize,
on_context_init: ?fn(self: *h2o_handler, ctx: *h2o_context) callconv(.C) void,
on_context_dispose: ?fn(self: *h2o_handler, ctx: *h2o_context) callconv(.C) void,
dispose: ?fn(self: *h2o_handler) callconv(.C) void,
on_req: ?fn(self: *h2o_handler, req: *h2o_req) callconv(.C) c_int,
/// If the flag is set, the protocol handler may invoke the request handler before receiving the end of the request body. The
/// request handler can determine whether the protocol handler has actually done so by checking if `req->proceed_req` is set to
/// non-NULL. In that case, the handler should replace `req->write_req.cb` (and ctx) with its own callback to receive the request
/// body, bypassing the buffer of the protocol handler. The parts of the request body received before the handler replaces the
/// callback are accessible via `req->entity`.
/// The request handler can delay replacing the callback to a later moment. In that case, the handler can determine whether
/// `req->entity` already contains a complete request body by checking if `req->proceed_req` is NULL.
fields: packed struct {
supports_request_streaming: bool,
},
};
const h2o_conn_callbacks = extern struct {
get_sockname: ?fn ([*c]h2o_conn, [*c]c.struct_sockaddr) callconv(.C) c.socklen_t,
get_peername: ?fn ([*c]h2o_conn, [*c]c.struct_sockaddr) callconv(.C) c.socklen_t,
get_ptls: ?fn ([*c]h2o_conn) callconv(.C) ?*c.ptls_t,
skip_tracing: ?fn ([*c]h2o_conn) callconv(.C) c_int,
push_path: ?fn (?*h2o_req, [*c]const u8, usize, c_int) callconv(.C) void,
get_debug_state: ?fn (?*h2o_req, c_int) callconv(.C) [*c]c.h2o_http2_debug_state_t,
num_reqs_inflight: ?fn ([*c]h2o_conn) callconv(.C) u32,
get_tracer: ?fn ([*c]h2o_conn) callconv(.C) [*c]c.quicly_tracer_t,
get_rtt: ?fn ([*c]h2o_conn) callconv(.C) i64,
log_: union_unnamed_208,
};
const union_unnamed_208 = extern union {
unnamed_0: struct_unnamed_209,
callbacks: [1]?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
};
const struct_unnamed_211 = extern struct {
protocol_version: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
session_reused: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
cipher: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
cipher_bits: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
session_id: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
server_name: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
negotiated_protocol: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
};
const struct_unnamed_212 = extern struct {
request_index: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
};
const struct_unnamed_213 = extern struct {
stream_id: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
priority_received: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
priority_received_exclusive: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
priority_received_parent: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
priority_received_weight: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
priority_actual: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
priority_actual_parent: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
priority_actual_weight: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
};
const struct_unnamed_214 = extern struct {
stream_id: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
quic_stats: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
quic_version: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
};
const struct_unnamed_210 = extern struct {
cc_name: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
delivery_rate: ?fn (?*h2o_req) callconv(.C) c.h2o_iovec_t,
};
const struct_unnamed_209 = extern struct {
transport: struct_unnamed_210,
ssl: struct_unnamed_211,
http1: struct_unnamed_212,
http2: struct_unnamed_213,
http3: struct_unnamed_214,
};
pub const h2o_conn = extern struct {
ctx: [*c]h2o_context,
hosts: [*c][*c]h2o_hostconf,
connected_at: c.struct_timeval,
id: u64,
callbacks: [*c]const h2o_conn_callbacks,
_uuid: struct_unnamed_215,
};
const struct_unnamed_215 = extern struct {
str: [37]u8,
is_initialized: u8,
};
pub const h2o_headers = extern struct {
entries: [*c]h2o_header,
size: usize,
capacity: usize,
};
pub const h2o_header = extern struct {
name: [*c]c.h2o_iovec_t,
orig_name: [*c]const u8,
value: c.h2o_iovec_t,
flags: h2o_header_flags,
};
const h2o_header_flags = packed struct {
dont_compress: bool,
pad: u7,
};
const h2o_res = extern struct {
status: c_int,
reason: [*c]const u8,
content_length: usize,
headers: h2o_headers,
mime_attr: [*c]c.h2o_mime_attributes_t,
original: struct_unnamed_183,
};
const struct_unnamed_183 = extern struct {
status: c_int,
headers: h2o_headers,
};
/// a HTTP request
pub const h2o_req = struct {
/// the underlying connection
conn: *h2o_conn,
/// the request sent by the client (as is)
input: extern struct {
/// scheme (http, https, etc.)
scheme: *const c.h2o_url_scheme_t,
/// authority (a.k.a. the Host header; the value is supplemented if missing before the handlers are being called)
authority: c.h2o_iovec_t,
/// method
method: c.h2o_iovec_t,
/// abs-path of the request (unmodified)
path: c.h2o_iovec_t,
/// offset of '?' within path, or SIZE_MAX if not found
query_at: usize,
},
/// the host context
hostconf: *h2o_hostconf,
/// the path context
pathconf: *h2o_pathconf,
/// filters and the size of it
filters: **h2o_filter,
num_filters: usize,
/// loggers and the size of it
loggers: **h2o_logger,
num_loggers: usize,
/// the handler that has been executed
handler: *h2o_handler,
/// scheme (http, https, etc.)
scheme: *const c.h2o_url_scheme_t,
/// authority (of the processing request)
authority: c.h2o_iovec_t,
/// method (of the processing request)
method: c.h2o_iovec_t,
/// abs-path of the processing request
path: c.h2o_iovec_t,
/// offset of '?' within path, or SIZE_MAX if not found
query_at: usize,
/// normalized path of the processing request (i.e. no "." or "..", no query)
path_normalized: c.h2o_iovec_t,
/// Map of indexes of `path_normalized` into the next character in `path`; built only if `path` required normalization
norm_indexes: *usize,
/// authority's prefix matched with `*` against defined hosts
authority_wildcard_match: c.h2o_iovec_t,
/// filters assigned per request
prefilters: *c.h2o_req_prefilter_t,
/// additional information (becomes available for extension-based dynamic content)
filereq: *c.h2o_filereq_t,
/// overrides (maybe NULL)
overrides: *c.h2o_req_overrides_t,
/// the HTTP version, represented as 0xMMmm (M=major, m=minor); see the unpacking sketch after this struct
version: c_int,
/// list of request headers
headers: h2o_headers,
/// the request entity (base == NULL if none), can't be used if the handler is streaming the body
entity: c.h2o_iovec_t,
/// amount of request body being received
req_body_bytes_received: usize,
/// If different from SIZE_MAX, the numeric value of the received content-length header
content_length: usize,
/// timestamp when the request was processed
processed_at: c.h2o_timestamp_t,
/// additional timestamps
timestamps: extern struct {
request_begin_at: c.timeval,
request_body_begin_at: c.timeval,
response_start_at: c.timeval,
response_end_at: c.timeval,
},
/// proxy stats
proxy_stats: extern struct {
bytes_written: extern struct {
total: u64,
header: u64,
body: u64,
},
bytes_read: extern struct {
total: u64,
header: u64,
body: u64,
},
timestamps: c.h2o_httpclient_timings_t,
conn: c.h2o_httpclient_conn_properties_t,
},
/// the response
res: h2o_res,
/// number of bytes sent by the generator (excluding headers)
bytes_sent: u64,
/// the number of times the request can be reprocessed (excluding delegation)
remaining_reprocesses: u32,
/// the number of times the request can be delegated
remaining_delegations: u32,
/// Optional callback used to establish a tunnel. When a tunnel is being established to upstream, the generator fills the
/// response headers, then calls this function directly, bypassing the ordinary `h2o_send` chain.
establish_tunnel: fn (req: *c.h2o_req_t, tunnel: *c.h2o_tunnel_t, idle_timeout: u64) void,
/// environment variables
env: c.h2o_iovec_vector_t,
/// error log for the request (`h2o_req_log_error` must be used for error logging)
error_logs: *c.h2o_buffer_t,
/// error log redirection called by `h2o_req_log_error`. By default, the error is appended to `error_logs`. The callback is
/// replaced by mruby middleware to send the error log to the rack handler.
error_log_delegate: extern struct {
cb: fn (data: *anyopaque, prefix: c.h2o_iovec_t, msg: c.h2o_iovec_t) callconv(.C) void,
data: *anyopaque,
},
/// flags
fields: packed struct {
/// whether or not the connection is persistent.
/// Applications should set this flag to false if the connection cannot be kept alive (e.g. due to an error).
http1_is_persistent: bool,
/// whether the response has been delegated (i.e. reproxied).
/// For delegated responses, redirect responses are handled internally.
res_is_delegated: bool,
/// set by the generator if the protocol handler should replay the request upon seeing a 425 (Too Early) response
reprocess_if_too_early: bool,
/// set by the proxy handler if the HTTP/2 upstream refused the stream, so that the client can retry the request
upstream_refused: bool,
/// if h2o_process_request has been called
process_called: bool,
},
/// whether the response should include the server-timing header. Logical OR of H2O_SEND_SERVER_TIMING_*
send_server_timing: u32,
/// Whether the producer of the response has explicitly disabled or
/// enabled compression. One of H2O_COMPRESS_HINT_*
compress_hint: u8,
/// the Upgrade request header (or { NULL, 0 } if not available)
upgrade: c.h2o_iovec_t,
/// preferred chunk size by the ostream
preferred_chunk_size: usize,
/// callback and context for receiving request body (see h2o_handler_t::supports_request_streaming for details)
write_req: extern struct {
cb: c.h2o_write_req_cb,
ctx: *anyopaque,
},
/// callback and context for receiving more request body (see h2o_handler_t::supports_request_streaming for details)
proceed_req: c.h2o_proceed_req_cb,
/// internal structure
_generator: *c.h2o_generator_t,
_ostr_top: *c.h2o_ostream_t,
_next_filter_index: usize,
_timeout_entry: c.h2o_timer_t,
/// per-request memory pool (placed at the last since the structure is large)
pool: c.h2o_mem_pool_t,
};
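// Minimal helper sketch (assumed names, not part of the h2o API) showing how
// the 0xMMmm encoding of `h2o_req.version` documented above can be unpacked.
fn httpVersionMajor(version: c_int) u8 {
    return @intCast(u8, (version >> 8) & 0xff);
}
fn httpVersionMinor(version: c_int) u8 {
    return @intCast(u8, version & 0xff);
}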
pub const h2o_token = extern struct {
buf: c.h2o_iovec_t,
flags: h2o_token_flags,
};
const h2o_token_flags = extern struct {
http2_static_table_name_index: u8, // non-zero if any
fields: packed struct {
proxy_should_drop_for_req: bool,
proxy_should_drop_for_res: bool,
is_init_header_special: bool,
http2_should_reject: bool,
copy_for_push_request: bool,
dont_compress: bool, // consult `h2o_header_t:dont_compress` as well
likely_to_repeat: bool,
padding: u1,
},
};
/// abstraction layer for sockets (SSL vs. TCP)
pub const h2o_socket = struct {
data: *anyopaque,
ssl: *c.st_h2o_socket_ssl_t,
input: *c.h2o_buffer_t,
/// total bytes read (above the TLS layer)
bytes_read: u64,
/// total bytes written (above the TLS layer)
bytes_written: u64,
fields: packed struct {
/// boolean flag to indicate if sock is NOT being traced
_skip_tracing: bool,
padding: u7,
},
on_close: extern struct {
cb: fn (data: *anyopaque) callconv(.C) void,
data: *anyopaque,
},
_cb: extern struct {
read: c.h2o_socket_cb,
write: c.h2o_socket_cb,
},
_peername: *c.st_h2o_socket_addr_t,
_sockname: *c.st_h2o_socket_addr_t,
_write_buf: extern struct {
cnt: usize,
bufs: *c.h2o_iovec_t,
u: extern union {
alloced_ptr: *c.h2o_iovec_t,
smallbufs: [4]c.h2o_iovec_t,
},
},
_latency_optimization: extern struct {
state: u8, // one of H2O_SOCKET_LATENCY_STATE_*
fields: packed struct {
notsent_is_minimized: bool,
padding: u7,
},
suggested_tls_payload_size: usize, // suggested TLS record payload size, or SIZE_MAX when no need to restrict
suggested_write_size: usize, // SIZE_MAX if no need to optimize for latency
},
}; | lib/h2o/h2o.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const nfd = @import("nfd");
const nvg = @import("nanovg");
const gui = @import("gui");
const icons = @import("icons.zig");
const geometry = @import("gui/geometry.zig");
const Point = geometry.Point;
const Rect = geometry.Rect;
const Clipboard = @import("Clipboard.zig");
const Document = @import("Document.zig");
const NewDocumentWidget = @import("NewDocumentWidget.zig");
const CanvasWidget = @import("CanvasWidget.zig");
const ColorPaletteWidget = @import("ColorPaletteWidget.zig");
const ColorPickerWidget = @import("ColorPickerWidget.zig");
const ColorForegroundBackgroundWidget = @import("ColorForegroundBackgroundWidget.zig");
const PreviewWidget = @import("PreviewWidget.zig");
pub const EditorWidget = @This();
widget: gui.Widget,
allocator: *Allocator,
document: *Document,
menu_bar: *gui.Toolbar,
new_button: *gui.Button,
open_button: *gui.Button,
save_button: *gui.Button,
undo_button: *gui.Button,
redo_button: *gui.Button,
cut_button: *gui.Button,
copy_button: *gui.Button,
paste_button: *gui.Button,
crop_tool_button: *gui.Button,
select_tool_button: *gui.Button,
draw_tool_button: *gui.Button,
fill_tool_button: *gui.Button,
mirror_h_tool_button: *gui.Button,
mirror_v_tool_button: *gui.Button,
rotate_ccw_tool_button: *gui.Button,
rotate_cw_tool_button: *gui.Button,
grid_button: *gui.Button,
zoom_label: *gui.Label,
zoom_spinner: *gui.Spinner(f32),
status_bar: *gui.Toolbar,
help_status_label: *gui.Label,
tool_status_label: *gui.Label,
image_status_label: *gui.Label,
memory_status_label: *gui.Label,
// help_text: [200]u8 = .{0} ** 200,
tool_text: [100]u8 = .{0} ** 100,
image_text: [20]u8 = .{0} ** 20,
memory_text: [100]u8 = .{0} ** 100,
new_document_widget: *NewDocumentWidget,
canvas: *CanvasWidget,
color_palette: *ColorPaletteWidget,
color_picker: *ColorPickerWidget,
color_foreground_background: *ColorForegroundBackgroundWidget,
preview: *PreviewWidget,
panel_right: *gui.Panel,
const Self = @This();
pub fn init(allocator: *Allocator, rect: Rect(f32)) !*Self {
var self = try allocator.create(Self);
self.* = Self{
.widget = gui.Widget.init(allocator, rect),
.allocator = allocator,
.document = try Document.init(allocator),
.menu_bar = try gui.Toolbar.init(allocator, rect),
.new_button = try gui.Button.init(allocator, rect, ""),
.open_button = try gui.Button.init(allocator, rect, ""),
.save_button = try gui.Button.init(allocator, rect, ""),
.undo_button = try gui.Button.init(allocator, rect, ""),
.redo_button = try gui.Button.init(allocator, rect, ""),
.cut_button = try gui.Button.init(allocator, rect, ""),
.copy_button = try gui.Button.init(allocator, rect, ""),
.paste_button = try gui.Button.init(allocator, rect, ""),
.crop_tool_button = try gui.Button.init(allocator, rect, ""),
.select_tool_button = try gui.Button.init(allocator, rect, ""),
.draw_tool_button = try gui.Button.init(allocator, rect, ""),
.fill_tool_button = try gui.Button.init(allocator, rect, ""),
.mirror_h_tool_button = try gui.Button.init(allocator, rect, ""),
.mirror_v_tool_button = try gui.Button.init(allocator, rect, ""),
.rotate_ccw_tool_button = try gui.Button.init(allocator, rect, ""),
.rotate_cw_tool_button = try gui.Button.init(allocator, rect, ""),
.grid_button = try gui.Button.init(allocator, rect, ""),
.zoom_label = try gui.Label.init(allocator, Rect(f32).make(0, 0, 37, 20), "Zoom:"),
.zoom_spinner = try gui.Spinner(f32).init(allocator, Rect(f32).make(0, 0, 53, 20)),
.status_bar = try gui.Toolbar.init(allocator, rect),
.help_status_label = try gui.Label.init(allocator, Rect(f32).make(0, 0, 450, 20), ""),
.tool_status_label = try gui.Label.init(allocator, Rect(f32).make(0, 0, 120, 20), ""),
.image_status_label = try gui.Label.init(allocator, Rect(f32).make(0, 0, 80, 20), ""),
.memory_status_label = try gui.Label.init(allocator, Rect(f32).make(0, 0, 80, 20), ""),
.new_document_widget = try NewDocumentWidget.init(allocator, self),
.canvas = try CanvasWidget.init(allocator, Rect(f32).make(0, 24, rect.w, rect.h), self.document),
.color_palette = try ColorPaletteWidget.init(allocator, Rect(f32).make(0, 0, 163, 163)),
.color_picker = try ColorPickerWidget.init(allocator, Rect(f32).make(0, 0, 163, 117)),
.color_foreground_background = try ColorForegroundBackgroundWidget.init(allocator, Rect(f32).make(0, 0, 163, 66)),
.preview = try PreviewWidget.init(allocator, Rect(f32).make(0, 0, 163, 120), self.document),
.panel_right = try gui.Panel.init(allocator, Rect(f32).make(0, 0, 163, 200)),
};
self.widget.onResizeFn = onResize;
self.widget.onKeyDownFn = onKeyDown;
self.widget.onClipboardUpdateFn = onClipboardUpdate;
try self.initMenubar();
self.help_status_label.padding = 3;
self.help_status_label.draw_border = true;
self.tool_status_label.padding = 3;
self.tool_status_label.draw_border = true;
self.image_status_label.padding = 3;
self.image_status_label.draw_border = true;
self.memory_status_label.padding = 3;
self.memory_status_label.draw_border = true;
self.status_bar.has_grip = true;
// build status bar
try self.status_bar.addWidget(&self.help_status_label.widget);
try self.status_bar.addWidget(&self.tool_status_label.widget);
try self.status_bar.addWidget(&self.image_status_label.widget);
try self.status_bar.addWidget(&self.memory_status_label.widget);
// add main widgets
try self.widget.addChild(&self.menu_bar.widget);
try self.widget.addChild(&self.canvas.widget);
try self.widget.addChild(&self.color_palette.widget);
try self.widget.addChild(&self.color_picker.widget);
try self.widget.addChild(&self.color_foreground_background.widget);
try self.widget.addChild(&self.preview.widget);
try self.widget.addChild(&self.panel_right.widget);
try self.widget.addChild(&self.status_bar.widget);
try self.color_palette.loadPalContents(@embedFile("../data/palettes/arne16.pal"));
self.color_palette.onSelectionChangedFn = struct {
fn selectionChanged(color_palette: *ColorPaletteWidget) void {
if (color_palette.widget.parent) |parent| {
var editor = @fieldParentPtr(EditorWidget, "widget", parent);
if (color_palette.selected) |selected| {
const color = color_palette.colors[selected];
editor.color_picker.setRgb(color);
editor.color_foreground_background.setActiveRgba(editor.color_picker.color);
switch (editor.color_foreground_background.active) {
.foreground => editor.document.setForegroundColorRgba(editor.color_picker.color),
.background => editor.document.setBackgroundColorRgba(editor.color_picker.color),
}
}
}
}
}.selectionChanged;
self.canvas.onColorChangedFn = struct {
fn colorChanged(canvas: *CanvasWidget, color: [4]u8) void {
if (canvas.widget.parent) |parent| {
var editor = @fieldParentPtr(EditorWidget, "widget", parent);
editor.color_foreground_background.setRgba(.foreground, color);
}
}
}.colorChanged;
self.canvas.onScaleChangedFn = struct {
fn zoomChanged(canvas: *CanvasWidget, zoom: f32) void {
if (canvas.widget.parent) |parent| {
var editor = @fieldParentPtr(EditorWidget, "widget", parent);
editor.zoom_spinner.setValue(zoom);
}
}
}.zoomChanged;
self.color_picker.onChangedFn = struct {
fn changed(color_picker: *ColorPickerWidget) void {
if (color_picker.widget.parent) |parent| {
var editor = @fieldParentPtr(EditorWidget, "widget", parent);
if (editor.color_palette.selected) |selected| {
std.mem.copy(u8, editor.color_palette.colors[selected][0..], color_picker.color[0..3]);
}
editor.color_foreground_background.setActiveRgba(color_picker.color);
switch (editor.color_foreground_background.active) {
.foreground => editor.document.setForegroundColorRgba(editor.color_picker.color),
.background => editor.document.setBackgroundColorRgba(editor.color_picker.color),
}
}
}
}.changed;
self.color_foreground_background.onChangedFn = struct {
fn changed(color_foreground_background: *ColorForegroundBackgroundWidget) void {
if (color_foreground_background.widget.parent) |parent| {
var editor = @fieldParentPtr(EditorWidget, "widget", parent);
editor.color_picker.setRgba(color_foreground_background.getActiveRgba());
//editor.color_palette.clearSelection();
switch (editor.color_foreground_background.active) {
.foreground => editor.document.setForegroundColorRgba(editor.color_picker.color),
.background => editor.document.setBackgroundColorRgba(editor.color_picker.color),
}
}
}
}.changed;
self.document.undo_system.undo_listener_address = @ptrToInt(self);
self.document.undo_system.onUndoChangedFn = struct {
fn undoChanged(document: *Document) void {
var editor = @intToPtr(*EditorWidget, document.undo_system.undo_listener_address);
editor.undo_button.enabled = document.canUndo();
editor.undo_button.iconFn = if (editor.undo_button.enabled)
icons.iconUndoEnabled
else
icons.iconUndoDisabled;
editor.redo_button.enabled = document.canRedo();
editor.redo_button.iconFn = if (editor.redo_button.enabled)
icons.iconRedoEnabled
else
icons.iconRedoDisabled;
}
}.undoChanged;
self.updateLayout();
self.setTool(.draw);
self.canvas.centerDocument();
self.updateImageStatus();
return self;
}
fn initMenubar(self: *Self) !void {
self.new_button.iconFn = icons.iconNew;
self.new_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).newDocument();
}
}.click;
self.new_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("New Document (Ctrl+N)");
}
}.enter;
self.new_button.onLeaveFn = menuButtonOnLeave;
self.open_button.iconFn = icons.iconOpen;
self.open_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).openDocument();
}
}.click;
self.open_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Open Document (Ctrl+O)");
}
}.enter;
self.open_button.onLeaveFn = menuButtonOnLeave;
self.save_button.iconFn = icons.iconSave;
self.save_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).saveDocument();
}
}.click;
self.save_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Save Document (Ctrl+S)");
}
}.enter;
self.save_button.onLeaveFn = menuButtonOnLeave;
self.undo_button.iconFn = icons.iconUndoDisabled;
self.undo_button.enabled = false;
self.undo_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).undoDocument();
}
}.click;
self.undo_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Undo Action (Ctrl+Z)");
}
}.enter;
self.undo_button.onLeaveFn = menuButtonOnLeave;
self.redo_button.iconFn = icons.iconRedoDisabled;
self.redo_button.enabled = false;
self.redo_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).redoDocument();
}
}.click;
self.redo_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Redo Action (Ctrl+Y)");
}
}.enter;
self.redo_button.onLeaveFn = menuButtonOnLeave;
self.cut_button.iconFn = icons.iconCut;
self.cut_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).cutDocument();
}
}.click;
self.cut_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Cut Selection to clipboard (Ctrl+X)");
}
}.enter;
self.cut_button.onLeaveFn = menuButtonOnLeave;
self.copy_button.iconFn = icons.iconCopy;
self.copy_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).copyDocument();
}
}.click;
self.copy_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Copy Selection to Clipboard (Ctrl+C)");
}
}.enter;
self.copy_button.onLeaveFn = menuButtonOnLeave;
self.paste_button.iconFn = icons.iconPasteEnabled;
self.checkClipboard(); // will set the correct icon
self.paste_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).pasteDocument();
}
}.click;
self.paste_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Paste from Clipboard (Ctrl+V)");
}
}.enter;
self.paste_button.onLeaveFn = menuButtonOnLeave;
self.crop_tool_button.iconFn = icons.iconToolCrop;
self.crop_tool_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).setTool(.crop);
}
}.click;
self.crop_tool_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Crop/Enlarge Tool (C)");
}
}.enter;
self.crop_tool_button.onLeaveFn = menuButtonOnLeave;
self.select_tool_button.iconFn = icons.iconToolSelect;
self.select_tool_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).setTool(.select);
}
}.click;
self.select_tool_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Rectangle Select Tool (R)");
}
}.enter;
self.select_tool_button.onLeaveFn = menuButtonOnLeave;
self.draw_tool_button.iconFn = icons.iconToolPen;
self.draw_tool_button.checked = true;
self.draw_tool_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).setTool(.draw);
}
}.click;
self.draw_tool_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Pen Tool (N)");
}
}.enter;
self.draw_tool_button.onLeaveFn = menuButtonOnLeave;
self.fill_tool_button.iconFn = icons.iconToolBucket;
self.fill_tool_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).setTool(.fill);
}
}.click;
self.fill_tool_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Fill Tool (B)");
}
}.enter;
self.fill_tool_button.onLeaveFn = menuButtonOnLeave;
self.mirror_h_tool_button.iconFn = icons.iconMirrorHorizontally;
self.mirror_h_tool_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).mirrorHorizontallyDocument();
}
}.click;
self.mirror_h_tool_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Mirror Horizontally");
}
}.enter;
self.mirror_h_tool_button.onLeaveFn = menuButtonOnLeave;
self.mirror_v_tool_button.iconFn = icons.iconMirrorVertically;
self.mirror_v_tool_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).mirrorVerticallyDocument();
}
}.click;
self.mirror_v_tool_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Mirror Vertically");
}
}.enter;
self.mirror_v_tool_button.onLeaveFn = menuButtonOnLeave;
self.rotate_ccw_tool_button.iconFn = icons.iconRotateCcw;
self.rotate_ccw_tool_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).rotateCcwDocument();
}
}.click;
self.rotate_ccw_tool_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Rotate Counterclockwise");
}
}.enter;
self.rotate_ccw_tool_button.onLeaveFn = menuButtonOnLeave;
self.rotate_cw_tool_button.iconFn = icons.iconRotateCw;
self.rotate_cw_tool_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).rotateCwDocument();
}
}.click;
self.rotate_cw_tool_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Rotate Clockwise");
}
}.enter;
self.rotate_cw_tool_button.onLeaveFn = menuButtonOnLeave;
self.grid_button.iconFn = icons.iconGrid;
self.grid_button.checked = self.canvas.grid_enabled;
self.grid_button.onClickFn = struct {
fn click(button: *gui.Button) void {
getEditorFromMenuButton(button).toggleGrid();
}
}.click;
self.grid_button.onEnterFn = struct {
fn enter(button: *gui.Button) void {
getEditorFromMenuButton(button).setHelpText("Toggle Pixel Grid (#)");
}
}.enter;
self.grid_button.onLeaveFn = menuButtonOnLeave;
self.zoom_spinner.setValue(self.canvas.scale);
self.zoom_spinner.min_value = 1.0 / 64.0;
self.zoom_spinner.max_value = 64;
//self.zoom_spinner.step_value = 0.5;
self.zoom_spinner.step_mode = .exponential;
self.zoom_spinner.onChangedFn = struct {
fn changed(spinner: *gui.Spinner(f32)) void {
if (spinner.widget.parent) |menu_bar_widget| {
if (menu_bar_widget.parent) |parent| {
var editor = @fieldParentPtr(EditorWidget, "widget", parent);
const factor = spinner.value / editor.canvas.scale;
editor.canvas.zoomToDocumentCenter(factor);
}
}
}
}.changed;
// build menu bar
try self.menu_bar.addButton(self.new_button);
try self.menu_bar.addButton(self.open_button);
try self.menu_bar.addButton(self.save_button);
try self.menu_bar.addSeparator();
try self.menu_bar.addButton(self.undo_button);
try self.menu_bar.addButton(self.redo_button);
try self.menu_bar.addSeparator();
try self.menu_bar.addButton(self.cut_button);
try self.menu_bar.addButton(self.copy_button);
try self.menu_bar.addButton(self.paste_button);
try self.menu_bar.addSeparator();
try self.menu_bar.addButton(self.crop_tool_button);
try self.menu_bar.addButton(self.select_tool_button);
try self.menu_bar.addButton(self.draw_tool_button);
try self.menu_bar.addButton(self.fill_tool_button);
try self.menu_bar.addSeparator();
try self.menu_bar.addButton(self.mirror_h_tool_button);
try self.menu_bar.addButton(self.mirror_v_tool_button);
try self.menu_bar.addButton(self.rotate_ccw_tool_button);
try self.menu_bar.addButton(self.rotate_cw_tool_button);
try self.menu_bar.addSeparator();
try self.menu_bar.addButton(self.grid_button);
try self.menu_bar.addSeparator();
try self.menu_bar.addWidget(&self.zoom_label.widget);
try self.menu_bar.addWidget(&self.zoom_spinner.widget);
}
pub fn deinit(self: *Self) void {
self.document.deinit();
self.menu_bar.deinit();
self.new_button.deinit();
self.open_button.deinit();
self.save_button.deinit();
self.undo_button.deinit();
self.redo_button.deinit();
self.cut_button.deinit();
self.copy_button.deinit();
self.paste_button.deinit();
self.crop_tool_button.deinit();
self.select_tool_button.deinit();
self.draw_tool_button.deinit();
self.fill_tool_button.deinit();
self.mirror_h_tool_button.deinit();
self.mirror_v_tool_button.deinit();
self.rotate_ccw_tool_button.deinit();
self.rotate_cw_tool_button.deinit();
self.grid_button.deinit();
self.zoom_label.deinit();
self.zoom_spinner.deinit();
self.status_bar.deinit();
self.help_status_label.deinit();
self.tool_status_label.deinit();
self.image_status_label.deinit();
self.memory_status_label.deinit();
self.new_document_widget.deinit();
self.canvas.deinit();
self.color_palette.deinit();
self.color_picker.deinit();
self.color_foreground_background.deinit();
self.preview.deinit();
self.panel_right.deinit();
self.widget.deinit();
self.allocator.destroy(self);
}
fn onResize(widget: *gui.Widget, event: *const gui.ResizeEvent) void {
_ = event;
const self = @fieldParentPtr(Self, "widget", widget);
self.updateLayout();
}
fn onKeyDown(widget: *gui.Widget, key_event: *gui.KeyEvent) void {
const self = @fieldParentPtr(Self, "widget", widget);
if (key_event.isModifierPressed(.ctrl)) {
switch (key_event.key) {
.N => self.newDocument(),
.O => self.openDocument(),
.S => self.saveDocument(),
.Z => self.undoDocument(),
.Y => self.redoDocument(),
.A => self.selectAll(),
.X => self.cutDocument(),
.C => self.copyDocument(),
.V => self.pasteDocument(),
.Comma => self.fillDocument(.foreground),
.Period => self.fillDocument(.background),
else => key_event.event.ignore(),
}
} else if (key_event.modifiers == 0) {
switch (key_event.key) {
.C => self.setTool(.crop), // Crop
.R => self.setTool(.select), // Rectangle select
.N => self.setTool(.draw), // peNcil
.B => self.setTool(.fill), // Bucket
.X => self.color_foreground_background.swap(),
.Hash => self.toggleGrid(),
else => key_event.event.ignore(),
}
} else {
key_event.event.ignore();
}
}
fn onClipboardUpdate(widget: *gui.Widget) void {
const self = @fieldParentPtr(Self, "widget", widget);
self.checkClipboard();
}
fn checkClipboard(self: *Self) void {
if (Clipboard.hasImage()) {
self.paste_button.enabled = true;
self.paste_button.iconFn = icons.iconPasteEnabled;
} else {
self.paste_button.enabled = false;
self.paste_button.iconFn = icons.iconPasteDisabled;
}
}
fn updateLayout(self: *Self) void {
const rect = self.widget.relative_rect;
const menu_bar_h = 24;
const right_col_w = self.color_picker.widget.relative_rect.w;
const canvas_w = rect.w - right_col_w;
const canvas_h = rect.h - menu_bar_h - menu_bar_h; // menu bar on top, status bar at the bottom
self.canvas.widget.relative_rect.x = 0;
self.color_palette.widget.relative_rect.x = canvas_w;
self.color_picker.widget.relative_rect.x = canvas_w;
self.color_foreground_background.widget.relative_rect.x = canvas_w;
self.preview.widget.relative_rect.x = canvas_w;
self.panel_right.widget.relative_rect.x = canvas_w;
self.canvas.widget.relative_rect.y = menu_bar_h;
self.color_palette.widget.relative_rect.y = menu_bar_h;
self.color_picker.widget.relative_rect.y = self.color_palette.widget.relative_rect.y + self.color_palette.widget.relative_rect.h;
self.color_foreground_background.widget.relative_rect.y = self.color_picker.widget.relative_rect.y + self.color_picker.widget.relative_rect.h;
self.preview.widget.relative_rect.y = self.color_foreground_background.widget.relative_rect.y + self.color_foreground_background.widget.relative_rect.h;
self.panel_right.widget.relative_rect.y = self.preview.widget.relative_rect.y + self.preview.widget.relative_rect.h;
self.status_bar.widget.relative_rect.y = rect.h - menu_bar_h;
self.menu_bar.widget.setSize(rect.w, menu_bar_h);
self.panel_right.widget.setSize(right_col_w, std.math.max(0, menu_bar_h + canvas_h - self.panel_right.widget.relative_rect.y));
self.canvas.widget.setSize(canvas_w, canvas_h);
self.status_bar.widget.setSize(rect.w, menu_bar_h);
}
fn newDocument(self: *Self) void {
if (self.widget.getWindow()) |parent_window| {
var window_or_error = parent_window.createChildWindow(
"New Document",
self.new_document_widget.widget.relative_rect.w,
self.new_document_widget.widget.relative_rect.h,
gui.Window.CreateOptions{ .resizable = false },
);
if (window_or_error) |window| {
window.is_modal = true;
window.setMainWidget(&self.new_document_widget.widget);
self.new_document_widget.width_spinner.setFocus(true, .keyboard); // keyboard will select text
} else |_| {
// TODO: show error
}
}
}
pub fn createNewDocument(self: *Self, width: u32, height: u32) !void {
try self.document.createNew(width, height);
self.canvas.centerDocument(); // TODO: also zoom
self.updateImageStatus();
self.updateWindowTitle("Untitled");
}
fn openDocument(self: *Self) void {
if (nfd.openFileDialog("png", null)) |result| {
if (result) |file_path| {
defer nfd.freePath(file_path);
self.loadDocument(file_path);
}
} else |_| {
// TODO: could not open dialog
}
}
fn saveDocument(self: *Self) void {
if (nfd.saveFileDialog("png", null)) |result| {
if (result) |file_path| {
defer nfd.freePath(file_path);
// check extension
var png_file_path_or_error = (if (!isExtPng(file_path))
std.mem.concat(self.allocator, u8, &.{ file_path, ".png" })
else
self.allocator.dupe(u8, file_path));
if (png_file_path_or_error) |png_file_path| {
defer self.allocator.free(png_file_path);
self.document.save(png_file_path) catch {
// TODO: show error message
};
self.updateWindowTitle(png_file_path);
} else |_| {
// TODO: show error message
}
}
} else |_| {
// TODO: could not open dialog
}
}
fn undoDocument(self: *Self) void {
self.document.undo() catch {}; // TODO
}
fn redoDocument(self: *Self) void {
self.document.redo() catch {}; // TODO
}
fn selectAll(self: *Self) void {
self.setTool(.select);
if (self.document.selection) |_| {
self.document.clearSelection() catch {}; // TODO
}
const w = @intCast(i32, self.document.width);
const h = @intCast(i32, self.document.height);
self.document.makeSelection(Rect(i32).make(0, 0, w, h)) catch {}; // TODO
}
fn cutDocument(self: *Self) void {
self.document.cut() catch {
// TODO handle
};
self.checkClipboard();
}
fn copyDocument(self: *Self) void {
self.document.copy() catch {
// TODO handle
};
self.checkClipboard();
}
fn pasteDocument(self: *Self) void {
self.document.paste() catch {
// TODO handle
};
self.checkClipboard();
self.setTool(.select);
}
fn fillDocument(self: *Self, color_type: ColorForegroundBackgroundWidget.ColorType) void {
const color = self.color_foreground_background.getRgba(color_type);
self.document.fill(color) catch {}; // TODO: handle
}
fn mirrorHorizontallyDocument(self: *Self) void {
self.document.mirrorHorizontally() catch {}; // TODO: handle
}
fn mirrorVerticallyDocument(self: *Self) void {
self.document.mirrorVertically() catch {}; // TODO: handle
}
fn rotateCwDocument(self: *Self) void {
self.document.rotateCw() catch {}; // TODO: handle
}
fn rotateCcwDocument(self: *Self) void {
self.document.rotateCcw() catch {}; // TODO: handle
}
fn setTool(self: *Self, tool: CanvasWidget.ToolType) void {
self.canvas.setTool(tool);
self.crop_tool_button.checked = tool == .crop;
self.select_tool_button.checked = tool == .select;
self.draw_tool_button.checked = tool == .draw;
self.fill_tool_button.checked = tool == .fill;
self.setHelpText(self.getToolHelpText());
}
fn toggleGrid(self: *Self) void {
self.canvas.grid_enabled = !self.canvas.grid_enabled;
self.grid_button.checked = self.canvas.grid_enabled;
}
pub fn loadDocument(self: *Self, file_path: []const u8) void {
self.document.load(file_path) catch {
// TODO: show error message
};
self.canvas.centerDocument();
self.updateImageStatus();
self.updateWindowTitle(file_path);
}
// Scratch buffer used by updateWindowTitle to format the window title.
var buf: [1024]u8 = undefined;
fn updateWindowTitle(self: *Self, file_path: []const u8) void {
if (self.widget.getWindow()) |window| {
const basename = std.fs.path.basename(file_path);
const title = std.fmt.bufPrintZ(&buf, "{s} - Mini Pixel", .{basename}) catch unreachable;
window.setTitle(title);
}
}
fn setHelpText(self: *Self, help_text: []const u8) void {
self.help_status_label.text = help_text;
}
fn getToolHelpText(self: Self) []const u8 {
return switch (self.canvas.tool) {
.crop => "Drag to create crop region, double click region to apply, right click to cancel",
.select => "Drag to create selection, right click to cancel selection",
.draw => "Left click to draw, right click to pick color, hold shift to draw line",
.fill => "Left click to flood fill, right click to pick color",
};
}
pub fn updateImageStatus(self: *Self) void {
self.image_status_label.text = std.fmt.bufPrintZ(
self.image_text[0..],
"{d}x{d}",
.{ self.document.width, self.document.height },
) catch unreachable;
}
pub fn setMemoryUsageInfo(self: *Self, bytes: usize) void {
var unit = "KiB";
var fb = @intToFloat(f32, bytes) / 1024.0;
if (bytes > 1 << 20) {
unit = "MiB";
fb /= 1024.0;
}
self.memory_status_label.text = std.fmt.bufPrintZ(
self.memory_text[0..],
"{d:.2} {s}",
.{ fb, unit },
) catch unreachable;
}
fn isExtPng(file_path: []const u8) bool {
const ext = std.fs.path.extension(file_path);
return std.ascii.eqlIgnoreCase(".png", ext);
}
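// Note (not part of the original comments): @fieldParentPtr below recovers the
// EditorWidget that embeds `widget`, assuming menu buttons are direct children of
// the menu bar and that the menu bar's parent is the editor's own widget.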
fn getEditorFromMenuButton(menu_button: *gui.Button) *Self {
if (menu_button.widget.parent) |menu_bar_widget| {
if (menu_bar_widget.parent) |parent| {
return @fieldParentPtr(EditorWidget, "widget", parent);
}
}
unreachable; // forgot to add button to the menu_bar
}
fn menuButtonOnLeave(menu_button: *gui.Button) void {
const editor = getEditorFromMenuButton(menu_button);
editor.setHelpText(editor.getToolHelpText());
} | src/EditorWidget.zig |
const std = @import("std");
const c = @import("c.zig");
const key = @import("key.zig");
const pem = @import("pem.zig");
pub const Anchor = struct {
anchor: c.br_x509_trust_anchor,
const Self = @This();
pub fn init(der: []const u8, allocator: std.mem.Allocator) !Self
{
var state = X509State {
.list = std.ArrayList(u8).init(allocator),
.success = true,
};
defer state.list.deinit();
var context: c.br_x509_decoder_context = undefined;
c.br_x509_decoder_init(&context, x509Callback, &state);
c.br_x509_decoder_push(&context, &der[0], der.len);
if (!state.success) {
return error.x509DecodeCallback;
}
if (state.list.items.len == 0) {
return error.NoAnchorData;
}
const err = c.br_x509_decoder_last_error(&context);
if (err != 0) {
return error.x509Decode;
}
var self = Self {
.anchor = undefined,
};
const dn = state.list.toOwnedSlice();
errdefer allocator.free(dn);
self.anchor.dn.data = &dn[0];
self.anchor.dn.len = dn.len;
self.anchor.flags = @intCast(c_uint, c.br_x509_decoder_isCA(&context));
const publicKey = c.br_x509_decoder_get_pkey(&context);
try key.copyPublicKey(&self.anchor.pkey, publicKey, allocator);
errdefer key.freePublicKey(self.anchor.pkey, allocator);
return self;
}
pub fn deinit(self: Self, allocator: std.mem.Allocator) void
{
allocator.free(self.anchor.dn.data[0..self.anchor.dn.len]);
key.freePublicKey(self.anchor.pkey, allocator);
}
};
const X509State = struct {
list: std.ArrayList(u8),
success: bool,
};
fn x509Callback(userData: ?*anyopaque, data: ?*const anyopaque, len: usize) callconv(.C) void
{
var state = @ptrCast(*X509State, @alignCast(@alignOf(*X509State), userData));
const bytes = @ptrCast([*]const u8, data);
const slice = bytes[0..len];
state.list.appendSlice(slice) catch {
state.success = false;
};
}
pub const Anchors = struct {
anchors: []Anchor,
const Self = @This();
pub fn init(pemData: []const u8, allocator: std.mem.Allocator) !Self
{
var state = PemAnchorsState {
.allocator = allocator,
.list = std.ArrayList(Anchor).init(allocator),
};
defer state.list.deinit();
try pem.decode(pemData, *PemAnchorsState, &state, pemAnchorsCallback, allocator);
if (state.list.items.len == 0) {
return error.NoCerts;
}
var self = Self {
.anchors = state.list.toOwnedSlice(),
};
return self;
}
pub fn deinit(self: Self, allocator: std.mem.Allocator) void
{
for (self.anchors) |anchor| {
anchor.deinit(allocator);
}
allocator.free(self.anchors);
}
/// Caller should call allocator.free on the result
pub fn getRawAnchors(self: Self, allocator: std.mem.Allocator) ![]const c.br_x509_trust_anchor
{
var raw = try allocator.alloc(c.br_x509_trust_anchor, self.anchors.len);
for (self.anchors) |_, i| {
raw[i] = self.anchors[i].anchor;
}
return raw;
}
};
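// A hedged usage sketch (not part of the original): decode a PEM bundle with
// Anchors.init, pass the slice returned by getRawAnchors to BearSSL's X.509
// validation context, then free that slice and deinit the Anchors once the TLS
// context no longer needs them.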
const PemAnchorsState = struct {
allocator: std.mem.Allocator,
list: std.ArrayList(Anchor),
};
fn pemAnchorsCallback(state: *PemAnchorsState, data: []const u8) !void
{
var anchor = try state.list.addOne();
anchor.* = try Anchor.init(data, state.allocator);
}
pub const Chain = struct {
chain: []c.br_x509_certificate,
const Self = @This();
pub fn init(pemData: []const u8, allocator: std.mem.Allocator) !Self
{
var state = PemChainState {
.allocator = allocator,
.list = std.ArrayList(c.br_x509_certificate).init(allocator),
};
defer state.list.deinit();
try pem.decode(pemData, *PemChainState, &state, pemChainCallback, allocator);
if (state.list.items.len == 0) {
return error.NoCerts;
}
var self = Self {
.chain = state.list.toOwnedSlice(),
};
return self;
}
pub fn deinit(self: Self, allocator: std.mem.Allocator) void
{
for (self.chain) |cert| {
if (cert.data_len > 0) {
allocator.free(cert.data[0..cert.data_len]);
}
}
allocator.free(self.chain);
}
};
const PemChainState = struct {
allocator: std.mem.Allocator,
list: std.ArrayList(c.br_x509_certificate),
};
fn pemChainCallback(state: *PemChainState, data: []const u8) !void
{
const copy = try state.allocator.dupe(u8, data);
var entry = try state.list.addOne();
entry.data = &copy[0];
entry.data_len = copy.len;
} | src/crt.zig |
const utils = @import("utils");
const memory = @import("memory.zig");
const Allocator = memory.Allocator;
const MemoryError = memory.MemoryError;
const MappedList = @import("mapped_list.zig").MappedList;
const print = @import("print.zig");
const io = @import("georgios").io;
pub const File = io.File;
pub const FileError = io.FileError;
pub const BufferFile = io.BufferFile;
pub const BlockError = error {
// TODO: This is unused, but causes compile issues with fs, completely
// remove this or fix the other issues.
// InvalidBlockSize,
} || FileError || MemoryError;
pub const AddressType = u64;
fn address_eql(a: AddressType, b: AddressType) bool {
return a == b;
}
fn address_cmp(a: AddressType, b: AddressType) bool {
return a > b;
}
pub const Block = struct {
address: AddressType,
data: ?[]u8 = null,
};
/// Abstract Block IO Interface
pub const BlockStore = struct {
const Self = @This();
block_size: AddressType,
read_block_impl: fn(*BlockStore, *Block) BlockError!void = default.read_block_impl,
free_block_impl: fn(*BlockStore, *Block) BlockError!void = default.free_block_impl,
pub const default = struct {
pub fn read_block_impl(self: *BlockStore, block: *Block) BlockError!void {
_ = self;
_ = block;
return BlockError.Unsupported;
}
pub fn free_block_impl(self: *BlockStore, block: *Block) BlockError!void {
_ = self;
_ = block;
// Nop
}
};
pub fn read_block(self: *BlockStore, block: *Block) BlockError!void {
try self.read_block_impl(self, block);
}
pub fn free_block(self: *BlockStore, block: *Block) BlockError!void {
try self.free_block_impl(self, block);
block.data = null;
}
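    // A worked sketch (not part of the original): with block_size = 512, reading
    // 100 bytes from address 500 touches blocks 0 and 1; block 0 supplies bytes
    // 500..511 (12 bytes) and block 1 supplies the remaining 88 bytes.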
    pub fn read(self: *BlockStore, address: AddressType, to: []u8) BlockError!void {
        const start_block = address / self.block_size;
        // Include the offset into the first block so unaligned reads that cross a
        // block boundary still cover every block they touch.
        const block_count = utils.div_round_up(
            AddressType, @intCast(AddressType, to.len) + address % self.block_size,
            self.block_size);
        const end_block = start_block + block_count;
        var block_address = start_block;
        var dest_offset: usize = 0;
        var src_offset = @intCast(usize, address % self.block_size);
        while (block_address < end_block) {
            var block = Block{.address = block_address};
            try self.read_block(&block);
            // Only block_size - src_offset bytes are available from this block.
            const available = @intCast(usize, self.block_size) - src_offset;
            const new_dest_offset = dest_offset + utils.min(usize,
                available, to.len - dest_offset);
            _ = utils.memory_copy_truncate(
                to[dest_offset..new_dest_offset], block.data.?[src_offset..]);
            src_offset = 0;
            dest_offset = new_dest_offset;
            block_address += 1;
        }
    }
};
/// Cached Block IO Interface
///
/// TODO: Use Slab Alloc for MappedList, Pages for Block Data?
pub const CachedBlockStore = struct {
const Self = @This();
const Cache = MappedList(AddressType, Block, address_eql, address_cmp);
alloc: *Allocator = undefined,
real_block_store: *BlockStore = undefined,
max_block_count: usize = undefined,
cache: Cache = undefined,
block_store: BlockStore = undefined,
pub fn init(self: *Self, alloc: *Allocator,
real_block_store: *BlockStore, max_block_count: usize) void {
self.alloc = alloc;
self.real_block_store = real_block_store;
self.max_block_count = max_block_count;
self.cache = Cache{.alloc = alloc};
self.block_store.block_size = real_block_store.block_size;
self.block_store.read_block_impl = Self.read_block_impl;
self.block_store.free_block_impl = Self.free_block_impl;
}
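    // Sketch of the caching policy below (not part of the original): a hit is bumped
    // to the front of the mapped list, a miss is read from the real store and pushed
    // to the front, and once the cache exceeds max_block_count the least recently
    // used block at the back is popped and released through the real store.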
fn read_block_impl(block_store: *BlockStore, block: *Block) BlockError!void {
const self = @fieldParentPtr(Self, "block_store", block_store);
if (self.cache.find_bump_to_front(block.address)) |cached_block| {
block.* = cached_block;
return;
}
try self.real_block_store.read_block(block);
try self.cache.push_front(block.address, block.*);
if (self.cache.len() > self.max_block_count) {
if (try self.cache.pop_back()) |popped| {
// print.format("Popping block at {} out of the cache\n", popped.address);
var block_copy = popped;
try self.real_block_store.free_block(&block_copy);
} else {
@panic("Cache is full, but null pop_back?");
}
}
}
fn free_block_impl(block_store: *BlockStore, block: *Block) BlockError!void {
const self = @fieldParentPtr(Self, "block_store", block_store);
try self.real_block_store.free_block(block);
}
}; | kernel/io.zig |
const std = @import("std");
const allocator = std.heap.page_allocator;
const json = std.json;
const Gltf = @import("gltf.zig").Gltf;
const Program = @import("program.zig").Program;
const gl = @import("webgl.zig");
const wasm = @import("wasm.zig");
const vert_src = @embedFile("shaders/color.vert");
const frag_src = @embedFile("shaders/color.frag");
const gltf_json = @embedFile("assets/buster_drone/scene.gltf");
const gltf_bin = @embedFile("assets/buster_drone/scene.bin");
pub const Game = struct {
program: Program,
vao: c_uint,
vbo: c_uint,
time_location: c_uint,
gltf: Gltf,
pub fn init() !Game {
// Set Viewport
const width = wasm.getCanvasWidth();
const height = wasm.getCanvasHeight();
gl.viewport(0, 0, width, height);
// Create Shader Program
const program = Program.init(vert_src, frag_src);
// Vertex data: two triangles forming a fullscreen quad; each vertex is 5 floats
// (vec2 position followed by a 3-component color attribute)
const vertices = [_]f32{
-1.0, -1.0, 1.0, 0.0, 0.0,
1.0, 1.0, 0.0, 1.0, 0.0,
1.0, -1.0, 0.0, 0.0, 1.0,
-1.0, -1.0, 1.0, 0.0, 0.0,
1.0, 1.0, 0.0, 1.0, 0.0,
-1.0, 1.0, 1.0, 1.0, 1.0,
};
// Create vertex buffer with fullscreen quad
const vao = gl.createVertexArray();
const vbo = gl.createBuffer();
gl.bindVertexArray(vao);
gl.bindBuffer(gl.ARRAY_BUFFER, vbo);
gl.bufferData(gl.ARRAY_BUFFER, @sizeOf(f32) * vertices.len, @ptrCast([*]const u8, &vertices[0]), gl.STATIC_DRAW);
        // Attribute 0: vec2 position at byte offset 0 within the 5-float stride.
        gl.vertexAttribPointer(0, 2, gl.FLOAT, gl.FALSE, 5 * @sizeOf(f32), null);
        gl.enableVertexAttribArray(0);
        // Attribute 1: the remaining 3 floats, starting 2 floats into each vertex.
        gl.vertexAttribPointer(1, 3, gl.FLOAT, gl.FALSE, 5 * @sizeOf(f32), @intToPtr(*c_uint, 2 * @sizeOf(f32)));
        gl.enableVertexAttribArray(1);
// Get uniform locations
const time_location = program.getUniformLocation("time");
// Load gltf from embedded file
const gltf = try Gltf.fromBytes(allocator, gltf_json, (&[_][]const u8 { gltf_bin })[0..]);
return Game{
.program = program,
.vao = vao,
.vbo = vbo,
.time_location = time_location,
.gltf = gltf,
};
}
pub fn deinit(self: *Game) void {
self.gltf.deinit();
}
pub fn frame(self: Game, now_time: c_int) void {
const time = @intToFloat(f32, now_time) / 1000.0;
gl.bindVertexArray(self.vao);
self.program.use();
gl.uniform1f(self.time_location, time);
gl.drawArrays(gl.TRIANGLES, 0, 6);
}
pub fn resize(self: Game, width: c_int, height: c_int) void {
gl.viewport(0, 0, width, height);
}
}; | src/game.zig |
// This uses procfs heavily, and while procfs lives in memory and works,
// maybe it isn't the most scalable solution. We could peek into netlink, but
// that can be done later.
const std = @import("std");
pub const Stats = struct {
    cpu_usage: f64,
    memory_usage: u64,
};
const StatsFile = struct {
utime: u32,
stime: u32,
cstime: u32,
starttime: u32,
};
// TODO port this from musl
//pub extern "c" fn sysconf(name: c_int) c_longdouble;
//pub const _SC_CLK_TCK = 2;
fn readStatsFile(path: []const u8) !StatsFile {
var stat_file = try std.fs.cwd().openFile(path, .{ .read = true, .write = false });
defer stat_file.close();
// TODO check if [512]u8 is what we want, and also see if we really need
// the entire line.
var stat_buffer: [512]u8 = undefined;
const read_bytes = try stat_file.read(&stat_buffer);
const stat_line = stat_buffer[0..read_bytes];
var line_it = std.mem.split(stat_line, " ");
// skip pid, comm
_ = line_it.next();
_ = line_it.next();
// maybe we can use state someday?
_ = line_it.next();
// skip ppid, pgrp, session, tty_nr, tgpid, flags, minflt, cminflt,
// majflt, cmajflt
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
// get utime, stime
// TODO error handling
const utime = line_it.next();
const stime = line_it.next();
const cutime = line_it.next();
const cstime = line_it.next();
// skip priority, nice, num_threads, itrealvalue
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
const starttime = line_it.next();
const vsize = line_it.next().?;
const rss = line_it.next().?;
const rsslim = line_it.next().?;
// skip startcode, endcode, startstack, kstkesp, kstkeip, signal, blocked,
// sigignore, sigcatch, wchan
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
const nswap = line_it.next().?;
const cnswap = line_it.next().?;
// skip exit_signal
_ = line_it.next();
const processor = line_it.next().?;
// skip rt_priority, policy, delayacct_blkio_ticks
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
const guest_time = line_it.next().?;
const cguest_time = line_it.next().?;
// skip start_data, end_data, start_brk, arg_start, arg_end, env_start
// env_end, exit_code
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
_ = line_it.next();
return StatsFile{
.utime = try std.fmt.parseInt(u32, utime.?, 10),
.stime = try std.fmt.parseInt(u32, stime.?, 10),
.cstime = try std.fmt.parseInt(u32, cstime.?, 10),
.starttime = try std.fmt.parseInt(u32, starttime.?, 10),
};
}
fn fetchUptime() !u64 {
var uptime_file = try std.fs.cwd().openFile("/proc/uptime", .{ .read = true, .write = false });
defer uptime_file.close();
var uptime_buffer: [64]u8 = undefined;
const uptime_str = try uptime_file.inStream().readUntilDelimiterOrEof(&uptime_buffer, ' ');
const uptime_float = try std.fmt.parseFloat(f64, uptime_str.?);
return @floatToInt(u64, uptime_float);
}
/// Fetch total memory in kilobytes of the system.
fn fetchTotalMemory() !u64 {
var meminfo_file = try std.fs.cwd().openFile("/proc/meminfo", .{ .read = true, .write = false });
defer meminfo_file.close();
while (true) {
var line_buffer: [128]u8 = undefined;
const line_opt = try meminfo_file.inStream().readUntilDelimiterOrEof(&line_buffer, '\n');
if (line_opt) |line| {
var it = std.mem.tokenize(line, " ");
const line_header = it.next().?;
if (!std.mem.eql(u8, line_header, "MemTotal:")) {
continue;
}
const mem_total_str = it.next().?;
return try std.fmt.parseInt(u64, mem_total_str, 10);
} else {
// reached eof
break;
}
}
unreachable;
}
pub const StatmFile = struct {
resident: u64,
data_and_stack: u64,
};
fn readStatmFile(statm_path: []const u8) !StatmFile {
var statm = try std.fs.cwd().openFile(statm_path, .{ .read = true, .write = false });
defer statm.close();
// TODO check if [512]u8 is what we want
var statm_buffer: [512]u8 = undefined;
const read_bytes = try statm.read(&statm_buffer);
const statm_line = statm_buffer[0..read_bytes];
var it = std.mem.split(statm_line, " ");
_ = it.next();
const rss_str = it.next().?;
const resident = try std.fmt.parseInt(u64, rss_str, 10);
_ = it.next();
_ = it.next();
_ = it.next();
const data_and_stack = try std.fmt.parseInt(u64, it.next().?, 10);
return StatmFile{
.resident = resident,
.data_and_stack = data_and_stack,
};
}
pub const ProcessStats = struct {
clock_ticks: u64,
pub fn init() @This() {
return .{
// TODO: write sysconf(_SC_CLK_TCK). this is hardcoded for my machine
.clock_ticks = 100,
};
}
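    // A worked sketch (numbers are illustrative, not from the original): with
    // clock_ticks = 100, utime + stime + cstime = 5000 ticks (50 s of CPU time),
    // uptime = 1000 s and starttime = 20000 ticks (started at t = 200 s), the
    // process has been alive for 800 s, so fetchCPUStats returns 100 * (50 / 800) = 6.25.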
pub fn fetchCPUStats(self: @This(), pid: std.os.pid_t) !f64 {
// Always refetch uptime on every cpu stat fetch.
const uptime = try fetchUptime();
// pids are usually 5 digits, so 64 bytes leaves plenty of room for the path
var path_buffer: [64]u8 = undefined;
const stat_path = try std.fmt.bufPrint(&path_buffer, "/proc/{}/stat", .{pid});
const stats1 = try readStatsFile(stat_path);
const utime = stats1.utime;
const stime = stats1.stime;
const cstime = stats1.cstime;
const starttime = stats1.starttime;
const total_time = utime + stime + cstime;
const seconds = uptime - (starttime / self.clock_ticks);
return @as(f64, 100) *
((@intToFloat(f64, total_time) / @intToFloat(f64, self.clock_ticks)) / @intToFloat(f64, seconds));
}
pub fn fetchMemoryUsage(self: @This(), pid: std.os.pid_t) !u64 {
var path_buffer: [64]u8 = undefined;
// calculate ram usage
const statm_path = try std.fmt.bufPrint(&path_buffer, "/proc/{}/statm", .{pid});
const statm_data = try readStatmFile(statm_path);
return statm_data.resident + statm_data.data_and_stack;
}
pub fn fetchAllStats(self: @This(), pid: std.os.pid_t) !Stats {
return Stats{
.cpu_usage = try self.fetchCPUStats(pid),
.memory_usage = try self.fetchMemoryUsage(pid),
};
}
}; | src/process_stats.zig |
const std = @import("std");
const mem = std.mem;
const primes = []u64{
0xa0761d6478bd642f, 0xe7037ed1a0b428db,
0x8ebc6af09c88c6e3, 0x589965cc75374cc3,
0x1d8e4e27c47d124f, 0xeb44accab455d165,
};
fn read_bytes(comptime bytes: u8, data: []const u8) u64 {
return mem.readVarInt(u64, data[0..bytes], @import("builtin").endian);
}
fn read_8bytes_swapped(data: []const u8) u64 {
return (read_bytes(4, data) << 32 | read_bytes(4, data[4..]));
}
// Multiply a and b into a 128-bit product and fold the high half into the low
// half, so every input bit influences the 64-bit result.
fn mum(a: u64, b: u64) u64 {
var r: u128 = @intCast(u128, a) * @intCast(u128, b);
r = (r >> 64) ^ r;
return @truncate(u64, r);
}
pub fn hash(key: []const u8, initial_seed: u64) u64 {
const len = key.len;
var seed = initial_seed;
var i: usize = 0;
while (i + 32 <= key.len) : (i += 32) {
seed = mum(seed ^ primes[0],
mum(read_bytes(8, key[i ..]) ^ primes[1],
read_bytes(8, key[i + 8 ..]) ^ primes[2]) ^
mum(read_bytes(8, key[i + 16 ..]) ^ primes[3],
read_bytes(8, key[i + 24 ..]) ^ primes[4]));
}
seed ^= primes[0];
const rem_len = @truncate(u5, len);
if (rem_len != 0) {
const rem_bits = @truncate(u3, rem_len % 8);
const rem_bytes = @truncate(u2, (len - 1) / 8);
const rem_key = key[i + @intCast(usize, rem_bytes) * 8 ..];
const rest = switch (rem_bits) {
0 => read_8bytes_swapped(rem_key),
1 => read_bytes(1, rem_key),
2 => read_bytes(2, rem_key),
3 => read_bytes(2, rem_key) << 8 | read_bytes(1, rem_key[2..]),
4 => read_bytes(4, rem_key),
5 => read_bytes(4, rem_key) << 8 | read_bytes(1, rem_key[4..]),
6 => read_bytes(4, rem_key) << 16 | read_bytes(2, rem_key[4..]),
7 => read_bytes(4, rem_key) << 24 | read_bytes(2, rem_key[4..]) << 8 | read_bytes(1, rem_key[6..]),
} ^ primes[@intCast(usize, rem_bytes) + 1];
seed = switch (rem_bytes) {
0 => mum(seed, rest),
1 => mum(read_8bytes_swapped(key[i ..]) ^ seed, rest),
2 => mum(read_8bytes_swapped(key[i ..]) ^ seed,
read_8bytes_swapped(key[i + 8 ..]) ^ primes[2]) ^
mum(seed, rest),
3 => mum(read_8bytes_swapped(key[i ..]) ^ seed,
read_8bytes_swapped(key[i + 8 ..]) ^ primes[2]) ^
mum(read_8bytes_swapped(key[i + 16 ..]) ^ seed, rest),
};
}
return mum(seed, len ^ primes[5]);
}
pub fn rng(initial_seed: u64) u64 {
var seed = initial_seed +% primes[0];
return mum(seed ^ primes[1], seed);
} | src/main.zig |
const inputFile = @embedFile("./input/day13.txt");
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const Str = []const u8;
const BitSet = std.DynamicBitSet;
const StrMap = std.StringHashMap;
const assert = std.debug.assert;
const tokenize = std.mem.tokenize;
const print = std.debug.print;
const parseInt = std.fmt.parseInt;
fn sort(comptime T: type, items: []T) void {
std.sort.sort(T, items, {}, comptime std.sort.asc(T));
}
fn println(x: Str) void {
print("{s}\n", .{x});
}
const LineDirection = enum { X, Y };
fn run(input: Str, allocator: Allocator, stdout: anytype) !void {
// ============= Step 1: parse the grid to get max row and col ================
const rowsAndCols = blk: {
// Wait until Zig has multiple return: https://github.com/ziglang/zig/issues/4335
var nCols: usize = 0;
var nRows: usize = 0;
var it = tokenize(u8, input, "\n");
while (it.next()) |line| {
if (line[0] == 'f') {
break :blk .{ nRows, nCols };
} else if (std.mem.indexOfScalar(u8, line, ',')) |commaPos| {
const col = try std.fmt.parseInt(usize, line[0..commaPos], 10);
const row = try parseInt(usize, line[commaPos + 1 .. line.len], 10);
if (col >= nCols) {
nCols = col + 1;
}
if (row >= nRows) {
nRows = row + 1;
}
} else {
unreachable;
}
}
unreachable;
};
var nRows = rowsAndCols[0];
var nCols = rowsAndCols[1];
const originalNCols = rowsAndCols[1];
// ============= Step 2: Parse the grid to get the dots ================
var dots = try BitSet.initEmpty(allocator, nRows * nCols);
defer dots.deinit(); // not strictly needed: the arena allocator frees this anyway
var p1: usize = 0;
var it = tokenize(u8, input, "\n");
while (it.next()) |line| {
if (line[0] != 'f') {
const commaPos = std.mem.indexOfScalar(u8, line, ',').?;
const col = try parseInt(usize, line[0..commaPos], 10);
const row = try parseInt(usize, line[commaPos + 1 .. line.len], 10);
dots.set(row * nCols + col);
} else {
// try printGrid(stdout, dots, nCols, nRows, originalNCols);
// ============= Step 3: Process each fold in turn ================
const foldAlongLen = "fold along ".len;
const foldDirection: LineDirection = if (line[foldAlongLen] == 'x') .X else .Y;
const lineNum = try parseInt(usize, line[foldAlongLen + 2 .. line.len], 10);
foldGrid(&dots, nCols, nRows, originalNCols, foldDirection, lineNum);
switch (foldDirection) {
.X => nCols = lineNum,
.Y => nRows = lineNum,
}
if (p1 == 0) p1 = countScore(dots, nCols, nRows, originalNCols);
}
}
try stdout.print("Part1: {d}\nPart2:\n\n", .{p1});
try printGrid(stdout, dots, nCols, nRows, originalNCols);
}
pub fn main() !void {
// Standard boilerplate for Aoc problems
const stdout = std.io.getStdOut().writer();
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
var gpaAllocator = gpa.allocator();
defer assert(!gpa.deinit()); // Check for memory leaks
var arena = std.heap.ArenaAllocator.init(gpaAllocator);
defer arena.deinit();
var allocator = arena.allocator(); // use an arena
try run(inputFile, allocator, stdout);
}
/// modifies grid
fn foldGrid(
grid: *BitSet,
nCols: usize,
nRows: usize,
originalNCols: usize,
foldDirection: LineDirection,
foldNum: usize,
) void {
switch (foldDirection) {
.X => {
var row: usize = 0;
while (row < nRows) : (row += 1) {
var col = foldNum + 1;
while (col < nCols) : (col += 1) {
if (grid.isSet(row * originalNCols + col)) {
grid.set(row * originalNCols + (foldNum - (col - foldNum)));
}
}
}
},
.Y => {
var row: usize = foldNum + 1;
while (row < nRows) : (row += 1) {
var col: usize = 0;
while (col < nCols) : (col += 1) {
if (grid.isSet(row * originalNCols + col)) {
grid.set((foldNum - (row - foldNum)) * originalNCols + col);
}
}
}
},
}
}
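// A worked sketch (not part of the original): folding along x = 5 maps every set dot
// with col > 5 onto col 5 - (col - 5), so col 7 lands on col 3 and col 9 on col 1;
// the caller then shrinks nCols to 5, which drops the fold line itself.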
fn countScore(
grid: BitSet,
nCols: usize,
nRows: usize,
originalNCols: usize,
) usize {
var result: usize = 0;
var row: usize = 0;
while (row < nRows) : (row += 1) {
var col: usize = 0;
while (col < nCols) : (col += 1) {
if (grid.isSet(row * originalNCols + col)) result += 1;
}
}
return result;
}
fn printGrid(writer: anytype, grid: BitSet, nCols: usize, nRows: usize, originalNCols: usize) !void {
var row: usize = 0;
while (row < nRows) : (row += 1) {
var col: usize = 0;
while (col < nCols) : (col += 1) {
const x = grid.isSet(row * originalNCols + col);
if (x) {
try writer.print("#", .{});
} else {
try writer.print(".", .{});
}
}
try writer.print("\n", .{});
}
try writer.print("\n", .{});
}
test "Part 1" {
const testInput =
\\6,10
\\0,14
\\9,10
\\0,3
\\10,4
\\4,11
\\6,0
\\6,12
\\4,1
\\0,13
\\10,12
\\3,4
\\3,0
\\8,4
\\1,10
\\2,14
\\8,10
\\9,0
\\
\\fold along y=7
\\fold along x=5
\\
;
try run(testInput, std.testing.allocator, std.io.getStdOut().writer());
} | src/day13.zig |
const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const bits = @import("./bits.zig");
pub const LZ_WND_SIZE = 32768;
const HASH_SIZE = 15;
const NO_POS = math.maxInt(usize);
const MIN_LEN = 4;
// Output a literal byte at dst_pos in dst.
pub inline fn lz77_output_lit(dst: [*]u8, dst_pos: usize, lit: u8) void {
dst[dst_pos] = lit;
}
// Output the (dist,len) back reference at dst_pos in dst.
pub inline fn lz77_output_backref(dst: [*]u8, dst_pos: usize, dist: usize, len: usize) void {
var i: usize = 0;
var pos: usize = dst_pos;
assert(dist <= pos); // "cannot reference before beginning of dst"
while (i < len) : (i += 1) {
dst[pos] = dst[pos - dist];
pos += 1;
}
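// A minimal sketch (not part of the original tests): a self-overlapping back
// reference with dist 3 and len 8 keeps re-reading the bytes it has just written,
// expanding "HA " into "HA HA HA".
test "lz77_output_backref with self-overlap" {
    var buf = [_]u8{0} ** 17;
    mem.copy(u8, buf[0..9], "CATS: HA ");
    lz77_output_backref(&buf, 9, 3, 8);
    assert(mem.eql(u8, buf[0..], "CATS: HA HA HA HA"));
}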
}
// Output the (dist,len) backref at dst_pos in dst using 64-bit wide writes.
// There must be enough room for len bytes rounded to the next multiple of 8.
pub inline fn lz77_output_backref64(dst: [*]u8, dst_pos: usize, dist: usize, len: usize) void {
var i: usize = 0;
var tmp: [8]u8 = undefined;
assert(len > 0);
assert(dist <= dst_pos); // "cannot reference before beginning of dst"
if (len > dist) {
// Self-overlapping backref; fall back to byte-by-byte copy.
lz77_output_backref(dst, dst_pos, dist, len);
return;
}
while (i < len) : (i += 8) {
var ref_pos = dst_pos - dist + i;
var backref_pos = dst_pos + i;
mem.copy(u8, tmp[0..8], dst[ref_pos .. ref_pos + 8]);
mem.copy(u8, dst[backref_pos .. backref_pos + 8], tmp[0..8]);
}
}
// Compare the substrings starting at src[i] and src[j], and return the length
// of the common prefix if it is strictly longer than prev_match_len
// and shorter or equal to max_match_len, otherwise return zero.
pub fn cmp(
src: [*]const u8,
i: usize,
j: usize,
prev_match_len: usize,
max_match_len: usize,
) usize {
var l: usize = 0;
assert(prev_match_len < max_match_len);
// Check whether the first prev_match_len + 1 characters match. Do this
// backwards for a higher chance of finding a mismatch quickly.
while (l < prev_match_len + 1) : (l += 1) {
if (src[i + prev_match_len - l] != src[j + prev_match_len - l]) {
return 0;
}
}
assert(l == prev_match_len + 1);
// Now check how long the full match is.
while (l < max_match_len) : (l += 1) {
if (src[i + l] != src[j + l]) {
break;
}
}
assert(l > prev_match_len);
assert(l <= max_match_len);
assert(mem.eql(u8, src[i .. i + l], src[j .. j + l]));
return l;
}
pub fn min(a: usize, b: usize) usize {
if (a < b) {
return a;
} else {
return b;
}
}
// Find the longest most recent string which matches the string starting
// at src[pos]. The match must be strictly longer than prev_match_len and
// shorter or equal to max_match_len. Returns the length of the match if found
// and stores the match position in *match_pos, otherwise returns zero.
pub fn find_match(
src: [*]const u8,
pos: usize,
hash: u32,
max_dist: usize,
prev_match_len: usize,
max_match_len: usize,
allow_overlap: bool,
head: *[1 << HASH_SIZE]usize,
prev: *[LZ_WND_SIZE]usize,
match_pos: *usize,
) usize {
var max_match_steps: usize = 4096;
var i: usize = undefined;
var l: usize = undefined;
var found: bool = undefined;
var max_cmp: usize = undefined;
var prev_match_len_result: usize = prev_match_len;
if (prev_match_len_result == 0) {
// We want backrefs of length MIN_LEN or longer.
prev_match_len_result = MIN_LEN - 1;
}
if (prev_match_len_result >= max_match_len) {
// A longer match would be too long.
return 0;
}
if (prev_match_len_result >= 32) {
// Do not try too hard if there is already a good match.
max_match_steps /= 4;
}
found = false;
i = head[hash];
max_cmp = max_match_len;
// Walk the linked list of prefix positions.
while (i != NO_POS) : (i = prev[i % LZ_WND_SIZE]) {
if (max_match_steps == 0) {
break;
}
max_match_steps -= 1;
assert(i < pos); // "Matches should precede pos."
if (pos - i > max_dist) {
// The match is too far back.
break;
}
if (!allow_overlap) {
max_cmp = min(max_match_len, pos - i);
if (max_cmp <= prev_match_len_result) {
continue;
}
}
l = cmp(src, i, pos, prev_match_len_result, max_cmp);
if (l != 0) {
assert(l > prev_match_len_result);
assert(l <= max_match_len);
found = true;
match_pos.* = i;
prev_match_len_result = l;
if (l == max_match_len) {
// A longer match is not possible.
return l;
}
}
}
if (!found) {
return 0;
}
return prev_match_len_result;
}
// Compute a hash value based on four bytes pointed to by ptr.
pub fn hash4(ptr: [*]const u8) u32 {
assert(HASH_SIZE >= 0 and HASH_SIZE <= 32);
const HASH_MUL: u32 = 2654435761;
// Knuth's multiplicative hash.
var mult: u32 = undefined;
_ = @mulWithOverflow(u32, bits.read32le(ptr), HASH_MUL, &mult);
return mult >> (32 - HASH_SIZE);
}
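// A small sketch (not part of the original tests): hash4 always produces a valid
// index into the head table of 1 << HASH_SIZE entries, which insert_hash relies on.
test "hash4 fits the hash table" {
    assert(hash4("abcd") < (1 << HASH_SIZE));
}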
pub fn insert_hash(hash: u32, pos: usize, head: *[1 << HASH_SIZE]usize, prev: *[LZ_WND_SIZE]usize) void {
assert(pos != NO_POS); // "Invalid pos!"
prev[pos % LZ_WND_SIZE] = head[hash];
head[hash] = pos;
}
// Perform LZ77 compression on the src_len bytes in src, with back references
// limited to a certain maximum distance and length, and with or without
// self-overlap. Returns false as soon as either of the callback functions
// returns false, otherwise returns true when all bytes have been processed.
//
// A back reference is a tuple of a distance and a length; it refers to a part of the
// previously produced content.
// The distance is the number of bytes between the start of the referenced content and the start of
// the back reference.
// The length is the size in bytes of the referenced content.
// Self-overlap means that the referenced content overlaps the back reference itself: while the
// back reference is being expanded, it reads bytes that it has just written.
//
// Example with no self-overlap:
//
// ┌ back reference position
// ┌ referenced content │
// ┌3┐ ╵
// CATS: YOU HAVE NO CHANCE TO SURVIVE MAKE YOUR TIME.
// └3┘ (length)
// <───────────────35─────────────────┘ (distance)
//
// ┌ back reference
// ┌──┴──┐
// CATS: YOU HAVE NO CHANCE TO SURVIVE MAKE (35, 3)R TIME.
//
// Example with self-overlap:
//
// ┌ back reference position
// │
// │ ┌─── referenced content
// ┌──┼─8──┐
// CATS: HA HA HA HA ....
// └───8───┘ (length)
// <─3┘ (distance)
//
// ┌ back reference
// ┌──┴─┐
// CATS: HA (3, 8)....
//
pub fn lz77_compress(
src: [*]const u8,
src_len: usize,
max_dist: usize,
max_len: usize,
allow_overlap: bool,
comptime lit_callback: fn (lit: u8, aux: anytype) bool,
comptime backref_callback: fn (dist: usize, len: usize, aux: anytype) bool,
aux: anytype,
) bool {
    var head: [1 << HASH_SIZE]usize = undefined; // maps the hash value of a four-byte prefix to a position in the input data
var prev: [LZ_WND_SIZE]usize = undefined; // maps a position to the previous position with the same hash value
var i: usize = 0;
var h: u32 = undefined;
var dist: usize = undefined;
var match_len: usize = undefined;
var match_pos: usize = undefined;
var prev_match_len: usize = undefined;
var prev_match_pos: usize = undefined;
// Initialize the hash table.
for (head) |_, hi| {
head[hi] = NO_POS;
}
prev_match_len = 0;
prev_match_pos = NO_POS;
while (i + MIN_LEN - 1 < src_len) : (i += 1) {
// Search for a match using the hash table.
h = hash4(src[i .. i + 4].ptr);
match_len = find_match(src, i, h, max_dist, prev_match_len, min(max_len, src_len - i), allow_overlap, &head, &prev, &match_pos);
// Insert the current hash for future searches.
insert_hash(h, i, &head, &prev);
// If the previous match is at least as good as the current.
if (prev_match_len != 0 and prev_match_len >= match_len) {
// Output the previous match.
dist = (i - 1) - prev_match_pos;
if (!backref_callback(dist, prev_match_len, aux)) {
return false;
}
// Move past the match.
{
var j: usize = i + 1;
//for (j = i + 1; j < min((i - 1) + prev_match_len, src_len - (MIN_LEN - 1)); j++) {
while (j < min((i - 1) + prev_match_len, src_len - (MIN_LEN - 1))) : (j += 1) {
h = hash4(src[j .. j + 4].ptr);
insert_hash(h, j, &head, &prev);
}
}
i = (i - 1) + prev_match_len - 1;
prev_match_len = 0;
continue;
}
// If no match (and no previous match), output literal.
if (match_len == 0) {
assert(prev_match_len == 0);
if (!lit_callback(src[i], aux)) {
return false;
}
continue;
}
// Otherwise the current match is better than the previous.
if (prev_match_len != 0) {
// Output a literal instead of the previous match.
if (!lit_callback(src[i - 1], aux)) {
return false;
}
}
// Defer this match and see if the next is even better.
prev_match_len = match_len;
prev_match_pos = match_pos;
}
// Output any previous match.
if (prev_match_len != 0) {
dist = (i - 1) - prev_match_pos;
if (!backref_callback(dist, prev_match_len, aux)) {
return false;
}
i = (i - 1) + prev_match_len;
}
// Output any remaining literals.
while (i < src_len) : (i += 1) {
if (!lit_callback(src[i], aux)) {
return false;
}
}
return true;
} | src/lz77.zig |
const std = @import("std");
const testing = std.testing;
const Allocator = std.mem.Allocator;
const io = std.io;
const OutError = io.SliceOutStream.Error;
const InError = io.SliceInStream.Error;
const dns = @import("../dns.zig");
const rdata = dns.rdata;
const Packet = dns.Packet;
test "convert domain string to dns name" {
var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
defer arena.deinit();
const allocator = &arena.allocator;
const domain = "www.google.com";
var name = try dns.Name.fromString(allocator, domain[0..]);
std.debug.assert(name.labels.len == 3);
testing.expect(std.mem.eql(u8, name.labels[0], "www"));
testing.expect(std.mem.eql(u8, name.labels[1], "google"));
testing.expect(std.mem.eql(u8, name.labels[2], "com"));
}
// extracted with 'dig google.com a +noedns'
const TEST_PKT_QUERY = "FEUBIAABAAAAAAAABmdvb2dsZQNjb20AAAEAAQ==";
const TEST_PKT_RESPONSE = "RM2BgAABAAEAAAAABmdvb2dsZQNjb20AAAEAAcAMAAEAAQAAASwABNg6yo4=";
const GOOGLE_COM_LABELS = [_][]const u8{ "google"[0..], "com"[0..] };
test "Packet serialize/deserialize" {
// setup a random id packet
var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
defer arena.deinit();
const allocator = &arena.allocator;
var packet = dns.Packet.init(allocator, ""[0..]);
var r = std.rand.DefaultPrng.init(std.time.timestamp());
const random_id = r.random.int(u16);
packet.header.id = random_id;
// then we'll serialize it under a buffer on the stack,
// deserialize it, and the header.id should be equal to random_id
var buf = try serialTest(allocator, packet);
// deserialize it
var new_packet = try deserialTest(allocator, buf);
testing.expectEqual(new_packet.header.id, packet.header.id);
const fields = [_][]const u8{ "id", "opcode", "qdcount", "ancount" };
var new_header = new_packet.header;
var header = packet.header;
inline for (fields) |field| {
testing.expectEqual(@field(new_header, field), @field(header, field));
}
}
fn decodeBase64(encoded: []const u8) ![]u8 {
var buf: [0x10000]u8 = undefined;
var decoded = buf[0..try std.base64.standard_decoder.calcSize(encoded)];
try std.base64.standard_decoder.decode(decoded, encoded);
return decoded;
}
fn expectGoogleLabels(actual: [][]const u8) void {
for (actual) |label, idx| {
std.testing.expectEqualSlices(u8, label, GOOGLE_COM_LABELS[idx]);
}
}
test "deserialization of original google.com/A" {
var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
defer arena.deinit();
const allocator = &arena.allocator;
var decoded = try decodeBase64(TEST_PKT_QUERY[0..]);
var pkt = try deserialTest(allocator, decoded);
std.debug.assert(pkt.header.id == 5189);
std.debug.assert(pkt.header.qdcount == 1);
std.debug.assert(pkt.header.ancount == 0);
std.debug.assert(pkt.header.nscount == 0);
std.debug.assert(pkt.header.arcount == 0);
const question = pkt.questions.at(0);
expectGoogleLabels(question.qname.labels);
std.testing.expectEqual(question.qtype, dns.Type.A);
std.testing.expectEqual(question.qclass, dns.Class.IN);
}
test "deserialization of reply google.com/A" {
var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
defer arena.deinit();
const allocator = &arena.allocator;
var decoded = try decodeBase64(TEST_PKT_RESPONSE[0..]);
var pkt = try deserialTest(allocator, decoded);
std.debug.assert(pkt.header.qdcount == 1);
std.debug.assert(pkt.header.ancount == 1);
std.debug.assert(pkt.header.nscount == 0);
std.debug.assert(pkt.header.arcount == 0);
var question = pkt.questions.at(0);
expectGoogleLabels(question.qname.labels);
testing.expectEqual(dns.Type.A, question.qtype);
testing.expectEqual(dns.Class.IN, question.qclass);
var answer = pkt.answers.at(0);
expectGoogleLabels(answer.name.labels);
testing.expectEqual(dns.Type.A, answer.rr_type);
testing.expectEqual(dns.Class.IN, answer.class);
testing.expectEqual(@as(i32, 300), answer.ttl);
var answer_rdata = try rdata.deserializeRData(pkt, answer);
testing.expectEqual(dns.Type.A, @as(dns.Type, answer_rdata));
const addr = @ptrCast(*[4]u8, &answer_rdata.A.in.addr).*;
testing.expectEqual(@as(u8, 216), addr[0]);
testing.expectEqual(@as(u8, 58), addr[1]);
testing.expectEqual(@as(u8, 202), addr[2]);
testing.expectEqual(@as(u8, 142), addr[3]);
}
fn encodeBase64(buffer: []u8, out: []const u8) []const u8 {
var encoded = buffer[0..std.base64.Base64Encoder.calcSize(out.len)];
std.base64.standard_encoder.encode(encoded, out);
return encoded;
}
fn encodePacket(buffer: []u8, pkt: Packet) ![]const u8 {
var out = try serialTest(pkt.allocator, pkt);
return encodeBase64(buffer, out);
}
test "serialization of google.com/A (question)" {
// setup a random id packet
var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
defer arena.deinit();
const allocator = &arena.allocator;
var pkt = dns.Packet.init(allocator, ""[0..]);
pkt.header.id = 5189;
pkt.header.rd = true;
pkt.header.z = 2;
var qname = try dns.Name.fromString(allocator, "google.com");
try pkt.addQuestion(dns.Question{ .qname = qname, .qtype = .A, .qclass = .IN });
var buffer: [128]u8 = undefined;
var encoded = try encodePacket(&buffer, pkt);
testing.expectEqualSlices(u8, encoded, TEST_PKT_QUERY);
}
fn serialTest(allocator: *Allocator, packet: Packet) ![]u8 {
var buf = try allocator.alloc(u8, packet.size());
var out = io.SliceOutStream.init(buf);
var out_stream = &out.stream;
var serializer = io.Serializer(.Big, .Bit, OutError).init(out_stream);
try serializer.serialize(packet);
try serializer.flush();
return buf;
}
fn deserialTest(allocator: *Allocator, buf: []u8) !Packet {
var in = io.SliceInStream.init(buf);
var stream = &in.stream;
var deserializer = dns.DNSDeserializer.init(stream);
var pkt = Packet.init(allocator, buf);
try deserializer.deserializeInto(&pkt);
return pkt;
}
test "convert string to dns type" {
var parsed = try dns.Type.fromStr("AAAA");
testing.expectEqual(dns.Type.AAAA, parsed);
}
test "size() methods are good" {
var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
defer arena.deinit();
const allocator = &arena.allocator;
var name = try dns.Name.fromString(allocator, "example.com");
// length + data + length + data + null
testing.expectEqual(@as(usize, 1 + 7 + 1 + 3 + 1), name.size());
var resource = dns.Resource{
.name = name,
.rr_type = .A,
.class = .IN,
.ttl = 300,
.opaque_rdata = "",
};
// name + rr (2) + class (2) + ttl (4) + rdlength (2)
testing.expectEqual(@as(usize, name.size() + 10 + resource.opaque_rdata.len), resource.size());
}
// This is a known packet generated by zigdig. It would be welcome to have it
// tested in other libraries.
const SERIALIZED_PKT = "FEUBIAAAAAEAAAAABmdvb2dsZQNjb20AAAEAAQAAASwABAEAAH8=";
test "rdata serialization" {
var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
defer arena.deinit();
const allocator = &arena.allocator;
var pkt = dns.Packet.init(allocator, ""[0..]);
pkt.header.id = 5189;
pkt.header.rd = true;
pkt.header.z = 2;
var name = try dns.Name.fromString(allocator, "google.com");
var pkt_rdata = dns.rdata.DNSRData{
.A = try std.net.Address.parseIp4("127.0.0.1", 0),
};
var rdata_buffer = try allocator.alloc(u8, 0x10000);
var opaque_rdata = rdata_buffer[0..pkt_rdata.size()];
var out = io.SliceOutStream.init(rdata_buffer);
var out_stream = &out.stream;
var serializer = io.Serializer(.Big, .Bit, OutError).init(out_stream);
try rdata.serializeRData(pkt_rdata, &serializer);
try pkt.addAnswer(dns.Resource{
.name = name,
.rr_type = .A,
.class = .IN,
.ttl = 300,
.opaque_rdata = opaque_rdata,
});
var buffer: [128]u8 = undefined;
var res = try encodePacket(&buffer, pkt);
testing.expectEqualSlices(u8, res, SERIALIZED_PKT);
} | src/pkg/dns/test.zig |
const std = @import("std");
const Arena = std.heap.ArenaAllocator;
const expectEqual = std.testing.expectEqual;
const expectEqualStrings = std.testing.expectEqualStrings;
const ecs = @import("ecs.zig");
const ECS = ecs.ECS;
const Entity = ecs.Entity;
const Strings = @import("strings.zig").Strings;
const components = @import("components.zig");
const query = @import("query.zig");
const literalOf = query.literalOf;
const typeOf = query.typeOf;
pub fn initCodebase(arena: *Arena) !*ECS {
const codebase = try arena.allocator().create(ECS);
codebase.* = ECS.init(arena);
try codebase.set(.{Strings.init(arena)});
try initBuiltins(codebase);
return codebase;
}
fn builtinType(codebase: *ECS, scope: *components.Scope, symbol: []const u8, Type: Entity, bytes: i32) !Entity {
const size = components.Size{ .bytes = bytes };
const interned = try codebase.getPtr(Strings).intern(symbol);
const entity = try codebase.createEntity(.{
components.Literal.init(interned),
components.Type.init(Type),
size,
});
try scope.putInterned(interned, entity);
return entity;
}
pub fn initBuiltins(codebase: *ECS) !void {
const allocator = codebase.arena.allocator();
var scope = components.Scope.init(allocator, codebase.getPtr(Strings));
const interned = try codebase.getPtr(Strings).intern("type");
const Type = try codebase.createEntity(.{
components.Literal.init(interned),
});
try scope.putInterned(interned, Type);
_ = try Type.set(.{components.Type.init(Type)});
const Module = try builtinType(codebase, &scope, "module", Type, 0);
const I64 = try builtinType(codebase, &scope, "i64", Type, 8);
const I32 = try builtinType(codebase, &scope, "i32", Type, 4);
const I16 = try builtinType(codebase, &scope, "i16", Type, 2);
const I8 = try builtinType(codebase, &scope, "i8", Type, 1);
const U64 = try builtinType(codebase, &scope, "u64", Type, 8);
const U32 = try builtinType(codebase, &scope, "u32", Type, 4);
const U16 = try builtinType(codebase, &scope, "u16", Type, 2);
const U8 = try builtinType(codebase, &scope, "u8", Type, 1);
const F64 = try builtinType(codebase, &scope, "f64", Type, 8);
const F32 = try builtinType(codebase, &scope, "f32", Type, 4);
const IntLiteral = try builtinType(codebase, &scope, "IntLiteral", Type, 0);
const FloatLiteral = try builtinType(codebase, &scope, "FloatLiteral", Type, 0);
const Void = try builtinType(codebase, &scope, "void", Type, 0);
const I64X2 = try builtinType(codebase, &scope, "i64x2", Type, 16);
const I32X4 = try builtinType(codebase, &scope, "i32x4", Type, 16);
const I16X8 = try builtinType(codebase, &scope, "i16x8", Type, 16);
const I8X16 = try builtinType(codebase, &scope, "i8x16", Type, 16);
const U64X2 = try builtinType(codebase, &scope, "u64x2", Type, 16);
const U32X4 = try builtinType(codebase, &scope, "u32x4", Type, 16);
const U16X8 = try builtinType(codebase, &scope, "u16x8", Type, 16);
const U8X16 = try builtinType(codebase, &scope, "u8x16", Type, 16);
const F64X2 = try builtinType(codebase, &scope, "f64x2", Type, 16);
const F32X4 = try builtinType(codebase, &scope, "f32x4", Type, 16);
// TODO: p32 is a function from type to type, not a type itself
// type_of(p32) == Fn(T: type): type
// type_of(p32(i64)) == type
const Ptr = try builtinType(codebase, &scope, "ptr", Type, 4);
_ = try Ptr.set(.{components.Memoized.init(allocator)});
const Array = try builtinType(codebase, &scope, "array", Type, 4);
_ = try Array.set(.{components.Memoized.init(allocator)});
const Range = try builtinType(codebase, &scope, "Range", Type, 0);
_ = try Range.set(.{components.Memoized.init(allocator)});
// TODO: cast is a function from type, and value to value of new type
// type_of(cast) == Fn(T: type, value: U): T
// type_of(cast(i64, 5)) == i64
const Cast = try builtinType(codebase, &scope, "cast", Type, 0);
const builtins = components.Builtins{
.Type = Type,
.Module = Module,
.I64 = I64,
.I32 = I32,
.I16 = I16,
.I8 = I8,
.U64 = U64,
.U32 = U32,
.U16 = U16,
.U8 = U8,
.F64 = F64,
.F32 = F32,
.Void = Void,
.Ptr = Ptr,
.Array = Array,
.Range = Range,
.IntLiteral = IntLiteral,
.FloatLiteral = FloatLiteral,
.I64X2 = I64X2,
.I32X4 = I32X4,
.I16X8 = I16X8,
.I8X16 = I8X16,
.U64X2 = U64X2,
.U32X4 = U32X4,
.U16X8 = U16X8,
.U8X16 = U8X16,
.F64X2 = F64X2,
.F32X4 = F32X4,
.Cast = Cast,
};
try codebase.set(.{ builtins, scope });
} | src/init_codebase.zig |
align allowzero and anyframe asm async await break
callconv catch comptime const continue defer else
enum errdefer error export extern fn for if
inline noalias orelse or packed promise pub resume
return linksection struct suspend switch test
threadlocal try union unreachable usingnamespace var
volatile while
// special words
true false null undefined
// built-in functons
@addWithOverflow @alignCast @alignOf @as @asyncCall
@atomicLoad @atomicRmw @atomicStore @bitCast @bitOffsetOf
@bitReverse @bitSizeOf @boolToInt @breakpoint
@byteOffsetOf @byteSwap @call @cDefine @ceil @cImport
@cInclude @clz @cmpxchgStrong @cmpxchgWeak @compileError
@compileLog @cos @ctz @cUndef @divExact @divFloor
@divTrunc @embedFile @enumToInt @errorName
@errorReturnTrace @errorToInt @errSetCast @exp2 @export
@exp @fabs @fence @fieldParentPtr @field @floatCast
@floatToInt @floor @frameAddress @frameSize @frame
@Frame @hasDecl @hasField @import @intCast @intToEnum
@intToError @intToFloat @intToPtr @log10 @log2 @log
@memcpy @memset @mod @mulAdd @mulWithOverflow @OpaqueType
@panic @popCount @ptrCast @ptrToInt @rem @returnAddress
@round @setAlignStack @setCold @setEvalBranchQuota
@setFloatMode @setRuntimeSafety @shlExact @shlWithOverflow
@shrExact @shuffle @sin @sizeOf @splat @sqrt
@subWithOverflow @tagName @TagType @This @truncate
@trunc @typeInfo @typeName @TypeOf @Type @unionInit
@Vector
// types
anyerror bool c_int c_longdouble c_longlong c_long
comptime_float comptime_int c_short c_uint c_ulonglong
c_ulong c_ushort c_void f128 f16 f32 f64 isize
noreturn type usize void
// arbitrary bit precision from 0 to 65535
i0 i1 i2 i3 ... i8 i16 i32 i64 i128 ... i65535
u0 u1 u2 u3 ... u8 u16 u32 u64 u128 ... u65535
// hello.zig
const std = @import("std");
pub fn main() !void {
const stdout = &std.io.getStdOut().outStream().stream;
try stdout.print("Hello, {}!\n", .{"world"});
}
// values
const assert = @import("std").debug.assert;
const std = @import("std");
// exported value
export const Unit = enum(u32) {
Hundred = 100,
Thousand = 1000,
Million = 1000000,
};
pub fn main() void {
// integer literals
const decimal_int = 98222;
const hex_int = 0xff;
const another_hex_int = 0xFF;
const octal_int = 0o755;
const binary_int = 0b11110000;
// floating-point literals
const floating_point = 123.0E+77;
const another_float = 123.0;
const yet_another = 123.0e+77;
const hex_floating_point = 0x103.70p-5;
const another_hex_float = 0x103.70;
const yet_another_hex_float = 0x103.70P-5;
// boolean
const t = true;
const f = false;
// string literals
const bytes = "hello";
assert(@TypeOf(bytes) == *const [5:0]u8);
assert(bytes.len == 5);
assert(bytes[1] == 'e');
assert(bytes[5] == 0);
assert('e' == '\x65');
assert('\u{1f4a9}' == 128169);
assert('💯' == 128175);
assert(mem.eql(u8, "hello", "h\x65llo"));
// multi-line string literals
const hello_world_in_c =
\\#include <stdio.h>
\\
\\int main(int argc, char **argv) {
\\ printf("hello world\n");
\\ return 0;
\\}
;
// array literal
const message = [_]u8{ 'h', 'e', 'l', 'l', 'o' };
// optional
var optional_value: ?[]const u8 = null;
// error union
var number_or_error: anyerror!i32 = error.ArgNotFound;
// anonymous struct literal
const Point = struct {x: i32, y: i32};
var pt: Point = .{
.x = 13,
.y = 67,
};
// enums
const Suit = enum {
Clubs,
Spades,
Diamonds,
Hearts,
pub fn isClubs(self: Suit) bool {
return self == Suit.Clubs;
}
};
const Number = packed enum(u8) {
one,
two,
three,
};
// anonymous union literals
const Number = union {
int: i32,
float: f64,
};
// switches
const a: u64 = 10;
const zz: u64 = 103;
const b = switch (a) {
1, 2, 3 => 0,
5...100 => 1,
101 => blk: {
const c: u64 = 5;
break :blk c * 2 + 1;
},
zz => zz,
comptime blk: {
const d: u32 = 5;
const e: u32 = 100;
break :blk d + e;
} => 107,
else => 9,
};
// for
const items = [_]i32 { 4, 5, 3, 4, 0 };
var sum: i32 = 0;
for (items) |value| {
if (value == 0) {
continue;
}
sum += value;
}
assert(sum == 16);
for (items[0..1]) |value| {
sum += value;
}
assert(sum == 20);
var sum2: i32 = 0;
for (items) |value, i| {
assert(@TypeOf(i) == usize);
sum2 += @intCast(i32, i);
}
} | bundles/zig/misc/example.zig |
const std = @import("std");
const Context = @import("Context.zig");
const Token = @import("Token.zig");
allocator: std.mem.Allocator,
ctx: Context,
offset: ?u16 = null,
const Lexer = @This();
// API
pub fn lex(self: *Lexer) !std.MultiArrayList(Token) {
var tokens = std.MultiArrayList(Token){};
errdefer tokens.deinit(self.allocator);
var after_newline = false;
while (try self.next()) |token| {
if (token.is(.punct_newline)) {
after_newline = true;
} else {
            if (after_newline) {
                // Terminate the previous line so a leading `(` or `[` is not
                // parsed as a call or index into the previous expression.
                if (token.is(.punct_lparen) or token.is(.punct_lbracket)) try tokens.append(self.allocator, self.oneChar(.punct_semicolon));
                after_newline = false;
            }
try tokens.append(self.allocator, token);
}
}
return tokens;
}
fn next(self: *Lexer) !?Token {
    if (self.skipWhitespace()) |byte| {
if (isIdentStart(byte)) return self.lexIdent();
if (isNumStart(byte)) return self.lexNumber(byte);
return switch (byte) {
'#' => self.lexComment(),
'\n' => self.lexNewline(),
'"' => try self.lexString(),
'`' => try self.lexRawStr(),
'@' => try self.lexGlobal(),
':',
'!',
'<',
'>',
'=',
'~',
'*',
'/',
'%',
'.',
'?',
=> self.lexOp(byte),
';' => self.oneChar(.punct_semicolon),
'{' => self.oneChar(.punct_lbrace),
'}' => self.oneChar(.punct_rbrace),
',' => self.oneChar(.punct_comma),
'$' => self.oneChar(.punct_dollar),
'(' => self.oneChar(.punct_lparen),
')' => self.oneChar(.punct_rparen),
'[' => self.oneChar(.punct_lbracket),
']' => self.oneChar(.punct_rbracket),
'|' => self.oneChar(.punct_pipe),
else => self.ctx.err("Unknown byte {c}.", .{byte}, error.UnknownByte, self.offset.?),
};
}
return null;
}
// Lexing
fn lexComment(self: *Lexer) anyerror!?Token {
while (self.advance()) |byte| {
switch (byte) {
'\n', '#' => break,
else => continue,
}
}
return try self.next();
}
fn lexGlobal(self: *Lexer) !Token {
const start = self.offset.?;
if (self.peek()) |peek_byte| {
if (isWhitespace(peek_byte)) return self.oneChar(.punct_at);
if (!isIdentByte(peek_byte)) return self.ctx.err("Invalid global variable.", .{}, error.InvalidGlobal, start);
} else return self.oneChar(.punct_at);
self.run(isIdentByte);
const src = self.ctx.src[start .. self.offset.? + 1];
if (Token.predef.get(src)) |tag| return Token.new(tag, start, src.len);
return Token.new(.ident, start, src.len);
}
fn lexIdent(self: *Lexer) Token {
const start = self.offset.?;
self.run(isIdentByte);
const src = self.ctx.src[start .. self.offset.? + 1];
const tag = if (Token.predef.get(src)) |tag| tag else Token.Tag.ident;
return Token.new(tag, start, src.len);
}
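// Sketch of the special cases below (not part of the original): `+` and `-` are
// dispatched here because isNumStart treats them as possible signs; `++`, `+=`,
// `+>` and `-=` become operators, `-foo` becomes a negation, `-5`/`+5` lex as
// signed int literals, and plain digit sequences lex as uint.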
fn lexNumber(self: *Lexer, byte: u8) Token {
//TODO: Fix 1+1
if ('+' == byte) {
// Plus is special
var token = self.oneChar(.punct_plus);
if (self.peek() == null or self.peekPred(isWhitespace)) return token;
if (self.skipByte('+')) {
token.tag = .op_concat;
token.len = 2;
return token;
} else if (self.skipByte('=')) {
token.tag = .op_add_eq;
token.len = 2;
return token;
} else if (self.skipByte('>')) {
token.tag = .op_redir_append;
token.len = 2;
return token;
}
}
if ('-' == byte) {
// Minus is special too
var token = self.oneChar(.punct_minus);
if (self.peek() == null or self.peekPred(isWhitespace)) return token;
if (self.peekPred(isIdentStart)) {
token.tag = .op_neg;
return token;
}
if (self.skipByte('=')) {
token.tag = .op_sub_eq;
token.len = 2;
return token;
}
}
const start = self.offset.?;
while (self.peek()) |peek_byte| {
const current_byte = self.ctx.src[self.offset.?];
if ('.' == peek_byte and self.peekNIs(2, '.')) break;
if (('+' == peek_byte or '-' == peek_byte) and
('e' != current_byte and 'p' != current_byte)) break;
if (!isNumeric(peek_byte)) break;
_ = self.advance();
}
const src = self.ctx.src[start .. self.offset.? + 1];
var token = Token.new(.uint, start, src.len);
if (src.len > 2) {
const is_float = for (src[1..]) |num_byte| {
if (isFloatOnly(num_byte)) break true;
} else false;
if (is_float) {
token.tag = .float;
return token;
}
}
if ('-' == src[0] or '+' == src[0]) token.tag = .int;
return token;
}
fn lexNewline(self: *Lexer) Token {
const token = self.oneChar(.punct_newline);
self.run(isNewline);
return token;
}
fn lexCombineAssing(self: *Lexer, base: Token.Tag, with_eq: Token.Tag) Token {
var token = self.oneChar(base);
if (self.skipByte('=')) {
token.tag = with_eq;
token.len = 2;
}
return token;
}
fn lexOp(self: *Lexer, byte: u8) Token {
return switch (byte) {
':' => self.lexCombineAssing(.punct_colon, .op_define),
'=' => op: {
if (self.skipByte('>')) break :op self.twoChar(.punct_fat_rarrow);
break :op self.lexCombineAssing(.punct_equals, .op_eq);
},
'!' => op: {
var token = self.oneChar(.punct_bang);
if (self.skipByte('=')) {
token.tag = .op_neq;
token.len = 2;
} else if (self.skipByte('~')) {
token.tag = .op_nomatch;
token.len = 2;
} else if (self.skipByte('>')) {
token.tag = .op_redir_clobber;
token.len = 2;
}
break :op token;
},
'<' => self.lexCombineAssing(.punct_lt, .op_lte),
'>' => self.lexCombineAssing(.punct_gt, .op_gte),
'~' => op: {
var token = self.oneChar(.op_match);
if (self.skipByte('@')) {
token.tag = .op_matcher;
token.len = 2;
}
break :op token;
},
'*' => op: {
if (self.skipByte('*')) break :op self.twoChar(.op_repeat);
break :op self.lexCombineAssing(.punct_star, .op_mul_eq);
},
'/' => self.lexCombineAssing(.punct_slash, .op_div_eq),
'%' => self.lexCombineAssing(.punct_percent, .op_mod_eq),
'.' => op: {
var token = self.oneChar(.punct_dot);
if (self.peekStr(".<")) {
self.skipN(2);
token.tag = .op_range_ex;
token.len = 3;
} else if (self.peekStr(".=")) {
self.skipN(2);
token.tag = .op_range_in;
token.len = 3;
}
break :op token;
},
'?' => op: {
var token = self.oneChar(.punct_question);
if (self.skipByte(':')) {
token.tag = .op_elvis;
token.len = 2;
} else if (self.skipByte('=')) {
token.tag = .op_elvis_eq;
token.len = 2;
}
break :op token;
},
else => unreachable,
};
}
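// A sketch of the rules below (not part of the original): in `"hi {name}!"` the
// unescaped `{` bumps nest_level, so a `"` inside the interpolation does not end
// the string; `{{` and `}}` are treated as literal braces and `\"` as an escaped quote.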
fn lexString(self: *Lexer) !Token {
const start = self.offset.?;
var nest_level: usize = 0;
var reached_eof = true;
while (self.advance()) |byte| {
if ('{' == byte and self.peekIs('{')) {
_ = self.advance();
} else if ('}' == byte and self.peekIs('}')) {
_ = self.advance();
} else if ('{' == byte and !self.peekIs('{')) {
nest_level += 1;
} else if (nest_level > 0 and '}' == byte and !self.peekIs('}')) {
nest_level -= 1;
} else if ('\\' == byte and self.peekIs('"')) {
_ = self.advance();
} else if ('"' == byte and nest_level == 0) {
reached_eof = false;
break;
}
}
if (reached_eof) return self.ctx.err("Unterminated string.", .{}, error.UnterminatedString, start);
const src = self.ctx.src[start .. self.offset.? + 1];
return Token.new(.string, start, src.len);
}
fn lexRawStr(self: *Lexer) !Token {
const start = self.offset.?;
var reached_eof = true;
while (self.advance()) |byte| {
if ('\\' == byte and self.peekIs('`')) _ = self.advance();
if ('`' == byte) {
reached_eof = false;
break;
}
}
if (reached_eof) return self.ctx.err("Unterminated raw string.", .{}, error.UnterminatedRawString, start);
const src = self.ctx.src[start .. self.offset.? + 1];
return Token.new(.raw_str, start, src.len);
}
// Scanning
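// `offset` is null until the first advance() and afterwards always indexes the
// byte most recently returned; peekN(1) therefore looks at the byte after it.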
fn advance(self: *Lexer) ?u8 {
if (self.ctx.src.len == 0) return null;
if (self.offset) |*offset| {
offset.* += 1;
if (offset.* >= self.ctx.src.len) return null;
} else self.offset = 0;
return self.ctx.src[self.offset.?];
}
fn peekN(self: Lexer, n: u16) ?u8 {
if (self.ctx.src.len == 0) return null;
if (self.offset) |offset| {
        return if (offset + n < self.ctx.src.len) self.ctx.src[offset + n] else null;
} else {
return if (n - 1 < self.ctx.src.len) self.ctx.src[n - 1] else null;
}
}
fn peekNIs(self: Lexer, n: u16, byte: u8) bool {
return if (self.peekN(n)) |peek_byte| peek_byte == byte else false;
}
fn peek(self: Lexer) ?u8 {
return self.peekN(1);
}
fn peekIs(self: Lexer, byte: u8) bool {
return self.peekNIs(1, byte);
}
fn peekPred(self: Lexer, pred: Predicate) bool {
return if (self.peek()) |peek_byte| pred(peek_byte) else false;
}
fn peekStr(self: Lexer, str: []const u8) bool {
if (self.peek() == null) return false;
if (self.offset) |offset| {
return std.mem.startsWith(u8, self.ctx.src[offset + 1 ..], str);
} else {
return std.mem.startsWith(u8, self.ctx.src, str);
}
}
fn skipN(self: *Lexer, n: usize) void {
var i: usize = 0;
while (i < n) : (i += 1) _ = self.advance();
}
fn skipByte(self: *Lexer, byte: u8) bool {
if (self.peekIs(byte)) {
_ = self.advance();
return true;
} else return false;
}
fn run(self: *Lexer, predicate: Predicate) void {
while (self.peek()) |peek_byte| {
if (!predicate(peek_byte)) break;
_ = self.advance();
}
}
fn skipWhitespce(self: *Lexer) ?u8 {
self.run(isWhitespace);
return self.advance();
}
// Predicates
const Predicate = fn (u8) bool;
fn isFloatOnly(byte: u8) bool {
return switch (byte) {
'+',
'-',
'.',
'p',
=> true,
else => false,
};
}
fn isIdentByte(byte: u8) bool {
return switch (byte) {
'_',
'a'...'z',
'A'...'Z',
'0'...'9',
=> true,
else => false,
};
}
fn isIdentStart(byte: u8) bool {
return switch (byte) {
'_',
'a'...'z',
'A'...'Z',
=> true,
else => false,
};
}
fn isNewline(byte: u8) bool {
return '\n' == byte or '\r' == byte;
}
fn isDigit(byte: u8) bool {
return '0' <= byte and byte <= '9';
}
fn isNumeric(byte: u8) bool {
return switch (byte) {
'+',
'-',
'_',
'.',
'o',
'p',
'x',
'a'...'f',
'A'...'F',
'0'...'9',
=> true,
else => false,
};
}
fn isNumStart(byte: u8) bool {
return ('0' <= byte and byte <= '9') or
'+' == byte or
'-' == byte;
}
fn isWhitespace(byte: u8) bool {
return ' ' == byte or '\t' == byte or '\r' == byte;
}
fn requiresSemi(tag: Token.Tag) bool {
return switch (tag) {
.float,
.ident,
.int,
.string,
.uint,
.pd_false,
.pd_nil,
.pd_true,
.punct_rparen,
.punct_rbrace,
.punct_rbracket,
=> true,
else => false,
};
}
// Helpers
fn oneChar(self: Lexer, tag: Token.Tag) Token {
return Token.new(tag, self.offset.?, 1);
}
fn twoChar(self: Lexer, tag: Token.Tag) Token {
return Token.new(tag, self.offset.? - 1, 2);
}
// Tests
fn testLex(allocator: std.mem.Allocator, input: []const u8) !std.MultiArrayList(Token) {
var lexer = Lexer{ .allocator = allocator, .ctx = Context{ .filename = "inline", .src = input } };
return try lexer.lex();
}
fn singleTokenTests(tokens: std.MultiArrayList(Token), tag: Token.Tag, len: u16) !void {
try std.testing.expectEqual(@as(usize, 1), tokens.items(.tag).len);
try std.testing.expectEqual(tag, tokens.items(.tag)[0]);
try std.testing.expectEqual(@as(u16, 0), tokens.items(.offset)[0]);
try std.testing.expectEqual(len, tokens.items(.len)[0]);
}
test "Lex booleans" {
const allocator = std.testing.allocator;
var tokens = try testLex(allocator, "true; false");
defer tokens.deinit(allocator);
try std.testing.expectEqual(@as(usize, 3), tokens.items(.tag).len);
try std.testing.expectEqual(Token.Tag.pd_true, tokens.items(.tag)[0]);
try std.testing.expectEqual(@as(usize, 0), tokens.items(.offset)[0]);
try std.testing.expectEqual(@as(usize, 4), tokens.items(.len)[0]);
try std.testing.expectEqual(Token.Tag.punct_semicolon, tokens.items(.tag)[1]);
try std.testing.expectEqual(Token.Tag.pd_false, tokens.items(.tag)[2]);
try std.testing.expectEqual(@as(usize, 6), tokens.items(.offset)[2]);
try std.testing.expectEqual(@as(usize, 5), tokens.items(.len)[2]);
}
test "Lex auto semicolons" {
const allocator = std.testing.allocator;
var tokens = try testLex(allocator, "}\n[");
errdefer tokens.deinit(allocator);
try std.testing.expectEqual(@as(usize, 3), tokens.items(.tag).len);
try std.testing.expectEqual(Token.Tag.punct_rbrace, tokens.items(.tag)[0]);
try std.testing.expectEqual(Token.Tag.punct_semicolon, tokens.items(.tag)[1]);
try std.testing.expectEqual(Token.Tag.punct_lbracket, tokens.items(.tag)[2]);
tokens.deinit(allocator);
tokens = try testLex(allocator, "}\n(");
try std.testing.expectEqual(@as(usize, 3), tokens.items(.tag).len);
try std.testing.expectEqual(Token.Tag.punct_rbrace, tokens.items(.tag)[0]);
try std.testing.expectEqual(Token.Tag.punct_semicolon, tokens.items(.tag)[1]);
try std.testing.expectEqual(Token.Tag.punct_lparen, tokens.items(.tag)[2]);
tokens.deinit(allocator);
}
test "Lex ident" {
const allocator = std.testing.allocator;
var tokens = try testLex(allocator, "foo");
defer tokens.deinit(allocator);
try singleTokenTests(tokens, .ident, 3);
}
test "Lex float" {
const allocator = std.testing.allocator;
var tokens = try testLex(allocator, "3.14e+00");
defer tokens.deinit(allocator);
try singleTokenTests(tokens, .float, 8);
}
test "Lex 1+1" {
const allocator = std.testing.allocator;
var lexer = Lexer{ .allocator = allocator, .ctx = Context{ .filename = "inline", .src = "1+1" } };
var tokens = try lexer.lex();
defer tokens.deinit(allocator);
try std.testing.expectEqual(@as(usize, 2), tokens.items(.tag).len);
try std.testing.expectEqual(Token.Tag.uint, tokens.items(.tag)[0]);
try std.testing.expectEqual(@as(u16, 0), tokens.items(.offset)[0]);
try std.testing.expectEqual(@as(usize, 1), tokens.items(.len)[0]);
try std.testing.expectEqual(Token.Tag.int, tokens.items(.tag)[1]);
try std.testing.expectEqual(@as(u16, 1), tokens.items(.offset)[1]);
try std.testing.expectEqual(@as(usize, 2), tokens.items(.len)[1]);
}
test "Lex int" {
const allocator = std.testing.allocator;
var tokens = try testLex(allocator, "-314");
defer tokens.deinit(allocator);
try singleTokenTests(tokens, .int, 4);
}
test "Lex uint" {
const allocator = std.testing.allocator;
var tokens = try testLex(allocator, "314");
defer tokens.deinit(allocator);
try singleTokenTests(tokens, .uint, 3);
}
test "Lex comment" {
const allocator = std.testing.allocator;
var tokens = try testLex(allocator, "# foo bar");
defer tokens.deinit(allocator);
try std.testing.expectEqual(@as(usize, 0), tokens.items(.tag).len);
}
test "Lex string" {
const allocator = std.testing.allocator;
var tokens = try testLex(allocator,
\\"foo { "bar" } \" baz"
);
errdefer tokens.deinit(allocator);
try singleTokenTests(tokens, .string, 22);
tokens.deinit(allocator);
tokens = try testLex(allocator,
\\"foo {{ "
);
try singleTokenTests(tokens, .string, 9);
tokens.deinit(allocator);
tokens = try testLex(allocator,
\\"foo }} "
);
try singleTokenTests(tokens, .string, 9);
tokens.deinit(allocator);
tokens = try testLex(allocator,
\\"foo } "
);
try singleTokenTests(tokens, .string, 8);
tokens.deinit(allocator);
//try std.testing.expectError(error.UnterminatedString, testLex(allocator,
// \\"foo { "
//));
}
test "Lex global" {
const allocator = std.testing.allocator;
var tokens = try testLex(allocator, "@file");
defer tokens.deinit(allocator);
try singleTokenTests(tokens, .at_file, 5);
//try std.testing.expectError(error.InvalidGlobal, testLex(allocator,
// \\@+
//));
}
test "Lex ops" {
const allocator = std.testing.allocator;
const tests = [_]struct {
input: []const u8,
tag: Token.Tag,
len: u16,
}{
.{ .input = "+", .tag = .punct_plus, .len = 1 },
.{ .input = "-", .tag = .punct_minus, .len = 1 },
.{ .input = "*", .tag = .punct_star, .len = 1 },
.{ .input = "/", .tag = .punct_slash, .len = 1 },
.{ .input = "%", .tag = .punct_percent, .len = 1 },
.{ .input = "+=", .tag = .op_add_eq, .len = 2 },
.{ .input = "-=", .tag = .op_sub_eq, .len = 2 },
.{ .input = "*=", .tag = .op_mul_eq, .len = 2 },
.{ .input = "/=", .tag = .op_div_eq, .len = 2 },
.{ .input = "%=", .tag = .op_mod_eq, .len = 2 },
.{ .input = "<", .tag = .punct_lt, .len = 1 },
.{ .input = "<=", .tag = .op_lte, .len = 2 },
.{ .input = ">", .tag = .punct_gt, .len = 1 },
.{ .input = ">=", .tag = .op_gte, .len = 2 },
.{ .input = "=", .tag = .punct_equals, .len = 1 },
.{ .input = "==", .tag = .op_eq, .len = 2 },
.{ .input = "!", .tag = .punct_bang, .len = 1 },
.{ .input = "!>", .tag = .op_redir_clobber, .len = 2 },
.{ .input = "+>", .tag = .op_redir_append, .len = 2 },
.{ .input = "!=", .tag = .op_neq, .len = 2 },
.{ .input = ":", .tag = .punct_colon, .len = 1 },
.{ .input = ".", .tag = .punct_dot, .len = 1 },
.{ .input = ":=", .tag = .op_define, .len = 2 },
.{ .input = "~", .tag = .op_match, .len = 1 },
.{ .input = "!~", .tag = .op_nomatch, .len = 2 },
.{ .input = "..<", .tag = .op_range_ex, .len = 3 },
.{ .input = "..=", .tag = .op_range_in, .len = 3 },
.{ .input = "?", .tag = .punct_question, .len = 1 },
.{ .input = "?:", .tag = .op_elvis, .len = 2 },
.{ .input = "?=", .tag = .op_elvis_eq, .len = 2 },
.{ .input = "$", .tag = .punct_dollar, .len = 1 },
.{ .input = ",", .tag = .punct_comma, .len = 1 },
.{ .input = "|", .tag = .punct_pipe, .len = 1 },
.{ .input = "{", .tag = .punct_lbrace, .len = 1 },
.{ .input = "[", .tag = .punct_lbracket, .len = 1 },
.{ .input = "(", .tag = .punct_lparen, .len = 1 },
.{ .input = "}", .tag = .punct_rbrace, .len = 1 },
.{ .input = "]", .tag = .punct_rbracket, .len = 1 },
.{ .input = ")", .tag = .punct_rparen, .len = 1 },
};
for (tests) |t| {
var tokens = try testLex(allocator, t.input);
defer tokens.deinit(allocator);
try singleTokenTests(tokens, t.tag, t.len);
}
}
test "Lex synthetic semicolons" {
const avoid_call =
\\5 + 5
\\(5)
;
const allocator = std.testing.allocator;
var tokens = try testLex(allocator, avoid_call);
defer tokens.deinit(allocator);
try std.testing.expectEqual(@as(usize, 7), tokens.items(.tag).len);
try std.testing.expectEqual(Token.Tag.uint, tokens.items(.tag)[0]);
try std.testing.expectEqual(Token.Tag.punct_plus, tokens.items(.tag)[1]);
try std.testing.expectEqual(Token.Tag.uint, tokens.items(.tag)[2]);
try std.testing.expectEqual(Token.Tag.punct_semicolon, tokens.items(.tag)[3]);
try std.testing.expectEqual(Token.Tag.punct_lparen, tokens.items(.tag)[4]);
try std.testing.expectEqual(Token.Tag.uint, tokens.items(.tag)[5]);
try std.testing.expectEqual(Token.Tag.punct_rparen, tokens.items(.tag)[6]);
tokens.deinit(allocator);
const avoid_subscript =
\\5 + 5
\\[5]
;
tokens = try testLex(allocator, avoid_subscript);
try std.testing.expectEqual(@as(usize, 7), tokens.items(.tag).len);
try std.testing.expectEqual(Token.Tag.uint, tokens.items(.tag)[0]);
try std.testing.expectEqual(Token.Tag.punct_plus, tokens.items(.tag)[1]);
try std.testing.expectEqual(Token.Tag.uint, tokens.items(.tag)[2]);
try std.testing.expectEqual(Token.Tag.punct_semicolon, tokens.items(.tag)[3]);
try std.testing.expectEqual(Token.Tag.punct_lbracket, tokens.items(.tag)[4]);
try std.testing.expectEqual(Token.Tag.uint, tokens.items(.tag)[5]);
try std.testing.expectEqual(Token.Tag.punct_rbracket, tokens.items(.tag)[6]);
tokens.deinit(allocator);
const no_semi_needed =
\\5 + 5
\\- 5
;
tokens = try testLex(allocator, no_semi_needed);
try std.testing.expectEqual(@as(usize, 5), tokens.items(.tag).len);
try std.testing.expectEqual(Token.Tag.uint, tokens.items(.tag)[0]);
try std.testing.expectEqual(Token.Tag.punct_plus, tokens.items(.tag)[1]);
try std.testing.expectEqual(Token.Tag.uint, tokens.items(.tag)[2]);
try std.testing.expectEqual(Token.Tag.punct_minus, tokens.items(.tag)[3]);
try std.testing.expectEqual(Token.Tag.uint, tokens.items(.tag)[4]);
} | src/Lexer.zig |
const std = @import("std");
const root = @import("root");
const liu = @import("./lib.zig");
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const wasm = @This();
pub const Obj = enum(u32) {
// These are kept up to date with src/wasm.ts
jsundefined,
jsnull,
jsEmptyString,
log,
info,
warn,
err,
success,
U8Array,
F32Array,
_,
pub const objSet = ext.objSet;
pub const arrayPush = ext.arrayPush;
};
const Watermark = enum(i32) { _ };
const ext = struct {
extern fn makeString(message: [*]const u8, length: usize, is_temp: bool) Obj;
extern fn makeView(o: Obj, message: ?*const anyopaque, length: usize, is_temp: bool) Obj;
extern fn makeArray(is_temp: bool) Obj;
extern fn makeObj(is_temp: bool) Obj;
extern fn arrayPush(arr: Obj, obj: Obj) void;
extern fn objSet(obj: Obj, key: Obj, value: Obj) void;
extern fn watermark() Watermark;
extern fn setWatermark(watermark: Watermark) void;
extern fn deleteObj(obj: Obj) void;
extern fn encodeString(idx: Obj) usize;
extern fn exactExpFloatFormat(value: f64, is_temp: bool) Obj;
extern fn fixedFormatFloat(value: f64, decimal_places: u32, is_temp: bool) Obj;
extern fn parseFloat(obj: Obj) f64;
extern fn objLen(idx: Obj) usize;
extern fn readBytes(idx: Obj, begin: [*]u8) void;
extern fn postMessage(tagIdx: Obj, id: Obj) void;
extern fn exit(objIndex: Obj) noreturn;
};
var error_code: ?u32 = null;
comptime {
@export(liuWasmErrorCode, .{ .name = "liuWasmErrorCode", .linkage = .Strong });
}
fn liuWasmErrorCode(code: u32) callconv(.C) void {
error_code = code;
}
pub const watermark = ext.watermark;
pub const setWatermark = ext.setWatermark;
pub const Lifetime = enum {
manual,
temp,
fn isTemp(self: @This()) bool {
return self == .temp;
}
};
// Copied from std
fn parseInfOrNan(comptime T: type, s: []const u8, negative: bool) ?T {
// inf/infinity; infxxx should only consume inf.
if (std.ascii.startsWithIgnoreCase(s, "inf")) {
const n: usize = if (std.ascii.startsWithIgnoreCase(s[3..], "inity")) 8 else 3;
if (n != s.len) return null;
return if (!negative) std.math.inf(T) else -std.math.inf(T);
}
if (std.ascii.startsWithIgnoreCase(s, "nan")) {
if (s.len != 3) return null;
return std.math.nan(T);
}
return null;
}
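/// Recognizes the sign, "inf"/"infinity", and "nan" natively; everything else
/// is delegated to JavaScript's parseFloat via the `ext` import, with a NaN
/// result mapped to error.InvalidCharacter since "nan" inputs were already
/// handled above.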
pub fn parseFloat(bytes: []const u8) std.fmt.ParseFloatError!f64 {
if (bytes.len == 0) {
return error.InvalidCharacter;
}
var i: usize = 0;
const negative = bytes[i] == '-';
if (negative or bytes[i] == '+') {
i += 1;
}
if (bytes.len == i) {
return error.InvalidCharacter;
}
if (parseInfOrNan(f64, bytes[i..], negative)) |val| {
return val;
}
const mark = watermark();
defer setWatermark(mark);
const obj = make.string(.temp, bytes);
const val = ext.parseFloat(obj);
if (std.math.isNan(val)) {
return error.InvalidCharacter;
}
return val;
}
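// `make` wraps the JS-side constructors declared in `ext`. Every function takes
// a Lifetime; the pattern used throughout this file (see parseFloat and post) is
// to take a watermark, build `.temp` objects, and restore the watermark when
// done, while `.manual` objects are released explicitly with delete()/deleteMany().
//
// Minimal usage sketch (illustrative only; assumes a wasm32 build where the
// host supplies the imports from src/wasm.ts):
//
//     const mark = wasm.watermark();
//     defer wasm.setWatermark(mark);
//     const arr = wasm.make.array(.temp);
//     arr.arrayPush(wasm.make.string(.temp, "hello"));
//     arr.arrayPush(wasm.out.fmt("{d}", .{42}));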
pub const make = struct {
pub fn slice(life: Lifetime, data: anytype) Obj {
const ptr: ?*const anyopaque = ptr: {
switch (@typeInfo(@TypeOf(data))) {
.Array => {},
.Pointer => |info| switch (info.size) {
.One => switch (@typeInfo(info.child)) {
.Array => break :ptr data,
else => {},
},
.Many, .C => {},
.Slice => break :ptr data.ptr,
},
else => {},
}
@compileError("Need to pass a slice or array");
};
const len = data.len;
const T = std.meta.Elem(@TypeOf(data));
const is_temp = life.isTemp();
return switch (T) {
u8 => ext.makeView(.U8Array, ptr, len, is_temp),
f32 => ext.makeView(.F32Array, ptr, len, is_temp),
else => unreachable,
};
}
pub fn fmt(life: Lifetime, comptime format: []const u8, args: anytype) Obj {
const mark = liu.TempMark;
defer liu.TempMark = mark;
const allocResult = std.fmt.allocPrint(liu.Temp, format, args);
const data = allocResult catch @panic("failed to print");
return ext.makeString(data.ptr, data.len, life.isTemp());
}
pub fn string(life: Lifetime, a: []const u8) Obj {
return ext.makeString(a.ptr, a.len, life.isTemp());
}
pub fn array(life: Lifetime) Obj {
return ext.makeArray(life.isTemp());
}
pub fn obj(life: Lifetime) Obj {
return ext.makeObj(life.isTemp());
}
pub fn exactExpFloatPrint(life: Lifetime, value: f64) Obj {
        return ext.exactExpFloatFormat(value, life.isTemp());
}
pub fn fixedFloatPrint(life: Lifetime, value: f64, places: u32) Obj {
return ext.fixedFormatFloat(value, places, life.isTemp());
}
};
pub fn post(level: Obj, comptime format: []const u8, args: anytype) void {
if (builtin.target.cpu.arch != .wasm32) {
std.log.info(format, args);
return;
}
const mark = watermark();
defer setWatermark(mark);
const object = make.fmt(.temp, format, args);
ext.postMessage(level, object);
}
pub const delete = ext.deleteObj;
pub fn deleteMany(obj_slice: []const Obj) void {
for (obj_slice) |o| {
delete(o);
}
}
pub const out = struct {
pub inline fn array() Obj {
return wasm.make.array(.temp);
}
pub inline fn obj() Obj {
return wasm.make.obj(.temp);
}
pub inline fn slice(data: anytype) Obj {
return wasm.make.slice(.temp, data);
}
pub inline fn string(a: []const u8) Obj {
return wasm.make.string(.temp, a);
}
pub inline fn fmt(comptime format: []const u8, args: anytype) Obj {
return wasm.make.fmt(.temp, format, args);
}
pub fn exactExpFloatPrint(value: f64) Obj {
return wasm.make.exactExpFloatPrint(.temp, value);
}
pub fn fixedFloatPrint(value: f64, places: u32) Obj {
return wasm.make.fixedFloatPrint(.temp, value, places);
}
};
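// `in` copies JS-side objects back into Zig-owned memory. Each call deletes the
// handle it was given, so the Obj must not be reused afterwards. On non-wasm32
// targets these are stubs that return empty slices.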
pub const in = struct {
pub fn bytes(byte_object: Obj, alloc: Allocator) ![]u8 {
return alignedBytes(byte_object, alloc, null);
}
pub fn alignedBytes(byte_object: Obj, alloc: Allocator, comptime alignment: ?u29) ![]align(alignment orelse 1) u8 {
if (builtin.target.cpu.arch != .wasm32) return &.{};
defer ext.deleteObj(byte_object);
const len = ext.objLen(byte_object);
const data = try alloc.alignedAlloc(u8, alignment, len);
ext.readBytes(byte_object, data.ptr);
return data;
}
pub fn string(string_object: Obj, alloc: Allocator) ![]u8 {
if (builtin.target.cpu.arch != .wasm32) return &.{};
defer ext.deleteObj(string_object);
const len = ext.encodeString(string_object);
const data = try alloc.alloc(u8, len);
ext.readBytes(string_object, data.ptr);
return data;
}
};
pub fn exit(msg: []const u8) noreturn {
const exit_message = wasm.make.string(.temp, msg);
return ext.exit(exit_message);
}
var initialized: bool = false;
pub fn initIfNecessary() void {
if (builtin.target.cpu.arch != .wasm32) {
return;
}
if (!initialized) {
initialized = true;
}
}
pub const strip_debug_info = true;
pub const have_error_return_tracing = false;
pub fn log(
comptime message_level: std.log.Level,
comptime scope: @Type(.EnumLiteral),
comptime fmt: []const u8,
args: anytype,
) void {
if (builtin.target.cpu.arch != .wasm32) {
std.log.defaultLog(message_level, scope, fmt, args);
return;
}
_ = scope;
if (@enumToInt(message_level) > @enumToInt(std.log.level)) {
return;
}
const level_obj: Obj = switch (message_level) {
.debug => .info,
.info => .info,
.warn => .warn,
.err => .err,
};
post(level_obj, fmt ++ "\n", args);
}
pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
@setCold(true);
if (builtin.target.cpu.arch != .wasm32) {
std.builtin.default_panic(msg, error_return_trace);
}
_ = error_return_trace;
exit(msg);
} | src/liu/wasm.zig |
const std = @import("std.zig");
const StringHashMap = std.StringHashMap;
const mem = std.mem;
const Allocator = mem.Allocator;
const testing = std.testing;
/// BufMap copies keys and values before they go into the map, and
/// frees them when they get removed.
pub const BufMap = struct {
hash_map: BufMapHashMap,
const BufMapHashMap = StringHashMap([]const u8);
pub fn init(allocator: *Allocator) BufMap {
var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) };
return self;
}
pub fn deinit(self: *BufMap) void {
var it = self.hash_map.iterator();
while (true) {
const entry = it.next() orelse break;
self.free(entry.key);
self.free(entry.value);
}
self.hash_map.deinit();
}
/// Same as `set` but the key and value become owned by the BufMap rather
/// than being copied.
/// If `setMove` fails, the ownership of key and value does not transfer.
pub fn setMove(self: *BufMap, key: []u8, value: []u8) !void {
const get_or_put = try self.hash_map.getOrPut(key);
        if (get_or_put.found_existing) {
            self.free(get_or_put.kv.key);
            // Also free the old value; otherwise overwriting an existing key leaks it.
            self.free(get_or_put.kv.value);
            get_or_put.kv.key = key;
        }
        get_or_put.kv.value = value;
}
/// `key` and `value` are copied into the BufMap.
pub fn set(self: *BufMap, key: []const u8, value: []const u8) !void {
const value_copy = try self.copy(value);
errdefer self.free(value_copy);
// Avoid copying key if it already exists
        const get_or_put = try self.hash_map.getOrPut(key);
        if (get_or_put.found_existing) {
            // Free the old value; otherwise overwriting an existing key leaks it.
            self.free(get_or_put.kv.value);
        } else {
            get_or_put.kv.key = self.copy(key) catch |err| {
                _ = self.hash_map.remove(key);
                return err;
            };
        }
        get_or_put.kv.value = value_copy;
}
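    /// Returns the value associated with `key`, or null if absent. The returned
    /// slice is owned by the BufMap and is freed when the entry is overwritten,
    /// deleted, or the map is deinitialized.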
pub fn get(self: BufMap, key: []const u8) ?[]const u8 {
const entry = self.hash_map.get(key) orelse return null;
return entry.value;
}
pub fn delete(self: *BufMap, key: []const u8) void {
const entry = self.hash_map.remove(key) orelse return;
self.free(entry.key);
self.free(entry.value);
}
pub fn count(self: BufMap) usize {
return self.hash_map.count();
}
pub fn iterator(self: *const BufMap) BufMapHashMap.Iterator {
return self.hash_map.iterator();
}
fn free(self: BufMap, value: []const u8) void {
self.hash_map.allocator.free(value);
}
fn copy(self: BufMap, value: []const u8) ![]u8 {
return mem.dupe(self.hash_map.allocator, u8, value);
}
};
test "BufMap" {
var bufmap = BufMap.init(std.heap.page_allocator);
defer bufmap.deinit();
try bufmap.set("x", "1");
testing.expect(mem.eql(u8, bufmap.get("x").?, "1"));
testing.expect(1 == bufmap.count());
try bufmap.set("x", "2");
testing.expect(mem.eql(u8, bufmap.get("x").?, "2"));
testing.expect(1 == bufmap.count());
try bufmap.set("x", "3");
testing.expect(mem.eql(u8, bufmap.get("x").?, "3"));
testing.expect(1 == bufmap.count());
bufmap.delete("x");
testing.expect(0 == bufmap.count());
} | lib/std/buf_map.zig |
const Atom = @This();
const std = @import("std");
const build_options = @import("build_options");
const aarch64 = @import("../../arch/aarch64/bits.zig");
const assert = std.debug.assert;
const commands = @import("commands.zig");
const log = std.log.scoped(.link);
const macho = std.macho;
const math = std.math;
const mem = std.mem;
const meta = std.meta;
const trace = @import("../../tracy.zig").trace;
const Allocator = mem.Allocator;
const Arch = std.Target.Cpu.Arch;
const MachO = @import("../MachO.zig");
const Object = @import("Object.zig");
const StringIndexAdapter = std.hash_map.StringIndexAdapter;
/// Each decl always gets a local symbol with the fully qualified name.
/// The vaddr and size are found here directly.
/// The file offset is found by computing the vaddr offset from the section vaddr
/// the symbol references, and adding that to the file offset of the section.
/// If this field is 0, it means the codegen size = 0 and there is no symbol or
/// offset table entry.
local_sym_index: u32,
/// List of symbol aliases pointing to the same atom via different nlists
aliases: std.ArrayListUnmanaged(u32) = .{},
/// List of symbols contained within this atom
contained: std.ArrayListUnmanaged(SymbolAtOffset) = .{},
/// Code (may be non-relocated) this atom represents
code: std.ArrayListUnmanaged(u8) = .{},
/// Size and alignment of this atom
/// Unlike in Elf, we need to store the size of this symbol as part of
/// the atom since macho.nlist_64 lacks this information.
size: u64,
/// Alignment of this atom as a power of 2.
/// For instance, alignment of 0 should be read as 2^0 = 1 byte aligned.
alignment: u32,
/// List of relocations belonging to this atom.
relocs: std.ArrayListUnmanaged(Relocation) = .{},
/// List of offsets contained within this atom that need rebasing by the dynamic
/// loader in presence of ASLR.
rebases: std.ArrayListUnmanaged(u64) = .{},
/// List of offsets contained within this atom that will be dynamically bound
/// by the dynamic loader and contain pointers to resolved (at load time) extern
/// symbols (aka proxies aka imports)
bindings: std.ArrayListUnmanaged(Binding) = .{},
/// List of lazy bindings
lazy_bindings: std.ArrayListUnmanaged(Binding) = .{},
/// List of data-in-code entries. This is currently specific to x86_64 only.
dices: std.ArrayListUnmanaged(macho.data_in_code_entry) = .{},
/// Stab entry for this atom. This is currently specific to a binary created
/// by linking object files in a traditional sense - in incremental sense, we
/// bypass stabs altogether to produce dSYM bundle directly with fully relocated
/// DWARF sections.
stab: ?Stab = null,
/// Points to the previous and next neighbours
next: ?*Atom,
prev: ?*Atom,
/// Previous/next linked list pointers.
/// This is the linked list node for this Decl's corresponding .debug_info tag.
dbg_info_prev: ?*Atom,
dbg_info_next: ?*Atom,
/// Offset into .debug_info pointing to the tag for this Decl.
dbg_info_off: u32,
/// Size of the .debug_info tag for this Decl, not including padding.
dbg_info_len: u32,
dirty: bool = true,
pub const Binding = struct {
n_strx: u32,
offset: u64,
};
pub const SymbolAtOffset = struct {
local_sym_index: u32,
offset: u64,
stab: ?Stab = null,
};
pub const Stab = union(enum) {
function: u64,
static,
global,
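    /// Builds the STABS nlist entries describing the symbol at `local_sym_index`:
    /// N_BNSYM, N_FUN (name), N_FUN (size), and N_ENSYM for functions, N_GSYM for
    /// globals, and N_STSYM for statics. The caller owns the returned slice.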
pub fn asNlists(stab: Stab, local_sym_index: u32, macho_file: anytype) ![]macho.nlist_64 {
var nlists = std.ArrayList(macho.nlist_64).init(macho_file.base.allocator);
defer nlists.deinit();
const sym = macho_file.locals.items[local_sym_index];
switch (stab) {
.function => |size| {
try nlists.ensureUnusedCapacity(4);
nlists.appendAssumeCapacity(.{
.n_strx = 0,
.n_type = macho.N_BNSYM,
.n_sect = sym.n_sect,
.n_desc = 0,
.n_value = sym.n_value,
});
nlists.appendAssumeCapacity(.{
.n_strx = sym.n_strx,
.n_type = macho.N_FUN,
.n_sect = sym.n_sect,
.n_desc = 0,
.n_value = sym.n_value,
});
nlists.appendAssumeCapacity(.{
.n_strx = 0,
.n_type = macho.N_FUN,
.n_sect = 0,
.n_desc = 0,
.n_value = size,
});
nlists.appendAssumeCapacity(.{
.n_strx = 0,
.n_type = macho.N_ENSYM,
.n_sect = sym.n_sect,
.n_desc = 0,
.n_value = size,
});
},
.global => {
try nlists.append(.{
.n_strx = sym.n_strx,
.n_type = macho.N_GSYM,
.n_sect = 0,
.n_desc = 0,
.n_value = 0,
});
},
.static => {
try nlists.append(.{
.n_strx = sym.n_strx,
.n_type = macho.N_STSYM,
.n_sect = sym.n_sect,
.n_desc = 0,
.n_value = sym.n_value,
});
},
}
return nlists.toOwnedSlice();
}
};
pub const Relocation = struct {
pub const Target = union(enum) {
local: u32,
global: u32,
};
/// Offset within the atom's code buffer.
/// Note relocation size can be inferred by relocation's kind.
offset: u32,
target: Target,
addend: i64,
subtractor: ?u32,
pcrel: bool,
length: u2,
@"type": u4,
};
pub const empty = Atom{
.local_sym_index = 0,
.size = 0,
.alignment = 0,
.prev = null,
.next = null,
.dbg_info_prev = null,
.dbg_info_next = null,
.dbg_info_off = undefined,
.dbg_info_len = undefined,
};
pub fn deinit(self: *Atom, allocator: *Allocator) void {
self.dices.deinit(allocator);
self.lazy_bindings.deinit(allocator);
self.bindings.deinit(allocator);
self.rebases.deinit(allocator);
self.relocs.deinit(allocator);
self.contained.deinit(allocator);
self.aliases.deinit(allocator);
self.code.deinit(allocator);
}
pub fn clearRetainingCapacity(self: *Atom) void {
self.dices.clearRetainingCapacity();
self.lazy_bindings.clearRetainingCapacity();
self.bindings.clearRetainingCapacity();
self.rebases.clearRetainingCapacity();
self.relocs.clearRetainingCapacity();
self.contained.clearRetainingCapacity();
self.aliases.clearRetainingCapacity();
self.code.clearRetainingCapacity();
}
/// Returns how much room there is to grow in virtual address space.
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
pub fn capacity(self: Atom, macho_file: MachO) u64 {
const self_sym = macho_file.locals.items[self.local_sym_index];
if (self.next) |next| {
const next_sym = macho_file.locals.items[next.local_sym_index];
return next_sym.n_value - self_sym.n_value;
} else {
// We are the last atom.
// The capacity is limited only by virtual address space.
return std.math.maxInt(u64) - self_sym.n_value;
}
}
pub fn freeListEligible(self: Atom, macho_file: MachO) bool {
// No need to keep a free list node for the last atom.
const next = self.next orelse return false;
const self_sym = macho_file.locals.items[self.local_sym_index];
const next_sym = macho_file.locals.items[next.local_sym_index];
const cap = next_sym.n_value - self_sym.n_value;
const ideal_cap = MachO.padToIdeal(self.size);
if (cap <= ideal_cap) return false;
const surplus = cap - ideal_cap;
return surplus >= MachO.min_text_capacity;
}
const RelocContext = struct {
base_addr: u64 = 0,
allocator: *Allocator,
object: *Object,
macho_file: *MachO,
};
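/// Translates the raw Mach-O relocation records for this atom into
/// `self.relocs`. ARM64_RELOC_ADDEND and *_RELOC_SUBTRACTOR records are folded
/// into the addend/subtractor of the relocation that follows them, GOT, stub,
/// and lazy-pointer atoms are created for targets that need them, and
/// pointer-sized unsigned relocations are recorded as rebases or bindings.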
pub fn parseRelocs(self: *Atom, relocs: []macho.relocation_info, context: RelocContext) !void {
const tracy = trace(@src());
defer tracy.end();
const arch = context.macho_file.base.options.target.cpu.arch;
var addend: i64 = 0;
var subtractor: ?u32 = null;
for (relocs) |rel, i| {
blk: {
switch (arch) {
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, rel.r_type)) {
.ARM64_RELOC_ADDEND => {
assert(addend == 0);
addend = rel.r_symbolnum;
// Verify that it's followed by ARM64_RELOC_PAGE21 or ARM64_RELOC_PAGEOFF12.
if (relocs.len <= i + 1) {
log.err("no relocation after ARM64_RELOC_ADDEND", .{});
return error.UnexpectedRelocationType;
}
const next = @intToEnum(macho.reloc_type_arm64, relocs[i + 1].r_type);
switch (next) {
.ARM64_RELOC_PAGE21, .ARM64_RELOC_PAGEOFF12 => {},
else => {
log.err("unexpected relocation type after ARM64_RELOC_ADDEND", .{});
log.err(" expected ARM64_RELOC_PAGE21 or ARM64_RELOC_PAGEOFF12", .{});
log.err(" found {s}", .{next});
return error.UnexpectedRelocationType;
},
}
continue;
},
.ARM64_RELOC_SUBTRACTOR => {},
else => break :blk,
},
.x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, rel.r_type)) {
.X86_64_RELOC_SUBTRACTOR => {},
else => break :blk,
},
else => unreachable,
}
assert(subtractor == null);
const sym = context.object.symtab.items[rel.r_symbolnum];
if (MachO.symbolIsSect(sym) and !MachO.symbolIsExt(sym)) {
subtractor = context.object.symbol_mapping.get(rel.r_symbolnum).?;
} else {
const sym_name = context.object.getString(sym.n_strx);
const n_strx = context.macho_file.strtab_dir.getKeyAdapted(
@as([]const u8, sym_name),
StringIndexAdapter{
.bytes = &context.macho_file.strtab,
},
).?;
const resolv = context.macho_file.symbol_resolver.get(n_strx).?;
assert(resolv.where == .global);
subtractor = resolv.local_sym_index;
}
// Verify that *_SUBTRACTOR is followed by *_UNSIGNED.
if (relocs.len <= i + 1) {
log.err("no relocation after *_RELOC_SUBTRACTOR", .{});
return error.UnexpectedRelocationType;
}
switch (arch) {
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, relocs[i + 1].r_type)) {
.ARM64_RELOC_UNSIGNED => {},
else => {
log.err("unexpected relocation type after ARM64_RELOC_ADDEND", .{});
log.err(" expected ARM64_RELOC_UNSIGNED", .{});
log.err(" found {s}", .{@intToEnum(macho.reloc_type_arm64, relocs[i + 1].r_type)});
return error.UnexpectedRelocationType;
},
},
.x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, relocs[i + 1].r_type)) {
.X86_64_RELOC_UNSIGNED => {},
else => {
log.err("unexpected relocation type after X86_64_RELOC_ADDEND", .{});
log.err(" expected X86_64_RELOC_UNSIGNED", .{});
log.err(" found {s}", .{@intToEnum(macho.reloc_type_x86_64, relocs[i + 1].r_type)});
return error.UnexpectedRelocationType;
},
},
else => unreachable,
}
continue;
}
const target = target: {
if (rel.r_extern == 0) {
const sect_id = @intCast(u16, rel.r_symbolnum - 1);
const local_sym_index = context.object.sections_as_symbols.get(sect_id) orelse blk: {
const seg = context.object.load_commands.items[context.object.segment_cmd_index.?].Segment;
const sect = seg.sections.items[sect_id];
const match = (try context.macho_file.getMatchingSection(sect)) orelse unreachable;
const local_sym_index = @intCast(u32, context.macho_file.locals.items.len);
try context.macho_file.locals.append(context.allocator, .{
.n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = @intCast(u8, context.macho_file.section_ordinals.getIndex(match).? + 1),
.n_desc = 0,
.n_value = 0,
});
try context.object.sections_as_symbols.putNoClobber(context.allocator, sect_id, local_sym_index);
break :blk local_sym_index;
};
break :target Relocation.Target{ .local = local_sym_index };
}
const sym = context.object.symtab.items[rel.r_symbolnum];
const sym_name = context.object.getString(sym.n_strx);
if (MachO.symbolIsSect(sym) and !MachO.symbolIsExt(sym)) {
const sym_index = context.object.symbol_mapping.get(rel.r_symbolnum) orelse unreachable;
break :target Relocation.Target{ .local = sym_index };
}
const n_strx = context.macho_file.strtab_dir.getKeyAdapted(
@as([]const u8, sym_name),
StringIndexAdapter{
.bytes = &context.macho_file.strtab,
},
) orelse unreachable;
break :target Relocation.Target{ .global = n_strx };
};
const offset = @intCast(u32, rel.r_address);
switch (arch) {
.aarch64 => {
switch (@intToEnum(macho.reloc_type_arm64, rel.r_type)) {
.ARM64_RELOC_BRANCH26 => {
// TODO rewrite relocation
try addStub(target, context);
},
.ARM64_RELOC_GOT_LOAD_PAGE21, .ARM64_RELOC_GOT_LOAD_PAGEOFF12 => {
// TODO rewrite relocation
try addGotEntry(target, context);
},
.ARM64_RELOC_UNSIGNED => {
assert(rel.r_extern == 1);
addend = if (rel.r_length == 3)
mem.readIntLittle(i64, self.code.items[offset..][0..8])
else
mem.readIntLittle(i32, self.code.items[offset..][0..4]);
try self.addPtrBindingOrRebase(rel, target, context);
},
else => {},
}
},
.x86_64 => {
const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);
switch (rel_type) {
.X86_64_RELOC_BRANCH => {
// TODO rewrite relocation
try addStub(target, context);
},
.X86_64_RELOC_GOT, .X86_64_RELOC_GOT_LOAD => {
// TODO rewrite relocation
try addGotEntry(target, context);
addend = mem.readIntLittle(i32, self.code.items[offset..][0..4]);
},
.X86_64_RELOC_UNSIGNED => {
addend = if (rel.r_length == 3)
mem.readIntLittle(i64, self.code.items[offset..][0..8])
else
mem.readIntLittle(i32, self.code.items[offset..][0..4]);
if (rel.r_extern == 0) {
const seg = context.object.load_commands.items[context.object.segment_cmd_index.?].Segment;
const target_sect_base_addr = seg.sections.items[rel.r_symbolnum - 1].addr;
addend -= @intCast(i64, target_sect_base_addr);
}
try self.addPtrBindingOrRebase(rel, target, context);
},
.X86_64_RELOC_SIGNED,
.X86_64_RELOC_SIGNED_1,
.X86_64_RELOC_SIGNED_2,
.X86_64_RELOC_SIGNED_4,
=> {
const correction: u3 = switch (rel_type) {
.X86_64_RELOC_SIGNED => 0,
.X86_64_RELOC_SIGNED_1 => 1,
.X86_64_RELOC_SIGNED_2 => 2,
.X86_64_RELOC_SIGNED_4 => 4,
else => unreachable,
};
addend = mem.readIntLittle(i32, self.code.items[offset..][0..4]) + correction;
if (rel.r_extern == 0) {
const seg = context.object.load_commands.items[context.object.segment_cmd_index.?].Segment;
const target_sect_base_addr = seg.sections.items[rel.r_symbolnum - 1].addr;
addend += @intCast(i64, context.base_addr + offset + correction + 4) -
@intCast(i64, target_sect_base_addr);
}
},
else => {},
}
},
else => unreachable,
}
try self.relocs.append(context.allocator, .{
.offset = offset,
.target = target,
.addend = addend,
.subtractor = subtractor,
.pcrel = rel.r_pcrel == 1,
.length = rel.r_length,
.@"type" = rel.r_type,
});
addend = 0;
subtractor = null;
}
}
fn addPtrBindingOrRebase(
self: *Atom,
rel: macho.relocation_info,
target: Relocation.Target,
context: RelocContext,
) !void {
switch (target) {
.global => |n_strx| {
try self.bindings.append(context.allocator, .{
.n_strx = n_strx,
.offset = @intCast(u32, rel.r_address),
});
},
.local => {
const source_sym = context.macho_file.locals.items[self.local_sym_index];
const match = context.macho_file.section_ordinals.keys()[source_sym.n_sect - 1];
const seg = context.macho_file.load_commands.items[match.seg].Segment;
const sect = seg.sections.items[match.sect];
const sect_type = commands.sectionType(sect);
const should_rebase = rebase: {
if (rel.r_length != 3) break :rebase false;
                // TODO: a check similar to what dyld does, i.e. verifying that the
                // segment is writable, should be enough here.
const is_right_segment = blk: {
if (context.macho_file.data_segment_cmd_index) |idx| {
if (match.seg == idx) {
break :blk true;
}
}
if (context.macho_file.data_const_segment_cmd_index) |idx| {
if (match.seg == idx) {
break :blk true;
}
}
break :blk false;
};
if (!is_right_segment) break :rebase false;
if (sect_type != macho.S_LITERAL_POINTERS and
sect_type != macho.S_REGULAR and
sect_type != macho.S_MOD_INIT_FUNC_POINTERS and
sect_type != macho.S_MOD_TERM_FUNC_POINTERS)
{
break :rebase false;
}
break :rebase true;
};
if (should_rebase) {
try self.rebases.append(context.allocator, @intCast(u32, rel.r_address));
}
},
}
}
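/// Creates a GOT entry atom for `target` (unless one already exists) and
/// appends it to the __got section's atom list for this object.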
fn addGotEntry(target: Relocation.Target, context: RelocContext) !void {
if (context.macho_file.got_entries_map.contains(target)) return;
const atom = try context.macho_file.createGotAtom(target);
try context.macho_file.got_entries_map.putNoClobber(context.macho_file.base.allocator, target, atom);
const match = MachO.MatchingSection{
.seg = context.macho_file.data_const_segment_cmd_index.?,
.sect = context.macho_file.got_section_index.?,
};
if (!context.object.start_atoms.contains(match)) {
try context.object.start_atoms.putNoClobber(context.allocator, match, atom);
}
if (context.object.end_atoms.getPtr(match)) |last| {
last.*.next = atom;
atom.prev = last.*;
last.* = atom;
} else {
try context.object.end_atoms.putNoClobber(context.allocator, match, atom);
}
}
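/// Creates the stub machinery for an undefined global `target`: a stub helper
/// atom (__stub_helper), a lazy pointer atom (__la_symbol_ptr), and the stub
/// atom itself (__stubs), each appended to the end of its section's atom list.
/// No-ops for local targets or targets that already have a stub.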
fn addStub(target: Relocation.Target, context: RelocContext) !void {
if (target != .global) return;
if (context.macho_file.stubs_map.contains(target.global)) return;
// TODO clean this up!
const stub_helper_atom = atom: {
const atom = try context.macho_file.createStubHelperAtom();
const match = MachO.MatchingSection{
.seg = context.macho_file.text_segment_cmd_index.?,
.sect = context.macho_file.stub_helper_section_index.?,
};
if (!context.object.start_atoms.contains(match)) {
try context.object.start_atoms.putNoClobber(context.allocator, match, atom);
}
if (context.object.end_atoms.getPtr(match)) |last| {
last.*.next = atom;
atom.prev = last.*;
last.* = atom;
} else {
try context.object.end_atoms.putNoClobber(context.allocator, match, atom);
}
break :atom atom;
};
const laptr_atom = atom: {
const atom = try context.macho_file.createLazyPointerAtom(
stub_helper_atom.local_sym_index,
target.global,
);
const match = MachO.MatchingSection{
.seg = context.macho_file.data_segment_cmd_index.?,
.sect = context.macho_file.la_symbol_ptr_section_index.?,
};
if (!context.object.start_atoms.contains(match)) {
try context.object.start_atoms.putNoClobber(context.allocator, match, atom);
}
if (context.object.end_atoms.getPtr(match)) |last| {
last.*.next = atom;
atom.prev = last.*;
last.* = atom;
} else {
try context.object.end_atoms.putNoClobber(context.allocator, match, atom);
}
break :atom atom;
};
const atom = try context.macho_file.createStubAtom(laptr_atom.local_sym_index);
const match = MachO.MatchingSection{
.seg = context.macho_file.text_segment_cmd_index.?,
.sect = context.macho_file.stubs_section_index.?,
};
if (!context.object.start_atoms.contains(match)) {
try context.object.start_atoms.putNoClobber(context.allocator, match, atom);
}
if (context.object.end_atoms.getPtr(match)) |last| {
last.*.next = atom;
atom.prev = last.*;
last.* = atom;
} else {
try context.object.end_atoms.putNoClobber(context.allocator, match, atom);
}
try context.macho_file.stubs_map.putNoClobber(context.allocator, target.global, atom);
}
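/// Patches this atom's code with final addresses now that symbols have been
/// laid out: branch and page relocations are rewritten into the relevant
/// instruction fields, GOT-based relocations resolve through the GOT atom, and
/// unsigned relocations are written as absolute 32/64-bit values, optionally
/// relative to a subtractor symbol.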
pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
for (self.relocs.items) |rel| {
log.debug("relocating {}", .{rel});
const arch = macho_file.base.options.target.cpu.arch;
const source_addr = blk: {
const sym = macho_file.locals.items[self.local_sym_index];
break :blk sym.n_value + rel.offset;
};
const target_addr = blk: {
const is_via_got = got: {
switch (arch) {
.aarch64 => break :got switch (@intToEnum(macho.reloc_type_arm64, rel.@"type")) {
.ARM64_RELOC_GOT_LOAD_PAGE21, .ARM64_RELOC_GOT_LOAD_PAGEOFF12 => true,
else => false,
},
.x86_64 => break :got switch (@intToEnum(macho.reloc_type_x86_64, rel.@"type")) {
.X86_64_RELOC_GOT, .X86_64_RELOC_GOT_LOAD => true,
else => false,
},
else => unreachable,
}
};
if (is_via_got) {
const atom = macho_file.got_entries_map.get(rel.target) orelse {
const n_strx = switch (rel.target) {
.local => |sym_index| macho_file.locals.items[sym_index].n_strx,
.global => |n_strx| n_strx,
};
log.err("expected GOT entry for symbol '{s}'", .{macho_file.getString(n_strx)});
log.err(" this is an internal linker error", .{});
return error.FailedToResolveRelocationTarget;
};
break :blk macho_file.locals.items[atom.local_sym_index].n_value;
}
switch (rel.target) {
.local => |sym_index| {
const sym = macho_file.locals.items[sym_index];
const is_tlv = is_tlv: {
const source_sym = macho_file.locals.items[self.local_sym_index];
const match = macho_file.section_ordinals.keys()[source_sym.n_sect - 1];
const seg = macho_file.load_commands.items[match.seg].Segment;
const sect = seg.sections.items[match.sect];
break :is_tlv commands.sectionType(sect) == macho.S_THREAD_LOCAL_VARIABLES;
};
if (is_tlv) {
                        // For TLV relocations, the value written is the displacement from the first
                        // defined TLV template init section to this symbol's TLV initializer (either
                        // a value in __thread_data or zero-init in __thread_bss). The base section is
                        // picked in the following order:
                        // * __thread_data, if defined
                        // * otherwise __thread_bss
const seg = macho_file.load_commands.items[macho_file.data_segment_cmd_index.?].Segment;
const base_address = inner: {
if (macho_file.tlv_data_section_index) |i| {
break :inner seg.sections.items[i].addr;
} else if (macho_file.tlv_bss_section_index) |i| {
break :inner seg.sections.items[i].addr;
} else {
log.err("threadlocal variables present but no initializer sections found", .{});
log.err(" __thread_data not found", .{});
log.err(" __thread_bss not found", .{});
return error.FailedToResolveRelocationTarget;
}
};
break :blk sym.n_value - base_address;
}
break :blk sym.n_value;
},
.global => |n_strx| {
// TODO Still trying to figure out how to possibly use stubs for local symbol indirection with
// branching instructions. If it is not possible, then the best course of action is to
                    // resurrect the former approach of deferring creating synthetic atoms in __got and __la_symbol_ptr
// sections until we resolve the relocations.
const resolv = macho_file.symbol_resolver.get(n_strx).?;
switch (resolv.where) {
.global => break :blk macho_file.globals.items[resolv.where_index].n_value,
.undef => {
break :blk if (macho_file.stubs_map.get(n_strx)) |atom|
macho_file.locals.items[atom.local_sym_index].n_value
else
0;
},
}
},
}
};
log.debug(" | source_addr = 0x{x}", .{source_addr});
log.debug(" | target_addr = 0x{x}", .{target_addr});
switch (arch) {
.aarch64 => {
switch (@intToEnum(macho.reloc_type_arm64, rel.@"type")) {
.ARM64_RELOC_BRANCH26 => {
const displacement = math.cast(
i28,
@intCast(i64, target_addr) - @intCast(i64, source_addr),
) catch |err| switch (err) {
error.Overflow => {
log.err("jump too big to encode as i28 displacement value", .{});
log.err(" (target - source) = displacement => 0x{x} - 0x{x} = 0x{x}", .{
target_addr,
source_addr,
@intCast(i64, target_addr) - @intCast(i64, source_addr),
});
log.err(" TODO implement branch islands to extend jump distance for arm64", .{});
return error.TODOImplementBranchIslands;
},
};
const code = self.code.items[rel.offset..][0..4];
var inst = aarch64.Instruction{
.unconditional_branch_immediate = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.unconditional_branch_immediate,
), code),
};
inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2));
mem.writeIntLittle(u32, code, inst.toU32());
},
.ARM64_RELOC_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_TLVP_LOAD_PAGE21,
=> {
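                        // ADRP works in 4 KiB pages: the immediate encodes the
                        // page delta (target >> 12) - (source >> 12), which is
                        // split into immhi:immlo below. For example, a source of
                        // 0x100003f80 and a target of 0x100008010 give a delta
                        // of 0x100008 - 0x100003 = 5 pages.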
const actual_target_addr = @intCast(i64, target_addr) + rel.addend;
const source_page = @intCast(i32, source_addr >> 12);
const target_page = @intCast(i32, actual_target_addr >> 12);
const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
const code = self.code.items[rel.offset..][0..4];
var inst = aarch64.Instruction{
.pc_relative_address = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.pc_relative_address,
), code),
};
inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
inst.pc_relative_address.immlo = @truncate(u2, pages);
mem.writeIntLittle(u32, code, inst.toU32());
},
.ARM64_RELOC_PAGEOFF12 => {
const code = self.code.items[rel.offset..][0..4];
const actual_target_addr = @intCast(i64, target_addr) + rel.addend;
const narrowed = @truncate(u12, @intCast(u64, actual_target_addr));
if (isArithmeticOp(self.code.items[rel.offset..][0..4])) {
var inst = aarch64.Instruction{
.add_subtract_immediate = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.add_subtract_immediate,
), code),
};
inst.add_subtract_immediate.imm12 = narrowed;
mem.writeIntLittle(u32, code, inst.toU32());
} else {
var inst = aarch64.Instruction{
.load_store_register = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), code),
};
const offset: u12 = blk: {
if (inst.load_store_register.size == 0) {
if (inst.load_store_register.v == 1) {
// 128-bit SIMD is scaled by 16.
break :blk try math.divExact(u12, narrowed, 16);
}
// Otherwise, 8-bit SIMD or ldrb.
break :blk narrowed;
} else {
const denom: u4 = try math.powi(u4, 2, inst.load_store_register.size);
break :blk try math.divExact(u12, narrowed, denom);
}
};
inst.load_store_register.offset = offset;
mem.writeIntLittle(u32, code, inst.toU32());
}
},
.ARM64_RELOC_GOT_LOAD_PAGEOFF12 => {
const code = self.code.items[rel.offset..][0..4];
const actual_target_addr = @intCast(i64, target_addr) + rel.addend;
const narrowed = @truncate(u12, @intCast(u64, actual_target_addr));
var inst: aarch64.Instruction = .{
.load_store_register = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), code),
};
const offset = try math.divExact(u12, narrowed, 8);
inst.load_store_register.offset = offset;
mem.writeIntLittle(u32, code, inst.toU32());
},
.ARM64_RELOC_TLVP_LOAD_PAGEOFF12 => {
const code = self.code.items[rel.offset..][0..4];
const RegInfo = struct {
rd: u5,
rn: u5,
size: u1,
};
const reg_info: RegInfo = blk: {
if (isArithmeticOp(code)) {
const inst = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.add_subtract_immediate,
), code);
break :blk .{
.rd = inst.rd,
.rn = inst.rn,
.size = inst.sf,
};
} else {
const inst = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), code);
break :blk .{
.rd = inst.rt,
.rn = inst.rn,
.size = @truncate(u1, inst.size),
};
}
};
const actual_target_addr = @intCast(i64, target_addr) + rel.addend;
const narrowed = @truncate(u12, @intCast(u64, actual_target_addr));
var inst = aarch64.Instruction{
.add_subtract_immediate = .{
.rd = reg_info.rd,
.rn = reg_info.rn,
.imm12 = narrowed,
.sh = 0,
.s = 0,
.op = 0,
.sf = reg_info.size,
},
};
mem.writeIntLittle(u32, code, inst.toU32());
},
.ARM64_RELOC_POINTER_TO_GOT => {
const result = try math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr));
mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, result));
},
.ARM64_RELOC_UNSIGNED => {
const result = blk: {
if (rel.subtractor) |subtractor| {
const sym = macho_file.locals.items[subtractor];
break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + rel.addend;
} else {
break :blk @intCast(i64, target_addr) + rel.addend;
}
};
if (rel.length == 3) {
mem.writeIntLittle(u64, self.code.items[rel.offset..][0..8], @bitCast(u64, result));
} else {
mem.writeIntLittle(
u32,
self.code.items[rel.offset..][0..4],
@truncate(u32, @bitCast(u64, result)),
);
}
},
.ARM64_RELOC_SUBTRACTOR => unreachable,
.ARM64_RELOC_ADDEND => unreachable,
}
},
.x86_64 => {
switch (@intToEnum(macho.reloc_type_x86_64, rel.@"type")) {
.X86_64_RELOC_BRANCH => {
const displacement = try math.cast(
i32,
@intCast(i64, target_addr) - @intCast(i64, source_addr) - 4,
);
mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, displacement));
},
.X86_64_RELOC_GOT, .X86_64_RELOC_GOT_LOAD => {
const displacement = try math.cast(
i32,
@intCast(i64, target_addr) - @intCast(i64, source_addr) - 4 + rel.addend,
);
mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, displacement));
},
.X86_64_RELOC_TLV => {
// We need to rewrite the opcode from movq to leaq.
self.code.items[rel.offset - 2] = 0x8d;
const displacement = try math.cast(
i32,
@intCast(i64, target_addr) - @intCast(i64, source_addr) - 4 + rel.addend,
);
mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, displacement));
},
.X86_64_RELOC_SIGNED,
.X86_64_RELOC_SIGNED_1,
.X86_64_RELOC_SIGNED_2,
.X86_64_RELOC_SIGNED_4,
=> {
const correction: u3 = switch (@intToEnum(macho.reloc_type_x86_64, rel.@"type")) {
.X86_64_RELOC_SIGNED => 0,
.X86_64_RELOC_SIGNED_1 => 1,
.X86_64_RELOC_SIGNED_2 => 2,
.X86_64_RELOC_SIGNED_4 => 4,
else => unreachable,
};
const actual_target_addr = @intCast(i64, target_addr) + rel.addend;
const displacement = try math.cast(
i32,
actual_target_addr - @intCast(i64, source_addr + correction + 4),
);
mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, displacement));
},
.X86_64_RELOC_UNSIGNED => {
const result = blk: {
if (rel.subtractor) |subtractor| {
const sym = macho_file.locals.items[subtractor];
break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + rel.addend;
} else {
break :blk @intCast(i64, target_addr) + rel.addend;
}
};
if (rel.length == 3) {
mem.writeIntLittle(u64, self.code.items[rel.offset..][0..8], @bitCast(u64, result));
} else {
mem.writeIntLittle(
u32,
self.code.items[rel.offset..][0..4],
@truncate(u32, @bitCast(u64, result)),
);
}
},
.X86_64_RELOC_SUBTRACTOR => unreachable,
}
},
else => unreachable,
}
}
}
inline fn isArithmeticOp(inst: *const [4]u8) bool {
const group_decode = @truncate(u5, inst[3]);
return ((group_decode >> 2) == 4);
} | src/link/MachO/Atom.zig |
const std = @import("std");
const input = @import("input.zig");
pub fn run(stdout: anytype) anyerror!void {
{
var input_ = try input.readFile("inputs/day18");
defer input_.deinit();
const result = try part1(&input_);
try stdout.print("18a: {}\n", .{ result });
std.debug.assert(result == 4365);
}
{
var input_ = try input.readFile("inputs/day18");
defer input_.deinit();
const result = try part2(&input_);
try stdout.print("18b: {}\n", .{ result });
std.debug.assert(result == 4490);
}
}
fn part1(input_: anytype) !u16 {
const result = try addAll(input_);
return try getMagnitude(result);
}
fn part2(input_: anytype) !u16 {
var numbers = std.BoundedArray(Tree, 100).init(0) catch unreachable;
while (try input_.next()) |*line| {
var tree: Tree = undefined;
try parseTreeNode(line, &tree, root);
try numbers.append(tree);
}
var max_magnitude: u16 = 0;
for (numbers.constSlice()) |first, first_i| {
for (numbers.constSlice()) |second, second_i| {
if (first_i == second_i) {
continue;
}
const sum = try add(first, second);
max_magnitude = std.math.max(max_magnitude, try getMagnitude(sum));
}
}
return max_magnitude;
}
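// The nested pairs are stored as an implicit binary tree in a flat array:
// index 1 is the root and node i keeps its children at 2*i and 2*i + 1 (see
// child/parent below), so 64 slots cover every index down to depth 5. max_work
// sizes the explicit DFS stacks used by the tree walks: at most one pending
// sibling per level plus the node being visited.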
const max_num_tree_nodes = 64;
const Tree = [max_num_tree_nodes]TreeNode;
const max_work = 6 + 1;
const TreeNode = union(enum) {
num: u8,
pair,
};
fn addAll(input_: anytype) !Tree {
var result: ?Tree = null;
while (try input_.next()) |*line| {
var tree: Tree = undefined;
try parseTreeNode(line, &tree, root);
if (result) |result_| {
result = try add(result_, tree);
}
else {
result = tree;
}
}
return result orelse error.InvalidInput;
}
const root: usize = 1;
const ChildDirection = enum {
left,
right,
};
fn child(i: usize, direction: ChildDirection) usize {
return switch (direction) {
.left => i << 1,
.right => (i << 1) | 1,
};
}
fn parent(i: usize) ?usize {
const result = i >> 1;
return if (result == 0) null else result;
}
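/// Recursive-descent parse of one element into tree slot `i`, consuming bytes
/// from the front of `s`: '[' introduces a pair whose halves land in the left
/// and right child slots, anything else must be a single-digit literal.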
fn parseTreeNode(s: *[]const u8, tree: *Tree, i: usize) (std.fmt.ParseIntError || error{InvalidInput})!void {
if ((s.*)[0] == '[') {
s.* = (s.*)[1..];
tree[i] = .pair;
try parseTreeNode(s, tree, child(i, .left));
if ((s.*)[0] != ',') {
return error.InvalidInput;
}
s.* = (s.*)[1..];
try parseTreeNode(s, tree, child(i, .right));
if ((s.*)[0] != ']') {
return error.InvalidInput;
}
s.* = (s.*)[1..];
}
else {
std.debug.assert((s.*)[0] >= '0' and (s.*)[0] <= '9');
const num = try std.fmt.parseInt(u8, (s.*)[0..1], 10);
s.* = (s.*)[1..];
tree[i] = .{ .num = num };
}
}
fn add(left: Tree, right: Tree) !Tree {
var sum: Tree = undefined;
sum[root] = .pair;
try copyTree(&sum, child(root, .left), left, root);
try copyTree(&sum, child(root, .right), right, root);
try reduce(&sum);
return sum;
}
fn copyTree(dst: *Tree, dst_i: usize, src: Tree, src_i: usize) !void {
const Work = struct {
dst_i: usize,
src_i: usize,
};
var work = std.BoundedArray(Work, max_work).init(0) catch unreachable;
try work.append(.{ .dst_i = dst_i, .src_i = src_i });
while (work.popOrNull()) |work_| {
dst[work_.dst_i] = src[work_.src_i];
switch (src[work_.src_i]) {
.num => {},
.pair => {
try work.append(.{ .dst_i = child(work_.dst_i, .right), .src_i = child(work_.src_i, .right) });
try work.append(.{ .dst_i = child(work_.dst_i, .left), .src_i = child(work_.src_i, .left) });
},
}
}
}
fn reduce(tree: *Tree) !void {
while (
(try walkForExploding(tree)) or
(try walkForSplitting(tree))
) {}
}
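// Depth-first search for the leftmost pair nested four levels deep (node index
// >= 16); its halves are added to the nearest regular numbers on each side and
// the pair itself is replaced by 0. Returns true if an explosion happened.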
fn walkForExploding(tree: *Tree) !bool {
var work = std.BoundedArray(usize, max_work).init(0) catch unreachable;
try work.append(root);
while (work.popOrNull()) |i| {
switch (tree[i]) {
.num => {},
.pair => {
if (i >= 1 << 4) {
switch (tree[child(i, .left)]) {
.num => |left| {
switch (tree[child(i, .right)]) {
.num => |right| {
const parent_i = parent(i).?;
if (findAdjacentNum(tree, parent_i, .left, i)) |left_num| {
left_num.* += left;
}
if (findAdjacentNum(tree, parent_i, .right, i)) |right_num| {
right_num.* += right;
}
tree[i] = .{ .num = 0 };
return true;
},
.pair => {},
}
},
.pair => {},
}
}
try work.append(child(i, .right));
try work.append(child(i, .left));
},
}
}
return false;
}
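// Depth-first search for the leftmost regular number >= 10; it is replaced by a
// pair of halves (left = floor(n/2), right = the remainder). Returns true if a
// split happened.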
fn walkForSplitting(tree: *Tree) !bool {
var work = std.BoundedArray(usize, max_work).init(0) catch unreachable;
try work.append(root);
while (work.popOrNull()) |i| {
switch (tree[i]) {
.num => |num| {
if (num < 10) {
continue;
}
const left = num / 2;
const right = num - left;
tree[i] = .pair;
tree[child(i, .left)] = .{ .num = left };
tree[child(i, .right)] = .{ .num = right };
return true;
},
.pair => {
try work.append(child(i, .right));
try work.append(child(i, .left));
},
}
}
return false;
}
// TODO:
// Can't use @call(.{ .modifier = .always_tail }, findAdjacentNum, .{ ... })
// because of https://github.com/ziglang/zig/issues/5692
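// Starting from the exploding pair's parent, walks toward `direction`, skipping
// the child subtree `except` it just came from, and returns a pointer to the
// nearest regular number on that side, or null if there is none.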
fn findAdjacentNum(
tree: *Tree,
i: usize,
direction: ChildDirection,
except: ?usize,
) ?*u8 {
switch (tree[i]) {
.num => |*num| return num,
.pair => {
if (except) |except_| {
switch (direction) {
.left => if (child(i, .left) != except_) {
return findAdjacentNum(tree, child(i, .left), .right, null);
},
.right => if (child(i, .right) != except_) {
return findAdjacentNum(tree, child(i, .right), .left, null);
},
}
if (parent(i)) |parent_| {
return findAdjacentNum(tree, parent_, direction, i);
}
return null;
}
return findAdjacentNum(
tree,
switch (direction) { .left => child(i, .left), .right => child(i, .right) },
direction,
null,
);
},
}
}
fn getMagnitude(tree: Tree) !u16 {
var result: u16 = 0;
const Work = struct {
i: usize,
multiplier: u16,
};
var work = std.BoundedArray(Work, max_work).init(0) catch unreachable;
try work.append(.{ .i = root, .multiplier = 1 });
while (work.popOrNull()) |work_| {
switch (tree[work_.i]) {
.num => |num| result += num * work_.multiplier,
.pair => {
try work.append(.{ .i = child(work_.i, .right), .multiplier = work_.multiplier * 2 });
try work.append(.{ .i = child(work_.i, .left), .multiplier = work_.multiplier * 3 });
},
}
}
return result;
}
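// The magnitude of a pair is 3 * magnitude(left) + 2 * magnitude(right),
// computed here iteratively by carrying a per-node multiplier down the tree.
// Worked example (matches "day 18 example 6" below): [9,1] -> 3*9 + 2*1 = 29.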
test "day 18 example 1" {
const input_ =
\\[[[[4,3],4],4],[7,[[8,4],9]]]
\\[1,1]
;
try expectEqualTree("[[[[0,7],4],[[7,8],[6,0]]],[8,1]]", try addAll(&input.readString(input_)));
}
test "day 18 example 2" {
const input_ =
\\[1,1]
\\[2,2]
\\[3,3]
\\[4,4]
;
try expectEqualTree("[[[[1,1],[2,2]],[3,3]],[4,4]]", try addAll(&input.readString(input_)));
}
test "day 18 example 3" {
const input_ =
\\[1,1]
\\[2,2]
\\[3,3]
\\[4,4]
\\[5,5]
;
try expectEqualTree("[[[[3,0],[5,3]],[4,4]],[5,5]]", try addAll(&input.readString(input_)));
}
test "day 18 example 4" {
const input_ =
\\[1,1]
\\[2,2]
\\[3,3]
\\[4,4]
\\[5,5]
\\[6,6]
;
try expectEqualTree("[[[[5,0],[7,4]],[5,5]],[6,6]]", try addAll(&input.readString(input_)));
}
test "day 18 example 5" {
const input_ =
\\[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]]
\\[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]
\\[[2,[[0,8],[3,4]]],[[[6,7],1],[7,[1,6]]]]
\\[[[[2,4],7],[6,[0,5]]],[[[6,8],[2,8]],[[2,1],[4,5]]]]
\\[7,[5,[[3,8],[1,4]]]]
\\[[2,[2,2]],[8,[8,1]]]
\\[2,9]
\\[1,[[[9,3],9],[[9,0],[0,7]]]]
\\[[[5,[7,4]],7],1]
\\[[[[4,2],2],6],[8,7]]
;
try expectEqualTree("[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]", try addAll(&input.readString(input_)));
}
test "day 18 example 6" {
const input_ = "[9,1]";
try std.testing.expectEqual(@as(u16, 29), try part1(&input.readString(input_)));
}
test "day 18 example 7" {
const input_ = "[1,9]";
try std.testing.expectEqual(@as(u16, 21), try part1(&input.readString(input_)));
}
test "day 18 example 8" {
const input_ = "[[9,1],[1,9]]";
try std.testing.expectEqual(@as(u16, 129), try part1(&input.readString(input_)));
}
test "day 18 example 9" {
const input_ = "[[1,2],[[3,4],5]]";
try std.testing.expectEqual(@as(u16, 143), try part1(&input.readString(input_)));
}
test "day 18 example 10" {
const input_ = "[[[[0,7],4],[[7,8],[6,0]]],[8,1]]";
try std.testing.expectEqual(@as(u16, 1384), try part1(&input.readString(input_)));
}
test "day 18 example 11" {
const input_ = "[[[[1,1],[2,2]],[3,3]],[4,4]]";
try std.testing.expectEqual(@as(u16, 445), try part1(&input.readString(input_)));
}
test "day 18 example 12" {
const input_ = "[[[[3,0],[5,3]],[4,4]],[5,5]]";
try std.testing.expectEqual(@as(u16, 791), try part1(&input.readString(input_)));
}
test "day 18 example 13" {
const input_ = "[[[[5,0],[7,4]],[5,5]],[6,6]]";
try std.testing.expectEqual(@as(u16, 1137), try part1(&input.readString(input_)));
}
test "day 18 example 14" {
const input_ = "[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]";
try std.testing.expectEqual(@as(u16, 3488), try part1(&input.readString(input_)));
}
test "day 18 example 15" {
const input_ =
\\[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
\\[[[5,[2,8]],4],[5,[[9,9],0]]]
\\[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
\\[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
\\[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
\\[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
\\[[[[5,4],[7,7]],8],[[8,3],8]]
\\[[9,3],[[9,9],[6,[4,9]]]]
\\[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
\\[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]
;
try expectEqualTree("[[[[6,6],[7,6]],[[7,7],[7,0]]],[[[7,7],[7,7]],[[7,8],[9,9]]]]", try addAll(&input.readString(input_)));
try std.testing.expectEqual(@as(u16, 4140), try part1(&input.readString(input_)));
try std.testing.expectEqual(@as(u16, 3993), try part2(&input.readString(input_)));
}
fn expectEqualTree(expected: []const u8, actual: Tree) !void {
var string = std.ArrayList(u8).init(std.testing.allocator);
defer string.deinit();
var string_writer = string.writer();
try printTree(actual, root, string_writer);
try std.testing.expectEqualStrings(expected, string.items);
}
fn printTree(tree: Tree, i: usize, writer: anytype) (@TypeOf(writer).Error)!void {
switch (tree[i]) {
.num => |num| try writer.print("{}", .{ num }),
.pair => {
try writer.print("[", .{});
try printTree(tree, child(i, .left), writer);
try writer.print(",", .{});
try printTree(tree, child(i, .right), writer);
try writer.print("]", .{});
},
}
} | src/day18.zig |
const std = @import("std");
const input = @import("input.zig");
pub fn run(stdout: anytype) anyerror!void {
{
var input_ = try input.readFile("inputs/day23");
defer input_.deinit();
const result = try part1(&input_);
try stdout.print("23a: {}\n", .{ result });
std.debug.assert(result == 14510);
}
{
var input_ = try input.readFile("inputs/day23");
defer input_.deinit();
const result = try part2(&input_);
try stdout.print("23b: {}\n", .{ result });
std.debug.assert(result == 49180);
}
}
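// The move sequences in part1/part2 below are a hard-coded solution for this
// specific puzzle input; move() asserts that every step is legal and
// accumulates its cost, and isSolved1/isSolved2 assert the final arrangement.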
fn part1(input_: anytype) !u64 {
var state = try State.init(input_);
var cost: u64 = 0;
move(&state, .{ .room = state.d_room[0..], .pos = 0 }, .{ .room = state.hallway[0..], .pos = 5 }, &cost);
move(&state, .{ .room = state.d_room[0..], .pos = 1 }, .{ .room = state.hallway[0..], .pos = 4 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 4 }, .{ .room = state.hallway[0..], .pos = 0 }, &cost);
move(&state, .{ .room = state.b_room[0..], .pos = 0 }, .{ .room = state.hallway[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 2 }, .{ .room = state.hallway[0..], .pos = 1 }, &cost);
move(&state, .{ .room = state.b_room[0..], .pos = 1 }, .{ .room = state.hallway[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 3 }, .{ .room = state.hallway[0..], .pos = 4 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 4 }, .{ .room = state.d_room[0..], .pos = 1 }, &cost);
move(&state, .{ .room = state.c_room[0..], .pos = 0 }, .{ .room = state.hallway[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 3 }, .{ .room = state.b_room[0..], .pos = 1 }, &cost);
move(&state, .{ .room = state.c_room[0..], .pos = 1 }, .{ .room = state.hallway[0..], .pos = 4 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 4 }, .{ .room = state.d_room[0..], .pos = 0 }, &cost);
move(&state, .{ .room = state.a_room[0..], .pos = 0 }, .{ .room = state.hallway[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 2 }, .{ .room = state.b_room[0..], .pos = 0 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 5 }, .{ .room = state.hallway[0..], .pos = 4 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 4 }, .{ .room = state.c_room[0..], .pos = 1 }, &cost);
move(&state, .{ .room = state.a_room[0..], .pos = 1 }, .{ .room = state.hallway[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 2 }, .{ .room = state.hallway[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 3 }, .{ .room = state.c_room[0..], .pos = 0 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 1 }, .{ .room = state.a_room[0..], .pos = 1 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 0 }, .{ .room = state.hallway[0..], .pos = 1 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 1 }, .{ .room = state.a_room[0..], .pos = 0 }, &cost);
std.debug.assert(isSolved1(state));
return cost;
}
fn part2(input_: anytype) !u64 {
var state = try State.init(input_);
state.a_room[3] = state.a_room[1];
state.a_room[2] = .d;
state.a_room[1] = .d;
state.b_room[3] = state.b_room[1];
state.b_room[2] = .b;
state.b_room[1] = .c;
state.c_room[3] = state.c_room[1];
state.c_room[2] = .a;
state.c_room[1] = .b;
state.d_room[3] = state.d_room[1];
state.d_room[2] = .c;
state.d_room[1] = .a;
var cost: u64 = 0;
move(&state, .{ .room = state.b_room[0..], .pos = 0 }, .{ .room = state.hallway[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 2 }, .{ .room = state.hallway[0..], .pos = 0 }, &cost);
move(&state, .{ .room = state.b_room[0..], .pos = 1 }, .{ .room = state.hallway[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 3 }, .{ .room = state.hallway[0..], .pos = 6 }, &cost);
move(&state, .{ .room = state.c_room[0..], .pos = 0 }, .{ .room = state.hallway[0..], .pos = 4 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 4 }, .{ .room = state.hallway[0..], .pos = 5 }, &cost);
move(&state, .{ .room = state.c_room[0..], .pos = 1 }, .{ .room = state.hallway[0..], .pos = 4 }, &cost);
move(&state, .{ .room = state.c_room[0..], .pos = 2 }, .{ .room = state.hallway[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 3 }, .{ .room = state.hallway[0..], .pos = 1 }, &cost);
move(&state, .{ .room = state.b_room[0..], .pos = 2 }, .{ .room = state.hallway[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.b_room[0..], .pos = 3 }, .{ .room = state.hallway[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 3 }, .{ .room = state.b_room[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 4 }, .{ .room = state.hallway[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 3 }, .{ .room = state.b_room[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 5 }, .{ .room = state.hallway[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 3 }, .{ .room = state.b_room[0..], .pos = 1 }, &cost);
move(&state, .{ .room = state.c_room[0..], .pos = 3 }, .{ .room = state.hallway[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 6 }, .{ .room = state.hallway[0..], .pos = 4 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 4 }, .{ .room = state.c_room[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.d_room[0..], .pos = 0 }, .{ .room = state.hallway[0..], .pos = 4 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 4 }, .{ .room = state.c_room[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.d_room[0..], .pos = 1 }, .{ .room = state.hallway[0..], .pos = 5 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 5 }, .{ .room = state.hallway[0..], .pos = 6 }, &cost);
move(&state, .{ .room = state.d_room[0..], .pos = 2 }, .{ .room = state.hallway[0..], .pos = 4 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 4 }, .{ .room = state.c_room[0..], .pos = 1 }, &cost);
move(&state, .{ .room = state.d_room[0..], .pos = 3 }, .{ .room = state.hallway[0..], .pos = 5 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 3 }, .{ .room = state.hallway[0..], .pos = 4 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 4 }, .{ .room = state.d_room[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 2 }, .{ .room = state.hallway[0..], .pos = 4 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 4 }, .{ .room = state.d_room[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.a_room[0..], .pos = 0 }, .{ .room = state.hallway[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 2 }, .{ .room = state.b_room[0..], .pos = 0 }, &cost);
move(&state, .{ .room = state.a_room[0..], .pos = 1 }, .{ .room = state.hallway[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 2 }, .{ .room = state.hallway[0..], .pos = 4 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 4 }, .{ .room = state.d_room[0..], .pos = 1 }, &cost);
move(&state, .{ .room = state.a_room[0..], .pos = 2 }, .{ .room = state.hallway[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 2 }, .{ .room = state.hallway[0..], .pos = 4 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 4 }, .{ .room = state.d_room[0..], .pos = 0 }, &cost);
move(&state, .{ .room = state.a_room[0..], .pos = 3 }, .{ .room = state.hallway[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 2 }, .{ .room = state.hallway[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 3 }, .{ .room = state.c_room[0..], .pos = 0 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 1 }, .{ .room = state.a_room[0..], .pos = 3 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 0 }, .{ .room = state.hallway[0..], .pos = 1 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 1 }, .{ .room = state.a_room[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 5 }, .{ .room = state.hallway[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 2 }, .{ .room = state.a_room[0..], .pos = 1 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 6 }, .{ .room = state.hallway[0..], .pos = 2 }, &cost);
move(&state, .{ .room = state.hallway[0..], .pos = 2 }, .{ .room = state.a_room[0..], .pos = 0 }, &cost);
std.debug.assert(isSolved2(state));
return cost;
}
const Spot = enum {
a,
b,
c,
d,
empty,
fn init(c: u8) !@This() {
return switch (c) {
'A' => Spot.a,
'B' => Spot.b,
'C' => Spot.c,
'D' => Spot.d,
else => return error.InvalidInput,
};
}
};
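// State models the burrow with the hallway compressed to its 7 stoppable cells
// (the cells directly above the four rooms can never be stopped on) and each
// room stored top-down, index 0 being the cell next to the hallway. init()
// consumes the first four lines of the usual puzzle diagram, e.g.:
//
//   #############
//   #...........#
//   ###B#C#B#D###
//     #A#D#C#A#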
const State = struct {
hallway: [7]Spot,
a_room: [4]Spot,
b_room: [4]Spot,
c_room: [4]Spot,
d_room: [4]Spot,
fn init(input_: anytype) !@This() {
var state = State {
.hallway = [_]Spot { .empty } ** 7,
.a_room = [_]Spot { .empty } ** 4,
.b_room = [_]Spot { .empty } ** 4,
.c_room = [_]Spot { .empty } ** 4,
.d_room = [_]Spot { .empty } ** 4,
};
{
const line = (try input_.next()) orelse return error.InvalidInput;
if (!std.mem.eql(u8, line, "#############")) {
return error.InvalidInput;
}
}
{
const line = (try input_.next()) orelse return error.InvalidInput;
if (!std.mem.eql(u8, line, "#...........#")) {
return error.InvalidInput;
}
}
{
const line = (try input_.next()) orelse return error.InvalidInput;
if (
line.len != 13 or
!std.mem.eql(u8, line[0..3], "###") or
line[4] != '#' or
line[6] != '#' or
line[8] != '#' or
!std.mem.eql(u8, line[10..13], "###")
) {
return error.InvalidInput;
}
state.a_room[0] = try Spot.init(line[3]);
state.b_room[0] = try Spot.init(line[5]);
state.c_room[0] = try Spot.init(line[7]);
state.d_room[0] = try Spot.init(line[9]);
}
{
const line = (try input_.next()) orelse return error.InvalidInput;
if (
line.len != 11 or
!std.mem.eql(u8, line[0..3], " #") or
line[4] != '#' or
line[6] != '#' or
line[8] != '#' or
line[10] != '#'
) {
return error.InvalidInput;
}
state.a_room[1] = try Spot.init(line[3]);
state.b_room[1] = try Spot.init(line[5]);
state.c_room[1] = try Spot.init(line[7]);
state.d_room[1] = try Spot.init(line[9]);
}
return state;
}
};
fn isSolved1(state: State) bool {
return
isA(state.a_room[0]) and isA(state.a_room[1]) and
isB(state.b_room[0]) and isB(state.b_room[1]) and
isC(state.c_room[0]) and isC(state.c_room[1]) and
isD(state.d_room[0]) and isD(state.d_room[1]);
}
fn isSolved2(state: State) bool {
return
isSolved1(state) and
isA(state.a_room[2]) and isA(state.a_room[3]) and
isB(state.b_room[2]) and isB(state.b_room[3]) and
isC(state.c_room[2]) and isC(state.c_room[3]) and
isD(state.d_room[2]) and isD(state.d_room[3]);
}
fn isA(spot: Spot) bool {
return switch (spot) {
.a => true,
else => false,
};
}
fn isB(spot: Spot) bool {
return switch (spot) {
.b => true,
else => false,
};
}
fn isC(spot: Spot) bool {
return switch (spot) {
.c => true,
else => false,
};
}
fn isD(spot: Spot) bool {
return switch (spot) {
.d => true,
else => false,
};
}
fn isEmpty(spot: Spot) bool {
return switch (spot) {
.empty => true,
else => false,
};
}
const Position = struct {
room: []Spot,
pos: usize,
};
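// Cost model for move(): hallway indices 0..6 map to the stoppable cells of
// the real 11-cell hallway (cells 0,1,3,5,7,9,10). Consecutive stoppable cells
// are 2 real steps apart except at both ends, which is why hallway-to-hallway
// moves double the cost when crossing indices 2..5 (left-to-right) or 1..4
// (right-to-left). Leaving or entering a room at depth `pos` costs pos + 2
// steps: pos + 1 to reach the hallway cell above the room plus 1 more to the
// adjacent stoppable cell.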
fn move(state: *State, start: Position, end: Position, cost: *u64) void {
if (&start.room[0] != &state.hallway[0] and &end.room[0] == &state.hallway[0]) {
// Room to hallway
// All positions above start are clear
{
var i: usize = 0;
while (i < start.pos) : (i += 1) {
std.debug.assert(isEmpty(start.room[i]));
cost.* += moveCost(start.room[start.pos]);
}
}
// End position is outside room
const hallway_positions_outside_room = hallwayPositionsOutsideRoom(state, start.room);
std.debug.assert(end.pos == hallway_positions_outside_room.left_pos or end.pos == hallway_positions_outside_room.right_pos);
cost.* += moveCost(start.room[start.pos]) * 2;
}
else if (&start.room[0] == &state.hallway[0] and &end.room[0] != &state.hallway[0]) {
// Hallway to room
// Start position is outside room
const hallway_positions_outside_room = hallwayPositionsOutsideRoom(state, end.room);
std.debug.assert(start.pos == hallway_positions_outside_room.left_pos or start.pos == hallway_positions_outside_room.right_pos);
cost.* += moveCost(start.room[start.pos]);
// All positions until end are clear
{
var i: usize = 0;
while (i <= end.pos) : (i += 1) {
std.debug.assert(isEmpty(end.room[i]));
cost.* += moveCost(start.room[start.pos]);
}
}
}
else if (&start.room[0] == &state.hallway[0] and &end.room[0] == &state.hallway[0]) {
// Hallway to hallway
switch (std.math.order(start.pos, end.pos)) {
.lt => {
var i = start.pos + 1;
while (i <= end.pos) : (i += 1) {
std.debug.assert(isEmpty(state.hallway[i]));
cost.* += moveCost(start.room[start.pos]);
switch (i) {
2, 3, 4, 5 => cost.* += moveCost(start.room[start.pos]),
else => {},
}
}
},
.eq => unreachable,
.gt => {
var i = end.pos;
while (i < start.pos) : (i += 1) {
std.debug.assert(isEmpty(state.hallway[i]));
cost.* += moveCost(start.room[start.pos]);
switch (i) {
1, 2, 3, 4 => cost.* += moveCost(start.room[start.pos]),
else => {},
}
}
},
}
}
else {
unreachable;
}
end.room[end.pos] = start.room[start.pos];
start.room[start.pos] = .empty;
}
fn moveCost(spot: Spot) u64 {
return switch (spot) {
.a => 1,
.b => 10,
.c => 100,
.d => 1000,
else => unreachable,
};
}
const HallwayPositionOutsideRoomResult = struct {
left_pos: usize,
right_pos: usize,
};
fn hallwayPositionsOutsideRoom(state: *const State, room: []const Spot) HallwayPositionOutsideRoomResult {
return
if (&room[0] == &state.a_room[0]) HallwayPositionOutsideRoomResult { .left_pos = 1, .right_pos = 2 }
else if (&room[0] == &state.b_room[0]) HallwayPositionOutsideRoomResult { .left_pos = 2, .right_pos = 3 }
else if (&room[0] == &state.c_room[0]) HallwayPositionOutsideRoomResult { .left_pos = 3, .right_pos = 4 }
else if (&room[0] == &state.d_room[0]) HallwayPositionOutsideRoomResult { .left_pos = 4, .right_pos = 5 }
else unreachable;
} | src/day23.zig |
const std = @import("../../std.zig");
const maxInt = std.math.maxInt;
pub fn S_ISCHR(m: u32) bool {
return m & S_IFMT == S_IFCHR;
}
// See:
// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/include/unistd.h
// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/sys/sys/types.h
// TODO: mode_t should probably be changed to a u16, audit pid_t/off_t as well
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const off_t = c_long;
pub const mode_t = c_uint;
pub const uid_t = u32;
pub const gid_t = u32;
pub const time_t = isize;
pub const suseconds_t = c_long;
pub const ENOTSUP = EOPNOTSUPP;
pub const EWOULDBLOCK = EAGAIN;
pub const EPERM = 1;
pub const ENOENT = 2;
pub const ESRCH = 3;
pub const EINTR = 4;
pub const EIO = 5;
pub const ENXIO = 6;
pub const E2BIG = 7;
pub const ENOEXEC = 8;
pub const EBADF = 9;
pub const ECHILD = 10;
pub const EDEADLK = 11;
pub const ENOMEM = 12;
pub const EACCES = 13;
pub const EFAULT = 14;
pub const ENOTBLK = 15;
pub const EBUSY = 16;
pub const EEXIST = 17;
pub const EXDEV = 18;
pub const ENODEV = 19;
pub const ENOTDIR = 20;
pub const EISDIR = 21;
pub const EINVAL = 22;
pub const ENFILE = 23;
pub const EMFILE = 24;
pub const ENOTTY = 25;
pub const ETXTBSY = 26;
pub const EFBIG = 27;
pub const ENOSPC = 28;
pub const ESPIPE = 29;
pub const EROFS = 30;
pub const EMLINK = 31;
pub const EPIPE = 32;
pub const EDOM = 33;
pub const ERANGE = 34;
pub const EAGAIN = 35;
pub const EINPROGRESS = 36;
pub const EALREADY = 37;
pub const ENOTSOCK = 38;
pub const EDESTADDRREQ = 39;
pub const EMSGSIZE = 40;
pub const EPROTOTYPE = 41;
pub const ENOPROTOOPT = 42;
pub const EPROTONOSUPPORT = 43;
pub const ESOCKTNOSUPPORT = 44;
pub const EOPNOTSUPP = 45;
pub const EPFNOSUPPORT = 46;
pub const EAFNOSUPPORT = 47;
pub const EADDRINUSE = 48;
pub const EADDRNOTAVAIL = 49;
pub const ENETDOWN = 50;
pub const ENETUNREACH = 51;
pub const ENETRESET = 52;
pub const ECONNABORTED = 53;
pub const ECONNRESET = 54;
pub const ENOBUFS = 55;
pub const EISCONN = 56;
pub const ENOTCONN = 57;
pub const ESHUTDOWN = 58;
pub const ETOOMANYREFS = 59;
pub const ETIMEDOUT = 60;
pub const ECONNREFUSED = 61;
pub const ELOOP = 62;
pub const ENAMETOOLONG = 63;
pub const EHOSTDOWN = 64;
pub const EHOSTUNREACH = 65;
pub const ENOTEMPTY = 66;
pub const EPROCLIM = 67;
pub const EUSERS = 68;
pub const EDQUOT = 69;
pub const ESTALE = 70;
pub const EREMOTE = 71;
pub const EBADRPC = 72;
pub const ERPCMISMATCH = 73;
pub const EPROGUNAVAIL = 74;
pub const EPROGMISMATCH = 75;
pub const EPROCUNAVAIL = 76;
pub const ENOLCK = 77;
pub const ENOSYS = 78;
pub const EFTYPE = 79;
pub const EAUTH = 80;
pub const ENEEDAUTH = 81;
pub const EIDRM = 82;
pub const ENOMSG = 83;
pub const EOVERFLOW = 84;
pub const ECANCELED = 85;
pub const EILSEQ = 86;
pub const ENOATTR = 87;
pub const EDOOFUS = 88;
pub const EBADMSG = 89;
pub const EMULTIHOP = 90;
pub const ENOLINK = 91;
pub const EPROTO = 92;
pub const ENOMEDIUM = 93;
pub const ELAST = 99;
pub const EASYNC = 99;
pub const STDIN_FILENO = 0;
pub const STDOUT_FILENO = 1;
pub const STDERR_FILENO = 2;
pub const PROT_NONE = 0;
pub const PROT_READ = 1;
pub const PROT_WRITE = 2;
pub const PROT_EXEC = 4;
pub const MAP_FILE = 0;
pub const MAP_FAILED = @intToPtr(*c_void, maxInt(usize));
pub const MAP_ANONYMOUS = MAP_ANON;
pub const MAP_COPY = MAP_PRIVATE;
pub const MAP_SHARED = 1;
pub const MAP_PRIVATE = 2;
pub const MAP_FIXED = 16;
pub const MAP_RENAME = 32;
pub const MAP_NORESERVE = 64;
pub const MAP_INHERIT = 128;
pub const MAP_NOEXTEND = 256;
pub const MAP_HASSEMAPHORE = 512;
pub const MAP_STACK = 1024;
pub const MAP_NOSYNC = 2048;
pub const MAP_ANON = 4096;
pub const MAP_VPAGETABLE = 8192;
pub const MAP_TRYFIXED = 65536;
pub const MAP_NOCORE = 131072;
pub const MAP_SIZEALIGN = 262144;
pub const WNOHANG = 0x0001;
pub const WUNTRACED = 0x0002;
pub const WCONTINUED = 0x0004;
pub const WSTOPPED = WUNTRACED;
pub const WNOWAIT = 0x0008;
pub const WEXITED = 0x0010;
pub const WTRAPPED = 0x0020;
pub const SA_ONSTACK = 0x0001;
pub const SA_RESTART = 0x0002;
pub const SA_RESETHAND = 0x0004;
pub const SA_NODEFER = 0x0010;
pub const SA_NOCLDWAIT = 0x0020;
pub const SA_SIGINFO = 0x0040;
pub const PATH_MAX = 1024;
pub const ino_t = c_ulong;
pub const libc_stat = extern struct {
ino: ino_t,
nlink: c_uint,
dev: c_uint,
mode: c_ushort,
padding1: u16,
uid: uid_t,
gid: gid_t,
rdev: c_uint,
atim: timespec,
mtim: timespec,
ctim: timespec,
size: c_ulong,
blocks: i64,
blksize: u32,
flags: u32,
gen: u32,
lspare: i32,
qspare1: i64,
qspare2: i64,
pub fn atime(self: @This()) timespec {
return self.atim;
}
pub fn mtime(self: @This()) timespec {
return self.mtim;
}
pub fn ctime(self: @This()) timespec {
return self.ctim;
}
};
pub const timespec = extern struct {
tv_sec: c_long,
tv_nsec: c_long,
};
pub const timeval = extern struct {
/// seconds
tv_sec: time_t,
/// microseconds
tv_usec: suseconds_t,
};
pub const CTL_UNSPEC = 0;
pub const CTL_KERN = 1;
pub const CTL_VM = 2;
pub const CTL_VFS = 3;
pub const CTL_NET = 4;
pub const CTL_DEBUG = 5;
pub const CTL_HW = 6;
pub const CTL_MACHDEP = 7;
pub const CTL_USER = 8;
pub const CTL_LWKT = 10;
pub const CTL_MAXID = 11;
pub const CTL_MAXNAME = 12;
pub const KERN_PROC_ALL = 0;
pub const KERN_OSTYPE = 1;
pub const KERN_PROC_PID = 1;
pub const KERN_OSRELEASE = 2;
pub const KERN_PROC_PGRP = 2;
pub const KERN_OSREV = 3;
pub const KERN_PROC_SESSION = 3;
pub const KERN_VERSION = 4;
pub const KERN_PROC_TTY = 4;
pub const KERN_MAXVNODES = 5;
pub const KERN_PROC_UID = 5;
pub const KERN_MAXPROC = 6;
pub const KERN_PROC_RUID = 6;
pub const KERN_MAXFILES = 7;
pub const KERN_PROC_ARGS = 7;
pub const KERN_ARGMAX = 8;
pub const KERN_PROC_CWD = 8;
pub const KERN_PROC_PATHNAME = 9;
pub const KERN_SECURELVL = 9;
pub const KERN_PROC_SIGTRAMP = 10;
pub const KERN_HOSTNAME = 10;
pub const KERN_HOSTID = 11;
pub const KERN_CLOCKRATE = 12;
pub const KERN_VNODE = 13;
pub const KERN_PROC = 14;
pub const KERN_FILE = 15;
pub const KERN_PROC_FLAGMASK = 16;
pub const KERN_PROF = 16;
pub const KERN_PROC_FLAG_LWP = 16;
pub const KERN_POSIX1 = 17;
pub const KERN_NGROUPS = 18;
pub const KERN_JOB_CONTROL = 19;
pub const KERN_SAVED_IDS = 20;
pub const KERN_BOOTTIME = 21;
pub const KERN_NISDOMAINNAME = 22;
pub const KERN_UPDATEINTERVAL = 23;
pub const KERN_OSRELDATE = 24;
pub const KERN_NTP_PLL = 25;
pub const KERN_BOOTFILE = 26;
pub const KERN_MAXFILESPERPROC = 27;
pub const KERN_MAXPROCPERUID = 28;
pub const KERN_DUMPDEV = 29;
pub const KERN_IPC = 30;
pub const KERN_DUMMY = 31;
pub const KERN_PS_STRINGS = 32;
pub const KERN_USRSTACK = 33;
pub const KERN_LOGSIGEXIT = 34;
pub const KERN_IOV_MAX = 35;
pub const KERN_MAXPOSIXLOCKSPERUID = 36;
pub const KERN_MAXID = 37;
pub const HOST_NAME_MAX = 255;
// access function
pub const F_OK = 0; // test for existence of file
pub const X_OK = 1; // test for execute or search permission
pub const W_OK = 2; // test for write permission
pub const R_OK = 4; // test for read permission
pub const O_RDONLY = 0;
pub const O_NDELAY = O_NONBLOCK;
pub const O_WRONLY = 1;
pub const O_RDWR = 2;
pub const O_ACCMODE = 3;
pub const O_NONBLOCK = 4;
pub const O_APPEND = 8;
pub const O_SHLOCK = 16;
pub const O_EXLOCK = 32;
pub const O_ASYNC = 64;
pub const O_FSYNC = 128;
pub const O_SYNC = 128;
pub const O_NOFOLLOW = 256;
pub const O_CREAT = 512;
pub const O_TRUNC = 1024;
pub const O_EXCL = 2048;
pub const O_NOCTTY = 32768;
pub const O_DIRECT = 65536;
pub const O_CLOEXEC = 131072;
pub const O_FBLOCKING = 262144;
pub const O_FNONBLOCKING = 524288;
pub const O_FAPPEND = 1048576;
pub const O_FOFFSET = 2097152;
pub const O_FSYNCWRITE = 4194304;
pub const O_FASYNCWRITE = 8388608;
pub const O_DIRECTORY = 134217728;
pub const SEEK_SET = 0;
pub const SEEK_CUR = 1;
pub const SEEK_END = 2;
pub const SEEK_DATA = 3;
pub const SEEK_HOLE = 4;
pub const F_ULOCK = 0;
pub const F_LOCK = 1;
pub const F_TLOCK = 2;
pub const F_TEST = 3;
pub const FD_CLOEXEC = 1;
pub const AT_FDCWD = -328243;
pub const AT_SYMLINK_NOFOLLOW = 1;
pub const AT_REMOVEDIR = 2;
pub const AT_EACCESS = 4;
pub const AT_SYMLINK_FOLLOW = 8;
pub fn WEXITSTATUS(s: u32) u32 {
return (s & 0xff00) >> 8;
}
pub fn WTERMSIG(s: u32) u32 {
return s & 0x7f;
}
pub fn WSTOPSIG(s: u32) u32 {
return WEXITSTATUS(s);
}
pub fn WIFEXITED(s: u32) bool {
return WTERMSIG(s) == 0;
}
pub fn WIFSTOPPED(s: u32) bool {
return @intCast(u16, (((s & 0xffff) *% 0x10001) >> 8)) > 0x7f00;
}
pub fn WIFSIGNALED(s: u32) bool {
return (s & 0xffff) -% 1 < 0xff;
}
pub const dirent = extern struct {
d_fileno: c_ulong,
d_namlen: u16,
d_type: u8,
d_unused1: u8,
d_unused2: u32,
d_name: [256]u8,
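/// Length of this record: offset of `d_name` plus the name bytes and the
/// terminating NUL, rounded up to a multiple of 8.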
pub fn reclen(self: dirent) u16 {
return (@byteOffsetOf(dirent, "d_name") + self.d_namlen + 1 + 7) & ~@as(u16, 7);
}
};
pub const DT_UNKNOWN = 0;
pub const DT_FIFO = 1;
pub const DT_CHR = 2;
pub const DT_DIR = 4;
pub const DT_BLK = 6;
pub const DT_REG = 8;
pub const DT_LNK = 10;
pub const DT_SOCK = 12;
pub const DT_WHT = 14;
pub const DT_DBF = 15;
pub const CLOCK_REALTIME = 0;
pub const CLOCK_VIRTUAL = 1;
pub const CLOCK_PROF = 2;
pub const CLOCK_MONOTONIC = 4;
pub const CLOCK_UPTIME = 5;
pub const CLOCK_UPTIME_PRECISE = 7;
pub const CLOCK_UPTIME_FAST = 8;
pub const CLOCK_REALTIME_PRECISE = 9;
pub const CLOCK_REALTIME_FAST = 10;
pub const CLOCK_MONOTONIC_PRECISE = 11;
pub const CLOCK_MONOTONIC_FAST = 12;
pub const CLOCK_SECOND = 13;
pub const CLOCK_THREAD_CPUTIME_ID = 14;
pub const CLOCK_PROCESS_CPUTIME_ID = 15;
pub const sockaddr = extern struct {
len: u8,
family: u8,
data: [14]u8,
};
pub const Kevent = extern struct {
ident: usize,
filter: c_short,
flags: c_ushort,
fflags: c_uint,
data: isize,
udata: usize,
};
pub const EVFILT_FS = -10;
pub const EVFILT_USER = -9;
pub const EVFILT_EXCEPT = -8;
pub const EVFILT_TIMER = -7;
pub const EVFILT_SIGNAL = -6;
pub const EVFILT_PROC = -5;
pub const EVFILT_VNODE = -4;
pub const EVFILT_AIO = -3;
pub const EVFILT_WRITE = -2;
pub const EVFILT_READ = -1;
pub const EVFILT_SYSCOUNT = 10;
pub const EVFILT_MARKER = 15;
pub const EV_ADD = 1;
pub const EV_DELETE = 2;
pub const EV_ENABLE = 4;
pub const EV_DISABLE = 8;
pub const EV_ONESHOT = 16;
pub const EV_CLEAR = 32;
pub const EV_RECEIPT = 64;
pub const EV_DISPATCH = 128;
pub const EV_NODATA = 4096;
pub const EV_FLAG1 = 8192;
pub const EV_ERROR = 16384;
pub const EV_EOF = 32768;
pub const EV_SYSFLAGS = 61440;
pub const NOTE_FFNOP = 0;
pub const NOTE_TRACK = 1;
pub const NOTE_DELETE = 1;
pub const NOTE_LOWAT = 1;
pub const NOTE_TRACKERR = 2;
pub const NOTE_OOB = 2;
pub const NOTE_WRITE = 2;
pub const NOTE_EXTEND = 4;
pub const NOTE_CHILD = 4;
pub const NOTE_ATTRIB = 8;
pub const NOTE_LINK = 16;
pub const NOTE_RENAME = 32;
pub const NOTE_REVOKE = 64;
pub const NOTE_PDATAMASK = 1048575;
pub const NOTE_FFLAGSMASK = 16777215;
pub const NOTE_TRIGGER = 16777216;
pub const NOTE_EXEC = 536870912;
pub const NOTE_FFAND = 1073741824;
pub const NOTE_FORK = 1073741824;
pub const NOTE_EXIT = 2147483648;
pub const NOTE_FFOR = 2147483648;
pub const NOTE_FFCTRLMASK = 3221225472;
pub const NOTE_FFCOPY = 3221225472;
pub const NOTE_PCTRLMASK = 4026531840;
pub const stack_t = extern struct {
ss_sp: [*]u8,
ss_size: isize,
ss_flags: i32,
};
pub const S_IREAD = S_IRUSR;
pub const S_IEXEC = S_IXUSR;
pub const S_IWRITE = S_IWUSR;
pub const S_IXOTH = 1;
pub const S_IWOTH = 2;
pub const S_IROTH = 4;
pub const S_IRWXO = 7;
pub const S_IXGRP = 8;
pub const S_IWGRP = 16;
pub const S_IRGRP = 32;
pub const S_IRWXG = 56;
pub const S_IXUSR = 64;
pub const S_IWUSR = 128;
pub const S_IRUSR = 256;
pub const S_IRWXU = 448;
pub const S_ISTXT = 512;
pub const S_BLKSIZE = 512;
pub const S_ISVTX = 512;
pub const S_ISGID = 1024;
pub const S_ISUID = 2048;
pub const S_IFIFO = 4096;
pub const S_IFCHR = 8192;
pub const S_IFDIR = 16384;
pub const S_IFBLK = 24576;
pub const S_IFREG = 32768;
pub const S_IFDB = 36864;
pub const S_IFLNK = 40960;
pub const S_IFSOCK = 49152;
pub const S_IFWHT = 57344;
pub const S_IFMT = 61440;
pub const SIG_DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
pub const SIG_IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
pub const SIG_ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
pub const BADSIG = SIG_ERR;
pub const SIG_BLOCK = 1;
pub const SIG_UNBLOCK = 2;
pub const SIG_SETMASK = 3;
pub const SIGIOT = SIGABRT;
pub const SIGHUP = 1;
pub const SIGINT = 2;
pub const SIGQUIT = 3;
pub const SIGILL = 4;
pub const SIGTRAP = 5;
pub const SIGABRT = 6;
pub const SIGEMT = 7;
pub const SIGFPE = 8;
pub const SIGKILL = 9;
pub const SIGBUS = 10;
pub const SIGSEGV = 11;
pub const SIGSYS = 12;
pub const SIGPIPE = 13;
pub const SIGALRM = 14;
pub const SIGTERM = 15;
pub const SIGURG = 16;
pub const SIGSTOP = 17;
pub const SIGTSTP = 18;
pub const SIGCONT = 19;
pub const SIGCHLD = 20;
pub const SIGTTIN = 21;
pub const SIGTTOU = 22;
pub const SIGIO = 23;
pub const SIGXCPU = 24;
pub const SIGXFSZ = 25;
pub const SIGVTALRM = 26;
pub const SIGPROF = 27;
pub const SIGWINCH = 28;
pub const SIGINFO = 29;
pub const SIGUSR1 = 30;
pub const SIGUSR2 = 31;
pub const SIGTHR = 32;
pub const SIGCKPT = 33;
pub const SIGCKPTEXIT = 34;
pub const siginfo_t = extern struct {
signo: c_int,
errno: c_int,
code: c_int,
pid: c_int,
uid: uid_t,
status: c_int,
addr: ?*c_void,
value: sigval,
band: c_long,
__spare__: [7]c_int,
};
pub const sigval = extern union {
sival_int: c_int,
sival_ptr: ?*c_void,
};
pub const _SIG_WORDS = 4;
pub const sigset_t = extern struct {
__bits: [_SIG_WORDS]c_uint,
};
pub const empty_sigset = sigset_t{ .__bits = [_]c_uint{0} ** _SIG_WORDS };
pub const sig_atomic_t = c_int;
pub const Sigaction = extern struct {
pub const handler_fn = fn (c_int) callconv(.C) void;
pub const sigaction_fn = fn (c_int, *const siginfo_t, ?*const c_void) callconv(.C) void;
/// signal handler
handler: extern union {
handler: ?handler_fn,
sigaction: ?sigaction_fn,
},
flags: c_uint,
mask: sigset_t,
};
pub const sig_t = [*c]fn (c_int) callconv(.C) void;
pub const SOCK_STREAM = 1;
pub const SOCK_DGRAM = 2;
pub const SOCK_RAW = 3;
pub const SOCK_RDM = 4;
pub const SOCK_SEQPACKET = 5;
pub const SOCK_MAXADDRLEN = 255;
pub const SOCK_CLOEXEC = 0x10000000;
pub const SOCK_NONBLOCK = 0x20000000;
pub const SO_DEBUG = 0x0001;
pub const SO_ACCEPTCONN = 0x0002;
pub const SO_REUSEADDR = 0x0004;
pub const SO_KEEPALIVE = 0x0008;
pub const SO_DONTROUTE = 0x0010;
pub const SO_BROADCAST = 0x0020;
pub const SO_USELOOPBACK = 0x0040;
pub const SO_LINGER = 0x0080;
pub const SO_OOBINLINE = 0x0100;
pub const SO_REUSEPORT = 0x0200;
pub const SO_TIMESTAMP = 0x0400;
pub const SO_NOSIGPIPE = 0x0800;
pub const SO_ACCEPTFILTER = 0x1000;
pub const SO_RERROR = 0x2000;
pub const SO_PASSCRED = 0x4000;
pub const SO_SNDBUF = 0x1001;
pub const SO_RCVBUF = 0x1002;
pub const SO_SNDLOWAT = 0x1003;
pub const SO_RCVLOWAT = 0x1004;
pub const SO_SNDTIMEO = 0x1005;
pub const SO_RCVTIMEO = 0x1006;
pub const SO_ERROR = 0x1007;
pub const SO_TYPE = 0x1008;
pub const SO_SNDSPACE = 0x100a;
pub const SO_CPUHINT = 0x1030;
pub const SOL_SOCKET = 0xffff;
pub const PF_INET6 = AF_INET6;
pub const PF_IMPLINK = AF_IMPLINK;
pub const PF_ROUTE = AF_ROUTE;
pub const PF_ISO = AF_ISO;
pub const PF_PIP = pseudo_AF_PIP;
pub const PF_CHAOS = AF_CHAOS;
pub const PF_DATAKIT = AF_DATAKIT;
pub const PF_INET = AF_INET;
pub const PF_APPLETALK = AF_APPLETALK;
pub const PF_SIP = AF_SIP;
pub const PF_OSI = AF_ISO;
pub const PF_CNT = AF_CNT;
pub const PF_LINK = AF_LINK;
pub const PF_HYLINK = AF_HYLINK;
pub const PF_MAX = AF_MAX;
pub const PF_KEY = pseudo_AF_KEY;
pub const PF_PUP = AF_PUP;
pub const PF_COIP = AF_COIP;
pub const PF_SNA = AF_SNA;
pub const PF_LOCAL = AF_LOCAL;
pub const PF_NETBIOS = AF_NETBIOS;
pub const PF_NATM = AF_NATM;
pub const PF_BLUETOOTH = AF_BLUETOOTH;
pub const PF_UNSPEC = AF_UNSPEC;
pub const PF_NETGRAPH = AF_NETGRAPH;
pub const PF_ECMA = AF_ECMA;
pub const PF_IPX = AF_IPX;
pub const PF_DLI = AF_DLI;
pub const PF_ATM = AF_ATM;
pub const PF_CCITT = AF_CCITT;
pub const PF_ISDN = AF_ISDN;
pub const PF_RTIP = pseudo_AF_RTIP;
pub const PF_LAT = AF_LAT;
pub const PF_UNIX = PF_LOCAL;
pub const PF_XTP = pseudo_AF_XTP;
pub const PF_DECnet = AF_DECnet;
pub const AF_UNSPEC = 0;
pub const AF_OSI = AF_ISO;
pub const AF_UNIX = AF_LOCAL;
pub const AF_LOCAL = 1;
pub const AF_INET = 2;
pub const AF_IMPLINK = 3;
pub const AF_PUP = 4;
pub const AF_CHAOS = 5;
pub const AF_NETBIOS = 6;
pub const AF_ISO = 7;
pub const AF_ECMA = 8;
pub const AF_DATAKIT = 9;
pub const AF_CCITT = 10;
pub const AF_SNA = 11;
pub const AF_DLI = 13;
pub const AF_LAT = 14;
pub const AF_HYLINK = 15;
pub const AF_APPLETALK = 16;
pub const AF_ROUTE = 17;
pub const AF_LINK = 18;
pub const AF_COIP = 20;
pub const AF_CNT = 21;
pub const AF_IPX = 23;
pub const AF_SIP = 24;
pub const AF_ISDN = 26;
pub const AF_INET6 = 28;
pub const AF_NATM = 29;
pub const AF_ATM = 30;
pub const AF_NETGRAPH = 32;
pub const AF_BLUETOOTH = 33;
pub const AF_MPLS = 34;
pub const AF_MAX = 36;
pub const in_port_t = u16;
pub const sa_family_t = u8;
pub const socklen_t = u32;
pub const sockaddr_storage = extern struct {
ss_len: u8,
ss_family: sa_family_t,
__ss_pad1: [5]u8,
__ss_align: i64,
__ss_pad2: [112]u8,
};
pub const sockaddr_in = extern struct {
len: u8 = @sizeOf(sockaddr_in),
family: sa_family_t = AF_INET,
port: in_port_t,
addr: u32,
zero: [8]u8 = [8]u8{ 0, 0, 0, 0, 0, 0, 0, 0 },
};
pub const sockaddr_in6 = extern struct {
len: u8 = @sizeOf(sockaddr_in6),
family: sa_family_t = AF_INET6,
port: in_port_t,
flowinfo: u32,
addr: [16]u8,
scope_id: u32,
};
pub const EAI = enum(c_int) {
ADDRFAMILY = 1,
AGAIN = 2,
BADFLAGS = 3,
FAIL = 4,
FAMILY = 5,
MEMORY = 6,
NODATA = 7,
NONAME = 8,
SERVICE = 9,
SOCKTYPE = 10,
SYSTEM = 11,
BADHINTS = 12,
PROTOCOL = 13,
OVERFLOW = 14,
_,
};
pub const AI_PASSIVE = 0x00000001;
pub const AI_CANONNAME = 0x00000002;
pub const AI_NUMERICHOST = 0x00000004;
pub const AI_NUMERICSERV = 0x00000008;
pub const AI_MASK = AI_PASSIVE | AI_CANONNAME | AI_NUMERICHOST | AI_NUMERICSERV | AI_ADDRCONFIG;
pub const AI_ALL = 0x00000100;
pub const AI_V4MAPPED_CFG = 0x00000200;
pub const AI_ADDRCONFIG = 0x00000400;
pub const AI_V4MAPPED = 0x00000800;
pub const AI_DEFAULT = AI_V4MAPPED_CFG | AI_ADDRCONFIG;
pub const RTLD_LAZY = 1;
pub const RTLD_NOW = 2;
pub const RTLD_MODEMASK = 0x3;
pub const RTLD_GLOBAL = 0x100;
pub const RTLD_LOCAL = 0;
pub const RTLD_TRACE = 0x200;
pub const RTLD_NODELETE = 0x01000;
pub const RTLD_NOLOAD = 0x02000;
pub const RTLD_NEXT = @intToPtr(*c_void, @bitCast(usize, @as(isize, -1)));
pub const RTLD_DEFAULT = @intToPtr(*c_void, @bitCast(usize, @as(isize, -2)));
pub const RTLD_SELF = @intToPtr(*c_void, @bitCast(usize, @as(isize, -3)));
pub const RTLD_ALL = @intToPtr(*c_void, @bitCast(usize, @as(isize, -4)));
pub const dl_phdr_info = extern struct {
dlpi_addr: usize,
dlpi_name: ?[*:0]const u8,
dlpi_phdr: [*]std.elf.Phdr,
dlpi_phnum: u16,
};
pub const msghdr = extern struct {
msg_name: ?*c_void,
msg_namelen: socklen_t,
msg_iov: [*c]iovec,
msg_iovlen: c_int,
msg_control: ?*c_void,
msg_controllen: socklen_t,
msg_flags: c_int,
};
pub const cmsghdr = extern struct {
cmsg_len: socklen_t,
cmsg_level: c_int,
cmsg_type: c_int,
};
pub const cmsgcred = extern struct {
cmcred_pid: pid_t,
cmcred_uid: uid_t,
cmcred_euid: uid_t,
cmcred_gid: gid_t,
cmcred_ngroups: c_short,
cmcred_groups: [16]gid_t,
};
pub const sf_hdtr = extern struct {
headers: [*c]iovec,
hdr_cnt: c_int,
trailers: [*c]iovec,
trl_cnt: c_int,
};
pub const MS_SYNC = 0;
pub const MS_ASYNC = 1;
pub const MS_INVALIDATE = 2;
pub const POSIX_MADV_SEQUENTIAL = 2;
pub const POSIX_MADV_RANDOM = 1;
pub const POSIX_MADV_DONTNEED = 4;
pub const POSIX_MADV_NORMAL = 0;
pub const POSIX_MADV_WILLNEED = 3;
pub const MADV_SEQUENTIAL = 2;
pub const MADV_CONTROL_END = MADV_SETMAP;
pub const MADV_DONTNEED = 4;
pub const MADV_RANDOM = 1;
pub const MADV_WILLNEED = 3;
pub const MADV_NORMAL = 0;
pub const MADV_CONTROL_START = MADV_INVAL;
pub const MADV_FREE = 5;
pub const MADV_NOSYNC = 6;
pub const MADV_AUTOSYNC = 7;
pub const MADV_NOCORE = 8;
pub const MADV_CORE = 9;
pub const MADV_INVAL = 10;
pub const MADV_SETMAP = 11;
pub const F_DUPFD = 0;
pub const F_GETFD = 1;
pub const F_RDLCK = 1;
pub const F_SETFD = 2;
pub const F_UNLCK = 2;
pub const F_WRLCK = 3;
pub const F_GETFL = 3;
pub const F_SETFL = 4;
pub const F_GETOWN = 5;
pub const F_SETOWN = 6;
pub const F_GETLK = 7;
pub const F_SETLK = 8;
pub const F_SETLKW = 9;
pub const F_DUP2FD = 10;
pub const F_DUPFD_CLOEXEC = 17;
pub const F_DUP2FD_CLOEXEC = 18;
pub const LOCK_SH = 1;
pub const LOCK_EX = 2;
pub const LOCK_UN = 8;
pub const LOCK_NB = 4;
pub const Flock = extern struct {
l_start: off_t,
l_len: off_t,
l_pid: pid_t,
l_type: c_short,
l_whence: c_short,
};
pub const addrinfo = extern struct {
flags: i32,
family: i32,
socktype: i32,
protocol: i32,
addrlen: socklen_t,
canonname: ?[*:0]u8,
addr: ?*sockaddr,
next: ?*addrinfo,
};
pub const IPPROTO_IP = 0;
pub const IPPROTO_ICMP = 1;
pub const IPPROTO_TCP = 6;
pub const IPPROTO_UDP = 17;
pub const IPPROTO_IPV6 = 41;
pub const IPPROTO_RAW = 255;
pub const IPPROTO_HOPOPTS = 0;
pub const IPPROTO_IGMP = 2;
pub const IPPROTO_GGP = 3;
pub const IPPROTO_IPV4 = 4;
pub const IPPROTO_IPIP = IPPROTO_IPV4;
pub const IPPROTO_ST = 7;
pub const IPPROTO_EGP = 8;
pub const IPPROTO_PIGP = 9;
pub const IPPROTO_RCCMON = 10;
pub const IPPROTO_NVPII = 11;
pub const IPPROTO_PUP = 12;
pub const IPPROTO_ARGUS = 13;
pub const IPPROTO_EMCON = 14;
pub const IPPROTO_XNET = 15;
pub const IPPROTO_CHAOS = 16;
pub const IPPROTO_MUX = 18;
pub const IPPROTO_MEAS = 19;
pub const IPPROTO_HMP = 20;
pub const IPPROTO_PRM = 21;
pub const IPPROTO_IDP = 22;
pub const IPPROTO_TRUNK1 = 23;
pub const IPPROTO_TRUNK2 = 24;
pub const IPPROTO_LEAF1 = 25;
pub const IPPROTO_LEAF2 = 26;
pub const IPPROTO_RDP = 27;
pub const IPPROTO_IRTP = 28;
pub const IPPROTO_TP = 29;
pub const IPPROTO_BLT = 30;
pub const IPPROTO_NSP = 31;
pub const IPPROTO_INP = 32;
pub const IPPROTO_SEP = 33;
pub const IPPROTO_3PC = 34;
pub const IPPROTO_IDPR = 35;
pub const IPPROTO_XTP = 36;
pub const IPPROTO_DDP = 37;
pub const IPPROTO_CMTP = 38;
pub const IPPROTO_TPXX = 39;
pub const IPPROTO_IL = 40;
pub const IPPROTO_SDRP = 42;
pub const IPPROTO_ROUTING = 43;
pub const IPPROTO_FRAGMENT = 44;
pub const IPPROTO_IDRP = 45;
pub const IPPROTO_RSVP = 46;
pub const IPPROTO_GRE = 47;
pub const IPPROTO_MHRP = 48;
pub const IPPROTO_BHA = 49;
pub const IPPROTO_ESP = 50;
pub const IPPROTO_AH = 51;
pub const IPPROTO_INLSP = 52;
pub const IPPROTO_SWIPE = 53;
pub const IPPROTO_NHRP = 54;
pub const IPPROTO_MOBILE = 55;
pub const IPPROTO_TLSP = 56;
pub const IPPROTO_SKIP = 57;
pub const IPPROTO_ICMPV6 = 58;
pub const IPPROTO_NONE = 59;
pub const IPPROTO_DSTOPTS = 60;
pub const IPPROTO_AHIP = 61;
pub const IPPROTO_CFTP = 62;
pub const IPPROTO_HELLO = 63;
pub const IPPROTO_SATEXPAK = 64;
pub const IPPROTO_KRYPTOLAN = 65;
pub const IPPROTO_RVD = 66;
pub const IPPROTO_IPPC = 67;
pub const IPPROTO_ADFS = 68;
pub const IPPROTO_SATMON = 69;
pub const IPPROTO_VISA = 70;
pub const IPPROTO_IPCV = 71;
pub const IPPROTO_CPNX = 72;
pub const IPPROTO_CPHB = 73;
pub const IPPROTO_WSN = 74;
pub const IPPROTO_PVP = 75;
pub const IPPROTO_BRSATMON = 76;
pub const IPPROTO_ND = 77;
pub const IPPROTO_WBMON = 78;
pub const IPPROTO_WBEXPAK = 79;
pub const IPPROTO_EON = 80;
pub const IPPROTO_VMTP = 81;
pub const IPPROTO_SVMTP = 82;
pub const IPPROTO_VINES = 83;
pub const IPPROTO_TTP = 84;
pub const IPPROTO_IGP = 85;
pub const IPPROTO_DGP = 86;
pub const IPPROTO_TCF = 87;
pub const IPPROTO_IGRP = 88;
pub const IPPROTO_OSPFIGP = 89;
pub const IPPROTO_SRPC = 90;
pub const IPPROTO_LARP = 91;
pub const IPPROTO_MTP = 92;
pub const IPPROTO_AX25 = 93;
pub const IPPROTO_IPEIP = 94;
pub const IPPROTO_MICP = 95;
pub const IPPROTO_SCCSP = 96;
pub const IPPROTO_ETHERIP = 97;
pub const IPPROTO_ENCAP = 98;
pub const IPPROTO_APES = 99;
pub const IPPROTO_GMTP = 100;
pub const IPPROTO_IPCOMP = 108;
pub const IPPROTO_PIM = 103;
pub const IPPROTO_CARP = 112;
pub const IPPROTO_PGM = 113;
pub const IPPROTO_PFSYNC = 240;
pub const IPPROTO_DIVERT = 254;
pub const IPPROTO_MAX = 256;
pub const IPPROTO_DONE = 257;
pub const IPPROTO_UNKNOWN = 258;
pub const rlimit_resource = enum(c_int) {
CPU = 0,
FSIZE = 1,
DATA = 2,
STACK = 3,
CORE = 4,
RSS = 5,
MEMLOCK = 6,
NPROC = 7,
NOFILE = 8,
SBSIZE = 9,
VMEM = 10,
POSIXLOCKS = 11,
_,
pub const AS: rlimit_resource = .VMEM;
};
pub const rlim_t = i64;
/// No limit
pub const RLIM_INFINITY: rlim_t = (1 << 63) - 1;
pub const RLIM_SAVED_MAX = RLIM_INFINITY;
pub const RLIM_SAVED_CUR = RLIM_INFINITY;
pub const rlimit = extern struct {
/// Soft limit
cur: rlim_t,
/// Hard limit
max: rlim_t,
};
pub const SHUT_RD = 0;
pub const SHUT_WR = 1;
pub const SHUT_RDWR = 2;
pub const nfds_t = u32;
pub const pollfd = extern struct {
fd: fd_t,
events: i16,
revents: i16,
};
/// Requestable events.
pub const POLLIN = 0x0001;
pub const POLLPRI = 0x0002;
pub const POLLOUT = 0x0004;
pub const POLLRDNORM = 0x0040;
pub const POLLWRNORM = POLLOUT;
pub const POLLRDBAND = 0x0080;
pub const POLLWRBAND = 0x0100;
/// These events are set if they occur regardless of whether they were requested.
pub const POLLERR = 0x0008;
pub const POLLHUP = 0x0010;
pub const POLLNVAL = 0x0020; | lib/std/os/bits/dragonfly.zig |
const std = @import("std");
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer if (gpa.deinit()) @panic("leaked");
const allocator = &gpa.allocator;
const files = .{
.{ "chapter-0.md", "test-c0.zig" },
.{ "chapter-1.md", "test-c1.zig" },
.{ "chapter-2.md", "test-c2.zig" },
.{ "chapter-3.md", "test-c3.zig" },
.{ "chapter-4.md", "test-c4.zig" },
.{ "chapter-5.md", "test-c5.zig" },
};
std.fs.cwd().makeDir("test-out") catch |err| switch (err) {
error.PathAlreadyExists => {},
else => return err,
};
const out_dir = try std.fs.cwd().openDir("test-out", .{});
inline for (files) |f| {
const in = try std.fs.cwd().openFile(f[0], .{});
defer in.close();
const out = try out_dir.createFile(f[1], .{});
defer out.close();
const text = try allocator.alloc(u8, (try in.stat()).size);
defer allocator.free(text);
_ = try in.readAll(text);
var iter = std.mem.split(text, "```");
outer: while (iter.next()) |token| {
if (!std.mem.startsWith(u8, token, "zig")) continue;
// skip tests with special prefixes
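// A skip prefix, if present, sits on the line directly before the opening
// ``` fence, i.e. it starts at token.ptr - (skip_prefix.len + "```".len)
// within the original file contents; the offset check below guards against
// reading before the start of `text`.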
for (&[_][]const u8{
"<!--no_test-->\n", // this should not be run as a test
"<!--fail_test-->\n", // this test should fail TODO: actually check that these tests fail
}) |skip_prefix| {
const offset = @ptrToInt(token.ptr) - @ptrToInt(text.ptr);
if (offset >= skip_prefix.len + "```".len) {
if (std.mem.eql(
u8,
skip_prefix,
@intToPtr([*]const u8, @ptrToInt(token.ptr) - (skip_prefix.len + "```".len))[0..skip_prefix.len],
)) continue :outer;
}
}
const this_test = std.mem.trim(u8, token[3..], " \n\t");
try out.writer().print("{s}\n\n", .{this_test});
}
}
} | test-out.zig |
pub const ContentType = enum(u8) {
invalid = 0,
change_cipher_spec = 20,
alert = 21,
handshake = 22,
application_data = 23,
heartbeat = 24,
_,
};
pub const CompressedY = enum(u8) {
even = 2,
odd = 3,
_,
};
pub const HashAlgorithm = enum(u8) {
none = 0,
md5 = 1,
sha1 = 2,
sha224 = 3,
sha256 = 4,
sha384 = 5,
sha512 = 6,
_,
};
pub const SignatureAlgorithm = enum(u8) {
anonymous = 0,
rsa = 1,
dsa = 2,
ecdsa = 3,
_,
};
pub const ECCurveType = enum(u8) {
explicit_prime = 1, // deprecated
explicit_char2 = 2, // deprecated
named_curve = 3,
_,
};
// 1. PROTOCOL
// 2. KEY_EXCHANGE_ALGORITHM
// 3. DIGITAL_SIGNATURE_ALGORITHM
// 4. BULK_ENCRYPTION_ALGORITHM
// 5. HASHING_ALGORITHM
// ECDHE - Elliptic Curve Diffie-Hellman Ephemeral
// ECDSA - Elliptic Curve Digital Signature Algorithm
// GCM - Galois/Counter mode
// AES - Advanced encryption standard
// SHA - Secure Hash Algorithm
// CBC - Cipher Block Chaining
// RSA - Rivest Shamir Adleman algorithm
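// Example decomposition: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 means
// protocol TLS, key exchange ECDHE, signatures via RSA, bulk encryption
// AES-128 in GCM mode, and SHA-256 as the hashing algorithm.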
/// Cipher suites formatted as IANA names
pub const CipherSuite = enum(u16) {
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B, // Recommended
TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C, // Recommended
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023, // Weak
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024, // Weak
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F, // Secure
TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030, // Secure
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027, // Weak
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028, // Weak
TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E, // Secure
TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F, // Secure
TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033, // Weak
TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039, // Weak
TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067, // Weak
TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B, // Weak
TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9, // Recommended
TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8, // Secure
_,
};
/// ClientHello/ServerHello Extension types
pub const ExtensionType = enum(u16) {
server_name = 0,
status_request = 5,
supported_groups = 10,
supported_formats = 11,
_,
};
/// Levels of alert record
pub const AlertLevel = enum(u8) {
warning = 1,
fatal = 2,
_,
};
/// Short description of alerts
pub const AlertDescription = enum(u8) {
close_notify = 0,
unexpected_message = 10,
bad_record_mac = 20,
decryption_failed = 21,
record_overflow = 22,
decompression_failure = 30,
handshake_failure = 40,
no_certificate = 41,
bad_certificate = 42,
unsupported_certificate = 43,
certificate_revoked = 44,
certificate_expired = 45,
certificate_unknown = 46,
illegal_parameter = 47,
unknown_ca = 48,
access_denied = 49,
decode_error = 50,
decrypt_error = 51,
export_restriction = 60,
protocol_version = 70,
insufficient_security = 71,
internal_error = 80,
inappropriate_fallback = 86,
user_canceled = 90,
no_renegotiation = 100,
missing_extension = 109,
unsupported_extension = 110,
certificate_unobtainable = 111,
unrecognized_name = 112,
bad_certificate_status_response = 113,
bad_certificate_hash_value = 114,
unknown_psk_identity = 115,
certificate_required = 116,
no_application_protocol = 120,
_,
};
/// Type of handshake message
pub const HandshakeType = enum(u8) {
hello_request = 0,
client_hello = 1,
server_hello = 2,
new_session_ticket = 4,
end_of_early_data = 5,
encrypted_extensions = 8,
certificate = 11,
server_key_exchange = 12,
certificate_request = 13,
server_hello_done = 14,
certificate_verify = 15,
client_key_exchange = 16,
finished = 20,
certificate_url = 21,
certificate_status = 22,
key_update = 24,
message_hash = 254,
_,
};
/// TLS versions
pub const Version = enum(u16) {
SSL_3_0 = 0x0300,
TLS_1_0 = 0x0301,
TLS_1_1 = 0x0302,
TLS_1_2 = 0x0303,
TLS_1_3 = 0x0304,
_,
};
/// Named elliptic curve codes
pub const EllipticCurve = enum(u16) {
sect163k1 = 1, // deprecated
sect163r1 = 2, // deprecated
sect163r2 = 3, // deprecated
sect193r1 = 4, // deprecated
sect193r2 = 5, // deprecated
sect233k1 = 6, // deprecated
sect233r1 = 7, // deprecated
sect239k1 = 8, // deprecated
sect283k1 = 9, // deprecated
sect283r1 = 10, // deprecated
sect409k1 = 11, // deprecated
sect409r1 = 12, // deprecated
sect571k1 = 13, // deprecated
sect571r1 = 14, // deprecated
secp160k1 = 15, // deprecated
secp160r1 = 16, // deprecated
secp160r2 = 17, // deprecated
secp192k1 = 18, // deprecated
secp192r1 = 19, // deprecated
secp224k1 = 20, // deprecated
secp224r1 = 21, // deprecated
secp256k1 = 22, // deprecated
secp256r1 = 23,
secp384r1 = 24,
secp521r1 = 25,
x25519 = 29,
x448 = 30,
_,
}; | src/enums.zig |
const std = @import("std");
/// Adds all BearSSL sources and include paths to the given LibExeObjStep,
/// allowing simple linking from build scripts.
pub fn linkBearSSL(comptime path_prefix: []const u8, module: *std.build.LibExeObjStep, target: std.zig.CrossTarget) void {
module.linkLibC();
module.addIncludeDir(path_prefix ++ "/BearSSL/inc");
module.addIncludeDir(path_prefix ++ "/BearSSL/src");
inline for (bearssl_sources) |srcfile| {
module.addCSourceFile(path_prefix ++ srcfile, &[_][]const u8{
"-Wall",
"-DBR_LE_UNALIGNED=0", // this prevent BearSSL from using undefined behaviour when doing potential unaligned access
});
}
if (target.isWindows()) {
module.linkSystemLibrary("advapi32");
}
}
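// Illustrative build.zig usage (a sketch; the import path, the `exe` step and
// the "./deps/zig-bearssl" prefix are placeholders for whatever the consuming
// project actually uses):
//
//     const bearssl = @import("deps/zig-bearssl/lib.zig");
//     const exe = b.addExecutable("app", "src/main.zig");
//     bearssl.linkBearSSL("./deps/zig-bearssl", exe, target);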
// Export C for advanced interfacing
pub const c = @cImport({
@cInclude("bearssl.h");
});
pub const BearError = error{
BAD_PARAM,
BAD_STATE,
UNSUPPORTED_VERSION,
BAD_VERSION,
BAD_LENGTH,
TOO_LARGE,
BAD_MAC,
NO_RANDOM,
UNKNOWN_TYPE,
UNEXPECTED,
BAD_CCS,
BAD_ALERT,
BAD_HANDSHAKE,
OVERSIZED_ID,
BAD_CIPHER_SUITE,
BAD_COMPRESSION,
BAD_FRAGLEN,
BAD_SECRENEG,
EXTRA_EXTENSION,
BAD_SNI,
BAD_HELLO_DONE,
LIMIT_EXCEEDED,
BAD_FINISHED,
RESUME_MISMATCH,
INVALID_ALGORITHM,
BAD_SIGNATURE,
WRONG_KEY_USAGE,
NO_CLIENT_AUTH,
IO,
X509_INVALID_VALUE,
X509_TRUNCATED,
X509_EMPTY_CHAIN,
X509_INNER_TRUNC,
X509_BAD_TAG_CLASS,
X509_BAD_TAG_VALUE,
X509_INDEFINITE_LENGTH,
X509_EXTRA_ELEMENT,
X509_UNEXPECTED,
X509_NOT_CONSTRUCTED,
X509_NOT_PRIMITIVE,
X509_PARTIAL_BYTE,
X509_BAD_BOOLEAN,
X509_OVERFLOW,
X509_BAD_DN,
X509_BAD_TIME,
X509_UNSUPPORTED,
X509_LIMIT_EXCEEDED,
X509_WRONG_KEY_TYPE,
X509_BAD_SIGNATURE,
X509_TIME_UNKNOWN,
X509_EXPIRED,
X509_DN_MISMATCH,
X509_BAD_SERVER_NAME,
X509_CRITICAL_EXTENSION,
X509_NOT_CA,
X509_FORBIDDEN_KEY_USAGE,
X509_WEAK_PUBLIC_KEY,
X509_NOT_TRUSTED,
};
fn convertError(err: c_int) BearError {
return switch (err) {
c.BR_ERR_BAD_PARAM => error.BAD_PARAM,
c.BR_ERR_BAD_STATE => error.BAD_STATE,
c.BR_ERR_UNSUPPORTED_VERSION => error.UNSUPPORTED_VERSION,
c.BR_ERR_BAD_VERSION => error.BAD_VERSION,
c.BR_ERR_BAD_LENGTH => error.BAD_LENGTH,
c.BR_ERR_TOO_LARGE => error.TOO_LARGE,
c.BR_ERR_BAD_MAC => error.BAD_MAC,
c.BR_ERR_NO_RANDOM => error.NO_RANDOM,
c.BR_ERR_UNKNOWN_TYPE => error.UNKNOWN_TYPE,
c.BR_ERR_UNEXPECTED => error.UNEXPECTED,
c.BR_ERR_BAD_CCS => error.BAD_CCS,
c.BR_ERR_BAD_ALERT => error.BAD_ALERT,
c.BR_ERR_BAD_HANDSHAKE => error.BAD_HANDSHAKE,
c.BR_ERR_OVERSIZED_ID => error.OVERSIZED_ID,
c.BR_ERR_BAD_CIPHER_SUITE => error.BAD_CIPHER_SUITE,
c.BR_ERR_BAD_COMPRESSION => error.BAD_COMPRESSION,
c.BR_ERR_BAD_FRAGLEN => error.BAD_FRAGLEN,
c.BR_ERR_BAD_SECRENEG => error.BAD_SECRENEG,
c.BR_ERR_EXTRA_EXTENSION => error.EXTRA_EXTENSION,
c.BR_ERR_BAD_SNI => error.BAD_SNI,
c.BR_ERR_BAD_HELLO_DONE => error.BAD_HELLO_DONE,
c.BR_ERR_LIMIT_EXCEEDED => error.LIMIT_EXCEEDED,
c.BR_ERR_BAD_FINISHED => error.BAD_FINISHED,
c.BR_ERR_RESUME_MISMATCH => error.RESUME_MISMATCH,
c.BR_ERR_INVALID_ALGORITHM => error.INVALID_ALGORITHM,
c.BR_ERR_BAD_SIGNATURE => error.BAD_SIGNATURE,
c.BR_ERR_WRONG_KEY_USAGE => error.WRONG_KEY_USAGE,
c.BR_ERR_NO_CLIENT_AUTH => error.NO_CLIENT_AUTH,
c.BR_ERR_IO => error.IO,
c.BR_ERR_X509_INVALID_VALUE => error.X509_INVALID_VALUE,
c.BR_ERR_X509_TRUNCATED => error.X509_TRUNCATED,
c.BR_ERR_X509_EMPTY_CHAIN => error.X509_EMPTY_CHAIN,
c.BR_ERR_X509_INNER_TRUNC => error.X509_INNER_TRUNC,
c.BR_ERR_X509_BAD_TAG_CLASS => error.X509_BAD_TAG_CLASS,
c.BR_ERR_X509_BAD_TAG_VALUE => error.X509_BAD_TAG_VALUE,
c.BR_ERR_X509_INDEFINITE_LENGTH => error.X509_INDEFINITE_LENGTH,
c.BR_ERR_X509_EXTRA_ELEMENT => error.X509_EXTRA_ELEMENT,
c.BR_ERR_X509_UNEXPECTED => error.X509_UNEXPECTED,
c.BR_ERR_X509_NOT_CONSTRUCTED => error.X509_NOT_CONSTRUCTED,
c.BR_ERR_X509_NOT_PRIMITIVE => error.X509_NOT_PRIMITIVE,
c.BR_ERR_X509_PARTIAL_BYTE => error.X509_PARTIAL_BYTE,
c.BR_ERR_X509_BAD_BOOLEAN => error.X509_BAD_BOOLEAN,
c.BR_ERR_X509_OVERFLOW => error.X509_OVERFLOW,
c.BR_ERR_X509_BAD_DN => error.X509_BAD_DN,
c.BR_ERR_X509_BAD_TIME => error.X509_BAD_TIME,
c.BR_ERR_X509_UNSUPPORTED => error.X509_UNSUPPORTED,
c.BR_ERR_X509_LIMIT_EXCEEDED => error.X509_LIMIT_EXCEEDED,
c.BR_ERR_X509_WRONG_KEY_TYPE => error.X509_WRONG_KEY_TYPE,
c.BR_ERR_X509_BAD_SIGNATURE => error.X509_BAD_SIGNATURE,
c.BR_ERR_X509_TIME_UNKNOWN => error.X509_TIME_UNKNOWN,
c.BR_ERR_X509_EXPIRED => error.X509_EXPIRED,
c.BR_ERR_X509_DN_MISMATCH => error.X509_DN_MISMATCH,
c.BR_ERR_X509_BAD_SERVER_NAME => error.X509_BAD_SERVER_NAME,
c.BR_ERR_X509_CRITICAL_EXTENSION => error.X509_CRITICAL_EXTENSION,
c.BR_ERR_X509_NOT_CA => error.X509_NOT_CA,
c.BR_ERR_X509_FORBIDDEN_KEY_USAGE => error.X509_FORBIDDEN_KEY_USAGE,
c.BR_ERR_X509_WEAK_PUBLIC_KEY => error.X509_WEAK_PUBLIC_KEY,
c.BR_ERR_X509_NOT_TRUSTED => error.X509_NOT_TRUSTED,
else => std.debug.panic("missing error code: {}", .{err}),
};
}
pub const PublicKey = struct {
const Self = @This();
arena: std.heap.ArenaAllocator,
key: KeyStore,
usages: ?c_uint,
pub fn fromX509(allocator: std.mem.Allocator, inkey: c.br_x509_pkey) !Self {
var arena = std.heap.ArenaAllocator.init(allocator);
errdefer arena.deinit();
var key = switch (inkey.key_type) {
c.BR_KEYTYPE_RSA => KeyStore{
.rsa = .{
.n = try arena.allocator().dupe(u8, inkey.key.rsa.n[0..inkey.key.rsa.nlen]),
.e = try arena.allocator().dupe(u8, inkey.key.rsa.e[0..inkey.key.rsa.elen]),
},
},
c.BR_KEYTYPE_EC => KeyStore{
.ec = .{
.curve = inkey.key.ec.curve,
.q = try arena.allocator().dupe(u8, inkey.key.ec.q[0..inkey.key.ec.qlen]),
},
},
else => return error.UnsupportedKeyType,
};
return Self{
.arena = arena,
.key = key,
.usages = null,
};
}
pub fn toX509(self: Self) c.br_x509_pkey {
switch (self.key) {
.rsa => |rsa| {
return c.br_x509_pkey{
.key_type = c.BR_KEYTYPE_RSA,
.key = .{
.rsa = .{
.n = rsa.n.ptr,
.nlen = rsa.n.len,
.e = rsa.e.ptr,
.elen = rsa.e.len,
},
},
};
},
.ec => |ec| {
return c.br_x509_pkey{
.key_type = c.BR_KEYTYPE_EC,
.key = .{
.ec = .{
.curve = ec.curve,
.q = ec.q.ptr,
.qlen = ec.q.len,
},
},
};
},
}
}
pub fn deinit(self: Self) void {
self.arena.deinit();
}
/// Encodes the public key with DER ASN.1 encoding into `target`.
/// If `target` is `null`, the function only calculates and returns the
/// required buffer size.
///
/// https://tools.ietf.org/html/rfc8017#appendix-A.1.1
/// RSAPublicKey ::= SEQUENCE {
/// modulus INTEGER, -- n
/// publicExponent INTEGER -- e
/// }
pub fn asn1Encode(self: Self, target: ?[]u8) !usize {
if (self.key != .rsa)
return error.KeytypeNotSupportedYet;
var sequence_content = [_]asn1.Value{
asn1.Value{
.integer = asn1.Integer{ .value = self.key.rsa.n },
},
asn1.Value{
.integer = asn1.Integer{ .value = self.key.rsa.e },
},
};
var sequence = asn1.Value{
.sequence = asn1.Sequence{ .items = &sequence_content },
};
return try asn1.encode(target, sequence);
}
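// Illustrative two-pass use (a sketch; `key` and `allocator` stand in for
// values the caller already has):
//
//     const required = try key.asn1Encode(null); // pass 1: compute size only
//     const buffer = try allocator.alloc(u8, required);
//     defer allocator.free(buffer);
//     _ = try key.asn1Encode(buffer); // pass 2: write the DER encoding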
pub const KeyStore = union(enum) {
ec: EC,
rsa: RSA,
pub const EC = struct {
curve: c_int,
q: []u8,
};
pub const RSA = struct {
n: []u8,
e: []u8,
};
};
};
pub const DERCertificate = struct {
const Self = @This();
allocator: std.mem.Allocator,
data: []u8,
pub fn deinit(self: Self) void {
self.allocator.free(self.data);
}
fn fromX509(allocator: std.mem.Allocator, cert: *c.br_x509_certificate) !Self {
return Self{
.allocator = allocator,
.data = try allocator.dupe(u8, cert.data[0..cert.data_len]),
};
}
fn toX509(self: *Self) c.br_x509_certificate {
return c.br_x509_certificate{
.data_len = self.data.len,
.data = self.data.ptr,
};
}
};
pub const TrustAnchorCollection = struct {
const Self = @This();
arena: std.heap.ArenaAllocator,
items: std.ArrayList(c.br_x509_trust_anchor),
pub fn init(allocator: std.mem.Allocator) Self {
return Self{
.items = std.ArrayList(c.br_x509_trust_anchor).init(allocator),
.arena = std.heap.ArenaAllocator.init(allocator),
};
}
pub fn appendFromPEM(self: *Self, pem_text: []const u8) !void {
var objectBuffer = std.ArrayList(u8).init(self.items.allocator);
defer objectBuffer.deinit();
try objectBuffer.ensureUnusedCapacity(8192);
var x509_decoder: c.br_pem_decoder_context = undefined;
c.br_pem_decoder_init(&x509_decoder);
var current_obj_is_certificate = false;
var offset: usize = 0;
while (offset < pem_text.len) {
var diff = c.br_pem_decoder_push(&x509_decoder, pem_text.ptr + offset, pem_text.len - offset);
offset += diff;
var event = c.br_pem_decoder_event(&x509_decoder);
switch (event) {
0 => unreachable, // there must be an event, we always push the full file
c.BR_PEM_BEGIN_OBJ => {
const name = std.mem.trim(
u8,
std.mem.span(c.br_pem_decoder_name(&x509_decoder)),
"-",
);
current_obj_is_certificate = std.mem.eql(u8, name, "CERTIFICATE") or std.mem.eql(u8, name, "X509 CERTIFICATE");
if (current_obj_is_certificate) {
try objectBuffer.resize(0);
c.br_pem_decoder_setdest(&x509_decoder, appendToBuffer, &objectBuffer);
} else {
std.log.warn("ignore object of type '{s}'\n", .{name});
c.br_pem_decoder_setdest(&x509_decoder, null, null);
}
},
c.BR_PEM_END_OBJ => {
if (current_obj_is_certificate) {
var certificate = c.br_x509_certificate{
.data = objectBuffer.items.ptr,
.data_len = objectBuffer.items.len,
};
var trust_anchor = try convertToTrustAnchor(self.arena.allocator(), certificate);
try self.items.append(trust_anchor);
                        // end of the certificate object; the trust anchor was already appended above
} else {
std.log.warn("end of ignored object.\n", .{});
}
},
c.BR_PEM_ERROR => {
std.log.warn("pem error:\n", .{});
},
else => unreachable, // no other values are specified
}
}
}
pub fn deinit(self: Self) void {
self.items.deinit();
self.arena.deinit();
}
fn convertToTrustAnchor(allocator: std.mem.Allocator, cert: c.br_x509_certificate) !c.br_x509_trust_anchor {
var dc: c.br_x509_decoder_context = undefined;
var vdn = std.ArrayList(u8).init(allocator);
defer vdn.deinit();
c.br_x509_decoder_init(&dc, appendToBuffer, &vdn);
c.br_x509_decoder_push(&dc, cert.data, cert.data_len);
const public_key: *c.br_x509_pkey = if (@ptrCast(?*c.br_x509_pkey, c.br_x509_decoder_get_pkey(&dc))) |pk|
pk
else
return convertError(c.br_x509_decoder_last_error(&dc));
var ta = c.br_x509_trust_anchor{
.dn = undefined,
.flags = 0,
.pkey = undefined,
};
if (c.br_x509_decoder_isCA(&dc) != 0) {
ta.flags |= c.BR_X509_TA_CA;
}
switch (public_key.key_type) {
c.BR_KEYTYPE_RSA => {
var n = try allocator.dupe(u8, public_key.key.rsa.n[0..public_key.key.rsa.nlen]);
errdefer allocator.free(n);
var e = try allocator.dupe(u8, public_key.key.rsa.e[0..public_key.key.rsa.elen]);
errdefer allocator.free(e);
ta.pkey = .{
.key_type = c.BR_KEYTYPE_RSA,
.key = .{
.rsa = .{
.n = n.ptr,
.nlen = n.len,
.e = e.ptr,
.elen = e.len,
},
},
};
},
c.BR_KEYTYPE_EC => {
var q = try allocator.dupe(u8, public_key.key.ec.q[0..public_key.key.ec.qlen]);
errdefer allocator.free(q);
ta.pkey = .{
.key_type = c.BR_KEYTYPE_EC,
.key = .{
.ec = .{
.curve = public_key.key.ec.curve,
.q = q.ptr,
.qlen = q.len,
},
},
};
},
else => return error.UnsupportedKeyType,
}
errdefer switch (public_key.key_type) {
c.BR_KEYTYPE_RSA => {
allocator.free(ta.pkey.key.rsa.n[0..ta.pkey.key.rsa.nlen]);
allocator.free(ta.pkey.key.rsa.e[0..ta.pkey.key.rsa.elen]);
},
c.BR_KEYTYPE_EC => allocator.free(ta.pkey.key.ec.q[0..ta.pkey.key.ec.qlen]),
else => unreachable,
};
const dn = vdn.toOwnedSlice();
ta.dn = .{
.data = dn.ptr,
.len = dn.len,
};
return ta;
}
};
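// Hedged usage sketch (the `allocator` and `pem_bundle` values are assumptions,
// e.g. the contents of a CA bundle such as /etc/ssl/cert.pem):
//
//     var tac = TrustAnchorCollection.init(allocator);
//     defer tac.deinit();
//     try tac.appendFromPEM(pem_bundle);
//     var validator = x509.Minimal.init(tac); // see `x509.Minimal` below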
// The "full" profile supports all implemented cipher suites.
//
// Rationale for suite order, from most important to least
// important rule:
//
// -- Don't use 3DES if AES or ChaCha20 is available.
// -- Try to have Forward Secrecy (ECDHE suite) if possible.
// -- When not using Forward Secrecy, ECDH key exchange is
// better than RSA key exchange (slightly more expensive on the
// client, but much cheaper on the server, and it implies smaller
// messages).
// -- ChaCha20+Poly1305 is better than AES/GCM (faster, smaller code).
// -- GCM is better than CCM and CBC. CCM is better than CBC.
// -- CCM is preferable to CCM_8 (with CCM_8, forgeries may succeed
// with probability 2^(-64)).
// -- AES-128 is preferred over AES-256 (AES-128 is already
// strong enough, and AES-256 is 40% more expensive).
//
const cypher_suites = [_]u16{
c.BR_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
c.BR_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
c.BR_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
c.BR_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
c.BR_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
c.BR_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
c.BR_TLS_ECDHE_ECDSA_WITH_AES_128_CCM,
c.BR_TLS_ECDHE_ECDSA_WITH_AES_256_CCM,
c.BR_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8,
c.BR_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8,
c.BR_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
c.BR_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
c.BR_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
c.BR_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
c.BR_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
c.BR_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
c.BR_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
c.BR_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
c.BR_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
c.BR_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
c.BR_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
c.BR_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
c.BR_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
c.BR_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
c.BR_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
c.BR_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
c.BR_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
c.BR_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
c.BR_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
c.BR_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
c.BR_TLS_RSA_WITH_AES_128_GCM_SHA256,
c.BR_TLS_RSA_WITH_AES_256_GCM_SHA384,
c.BR_TLS_RSA_WITH_AES_128_CCM,
c.BR_TLS_RSA_WITH_AES_256_CCM,
c.BR_TLS_RSA_WITH_AES_128_CCM_8,
c.BR_TLS_RSA_WITH_AES_256_CCM_8,
c.BR_TLS_RSA_WITH_AES_128_CBC_SHA256,
c.BR_TLS_RSA_WITH_AES_256_CBC_SHA256,
c.BR_TLS_RSA_WITH_AES_128_CBC_SHA,
c.BR_TLS_RSA_WITH_AES_256_CBC_SHA,
c.BR_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
c.BR_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
c.BR_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
c.BR_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
c.BR_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
};
// All hash functions are activated.
// Note: the X.509 validation engine will nonetheless refuse to
// validate signatures that use MD5 as hash function.
//
fn getHashClasses() [6]*const c.br_hash_class {
return .{
&c.br_md5_vtable,
&c.br_sha1_vtable,
&c.br_sha224_vtable,
&c.br_sha256_vtable,
&c.br_sha384_vtable,
&c.br_sha512_vtable,
};
}
pub const x509 = struct {
pub const Minimal = struct {
const Self = @This();
engine: c.br_x509_minimal_context,
pub fn init(tac: TrustAnchorCollection) Self {
var self = Self{
.engine = undefined,
};
const xc = &self.engine;
// X.509 engine uses SHA-256 to hash certificate DN (for
// comparisons).
//
c.br_x509_minimal_init(xc, &c.br_sha256_vtable, tac.items.items.ptr, tac.items.items.len);
c.br_x509_minimal_set_rsa(xc, c.br_rsa_pkcs1_vrfy_get_default());
c.br_x509_minimal_set_ecdsa(xc, &c.br_ec_all_m31, c.br_ecdsa_i31_vrfy_asn1);
// Set supported hash functions, for the SSL engine and for the
// X.509 engine.
const hash_classes = getHashClasses();
var id: usize = c.br_md5_ID;
while (id <= c.br_sha512_ID) : (id += 1) {
const hc = hash_classes[id - 1];
c.br_x509_minimal_set_hash(xc, @intCast(c_int, id), hc);
}
return self;
}
pub fn getEngine(self: *Self) *[*c]const c.br_x509_class {
return &self.engine.vtable;
}
};
pub const KnownKey = struct {
const Self = @This();
engine: c.br_x509_knownkey_context,
pub fn init(key: PublicKey, allowKeyExchange: bool, allowSigning: bool) Self {
return KnownKey{
.engine = c.br_x509_knownkey_context{
.vtable = &c.br_x509_knownkey_vtable,
.pkey = key.toX509(),
.usages = (key.usages orelse 0) |
(if (allowKeyExchange) @as(c_uint, c.BR_KEYTYPE_KEYX) else 0) |
                        (if (allowSigning) @as(c_uint, c.BR_KEYTYPE_SIGN) else 0), // merge the stored key usages with the explicitly allowed ones
},
};
}
pub fn getEngine(self: *Self) *[*c]const c.br_x509_class {
return &self.engine.vtable;
}
};
};
pub const Client = struct {
const Self = @This();
client: c.br_ssl_client_context,
iobuf: [c.BR_SSL_BUFSIZE_BIDI]u8,
pub fn init(engine: *[*c]const c.br_x509_class) Self {
var ctx = Self{
.client = undefined,
.iobuf = undefined,
};
const cc = &ctx.client;
// Reset client context and set supported versions from TLS-1.0
// to TLS-1.2 (inclusive).
//
c.br_ssl_client_zero(cc);
c.br_ssl_engine_set_versions(&cc.eng, c.BR_TLS10, c.BR_TLS12);
// Set suites and asymmetric crypto implementations. We use the
// "i31" code for RSA (it is somewhat faster than the "i32"
// implementation).
// TODO: change that when better implementations are made available.
c.br_ssl_engine_set_suites(&cc.eng, &cypher_suites[0], cypher_suites.len);
c.br_ssl_client_set_default_rsapub(cc);
c.br_ssl_engine_set_default_rsavrfy(&cc.eng);
c.br_ssl_engine_set_default_ecdsa(&cc.eng);
// Set supported hash functions, for the SSL engine and for the
// X.509 engine.
const hash_classes = getHashClasses();
var id: c_int = c.br_md5_ID;
while (id <= c.br_sha512_ID) : (id += 1) {
const hc = hash_classes[@intCast(usize, id - 1)];
c.br_ssl_engine_set_hash(&cc.eng, id, hc);
}
// Set the PRF implementations.
c.br_ssl_engine_set_prf10(&cc.eng, c.br_tls10_prf);
c.br_ssl_engine_set_prf_sha256(&cc.eng, c.br_tls12_sha256_prf);
c.br_ssl_engine_set_prf_sha384(&cc.eng, c.br_tls12_sha384_prf);
// Symmetric encryption. We use the "default" implementations
// (fastest among constant-time implementations).
c.br_ssl_engine_set_default_aes_cbc(&cc.eng);
c.br_ssl_engine_set_default_aes_ccm(&cc.eng);
c.br_ssl_engine_set_default_aes_gcm(&cc.eng);
c.br_ssl_engine_set_default_des_cbc(&cc.eng);
c.br_ssl_engine_set_default_chapol(&cc.eng);
// Link the X.509 engine in the SSL engine.
c.br_ssl_engine_set_x509(&cc.eng, @ptrCast([*c][*c]const c.br_x509_class, engine));
return ctx;
}
pub fn relocate(self: *Self) void {
c.br_ssl_engine_set_buffer(&self.client.eng, &self.iobuf, self.iobuf.len, 1);
}
        pub fn reset(self: *Self, host: [:0]const u8, resumeSession: bool) !void {
            // br_ssl_client_reset returns 1 on success and 0 on failure.
            const ok = c.br_ssl_client_reset(&self.client, host, if (resumeSession) @as(c_int, 1) else 0);
            if (ok == 0)
                return convertError(c.br_ssl_engine_last_error(&self.client.eng));
        }
pub fn getEngine(self: *Self) *c.br_ssl_engine_context {
return &self.client.eng;
}
};
const fd_is_int = (@typeInfo(std.os.fd_t) == .Int);
pub fn initStream(engine: *c.br_ssl_engine_context, in_stream: anytype, out_stream: anytype) Stream(@TypeOf(in_stream), @TypeOf(out_stream)) {
std.debug.assert(@typeInfo(@TypeOf(in_stream)) == .Pointer);
std.debug.assert(@typeInfo(@TypeOf(out_stream)) == .Pointer);
return Stream(@TypeOf(in_stream), @TypeOf(out_stream)).init(engine, in_stream, out_stream);
}
pub fn Stream(comptime SrcReader: type, comptime SrcWriter: type) type {
return struct {
const Self = @This();
engine: *c.br_ssl_engine_context,
ioc: c.br_sslio_context,
        /// Initializes a new SSL stream backed by the SSL engine and the given reader/writer pair.
pub fn init(engine: *c.br_ssl_engine_context, in_stream: SrcReader, out_stream: SrcWriter) Self {
var stream = Self{
.engine = engine,
.ioc = undefined,
};
c.br_sslio_init(
&stream.ioc,
stream.engine,
sockRead,
@ptrCast(*anyopaque, in_stream),
sockWrite,
@ptrCast(*anyopaque, out_stream),
);
return stream;
}
        /// Closes the connection. Note that this may fail when the remote peer does not terminate the SSL stream correctly.
pub fn close(self: *Self) !void {
if (c.br_sslio_close(&self.ioc) < 0)
return convertError(c.br_ssl_engine_last_error(self.engine));
}
        /// Flushes all pending data to the underlying writer.
pub fn flush(self: *Self) !void {
if (c.br_sslio_flush(&self.ioc) < 0)
return convertError(c.br_ssl_engine_last_error(self.engine));
}
        /// Low-level read callback: pulls data from the underlying reader into the SSL engine.
fn sockRead(ctx: ?*anyopaque, buf: [*c]u8, len: usize) callconv(.C) c_int {
var input = @ptrCast(SrcReader, @alignCast(@alignOf(std.meta.Child(SrcReader)), ctx.?));
const num = input.read(buf[0..len]) catch return -1;
return if (num > 0) @intCast(c_int, num) else -1;
}
        /// Low-level write callback: pushes data from the SSL engine to the underlying writer.
fn sockWrite(ctx: ?*anyopaque, buf: [*c]const u8, len: usize) callconv(.C) c_int {
var output = @ptrCast(SrcWriter, @alignCast(@alignOf(std.meta.Child(SrcWriter)), ctx.?));
const num = output.write(buf[0..len]) catch return -1;
return if (num > 0) @intCast(c_int, num) else -1;
}
const ReadError = error{EndOfStream} || BearError;
/// reads some data from the ssl stream.
pub fn read(self: *Self, buffer: []u8) ReadError!usize {
var result = c.br_sslio_read(&self.ioc, buffer.ptr, buffer.len);
if (result < 0) {
const errc = c.br_ssl_engine_last_error(self.engine);
if (errc == c.BR_ERR_OK)
return 0;
return convertError(errc);
}
return @intCast(usize, result);
}
const WriteError = error{EndOfStream} || BearError;
/// writes some data to the ssl stream.
pub fn write(self: *Self, bytes: []const u8) WriteError!usize {
var result = c.br_sslio_write(&self.ioc, bytes.ptr, bytes.len);
if (result < 0) {
const errc = c.br_ssl_engine_last_error(self.engine);
if (errc == c.BR_ERR_OK)
return 0;
return convertError(errc);
}
return @intCast(usize, result);
}
pub const DstReader = std.io.Reader(*Self, ReadError, read);
pub fn reader(self: *Self) DstReader {
return .{ .context = self };
}
pub const DstWriter = std.io.Writer(*Self, WriteError, write);
pub fn writer(self: *Self) DstWriter {
return .{ .context = self };
}
};
}
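// Hedged end-to-end sketch tying Client, x509.Minimal and Stream together.
// The `trust_anchors`, `tcp_reader` and `tcp_writer` values are assumptions
// (any reader/writer pointers will do, e.g. from a connected TCP stream):
//
//     var validator = x509.Minimal.init(trust_anchors);
//     var client = Client.init(validator.getEngine());
//     client.relocate(); // point the engine at the embedded I/O buffer
//     try client.reset("example.com", false);
//     var tls = initStream(client.getEngine(), &tcp_reader, &tcp_writer);
//     defer tls.close() catch {};
//     try tls.writer().writeAll("GET / HTTP/1.1\r\nHost: example.com\r\n\r\n");
//     try tls.flush();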
fn appendToBuffer(dest_ctx: ?*anyopaque, buf: ?*const anyopaque, len: usize) callconv(.C) void {
var dest_buffer = @ptrCast(*std.ArrayList(u8), @alignCast(@alignOf(std.ArrayList(u8)), dest_ctx));
// std.log.warn("read chunk of {} bytes...\n", .{len});
dest_buffer.appendSlice(@ptrCast([*]const u8, buf)[0..len]) catch {
std.log.warn("failed to read chunk of {} bytes...\n", .{len});
};
}
fn Vector(comptime T: type) type {
return extern struct {
buf: ?[*]T,
ptr: usize,
len: usize,
};
}
const asn1 = struct {
const Type = enum {
bit_string,
boolean,
integer,
@"null",
object_identifier,
octet_string,
bmpstring,
ia5string,
printable_string,
utf8_string,
sequence,
set,
};
const Value = union(Type) {
bit_string: void,
boolean: void,
integer: Integer,
@"null": void,
object_identifier: void,
octet_string: void,
bmpstring: void,
ia5string: void,
printable_string: void,
utf8_string: void,
        sequence: Sequence,
set: void,
};
const Integer = struct {
value: []u8,
};
const Sequence = struct {
items: []Value,
};
    fn encode(buffer: ?[]u8, value: Value) !usize {
        // DER encoding is not implemented yet; return an explicit error so this
        // stub compiles and callers (PublicKey.asn1Encode) fail loudly instead
        // of falling off the end of a non-void function.
        _ = buffer;
        _ = value;
        return error.NotImplemented;
    }
};
const bearssl_sources = [_][]const u8{
"/BearSSL/src/settings.c",
"/BearSSL/src/aead/ccm.c",
"/BearSSL/src/aead/eax.c",
"/BearSSL/src/aead/gcm.c",
"/BearSSL/src/codec/ccopy.c",
"/BearSSL/src/codec/dec16be.c",
"/BearSSL/src/codec/dec16le.c",
"/BearSSL/src/codec/dec32be.c",
"/BearSSL/src/codec/dec32le.c",
"/BearSSL/src/codec/dec64be.c",
"/BearSSL/src/codec/dec64le.c",
"/BearSSL/src/codec/enc16be.c",
"/BearSSL/src/codec/enc16le.c",
"/BearSSL/src/codec/enc32be.c",
"/BearSSL/src/codec/enc32le.c",
"/BearSSL/src/codec/enc64be.c",
"/BearSSL/src/codec/enc64le.c",
"/BearSSL/src/codec/pemdec.c",
"/BearSSL/src/codec/pemenc.c",
"/BearSSL/src/ec/ec_all_m15.c",
"/BearSSL/src/ec/ec_all_m31.c",
"/BearSSL/src/ec/ec_c25519_i15.c",
"/BearSSL/src/ec/ec_c25519_i31.c",
"/BearSSL/src/ec/ec_c25519_m15.c",
"/BearSSL/src/ec/ec_c25519_m31.c",
"/BearSSL/src/ec/ec_c25519_m62.c",
"/BearSSL/src/ec/ec_c25519_m64.c",
"/BearSSL/src/ec/ec_curve25519.c",
"/BearSSL/src/ec/ec_default.c",
"/BearSSL/src/ec/ec_keygen.c",
"/BearSSL/src/ec/ec_p256_m15.c",
"/BearSSL/src/ec/ec_p256_m31.c",
"/BearSSL/src/ec/ec_p256_m62.c",
"/BearSSL/src/ec/ec_p256_m64.c",
"/BearSSL/src/ec/ec_prime_i15.c",
"/BearSSL/src/ec/ec_prime_i31.c",
"/BearSSL/src/ec/ec_pubkey.c",
"/BearSSL/src/ec/ec_secp256r1.c",
"/BearSSL/src/ec/ec_secp384r1.c",
"/BearSSL/src/ec/ec_secp521r1.c",
"/BearSSL/src/ec/ecdsa_atr.c",
"/BearSSL/src/ec/ecdsa_default_sign_asn1.c",
"/BearSSL/src/ec/ecdsa_default_sign_raw.c",
"/BearSSL/src/ec/ecdsa_default_vrfy_asn1.c",
"/BearSSL/src/ec/ecdsa_default_vrfy_raw.c",
"/BearSSL/src/ec/ecdsa_i15_bits.c",
"/BearSSL/src/ec/ecdsa_i15_sign_asn1.c",
"/BearSSL/src/ec/ecdsa_i15_sign_raw.c",
"/BearSSL/src/ec/ecdsa_i15_vrfy_asn1.c",
"/BearSSL/src/ec/ecdsa_i15_vrfy_raw.c",
"/BearSSL/src/ec/ecdsa_i31_bits.c",
"/BearSSL/src/ec/ecdsa_i31_sign_asn1.c",
"/BearSSL/src/ec/ecdsa_i31_sign_raw.c",
"/BearSSL/src/ec/ecdsa_i31_vrfy_asn1.c",
"/BearSSL/src/ec/ecdsa_i31_vrfy_raw.c",
"/BearSSL/src/ec/ecdsa_rta.c",
"/BearSSL/src/hash/dig_oid.c",
"/BearSSL/src/hash/dig_size.c",
"/BearSSL/src/hash/ghash_ctmul.c",
"/BearSSL/src/hash/ghash_ctmul32.c",
"/BearSSL/src/hash/ghash_ctmul64.c",
"/BearSSL/src/hash/ghash_pclmul.c",
"/BearSSL/src/hash/ghash_pwr8.c",
"/BearSSL/src/hash/md5.c",
"/BearSSL/src/hash/md5sha1.c",
"/BearSSL/src/hash/mgf1.c",
"/BearSSL/src/hash/multihash.c",
"/BearSSL/src/hash/sha1.c",
"/BearSSL/src/hash/sha2big.c",
"/BearSSL/src/hash/sha2small.c",
"/BearSSL/src/int/i15_add.c",
"/BearSSL/src/int/i15_bitlen.c",
"/BearSSL/src/int/i15_decmod.c",
"/BearSSL/src/int/i15_decode.c",
"/BearSSL/src/int/i15_decred.c",
"/BearSSL/src/int/i15_encode.c",
"/BearSSL/src/int/i15_fmont.c",
"/BearSSL/src/int/i15_iszero.c",
"/BearSSL/src/int/i15_moddiv.c",
"/BearSSL/src/int/i15_modpow.c",
"/BearSSL/src/int/i15_modpow2.c",
"/BearSSL/src/int/i15_montmul.c",
"/BearSSL/src/int/i15_mulacc.c",
"/BearSSL/src/int/i15_muladd.c",
"/BearSSL/src/int/i15_ninv15.c",
"/BearSSL/src/int/i15_reduce.c",
"/BearSSL/src/int/i15_rshift.c",
"/BearSSL/src/int/i15_sub.c",
"/BearSSL/src/int/i15_tmont.c",
"/BearSSL/src/int/i31_add.c",
"/BearSSL/src/int/i31_bitlen.c",
"/BearSSL/src/int/i31_decmod.c",
"/BearSSL/src/int/i31_decode.c",
"/BearSSL/src/int/i31_decred.c",
"/BearSSL/src/int/i31_encode.c",
"/BearSSL/src/int/i31_fmont.c",
"/BearSSL/src/int/i31_iszero.c",
"/BearSSL/src/int/i31_moddiv.c",
"/BearSSL/src/int/i31_modpow.c",
"/BearSSL/src/int/i31_modpow2.c",
"/BearSSL/src/int/i31_montmul.c",
"/BearSSL/src/int/i31_mulacc.c",
"/BearSSL/src/int/i31_muladd.c",
"/BearSSL/src/int/i31_ninv31.c",
"/BearSSL/src/int/i31_reduce.c",
"/BearSSL/src/int/i31_rshift.c",
"/BearSSL/src/int/i31_sub.c",
"/BearSSL/src/int/i31_tmont.c",
"/BearSSL/src/int/i32_add.c",
"/BearSSL/src/int/i32_bitlen.c",
"/BearSSL/src/int/i32_decmod.c",
"/BearSSL/src/int/i32_decode.c",
"/BearSSL/src/int/i32_decred.c",
"/BearSSL/src/int/i32_div32.c",
"/BearSSL/src/int/i32_encode.c",
"/BearSSL/src/int/i32_fmont.c",
"/BearSSL/src/int/i32_iszero.c",
"/BearSSL/src/int/i32_modpow.c",
"/BearSSL/src/int/i32_montmul.c",
"/BearSSL/src/int/i32_mulacc.c",
"/BearSSL/src/int/i32_muladd.c",
"/BearSSL/src/int/i32_ninv32.c",
"/BearSSL/src/int/i32_reduce.c",
"/BearSSL/src/int/i32_sub.c",
"/BearSSL/src/int/i32_tmont.c",
"/BearSSL/src/int/i62_modpow2.c",
"/BearSSL/src/kdf/hkdf.c",
"/BearSSL/src/kdf/shake.c",
"/BearSSL/src/mac/hmac.c",
"/BearSSL/src/mac/hmac_ct.c",
"/BearSSL/src/rand/aesctr_drbg.c",
"/BearSSL/src/rand/hmac_drbg.c",
"/BearSSL/src/rand/sysrng.c",
"/BearSSL/src/rsa/rsa_default_keygen.c",
"/BearSSL/src/rsa/rsa_default_modulus.c",
"/BearSSL/src/rsa/rsa_default_oaep_decrypt.c",
"/BearSSL/src/rsa/rsa_default_oaep_encrypt.c",
"/BearSSL/src/rsa/rsa_default_pkcs1_sign.c",
"/BearSSL/src/rsa/rsa_default_pkcs1_vrfy.c",
"/BearSSL/src/rsa/rsa_default_priv.c",
"/BearSSL/src/rsa/rsa_default_privexp.c",
"/BearSSL/src/rsa/rsa_default_pss_sign.c",
"/BearSSL/src/rsa/rsa_default_pss_vrfy.c",
"/BearSSL/src/rsa/rsa_default_pub.c",
"/BearSSL/src/rsa/rsa_default_pubexp.c",
"/BearSSL/src/rsa/rsa_i15_keygen.c",
"/BearSSL/src/rsa/rsa_i15_modulus.c",
"/BearSSL/src/rsa/rsa_i15_oaep_decrypt.c",
"/BearSSL/src/rsa/rsa_i15_oaep_encrypt.c",
"/BearSSL/src/rsa/rsa_i15_pkcs1_sign.c",
"/BearSSL/src/rsa/rsa_i15_pkcs1_vrfy.c",
"/BearSSL/src/rsa/rsa_i15_priv.c",
"/BearSSL/src/rsa/rsa_i15_privexp.c",
"/BearSSL/src/rsa/rsa_i15_pss_sign.c",
"/BearSSL/src/rsa/rsa_i15_pss_vrfy.c",
"/BearSSL/src/rsa/rsa_i15_pub.c",
"/BearSSL/src/rsa/rsa_i15_pubexp.c",
"/BearSSL/src/rsa/rsa_i31_keygen.c",
"/BearSSL/src/rsa/rsa_i31_keygen_inner.c",
"/BearSSL/src/rsa/rsa_i31_modulus.c",
"/BearSSL/src/rsa/rsa_i31_oaep_decrypt.c",
"/BearSSL/src/rsa/rsa_i31_oaep_encrypt.c",
"/BearSSL/src/rsa/rsa_i31_pkcs1_sign.c",
"/BearSSL/src/rsa/rsa_i31_pkcs1_vrfy.c",
"/BearSSL/src/rsa/rsa_i31_priv.c",
"/BearSSL/src/rsa/rsa_i31_privexp.c",
"/BearSSL/src/rsa/rsa_i31_pss_sign.c",
"/BearSSL/src/rsa/rsa_i31_pss_vrfy.c",
"/BearSSL/src/rsa/rsa_i31_pub.c",
"/BearSSL/src/rsa/rsa_i31_pubexp.c",
"/BearSSL/src/rsa/rsa_i32_oaep_decrypt.c",
"/BearSSL/src/rsa/rsa_i32_oaep_encrypt.c",
"/BearSSL/src/rsa/rsa_i32_pkcs1_sign.c",
"/BearSSL/src/rsa/rsa_i32_pkcs1_vrfy.c",
"/BearSSL/src/rsa/rsa_i32_priv.c",
"/BearSSL/src/rsa/rsa_i32_pss_sign.c",
"/BearSSL/src/rsa/rsa_i32_pss_vrfy.c",
"/BearSSL/src/rsa/rsa_i32_pub.c",
"/BearSSL/src/rsa/rsa_i62_keygen.c",
"/BearSSL/src/rsa/rsa_i62_oaep_decrypt.c",
"/BearSSL/src/rsa/rsa_i62_oaep_encrypt.c",
"/BearSSL/src/rsa/rsa_i62_pkcs1_sign.c",
"/BearSSL/src/rsa/rsa_i62_pkcs1_vrfy.c",
"/BearSSL/src/rsa/rsa_i62_priv.c",
"/BearSSL/src/rsa/rsa_i62_pss_sign.c",
"/BearSSL/src/rsa/rsa_i62_pss_vrfy.c",
"/BearSSL/src/rsa/rsa_i62_pub.c",
"/BearSSL/src/rsa/rsa_oaep_pad.c",
"/BearSSL/src/rsa/rsa_oaep_unpad.c",
"/BearSSL/src/rsa/rsa_pkcs1_sig_pad.c",
"/BearSSL/src/rsa/rsa_pkcs1_sig_unpad.c",
"/BearSSL/src/rsa/rsa_pss_sig_pad.c",
"/BearSSL/src/rsa/rsa_pss_sig_unpad.c",
"/BearSSL/src/rsa/rsa_ssl_decrypt.c",
"/BearSSL/src/ssl/prf.c",
"/BearSSL/src/ssl/prf_md5sha1.c",
"/BearSSL/src/ssl/prf_sha256.c",
"/BearSSL/src/ssl/prf_sha384.c",
"/BearSSL/src/ssl/ssl_ccert_single_ec.c",
"/BearSSL/src/ssl/ssl_ccert_single_rsa.c",
"/BearSSL/src/ssl/ssl_client.c",
"/BearSSL/src/ssl/ssl_client_default_rsapub.c",
"/BearSSL/src/ssl/ssl_client_full.c",
"/BearSSL/src/ssl/ssl_engine.c",
"/BearSSL/src/ssl/ssl_engine_default_aescbc.c",
"/BearSSL/src/ssl/ssl_engine_default_aesccm.c",
"/BearSSL/src/ssl/ssl_engine_default_aesgcm.c",
"/BearSSL/src/ssl/ssl_engine_default_chapol.c",
"/BearSSL/src/ssl/ssl_engine_default_descbc.c",
"/BearSSL/src/ssl/ssl_engine_default_ec.c",
"/BearSSL/src/ssl/ssl_engine_default_ecdsa.c",
"/BearSSL/src/ssl/ssl_engine_default_rsavrfy.c",
"/BearSSL/src/ssl/ssl_hashes.c",
"/BearSSL/src/ssl/ssl_hs_client.c",
"/BearSSL/src/ssl/ssl_hs_server.c",
"/BearSSL/src/ssl/ssl_io.c",
"/BearSSL/src/ssl/ssl_keyexport.c",
"/BearSSL/src/ssl/ssl_lru.c",
"/BearSSL/src/ssl/ssl_rec_cbc.c",
"/BearSSL/src/ssl/ssl_rec_ccm.c",
"/BearSSL/src/ssl/ssl_rec_chapol.c",
"/BearSSL/src/ssl/ssl_rec_gcm.c",
"/BearSSL/src/ssl/ssl_scert_single_ec.c",
"/BearSSL/src/ssl/ssl_scert_single_rsa.c",
"/BearSSL/src/ssl/ssl_server.c",
"/BearSSL/src/ssl/ssl_server_full_ec.c",
"/BearSSL/src/ssl/ssl_server_full_rsa.c",
"/BearSSL/src/ssl/ssl_server_mine2c.c",
"/BearSSL/src/ssl/ssl_server_mine2g.c",
"/BearSSL/src/ssl/ssl_server_minf2c.c",
"/BearSSL/src/ssl/ssl_server_minf2g.c",
"/BearSSL/src/ssl/ssl_server_minr2g.c",
"/BearSSL/src/ssl/ssl_server_minu2g.c",
"/BearSSL/src/ssl/ssl_server_minv2g.c",
"/BearSSL/src/symcipher/aes_big_cbcdec.c",
"/BearSSL/src/symcipher/aes_big_cbcenc.c",
"/BearSSL/src/symcipher/aes_big_ctr.c",
"/BearSSL/src/symcipher/aes_big_ctrcbc.c",
"/BearSSL/src/symcipher/aes_big_dec.c",
"/BearSSL/src/symcipher/aes_big_enc.c",
"/BearSSL/src/symcipher/aes_common.c",
"/BearSSL/src/symcipher/aes_ct.c",
"/BearSSL/src/symcipher/aes_ct64.c",
"/BearSSL/src/symcipher/aes_ct64_cbcdec.c",
"/BearSSL/src/symcipher/aes_ct64_cbcenc.c",
"/BearSSL/src/symcipher/aes_ct64_ctr.c",
"/BearSSL/src/symcipher/aes_ct64_ctrcbc.c",
"/BearSSL/src/symcipher/aes_ct64_dec.c",
"/BearSSL/src/symcipher/aes_ct64_enc.c",
"/BearSSL/src/symcipher/aes_ct_cbcdec.c",
"/BearSSL/src/symcipher/aes_ct_cbcenc.c",
"/BearSSL/src/symcipher/aes_ct_ctr.c",
"/BearSSL/src/symcipher/aes_ct_ctrcbc.c",
"/BearSSL/src/symcipher/aes_ct_dec.c",
"/BearSSL/src/symcipher/aes_ct_enc.c",
"/BearSSL/src/symcipher/aes_pwr8.c",
"/BearSSL/src/symcipher/aes_pwr8_cbcdec.c",
"/BearSSL/src/symcipher/aes_pwr8_cbcenc.c",
"/BearSSL/src/symcipher/aes_pwr8_ctr.c",
"/BearSSL/src/symcipher/aes_pwr8_ctrcbc.c",
"/BearSSL/src/symcipher/aes_small_cbcdec.c",
"/BearSSL/src/symcipher/aes_small_cbcenc.c",
"/BearSSL/src/symcipher/aes_small_ctr.c",
"/BearSSL/src/symcipher/aes_small_ctrcbc.c",
"/BearSSL/src/symcipher/aes_small_dec.c",
"/BearSSL/src/symcipher/aes_small_enc.c",
"/BearSSL/src/symcipher/aes_x86ni.c",
"/BearSSL/src/symcipher/aes_x86ni_cbcdec.c",
"/BearSSL/src/symcipher/aes_x86ni_cbcenc.c",
"/BearSSL/src/symcipher/aes_x86ni_ctr.c",
"/BearSSL/src/symcipher/aes_x86ni_ctrcbc.c",
"/BearSSL/src/symcipher/chacha20_ct.c",
"/BearSSL/src/symcipher/chacha20_sse2.c",
"/BearSSL/src/symcipher/des_ct.c",
"/BearSSL/src/symcipher/des_ct_cbcdec.c",
"/BearSSL/src/symcipher/des_ct_cbcenc.c",
"/BearSSL/src/symcipher/des_support.c",
"/BearSSL/src/symcipher/des_tab.c",
"/BearSSL/src/symcipher/des_tab_cbcdec.c",
"/BearSSL/src/symcipher/des_tab_cbcenc.c",
"/BearSSL/src/symcipher/poly1305_ctmul.c",
"/BearSSL/src/symcipher/poly1305_ctmul32.c",
"/BearSSL/src/symcipher/poly1305_ctmulq.c",
"/BearSSL/src/symcipher/poly1305_i15.c",
"/BearSSL/src/x509/asn1enc.c",
"/BearSSL/src/x509/encode_ec_pk8der.c",
"/BearSSL/src/x509/encode_ec_rawder.c",
"/BearSSL/src/x509/encode_rsa_pk8der.c",
"/BearSSL/src/x509/encode_rsa_rawder.c",
"/BearSSL/src/x509/skey_decoder.c",
"/BearSSL/src/x509/x509_decoder.c",
"/BearSSL/src/x509/x509_knownkey.c",
"/BearSSL/src/x509/x509_minimal.c",
"/BearSSL/src/x509/x509_minimal_full.c",
}; | src/lib.zig |
// The MIT License (Expat)
//
// Copyright (c) 2019 dbandstra
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
const std = @import("std");
pub const Format = enum {
unsigned8,
signed16_lsb,
signed24_lsb,
signed32_lsb,
pub fn getNumBytes(self: Format) u16 {
return switch (self) {
.unsigned8 => 1,
.signed16_lsb => 2,
.signed24_lsb => 3,
.signed32_lsb => 4,
};
}
};
pub const PreloadedInfo = struct {
num_channels: usize,
sample_rate: usize,
format: Format,
num_samples: usize,
pub fn getNumBytes(self: PreloadedInfo) usize {
return self.num_samples * self.num_channels *
self.format.getNumBytes();
}
};
// verbose is comptime so we can avoid using std.debug.warn which doesn't
// exist on some targets (e.g. wasm)
pub fn Loader(comptime InStream: type, comptime verbose: bool) type {
return struct {
fn readIdentifier(stream: *InStream) ![4]u8 {
var quad: [4]u8 = undefined;
try stream.readNoEof(&quad);
return quad;
}
fn preloadError(comptime message: []const u8) !PreloadedInfo {
if (verbose) {
std.debug.warn("{}\n", .{message});
}
return error.WavLoadFailed;
}
pub fn preload(stream: *InStream) !PreloadedInfo {
// read RIFF chunk descriptor (12 bytes)
const chunk_id = try readIdentifier(stream);
if (!std.mem.eql(u8, &chunk_id, "RIFF")) {
return preloadError("missing \"RIFF\" header");
}
try stream.skipBytes(4); // ignore chunk_size
const format_id = try readIdentifier(stream);
if (!std.mem.eql(u8, &format_id, "WAVE")) {
return preloadError("missing \"WAVE\" identifier");
}
// read "fmt" sub-chunk
const subchunk1_id = try readIdentifier(stream);
if (!std.mem.eql(u8, &subchunk1_id, "fmt ")) {
return preloadError("missing \"fmt \" header");
}
const subchunk1_size = try stream.readIntLittle(u32);
if (subchunk1_size != 16) {
return preloadError("not PCM (subchunk1_size != 16)");
}
const audio_format = try stream.readIntLittle(u16);
if (audio_format != 1) {
return preloadError("not integer PCM (audio_format != 1)");
}
const num_channels = try stream.readIntLittle(u16);
const sample_rate = try stream.readIntLittle(u32);
const byte_rate = try stream.readIntLittle(u32);
const block_align = try stream.readIntLittle(u16);
const bits_per_sample = try stream.readIntLittle(u16);
if (num_channels < 1 or num_channels > 16) {
return preloadError("invalid number of channels");
}
if (sample_rate < 1 or sample_rate > 192000) {
return preloadError("invalid sample_rate");
}
const format: Format = switch (bits_per_sample) {
8 => .unsigned8,
16 => .signed16_lsb,
24 => .signed24_lsb,
32 => .signed32_lsb,
else => return preloadError("invalid number of bits per sample"),
};
const bytes_per_sample = format.getNumBytes();
if (byte_rate != sample_rate * num_channels * bytes_per_sample) {
return preloadError("invalid byte_rate");
}
if (block_align != num_channels * bytes_per_sample) {
return preloadError("invalid block_align");
}
// read "data" sub-chunk header
const subchunk2_id = try readIdentifier(stream);
if (!std.mem.eql(u8, &subchunk2_id, "data")) {
return preloadError("missing \"data\" header");
}
const subchunk2_size = try stream.readIntLittle(u32);
if ((subchunk2_size % (num_channels * bytes_per_sample)) != 0) {
return preloadError("invalid subchunk2_size");
}
const num_samples = subchunk2_size / (num_channels * bytes_per_sample);
return PreloadedInfo{
.num_channels = num_channels,
.sample_rate = sample_rate,
.format = format,
.num_samples = num_samples,
};
}
pub fn load(
stream: *InStream,
preloaded: PreloadedInfo,
out_buffer: []u8,
) !void {
const num_bytes = preloaded.getNumBytes();
std.debug.assert(out_buffer.len >= num_bytes);
try stream.readNoEof(out_buffer[0..num_bytes]);
}
};
}
pub const SaveInfo = struct {
num_channels: usize,
sample_rate: usize,
format: Format,
data: []const u8,
};
pub fn Saver(comptime OutStream: type) type {
return struct {
pub fn save(stream: *OutStream, info: SaveInfo) !void {
const data_len = @intCast(u32, info.data.len);
const bytes_per_sample = info.format.getNumBytes();
// location of "data" header
const data_chunk_pos: u32 = 36;
// length of file
const file_length = data_chunk_pos + 8 + data_len;
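            // Worked example: with 8 bytes of sample data the chunk contents
            // below add up to 4 ("WAVE") + 24 ("fmt " chunk) + 8 ("data"
            // header) + 8 (samples) = 44 bytes, so file_length = 36 + 8 + 8 =
            // 52 and the RIFF size field written next is file_length - 8 = 44.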
try stream.writeAll("RIFF");
try stream.writeIntLittle(u32, file_length - 8);
try stream.writeAll("WAVE");
try stream.writeAll("fmt ");
try stream.writeIntLittle(u32, 16); // PCM
try stream.writeIntLittle(u16, 1); // uncompressed
try stream.writeIntLittle(u16, @intCast(u16, info.num_channels));
try stream.writeIntLittle(u32, @intCast(u32, info.sample_rate));
try stream.writeIntLittle(u32, @intCast(u32, info.sample_rate * info.num_channels) *
bytes_per_sample);
try stream.writeIntLittle(u16, @intCast(u16, info.num_channels) * bytes_per_sample);
try stream.writeIntLittle(u16, bytes_per_sample * 8);
try stream.writeAll("data");
try stream.writeIntLittle(u32, data_len);
try stream.writeAll(info.data);
}
};
}
test "basic coverage (loading)" {
const null_wav = [_]u8{
0x52, 0x49, 0x46, 0x46, 0x7C, 0x00, 0x00, 0x00, 0x57, 0x41, 0x56,
0x45, 0x66, 0x6D, 0x74, 0x20, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00,
0x01, 0x00, 0x44, 0xAC, 0x00, 0x00, 0x88, 0x58, 0x01, 0x00, 0x02,
0x00, 0x10, 0x00, 0x64, 0x61, 0x74, 0x61, 0x58, 0x00, 0x00, 0x00,
0x00, 0x00, 0xFF, 0xFF, 0x02, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0x02, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0xFE, 0xFF, 0x01, 0x00, 0x01,
0x00, 0xFE, 0xFF, 0x03, 0x00, 0xFD, 0xFF, 0x02, 0x00, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0xFF, 0xFF, 0x01, 0x00, 0xFE,
0xFF, 0x02, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x01, 0x00, 0xFF, 0xFF,
0x00, 0x00, 0x01, 0x00, 0xFE, 0xFF, 0x02, 0x00, 0xFF, 0xFF, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0x03, 0x00, 0xFC, 0xFF, 0x03, 0x00,
};
var stream = std.io.fixedBufferStream(&null_wav).inStream();
const MyLoader = Loader(@TypeOf(stream), true);
const preloaded = try MyLoader.preload(&stream);
std.testing.expectEqual(@as(usize, 1), preloaded.num_channels);
std.testing.expectEqual(@as(usize, 44100), preloaded.sample_rate);
std.testing.expectEqual(@as(Format, .signed16_lsb), preloaded.format);
std.testing.expectEqual(@as(usize, 44), preloaded.num_samples);
var buffer: [88]u8 = undefined;
try MyLoader.load(&stream, preloaded, &buffer);
}
test "basic coverage (saving)" {
var buffer: [1000]u8 = undefined;
var stream = std.io.fixedBufferStream(&buffer).outStream();
const MySaver = Saver(@TypeOf(stream));
try MySaver.save(&stream, .{
.num_channels = 1,
.sample_rate = 44100,
.format = .signed16_lsb,
.data = &[_]u8{ 0, 0, 0, 0, 0, 0, 0, 0 },
});
std.testing.expectEqualSlices(u8, "RIFF", buffer[0..4]);
} | deps/zig_wav.zig |
const std = @import("std");
pub fn main() !u8 {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const argv = try std.process.argsAlloc(gpa.allocator());
defer std.process.argsFree(gpa.allocator(), argv);
if (argv.len != 3) {
return 1;
}
var src_dir = try std.fs.cwd().openDir(argv[1], .{ .iterate = true });
defer src_dir.close();
var dst_dir = try std.fs.cwd().openDir(argv[2], .{});
defer dst_dir.close();
var data = std.ArrayList(Series).init(gpa.allocator());
{
var iter = src_dir.iterate();
while (try iter.next()) |entry| {
if (!std.mem.eql(u8, std.fs.path.extension(entry.name), ".csv"))
continue;
const name_no_ext = entry.name[0 .. entry.name.len - 4];
const idx = std.mem.lastIndexOfScalar(u8, name_no_ext, '-') orelse continue;
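            // e.g. a file named "foo-bar-ReleaseFast.csv" yields the benchmark
            // name "foo-bar" and the build mode ReleaseFast.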
var series = Series{
.benchmark = try gpa.allocator().dupe(u8, name_no_ext[0..idx]),
.mode = std.meta.stringToEnum(std.builtin.Mode, name_no_ext[idx + 1 ..]) orelse @panic("unexpected name"),
.data = undefined,
};
var file = try src_dir.openFile(entry.name, .{ .mode = .read_only });
defer file.close();
series.data = try loadSeries(gpa.allocator(), file);
std.sort.sort(DataPoint, series.data, {}, orderDataPoint);
try data.append(series);
}
}
try renderSeriesSet(dst_dir, "compile-ReleaseSafe.svg", data.items, "compile_time", filterReleaseSafe);
try renderSeriesSet(dst_dir, "setup-ReleaseSafe.svg", data.items, "setup_time", filterReleaseSafe);
try renderSeriesSet(dst_dir, "run-ReleaseSafe.svg", data.items, "run_time", filterReleaseSafe);
try renderSeriesSet(dst_dir, "compile-ReleaseSmall.svg", data.items, "compile_time", filterReleaseSmall);
try renderSeriesSet(dst_dir, "setup-ReleaseSmall.svg", data.items, "setup_time", filterReleaseSmall);
try renderSeriesSet(dst_dir, "run-ReleaseSmall.svg", data.items, "run_time", filterReleaseSmall);
try renderSeriesSet(dst_dir, "compile-ReleaseFast.svg", data.items, "compile_time", filterReleaseFast);
try renderSeriesSet(dst_dir, "setup-ReleaseFast.svg", data.items, "setup_time", filterReleaseFast);
try renderSeriesSet(dst_dir, "run-ReleaseFast.svg", data.items, "run_time", filterReleaseFast);
return 0;
}
pub fn renderSeriesSet(dst_dir: std.fs.Dir, file_name: []const u8, all_series: []Series, comptime field: []const u8, comptime filter: fn (series: Series) bool) !void {
var file = try dst_dir.createFile(file_name, .{});
defer file.close();
const writer = file.writer();
var start_time: u128 = std.math.maxInt(u128);
var end_time: u128 = 0;
var high: f32 = 0;
const scale_base = 5;
for (all_series) |series| {
if (filter(series)) {
start_time = std.math.min(start_time, series.data[0].date.getLinearSortVal());
end_time = std.math.max(end_time, series.data[series.data.len - 1].date.getLinearSortVal());
for (series.data) |dp| {
high = std.math.max(high, @intToFloat(f32, @field(dp, field)));
}
}
}
high = std.math.pow(f32, scale_base, @ceil(std.math.log(f32, scale_base, 1.3 * high)));
const time_range = end_time - start_time;
var size_x: f32 = 350;
var size_y: f32 = 200;
var legend_size: f32 = 50;
const viewport_size: f32 = size_x - legend_size;
try writer.print("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n", .{});
try writer.print("<svg version=\"1.1\" viewBox=\"0 0 {d} {d}\" xmlns=\"http://www.w3.org/2000/svg\">\n", .{
size_x,
size_y,
});
const color_palette = [_][]const u8{
"#442434",
"#30346d",
"#4e4a4e",
"#854c30",
"#346524",
"#d04648",
"#757161",
"#597dce",
"#d27d2c",
"#8595a1",
"#6daa2c",
"#d2aa99",
"#6dc2ca",
"#dad45e",
};
var index: u32 = 0;
for (all_series) |series| {
if (filter(series)) {
const color = color_palette[index % color_palette.len];
try writer.print(
\\ <text x="{d:.3}" y="{d:.3}" fill="{s}" font-family="sans-serif" font-size="5" xml:space="preserve">{s}</text>
\\
, .{
viewport_size + 5,
10 + 7 * index,
color,
series.benchmark,
});
try writer.print(" <path d=\"M", .{});
for (series.data) |dp| {
const dx = viewport_size * @intToFloat(f32, dp.date.getLinearSortVal() - start_time) / @intToFloat(f32, time_range);
const dy = size_y * (1.0 - @intToFloat(f32, @field(dp, field)) / high);
try writer.print(" {d:.4} {d:.4}", .{ dx, dy });
}
try writer.print("\" fill=\"none\" stroke=\"{s}\" stroke-width=\"1.00\" />\n", .{
color,
});
index += 1;
}
}
try writer.print("</svg>\n", .{});
}
fn filterReleaseSafe(series: Series) bool {
return (series.mode == .ReleaseSafe);
}
fn filterReleaseSmall(series: Series) bool {
return (series.mode == .ReleaseSmall);
}
fn filterReleaseFast(series: Series) bool {
return (series.mode == .ReleaseFast);
}
fn orderDataPoint(_: void, lhs: DataPoint, rhs: DataPoint) bool {
return lhs.date.getLinearSortVal() < rhs.date.getLinearSortVal();
}
pub const Date = struct {
year: u32,
day: u8,
month: u8,
hour: u8,
minute: u8,
second: u8,
    pub fn getLinearSortVal(date: Date) u64 {
        // Pack the timestamp so that numeric order matches chronological order:
        // seconds occupy the two lowest decimal digits, then minutes, hours,
        // day, month and year in increasingly significant positions.
        return 1 * @as(u64, date.second) +
            1_00 * @as(u64, date.minute) +
            1_00_00 * @as(u64, date.hour) +
            1_00_00_00 * @as(u64, date.day) +
            1_00_00_00_00 * @as(u64, date.month) +
            1_00_00_00_00_00 * @as(u64, date.year);
    }
};
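// Illustrative sanity check of the packing above (added as an example; the
// dates are arbitrary):
test "Date.getLinearSortVal orders dates chronologically" {
    const a = Date{ .year = 2022, .month = 12, .day = 31, .hour = 23, .minute = 59, .second = 59 };
    const b = Date{ .year = 2023, .month = 1, .day = 1, .hour = 0, .minute = 0, .second = 0 };
    try std.testing.expect(a.getLinearSortVal() < b.getLinearSortVal());
    const c1 = Date{ .year = 2022, .month = 3, .day = 14, .hour = 14, .minute = 25, .second = 56 };
    const c2 = Date{ .year = 2022, .month = 3, .day = 15, .hour = 0, .minute = 0, .second = 0 };
    try std.testing.expect(c1.getLinearSortVal() < c2.getLinearSortVal());
}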
pub const DataPoint = struct {
date: Date,
compile_time: u64,
setup_time: u64,
run_time: u64,
};
pub const Series = struct {
benchmark: []const u8,
mode: std.builtin.Mode,
data: []DataPoint,
};
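// loadSeries parses CSV files shaped like the following (illustrative row; the
// three numeric columns are the raw compile/setup/run measurements):
//
//     time;compile;setup;run
//     2022-03-14 14:25:56;123456;789;101112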
pub fn loadSeries(allocator: std.mem.Allocator, file: std.fs.File) ![]DataPoint {
const reader = file.reader();
var line_buffer: [4096]u8 = undefined;
const first_line = (try reader.readUntilDelimiterOrEof(&line_buffer, '\n')) orelse return error.UnexpectedData;
if (!std.mem.eql(u8, first_line, "time;compile;setup;run"))
return error.UnexpectedData;
var data_set = std.ArrayList(DataPoint).init(allocator);
defer data_set.deinit();
while (true) {
const line_or_eof = try reader.readUntilDelimiterOrEof(&line_buffer, '\n');
if (line_or_eof) |line| {
if (line.len == 0)
continue;
var iter = std.mem.split(u8, line, ";");
const time_str = iter.next() orelse return error.UnexpectedData;
const compile_str = try std.fmt.parseInt(u64, iter.next() orelse return error.UnexpectedData, 10);
const setup_str = try std.fmt.parseInt(u64, iter.next() orelse return error.UnexpectedData, 10);
const run_str = try std.fmt.parseInt(u64, iter.next() orelse return error.UnexpectedData, 10);
if (time_str.len != 19) return error.UnexpectedData;
try data_set.append(DataPoint{
.date = Date{
// 2022-03-14 14:25:56
.year = try std.fmt.parseInt(u32, time_str[0..4], 10),
                    .month = try std.fmt.parseInt(u8, time_str[5..7], 10),
                    .day = try std.fmt.parseInt(u8, time_str[8..10], 10),
.hour = try std.fmt.parseInt(u8, time_str[11..13], 10),
.minute = try std.fmt.parseInt(u8, time_str[14..16], 10),
.second = try std.fmt.parseInt(u8, time_str[17..19], 10),
},
.compile_time = compile_str,
.setup_time = setup_str,
.run_time = run_str,
});
} else {
break;
}
}
return data_set.toOwnedSlice();
} | src/benchmark/render.zig |
const Vector = @import("../Mathematics/Mathematics.zig").Vector;
const Matrix = @import("../Mathematics/Mathematics.zig").Matrix;
const wgi = @import("../WindowGraphicsInput/WindowGraphicsInput.zig");
const FrameBuffer = wgi.FrameBuffer;
const MeshRenderer = @import("MeshRenderer.zig").MeshRenderer;
const rtRenderEngine = @import("RTRenderEngine.zig");
const blur_shader_program = &rtRenderEngine.blur_shader_program;
const lights = &rtRenderEngine.lights;
const Object = rtRenderEngine.Object;
const getSettings = rtRenderEngine.getSettings;
const renderObjects = rtRenderEngine.renderObjects;
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const MinFilter = wgi.MinFilter;
const ImageType = wgi.ImageType;
const window = wgi.window;
const ShaderProgram = wgi.ShaderProgram;
const anim = @import("Animation.zig");
pub const Animation = anim.Animation;
pub const Mesh = @import("Mesh.zig").Mesh;
pub const Texture2D = @import("Texture2D.zig").Texture2D;
const PostProcess = @import("PostProcess.zig");
const ShaderObject = wgi.ShaderObject;
const ShaderType = wgi.ShaderType;
const Buffer = wgi.Buffer;
const shdr = @import("Shader.zig");
const ReferenceCounter = @import("../RefCount.zig").ReferenceCounter;
const files = @import("../Files.zig");
const loadFileWithNullTerminator = files.loadFileWithNullTerminator;
const VertexMeta = wgi.VertexMeta;
const ArrayList = std.ArrayList;
pub const Light = struct {
pub const LightType = enum(u32) {
Point = 0,
Directional = 1,
Spotlight = 2,
};
light_type: LightType,
angle: f32 = 0.9,
colour: [3]f32,
attenuation: f32 = 1.0, // how fast the light dissipates
cast_realtime_shadows: bool = false,
shadow_width: f32 = 20.0,
shadow_height: f32 = 20.0,
shadow_near: f32 = 1.0,
shadow_far: f32 = 50.0,
// Must be multiple of 16
shadow_resolution_width: u32 = 512,
// shadow_resolution_height is calculated using shadow_resolution_width and the aspect ratio
// of shadow_width and shadow_height, then rounded up to the nearest 16
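    // Worked example: with shadow_resolution_width = 512, shadow_width = 20 and
    // shadow_height = 30 the height resolution is 512 * 30 / 20 = 768, already a
    // multiple of 16; a result such as 600 would be rounded up to 608.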
// internal variables
lum: f32 = 0.0,
effect: f32 = 0.0,
distance_divider: f32 = 1.0,
light_pos: Vector(f32, 3) = Vector(f32, 3).init([3]f32{ 0, 0, 0 }),
uniform_array_index: u32 = 0,
depth_framebuffer: ?FrameBuffer = null,
average_depth_framebuffer: ?FrameBuffer = null,
light_matrix: Matrix(f32, 4) = Matrix(f32, 4).identity(),
// Checks the mesh renderer variables and global settings to determine whether this light
    // should be used this frame
pub fn lightShouldBeUsed(self: *Light, mesh_renderer: *MeshRenderer) bool {
if (self.light_type == Light.LightType.Point) {
return getSettings().enable_point_lights and mesh_renderer.enable_point_lights;
}
if (self.light_type == Light.LightType.Directional) {
return getSettings().enable_directional_lights and mesh_renderer.enable_directional_lights;
}
if (self.light_type == Light.LightType.Spotlight) {
return getSettings().enable_spot_lights and mesh_renderer.enable_spot_lights;
}
assert(false);
return false;
}
    // Draws the scene from the light's point of view to create a depth image. Vertex processing only.
pub fn createShadowMap(self: *Light, root_object: *Object, light_transform: *Matrix(f32, 4), allocator: *Allocator) !void {
if (!self.cast_realtime_shadows or !getSettings().enable_shadows) {
return;
}
if (self.light_type == LightType.Point) {
self.cast_realtime_shadows = false;
return;
}
// Position of light in 3D space
const pos = light_transform.*.position3D();
// Create frame buffer object
var shadow_resolution_height = @floatToInt(u32, (@intToFloat(f32, self.shadow_resolution_width) * self.shadow_height) / self.shadow_width);
if (shadow_resolution_height % 16 != 0) {
shadow_resolution_height += 16 - (shadow_resolution_height % 16);
}
if (self.depth_framebuffer == null) {
self.depth_framebuffer = FrameBuffer.init(null, self.shadow_resolution_width, shadow_resolution_height, FrameBuffer.DepthType.I16, allocator) catch null;
if (self.depth_framebuffer == null) {
self.cast_realtime_shadows = false;
return;
}
try self.depth_framebuffer.?.depth_texture.?.setFiltering(true, MinFilter.Linear);
}
if (self.average_depth_framebuffer == null) {
self.average_depth_framebuffer = FrameBuffer.init(ImageType.RG32F, self.shadow_resolution_width / 16, shadow_resolution_height / 16, FrameBuffer.DepthType.None, allocator) catch null;
if (self.average_depth_framebuffer == null) {
self.cast_realtime_shadows = false;
return;
}
try self.average_depth_framebuffer.?.setTextureFiltering(true, true);
}
var projection_matrix: ?Matrix(f32, 4) = null;
if (self.light_type == LightType.Directional) {
projection_matrix = Matrix(f32, 4).orthoProjectionOpenGLInverseZ(-self.shadow_width * 0.5, self.shadow_width * 0.5, -self.shadow_height * 0.5, self.shadow_height * 0.5, self.shadow_near, self.shadow_far);
} else {
const angle = std.math.acos(self.angle) * 2.0;
projection_matrix = Matrix(f32, 4).perspectiveProjectionOpenGLInverseZ(self.shadow_width / self.shadow_height, angle, self.shadow_near, self.shadow_far);
}
var view_matrix = try light_transform.*.inverse();
view_matrix.data[3][2] += 1.0;
self.light_matrix = view_matrix.mul(projection_matrix.?);
try self.depth_framebuffer.?.bind();
window.setCullMode(window.CullMode.AntiClockwise);
wgi.cullFace(wgi.CullFaceMode.Back);
wgi.enableDepthWriting();
wgi.setDepthModeDirectX(false, false);
window.clear(false, true);
renderObjects(root_object, allocator, &view_matrix, &projection_matrix.?, true);
// Shadow map is now in depth_framebuffer
// Now blur it
window.setCullMode(window.CullMode.None);
wgi.disableDepthTesting();
wgi.disableDepthWriting();
try blur_shader_program.*.?.bind();
try self.average_depth_framebuffer.?.bind();
try self.depth_framebuffer.?.bindDepthTexture();
try VertexMeta.drawWithoutData(VertexMeta.PrimitiveType.Triangles, 0, 3);
}
};
// See StandardShader.glsl
pub const UniformDataLight = packed struct {
positionAndType: [4]f32,
directionAndAngle: [4]f32,
intensity: [4]f32,
};
pub fn getLightData(object: *Object, max_vertex_lights: u32, max_fragment_lights: u32, per_obj_light: *([3]f32), vertex_light_indices: *([8]i32), fragment_light_indices: *([4]i32), fragment_light_matrices: *([4]Matrix(f32, 4)), fragment_light_shadow_textures: *([4](?*const FrameBuffer))) void {
if (lights.*.?.items.len == 0) {
return;
}
const obj_pos = object.true_transform.?.position3D();
// Calculate effect of each light on the object
for (lights.*.?.items) |*light| {
if (light.*.light.?.light_type == Light.LightType.Point or light.*.light.?.light_type == Light.LightType.Spotlight) {
            // TODO: if the bounding box of the object were known, we could determine whether a spotlight actually affects the object
var v = light.*.light.?.light_pos;
v.sub(obj_pos);
const x = v.length() * light.*.light.?.attenuation;
const distDiv = x * x;
light.*.light.?.distance_divider = distDiv;
if (distDiv == 0.0) {
// Light is inside the object
light.*.light.?.distance_divider = 0.001;
light.*.light.?.effect = 0.0;
} else {
light.*.light.?.effect = light.*.light.?.lum / distDiv;
}
} else if (light.*.light.?.light_type == Light.LightType.Directional) {
light.*.light.?.effect = light.*.light.?.lum;
} else {
assert(false);
}
}
// Pick most significant 4* lights to be per-fragment
// Then next 8* to be per-vertex
// Then all other lights are per-object
// * Max number of lights can be decreased
if (lights.*.?.items.len > 1) {
// Sort the lights by the effect on this object (most -> least effect)
const sortFunction = struct {
fn f(a: *Object, b: *Object) bool {
return a.*.light.?.effect > b.*.light.?.effect;
}
};
std.sort.sort(*Object, lights.*.?.items, sortFunction.f);
}
// Set light indices
const lights_slice = lights.*.?.items;
var i: u32 = 0; // index into lights_slice
var j: u32 = 0; // index into light arrays
while (i < getSettings().max_fragment_lights and i < max_fragment_lights and i < lights_slice.len) : (i += 1) {
if (lights_slice[i].*.light.?.lightShouldBeUsed(object.mesh_renderer.?)) {
fragment_light_indices[j] = @intCast(i32, lights_slice[i].*.light.?.uniform_array_index);
if (lights_slice[i].*.light.?.cast_realtime_shadows and getSettings().enable_shadows) {
if (lights_slice[i].*.light.?.light_type != Light.LightType.Point) {
fragment_light_matrices[j] = lights_slice[i].*.light.?.light_matrix;
fragment_light_shadow_textures[j] = &lights_slice[i].*.light.?.average_depth_framebuffer.?;
}
}
j += 1;
}
}
j = 0;
    // Advance i on every iteration (not only for usable lights) so that a light
    // which should not be used this frame cannot stall the loop.
    while (j < 8 and i < lights_slice.len and i < max_vertex_lights and i < getSettings().max_vertex_lights) : (i += 1) {
        if (lights_slice[i].*.light.?.lightShouldBeUsed(object.mesh_renderer.?)) {
            vertex_light_indices[j] = @intCast(i32, lights_slice[i].*.light.?.uniform_array_index);
            j += 1;
        }
    }
if (object.mesh_renderer.?.*.enable_per_object_light) {
// Everything else is applied per-object
while (i < lights_slice.len) : (i += 1) {
const light = &lights_slice[i].*.light.?;
if (light.light_type == Light.LightType.Point) {
per_obj_light[0] += (light.colour[0] / light.distance_divider) * 0.7;
per_obj_light[1] += (light.colour[1] / light.distance_divider) * 0.7;
per_obj_light[2] += (light.colour[2] / light.distance_divider) * 0.7;
} else if (light.light_type == Light.LightType.Directional) {
per_obj_light[0] += light.colour[0] * 0.7;
per_obj_light[1] += light.colour[1] * 0.7;
per_obj_light[2] += light.colour[2] * 0.7;
}
// TODO do Spotlights if bounding box is available
}
}
} | src/RTRenderEngine/Light.zig |
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Compilation = @import("../Compilation.zig");
const llvm = @import("llvm/bindings.zig");
const link = @import("../link.zig");
const log = std.log.scoped(.codegen);
const math = std.math;
const Module = @import("../Module.zig");
const TypedValue = @import("../TypedValue.zig");
const ir = @import("../air.zig");
const Inst = ir.Inst;
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
const LazySrcLoc = Module.LazySrcLoc;
pub fn targetTriple(allocator: *Allocator, target: std.Target) ![:0]u8 {
const llvm_arch = switch (target.cpu.arch) {
.arm => "arm",
.armeb => "armeb",
.aarch64 => "aarch64",
.aarch64_be => "aarch64_be",
.aarch64_32 => "aarch64_32",
.arc => "arc",
.avr => "avr",
.bpfel => "bpfel",
.bpfeb => "bpfeb",
.csky => "csky",
.hexagon => "hexagon",
.mips => "mips",
.mipsel => "mipsel",
.mips64 => "mips64",
.mips64el => "mips64el",
.msp430 => "msp430",
.powerpc => "powerpc",
.powerpcle => "powerpcle",
.powerpc64 => "powerpc64",
.powerpc64le => "powerpc64le",
.r600 => "r600",
.amdgcn => "amdgcn",
.riscv32 => "riscv32",
.riscv64 => "riscv64",
.sparc => "sparc",
.sparcv9 => "sparcv9",
.sparcel => "sparcel",
.s390x => "s390x",
.tce => "tce",
.tcele => "tcele",
.thumb => "thumb",
.thumbeb => "thumbeb",
.i386 => "i386",
.x86_64 => "x86_64",
.xcore => "xcore",
.nvptx => "nvptx",
.nvptx64 => "nvptx64",
.le32 => "le32",
.le64 => "le64",
.amdil => "amdil",
.amdil64 => "amdil64",
.hsail => "hsail",
.hsail64 => "hsail64",
.spir => "spir",
.spir64 => "spir64",
.kalimba => "kalimba",
.shave => "shave",
.lanai => "lanai",
.wasm32 => "wasm32",
.wasm64 => "wasm64",
.renderscript32 => "renderscript32",
.renderscript64 => "renderscript64",
.ve => "ve",
.spu_2 => return error.LLVMBackendDoesNotSupportSPUMarkII,
.spirv32 => return error.LLVMBackendDoesNotSupportSPIRV,
.spirv64 => return error.LLVMBackendDoesNotSupportSPIRV,
};
const llvm_os = switch (target.os.tag) {
.freestanding => "unknown",
.ananas => "ananas",
.cloudabi => "cloudabi",
.dragonfly => "dragonfly",
.freebsd => "freebsd",
.fuchsia => "fuchsia",
.ios => "ios",
.kfreebsd => "kfreebsd",
.linux => "linux",
.lv2 => "lv2",
.macos => "macosx",
.netbsd => "netbsd",
.openbsd => "openbsd",
.solaris => "solaris",
.windows => "windows",
.zos => "zos",
.haiku => "haiku",
.minix => "minix",
.rtems => "rtems",
.nacl => "nacl",
.aix => "aix",
.cuda => "cuda",
.nvcl => "nvcl",
.amdhsa => "amdhsa",
.ps4 => "ps4",
.elfiamcu => "elfiamcu",
.tvos => "tvos",
.watchos => "watchos",
.mesa3d => "mesa3d",
.contiki => "contiki",
.amdpal => "amdpal",
.hermit => "hermit",
.hurd => "hurd",
.wasi => "wasi",
.emscripten => "emscripten",
.uefi => "windows",
.opencl => return error.LLVMBackendDoesNotSupportOpenCL,
.glsl450 => return error.LLVMBackendDoesNotSupportGLSL450,
.vulkan => return error.LLVMBackendDoesNotSupportVulkan,
.plan9 => return error.LLVMBackendDoesNotSupportPlan9,
.other => "unknown",
};
const llvm_abi = switch (target.abi) {
.none => "unknown",
.gnu => "gnu",
.gnuabin32 => "gnuabin32",
.gnuabi64 => "gnuabi64",
.gnueabi => "gnueabi",
.gnueabihf => "gnueabihf",
.gnux32 => "gnux32",
.gnuilp32 => "gnuilp32",
.code16 => "code16",
.eabi => "eabi",
.eabihf => "eabihf",
.android => "android",
.musl => "musl",
.musleabi => "musleabi",
.musleabihf => "musleabihf",
.msvc => "msvc",
.itanium => "itanium",
.cygnus => "cygnus",
.coreclr => "coreclr",
.simulator => "simulator",
.macabi => "macabi",
};
return std.fmt.allocPrintZ(allocator, "{s}-unknown-{s}-{s}", .{ llvm_arch, llvm_os, llvm_abi });
}
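// Example: an x86_64 Linux target with the GNU ABI maps to the triple
// "x86_64-unknown-linux-gnu". The returned string is allocated with `allocator`
// and owned by the caller.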
pub const Object = struct {
llvm_module: *const llvm.Module,
context: *const llvm.Context,
target_machine: *const llvm.TargetMachine,
object_pathZ: [:0]const u8,
pub fn create(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Object {
_ = sub_path;
const self = try allocator.create(Object);
errdefer allocator.destroy(self);
const obj_basename = try std.zig.binNameAlloc(allocator, .{
.root_name = options.root_name,
.target = options.target,
.output_mode = .Obj,
});
defer allocator.free(obj_basename);
const o_directory = options.module.?.zig_cache_artifact_directory;
const object_path = try o_directory.join(allocator, &[_][]const u8{obj_basename});
defer allocator.free(object_path);
const object_pathZ = try allocator.dupeZ(u8, object_path);
errdefer allocator.free(object_pathZ);
const context = llvm.Context.create();
errdefer context.dispose();
initializeLLVMTargets();
const root_nameZ = try allocator.dupeZ(u8, options.root_name);
defer allocator.free(root_nameZ);
const llvm_module = llvm.Module.createWithName(root_nameZ.ptr, context);
errdefer llvm_module.dispose();
const llvm_target_triple = try targetTriple(allocator, options.target);
defer allocator.free(llvm_target_triple);
var error_message: [*:0]const u8 = undefined;
var target: *const llvm.Target = undefined;
if (llvm.Target.getFromTriple(llvm_target_triple.ptr, &target, &error_message).toBool()) {
defer llvm.disposeMessage(error_message);
const stderr = std.io.getStdErr().writer();
try stderr.print(
\\Zig is expecting LLVM to understand this target: '{s}'
\\However LLVM responded with: "{s}"
\\
,
.{ llvm_target_triple, error_message },
);
return error.InvalidLLVMTriple;
}
const opt_level: llvm.CodeGenOptLevel = if (options.optimize_mode == .Debug) .None else .Aggressive;
const target_machine = llvm.TargetMachine.create(
target,
llvm_target_triple.ptr,
"",
"",
opt_level,
.Static,
.Default,
);
errdefer target_machine.dispose();
self.* = .{
.llvm_module = llvm_module,
.context = context,
.target_machine = target_machine,
.object_pathZ = object_pathZ,
};
return self;
}
pub fn deinit(self: *Object, allocator: *Allocator) void {
self.target_machine.dispose();
self.llvm_module.dispose();
self.context.dispose();
allocator.free(self.object_pathZ);
allocator.destroy(self);
}
fn initializeLLVMTargets() void {
llvm.initializeAllTargets();
llvm.initializeAllTargetInfos();
llvm.initializeAllTargetMCs();
llvm.initializeAllAsmPrinters();
llvm.initializeAllAsmParsers();
}
pub fn flushModule(self: *Object, comp: *Compilation) !void {
if (comp.verbose_llvm_ir) {
const dump = self.llvm_module.printToString();
defer llvm.disposeMessage(dump);
const stderr = std.io.getStdErr().writer();
try stderr.writeAll(std.mem.spanZ(dump));
}
{
var error_message: [*:0]const u8 = undefined;
// verifyModule always allocs the error_message even if there is no error
defer llvm.disposeMessage(error_message);
if (self.llvm_module.verify(.ReturnStatus, &error_message).toBool()) {
const stderr = std.io.getStdErr().writer();
try stderr.print("broken LLVM module found: {s}\nThis is a bug in the Zig compiler.", .{error_message});
return error.BrokenLLVMModule;
}
}
var error_message: [*:0]const u8 = undefined;
if (self.target_machine.emitToFile(
self.llvm_module,
self.object_pathZ.ptr,
.ObjectFile,
&error_message,
).toBool()) {
defer llvm.disposeMessage(error_message);
const stderr = std.io.getStdErr().writer();
try stderr.print("LLVM failed to emit file: {s}\n", .{error_message});
return error.FailedToEmit;
}
}
pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void {
var dg: DeclGen = .{
.object = self,
.module = module,
.decl = decl,
.err_msg = null,
.gpa = module.gpa,
};
dg.genDecl() catch |err| switch (err) {
error.CodegenFail => {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, dg.err_msg.?);
dg.err_msg = null;
return;
},
else => |e| return e,
};
}
};
pub const DeclGen = struct {
object: *Object,
module: *Module,
decl: *Module.Decl,
err_msg: ?*Module.ErrorMsg,
gpa: *Allocator,
fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@setCold(true);
assert(self.err_msg == null);
const src_loc = @as(LazySrcLoc, .{ .node_offset = 0 }).toSrcLocWithDecl(self.decl);
self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, "TODO (LLVM): " ++ format, args);
return error.CodegenFail;
}
fn llvmModule(self: *DeclGen) *const llvm.Module {
return self.object.llvm_module;
}
fn context(self: *DeclGen) *const llvm.Context {
return self.object.context;
}
fn genDecl(self: *DeclGen) !void {
const decl = self.decl;
assert(decl.has_tv);
log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty, decl.val });
if (decl.val.castTag(.function)) |func_payload| {
const func = func_payload.data;
const llvm_func = try self.resolveLLVMFunction(func.owner_decl);
// This gets the LLVM values from the function and stores them in `self.args`.
const fn_param_len = func.owner_decl.ty.fnParamLen();
var args = try self.gpa.alloc(*const llvm.Value, fn_param_len);
for (args) |*arg, i| {
arg.* = llvm.getParam(llvm_func, @intCast(c_uint, i));
}
// We remove all the basic blocks of a function to support incremental
// compilation!
// TODO: remove all basic blocks if functions can have more than one
if (llvm_func.getFirstBasicBlock()) |bb| {
bb.deleteBasicBlock();
}
const builder = self.context().createBuilder();
const entry_block = self.context().appendBasicBlock(llvm_func, "Entry");
builder.positionBuilderAtEnd(entry_block);
var fg: FuncGen = .{
.dg = self,
.builder = builder,
.args = args,
.arg_index = 0,
.func_inst_table = .{},
.entry_block = entry_block,
.latest_alloca_inst = null,
.llvm_func = llvm_func,
.blocks = .{},
};
defer fg.deinit();
try fg.genBody(func.body);
} else if (decl.val.castTag(.extern_fn)) |extern_fn| {
_ = try self.resolveLLVMFunction(extern_fn.data);
} else {
_ = try self.resolveGlobalDecl(decl);
}
}
/// If the llvm function does not exist, create it
fn resolveLLVMFunction(self: *DeclGen, func: *Module.Decl) !*const llvm.Value {
// TODO: do we want to store this in our own data structure?
if (self.llvmModule().getNamedFunction(func.name)) |llvm_fn| return llvm_fn;
assert(func.has_tv);
const zig_fn_type = func.ty;
const return_type = zig_fn_type.fnReturnType();
const fn_param_len = zig_fn_type.fnParamLen();
const fn_param_types = try self.gpa.alloc(Type, fn_param_len);
defer self.gpa.free(fn_param_types);
zig_fn_type.fnParamTypes(fn_param_types);
const llvm_param = try self.gpa.alloc(*const llvm.Type, fn_param_len);
defer self.gpa.free(llvm_param);
for (fn_param_types) |fn_param, i| {
llvm_param[i] = try self.getLLVMType(fn_param);
}
const fn_type = llvm.Type.functionType(
try self.getLLVMType(return_type),
if (fn_param_len == 0) null else llvm_param.ptr,
@intCast(c_uint, fn_param_len),
.False,
);
const llvm_fn = self.llvmModule().addFunction(func.name, fn_type);
if (return_type.tag() == .noreturn) {
self.addFnAttr(llvm_fn, "noreturn");
}
return llvm_fn;
}
fn resolveGlobalDecl(self: *DeclGen, decl: *Module.Decl) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
// TODO: do we want to store this in our own data structure?
if (self.llvmModule().getNamedGlobal(decl.name)) |val| return val;
assert(decl.has_tv);
// TODO: remove this redundant `getLLVMType`, it is also called in `genTypedValue`.
const llvm_type = try self.getLLVMType(decl.ty);
const val = try self.genTypedValue(.{ .ty = decl.ty, .val = decl.val }, null);
const global = self.llvmModule().addGlobal(llvm_type, decl.name);
llvm.setInitializer(global, val);
// TODO ask the Decl if it is const
// https://github.com/ziglang/zig/issues/7582
return global;
}
fn getLLVMType(self: *DeclGen, t: Type) error{ OutOfMemory, CodegenFail }!*const llvm.Type {
log.debug("getLLVMType for {}", .{t});
switch (t.zigTypeTag()) {
.Void => return self.context().voidType(),
.NoReturn => return self.context().voidType(),
.Int => {
const info = t.intInfo(self.module.getTarget());
return self.context().intType(info.bits);
},
.Bool => return self.context().intType(1),
.Pointer => {
if (t.isSlice()) {
return self.todo("implement slices", .{});
} else {
const elem_type = try self.getLLVMType(t.elemType());
return elem_type.pointerType(0);
}
},
.Array => {
const elem_type = try self.getLLVMType(t.elemType());
return elem_type.arrayType(@intCast(c_uint, t.abiSize(self.module.getTarget())));
},
.Optional => {
if (!t.isPtrLikeOptional()) {
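// Non-pointer-like optionals are lowered to a two-field struct:
// { payload, i1 "has value" flag }.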
var buf: Type.Payload.ElemType = undefined;
const child_type = t.optionalChild(&buf);
var optional_types: [2]*const llvm.Type = .{
try self.getLLVMType(child_type),
self.context().intType(1),
};
return self.context().structType(&optional_types, 2, .False);
} else {
return self.todo("implement optional pointers as actual pointers", .{});
}
},
.ComptimeInt => unreachable,
.ComptimeFloat => unreachable,
.Type => unreachable,
.Undefined => unreachable,
.Null => unreachable,
.EnumLiteral => unreachable,
.BoundFn => @panic("TODO remove BoundFn from the language"),
.Float,
.Struct,
.ErrorUnion,
.ErrorSet,
.Enum,
.Union,
.Fn,
.Opaque,
.Frame,
.AnyFrame,
.Vector,
=> return self.todo("implement getLLVMType for type '{}'", .{t}),
}
}
// TODO: figure out a way to remove the FuncGen argument
fn genTypedValue(self: *DeclGen, tv: TypedValue, fg: ?*FuncGen) error{ OutOfMemory, CodegenFail }!*const llvm.Value {
const llvm_type = try self.getLLVMType(tv.ty);
if (tv.val.isUndef())
return llvm_type.getUndef();
switch (tv.ty.zigTypeTag()) {
.Bool => return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(),
.Int => {
var bigint_space: Value.BigIntSpace = undefined;
const bigint = tv.val.toBigInt(&bigint_space);
if (bigint.eqZero()) return llvm_type.constNull();
if (bigint.limbs.len != 1) {
return self.todo("implement bigger bigint", .{});
}
const llvm_int = llvm_type.constInt(bigint.limbs[0], .False);
if (!bigint.positive) {
return llvm.constNeg(llvm_int);
}
return llvm_int;
},
.Pointer => switch (tv.val.tag()) {
.decl_ref => {
const decl = tv.val.castTag(.decl_ref).?.data;
const val = try self.resolveGlobalDecl(decl);
const usize_type = try self.getLLVMType(Type.initTag(.usize));
// TODO: second index should be the index into the memory!
var indices: [2]*const llvm.Value = .{
usize_type.constNull(),
usize_type.constNull(),
};
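// For an array-typed global, a GEP with two zero indices is the classic
// "decay" pattern: it yields a pointer to the global's first element.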
// TODO: consider using buildInBoundsGEP2 for opaque pointers
return fg.?.builder.buildInBoundsGEP(val, &indices, 2, "");
},
.ref_val => {
const elem_value = tv.val.castTag(.ref_val).?.data;
const elem_type = tv.ty.castPointer().?.data;
const alloca = fg.?.buildAlloca(try self.getLLVMType(elem_type));
_ = fg.?.builder.buildStore(try self.genTypedValue(.{ .ty = elem_type, .val = elem_value }, fg), alloca);
return alloca;
},
else => return self.todo("implement const of pointer type '{}'", .{tv.ty}),
},
.Array => {
if (tv.val.castTag(.bytes)) |payload| {
const zero_sentinel = if (tv.ty.sentinel()) |sentinel| blk: {
if (sentinel.tag() == .zero) break :blk true;
return self.todo("handle other sentinel values", .{});
} else false;
return self.context().constString(payload.data.ptr, @intCast(c_uint, payload.data.len), llvm.Bool.fromBool(!zero_sentinel));
} else {
return self.todo("handle more array values", .{});
}
},
.Optional => {
if (!tv.ty.isPtrLikeOptional()) {
var buf: Type.Payload.ElemType = undefined;
const child_type = tv.ty.optionalChild(&buf);
const llvm_child_type = try self.getLLVMType(child_type);
if (tv.val.tag() == .null_value) {
var optional_values: [2]*const llvm.Value = .{
llvm_child_type.constNull(),
self.context().intType(1).constNull(),
};
return self.context().constStruct(&optional_values, 2, .False);
} else {
var optional_values: [2]*const llvm.Value = .{
try self.genTypedValue(.{ .ty = child_type, .val = tv.val }, fg),
self.context().intType(1).constAllOnes(),
};
return self.context().constStruct(&optional_values, 2, .False);
}
} else {
return self.todo("implement const of optional pointer", .{});
}
},
else => return self.todo("implement const of type '{}'", .{tv.ty}),
}
}
// Helper functions
fn addAttr(self: *DeclGen, val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len);
assert(kind_id != 0);
const llvm_attr = self.context().createEnumAttribute(kind_id, 0);
val.addAttributeAtIndex(index, llvm_attr);
}
fn addFnAttr(self: *DeclGen, val: *const llvm.Value, attr_name: []const u8) void {
// TODO: improve this API, `addAttr(-1, attr_name)`
self.addAttr(val, std.math.maxInt(llvm.AttributeIndex), attr_name);
}
};
pub const FuncGen = struct {
dg: *DeclGen,
builder: *const llvm.Builder,
/// This stores the LLVM values used in a function, such that they can be
/// referred to in other instructions. This table is cleared before every function is generated.
/// TODO: Change this to a stack of Branch. Currently we store all the values from all the blocks
/// in here, however if a block ends, the instructions can be thrown away.
func_inst_table: std.AutoHashMapUnmanaged(*Inst, *const llvm.Value),
/// These fields are used to refer to the LLVM value of the function parameters in an Arg instruction.
args: []*const llvm.Value,
arg_index: usize,
entry_block: *const llvm.BasicBlock,
/// This field stores the last alloca instruction, so that we can append more alloca instructions
/// to the top of the function.
latest_alloca_inst: ?*const llvm.Value,
llvm_func: *const llvm.Value,
/// This data structure is used to implement breaking to blocks.
blocks: std.AutoHashMapUnmanaged(*Inst.Block, struct {
parent_bb: *const llvm.BasicBlock,
break_bbs: *BreakBasicBlocks,
break_vals: *BreakValues,
}),
const BreakBasicBlocks = std.ArrayListUnmanaged(*const llvm.BasicBlock);
const BreakValues = std.ArrayListUnmanaged(*const llvm.Value);
fn deinit(self: *FuncGen) void {
self.builder.dispose();
self.func_inst_table.deinit(self.gpa());
self.gpa().free(self.args);
self.blocks.deinit(self.gpa());
}
fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@setCold(true);
return self.dg.todo(format, args);
}
fn llvmModule(self: *FuncGen) *const llvm.Module {
return self.dg.object.llvm_module;
}
fn context(self: *FuncGen) *const llvm.Context {
return self.dg.object.context;
}
fn gpa(self: *FuncGen) *Allocator {
return self.dg.gpa;
}
fn resolveInst(self: *FuncGen, inst: *ir.Inst) !*const llvm.Value {
if (inst.value()) |val| {
return self.dg.genTypedValue(.{ .ty = inst.ty, .val = val }, self);
}
if (self.func_inst_table.get(inst)) |value| return value;
return self.todo("implement global llvm values (or the value is not in the func_inst_table table)", .{});
}
fn genBody(self: *FuncGen, body: ir.Body) error{ OutOfMemory, CodegenFail }!void {
for (body.instructions) |inst| {
const opt_value = switch (inst.tag) {
.add => try self.genAdd(inst.castTag(.add).?),
.alloc => try self.genAlloc(inst.castTag(.alloc).?),
.arg => try self.genArg(inst.castTag(.arg).?),
.bitcast => try self.genBitCast(inst.castTag(.bitcast).?),
.block => try self.genBlock(inst.castTag(.block).?),
.br => try self.genBr(inst.castTag(.br).?),
.breakpoint => try self.genBreakpoint(inst.castTag(.breakpoint).?),
.br_void => try self.genBrVoid(inst.castTag(.br_void).?),
.call => try self.genCall(inst.castTag(.call).?),
.cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?, .eq),
.cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?, .gt),
.cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?, .gte),
.cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?, .lt),
.cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?, .lte),
.cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?, .neq),
.condbr => try self.genCondBr(inst.castTag(.condbr).?),
.intcast => try self.genIntCast(inst.castTag(.intcast).?),
.is_non_null => try self.genIsNonNull(inst.castTag(.is_non_null).?, false),
.is_non_null_ptr => try self.genIsNonNull(inst.castTag(.is_non_null_ptr).?, true),
.is_null => try self.genIsNull(inst.castTag(.is_null).?, false),
.is_null_ptr => try self.genIsNull(inst.castTag(.is_null_ptr).?, true),
.load => try self.genLoad(inst.castTag(.load).?),
.loop => try self.genLoop(inst.castTag(.loop).?),
.not => try self.genNot(inst.castTag(.not).?),
.ret => try self.genRet(inst.castTag(.ret).?),
.retvoid => self.genRetVoid(inst.castTag(.retvoid).?),
.store => try self.genStore(inst.castTag(.store).?),
.sub => try self.genSub(inst.castTag(.sub).?),
.unreach => self.genUnreach(inst.castTag(.unreach).?),
.optional_payload => try self.genOptionalPayload(inst.castTag(.optional_payload).?, false),
.optional_payload_ptr => try self.genOptionalPayload(inst.castTag(.optional_payload_ptr).?, true),
.dbg_stmt => blk: {
// TODO: implement debug info
break :blk null;
},
else => |tag| return self.todo("implement TZIR instruction: {}", .{tag}),
};
if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa(), inst, val);
}
}
fn genCall(self: *FuncGen, inst: *Inst.Call) !?*const llvm.Value {
if (inst.func.value()) |func_value| {
const fn_decl = if (func_value.castTag(.extern_fn)) |extern_fn|
extern_fn.data
else if (func_value.castTag(.function)) |func_payload|
func_payload.data.owner_decl
else
unreachable;
assert(fn_decl.has_tv);
const zig_fn_type = fn_decl.ty;
const llvm_fn = try self.dg.resolveLLVMFunction(fn_decl);
const num_args = inst.args.len;
const llvm_param_vals = try self.gpa().alloc(*const llvm.Value, num_args);
defer self.gpa().free(llvm_param_vals);
for (inst.args) |arg, i| {
llvm_param_vals[i] = try self.resolveInst(arg);
}
// TODO: LLVMBuildCall2 handles opaque function pointers, according to llvm docs
// Do we need that?
const call = self.builder.buildCall(
llvm_fn,
if (num_args == 0) null else llvm_param_vals.ptr,
@intCast(c_uint, num_args),
"",
);
const return_type = zig_fn_type.fnReturnType();
if (return_type.tag() == .noreturn) {
_ = self.builder.buildUnreachable();
}
// No need to store the LLVM value if the return type is void or noreturn
if (!return_type.hasCodeGenBits()) return null;
return call;
} else {
return self.todo("implement calling runtime known function pointer", .{});
}
}
fn genRetVoid(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
_ = inst;
_ = self.builder.buildRetVoid();
return null;
}
fn genRet(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
if (!inst.operand.ty.hasCodeGenBits()) {
// TODO: in astgen these instructions should turn into `retvoid` instructions.
_ = self.builder.buildRetVoid();
return null;
}
_ = self.builder.buildRet(try self.resolveInst(inst.operand));
return null;
}
fn genCmp(self: *FuncGen, inst: *Inst.BinOp, op: math.CompareOperator) !?*const llvm.Value {
const lhs = try self.resolveInst(inst.lhs);
const rhs = try self.resolveInst(inst.rhs);
if (!inst.base.ty.isInt())
if (inst.base.ty.tag() != .bool)
return self.todo("implement 'genCmp' for type {}", .{inst.base.ty});
const is_signed = inst.base.ty.isSignedInt();
const operation = switch (op) {
.eq => .EQ,
.neq => .NE,
.lt => @as(llvm.IntPredicate, if (is_signed) .SLT else .ULT),
.lte => @as(llvm.IntPredicate, if (is_signed) .SLE else .ULE),
.gt => @as(llvm.IntPredicate, if (is_signed) .SGT else .UGT),
.gte => @as(llvm.IntPredicate, if (is_signed) .SGE else .UGE),
};
return self.builder.buildICmp(operation, lhs, rhs, "");
}
fn genBlock(self: *FuncGen, inst: *Inst.Block) !?*const llvm.Value {
const parent_bb = self.context().createBasicBlock("Block");
// 5 breaks to a block seems like a reasonable default.
var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa(), 5);
var break_vals = try BreakValues.initCapacity(self.gpa(), 5);
try self.blocks.putNoClobber(self.gpa(), inst, .{
.parent_bb = parent_bb,
.break_bbs = &break_bbs,
.break_vals = &break_vals,
});
defer {
assert(self.blocks.remove(inst));
break_bbs.deinit(self.gpa());
break_vals.deinit(self.gpa());
}
try self.genBody(inst.body);
self.llvm_func.appendExistingBasicBlock(parent_bb);
self.builder.positionBuilderAtEnd(parent_bb);
// If the block does not return a value, we don't have to create a phi node.
if (!inst.base.ty.hasCodeGenBits()) return null;
const phi_node = self.builder.buildPhi(try self.dg.getLLVMType(inst.base.ty), "");
phi_node.addIncoming(
break_vals.items.ptr,
break_bbs.items.ptr,
@intCast(c_uint, break_vals.items.len),
);
return phi_node;
}
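// Rough shape of the IR that genBlock and genBr cooperate to produce for a
// value-producing block (illustrative label/value names only):
//
//   SomeBodyBB:
//     br label %Block          ; emitted by genBr, which also records the
//                              ; (insert block, value) pair for the phi
//   Block:
//     %result = phi i32 [ %v1, %SomeBodyBB ], [ %v2, %OtherBodyBB ]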
fn genBr(self: *FuncGen, inst: *Inst.Br) !?*const llvm.Value {
var block = self.blocks.get(inst.block).?;
// If the break doesn't break a value, then we don't have to add
// the values to the lists.
if (!inst.operand.ty.hasCodeGenBits()) {
// TODO: in astgen these instructions should turn into `br_void` instructions.
_ = self.builder.buildBr(block.parent_bb);
} else {
const val = try self.resolveInst(inst.operand);
// For the phi node, we need the basic blocks and the values of the
// break instructions.
try block.break_bbs.append(self.gpa(), self.builder.getInsertBlock());
try block.break_vals.append(self.gpa(), val);
_ = self.builder.buildBr(block.parent_bb);
}
return null;
}
fn genBrVoid(self: *FuncGen, inst: *Inst.BrVoid) !?*const llvm.Value {
var block = self.blocks.get(inst.block).?;
_ = self.builder.buildBr(block.parent_bb);
return null;
}
fn genCondBr(self: *FuncGen, inst: *Inst.CondBr) !?*const llvm.Value {
const condition_value = try self.resolveInst(inst.condition);
const then_block = self.context().appendBasicBlock(self.llvm_func, "Then");
const else_block = self.context().appendBasicBlock(self.llvm_func, "Else");
{
const prev_block = self.builder.getInsertBlock();
defer self.builder.positionBuilderAtEnd(prev_block);
self.builder.positionBuilderAtEnd(then_block);
try self.genBody(inst.then_body);
self.builder.positionBuilderAtEnd(else_block);
try self.genBody(inst.else_body);
}
_ = self.builder.buildCondBr(condition_value, then_block, else_block);
return null;
}
fn genLoop(self: *FuncGen, inst: *Inst.Loop) !?*const llvm.Value {
const loop_block = self.context().appendBasicBlock(self.llvm_func, "Loop");
_ = self.builder.buildBr(loop_block);
self.builder.positionBuilderAtEnd(loop_block);
try self.genBody(inst.body);
_ = self.builder.buildBr(loop_block);
return null;
}
fn genNot(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
return self.builder.buildNot(try self.resolveInst(inst.operand), "");
}
fn genUnreach(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
_ = inst;
_ = self.builder.buildUnreachable();
return null;
}
fn genIsNonNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
const operand = try self.resolveInst(inst.operand);
if (operand_is_ptr) {
const index_type = self.context().intType(32);
var indices: [2]*const llvm.Value = .{
index_type.constNull(),
index_type.constInt(1, .False),
};
return self.builder.buildLoad(self.builder.buildInBoundsGEP(operand, &indices, 2, ""), "");
} else {
return self.builder.buildExtractValue(operand, 1, "");
}
}
fn genIsNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
return self.builder.buildNot((try self.genIsNonNull(inst, operand_is_ptr)).?, "");
}
fn genOptionalPayload(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value {
const operand = try self.resolveInst(inst.operand);
if (operand_is_ptr) {
const index_type = self.context().intType(32);
var indices: [2]*const llvm.Value = .{
index_type.constNull(),
index_type.constNull(),
};
return self.builder.buildInBoundsGEP(operand, &indices, 2, "");
} else {
return self.builder.buildExtractValue(operand, 0, "");
}
}
fn genAdd(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
const lhs = try self.resolveInst(inst.lhs);
const rhs = try self.resolveInst(inst.rhs);
if (!inst.base.ty.isInt())
return self.todo("implement 'genAdd' for type {}", .{inst.base.ty});
return if (inst.base.ty.isSignedInt())
self.builder.buildNSWAdd(lhs, rhs, "")
else
self.builder.buildNUWAdd(lhs, rhs, "");
}
fn genSub(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
const lhs = try self.resolveInst(inst.lhs);
const rhs = try self.resolveInst(inst.rhs);
if (!inst.base.ty.isInt())
return self.todo("implement 'genSub' for type {}", .{inst.base.ty});
return if (inst.base.ty.isSignedInt())
self.builder.buildNSWSub(lhs, rhs, "")
else
self.builder.buildNUWSub(lhs, rhs, "");
}
fn genIntCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
const val = try self.resolveInst(inst.operand);
const signed = inst.base.ty.isSignedInt();
// TODO: Should we use intcast here or just a simple bitcast?
// LLVM does truncation vs bitcast (+signed extension) in the intcast depending on the sizes
return self.builder.buildIntCast2(val, try self.dg.getLLVMType(inst.base.ty), llvm.Bool.fromBool(signed), "");
}
fn genBitCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
const val = try self.resolveInst(inst.operand);
const dest_type = try self.dg.getLLVMType(inst.base.ty);
return self.builder.buildBitCast(val, dest_type, "");
}
fn genArg(self: *FuncGen, inst: *Inst.Arg) !?*const llvm.Value {
const arg_val = self.args[self.arg_index];
self.arg_index += 1;
const ptr_val = self.buildAlloca(try self.dg.getLLVMType(inst.base.ty));
_ = self.builder.buildStore(arg_val, ptr_val);
return self.builder.buildLoad(ptr_val, "");
}
fn genAlloc(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value {
// buildAlloca expects the pointee type, not the pointer type, so assert that
// a Payload.PointerSimple is passed to the alloc instruction.
const pointee_type = inst.base.ty.castPointer().?.data;
// TODO: figure out a way to get the name of the var decl.
// TODO: set alignment and volatile
return self.buildAlloca(try self.dg.getLLVMType(pointee_type));
}
/// Use this instead of builder.buildAlloca, because this function makes sure to
/// put the alloca instruction at the top of the function! Keeping allocas in the
/// entry block is what lets LLVM's mem2reg pass promote them to SSA registers.
fn buildAlloca(self: *FuncGen, t: *const llvm.Type) *const llvm.Value {
const prev_block = self.builder.getInsertBlock();
defer self.builder.positionBuilderAtEnd(prev_block);
if (self.latest_alloca_inst) |latest_alloc| {
// builder.positionBuilder adds it before the instruction,
// but we want to put it after the last alloca instruction.
self.builder.positionBuilder(self.entry_block, latest_alloc.getNextInstruction().?);
} else {
// There might have been other instructions emitted before the
// first alloca has been generated. However the alloca should still
// be first in the function.
if (self.entry_block.getFirstInstruction()) |first_inst| {
self.builder.positionBuilder(self.entry_block, first_inst);
}
}
const val = self.builder.buildAlloca(t, "");
self.latest_alloca_inst = val;
return val;
}
fn genStore(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value {
const val = try self.resolveInst(inst.rhs);
const ptr = try self.resolveInst(inst.lhs);
_ = self.builder.buildStore(val, ptr);
return null;
}
fn genLoad(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value {
const ptr_val = try self.resolveInst(inst.operand);
return self.builder.buildLoad(ptr_val, "");
}
fn genBreakpoint(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value {
_ = inst;
const llvm_fn = self.getIntrinsic("llvm.debugtrap");
_ = self.builder.buildCall(llvm_fn, null, 0, "");
return null;
}
fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value {
const id = llvm.lookupIntrinsicID(name.ptr, name.len);
assert(id != 0);
// TODO: add support for overloaded intrinsics by passing the prefix of the intrinsic
// to `lookupIntrinsicID` and then passing the correct types to
// `getIntrinsicDeclaration`
return self.llvmModule().getIntrinsicDeclaration(id, null, 0);
}
}; | src/codegen/llvm.zig |
const std = @import("std");
const wasm = @import("wasm.zig");
const Allocator = std.mem.Allocator;
const leb = std.leb;
const meta = std.meta;
pub const Result = struct {
module: wasm.Module,
arena: std.heap.ArenaAllocator.State,
/// Frees all memory that was allocated when parsing.
/// Usage of `module` or `Result` itself is therefore illegal.
pub fn deinit(self: *Result, gpa: *Allocator) void {
self.arena.promote(gpa).deinit();
self.* = undefined;
}
};
/// Parses a wasm stream into a `Result` containing both the `wasm.Module` as well
/// as an arena state that contains all allocated memory for easy cleanup.
pub fn parse(gpa: *Allocator, reader: anytype) Parser(@TypeOf(reader)).Error!Result {
var parser = Parser(@TypeOf(reader)).init(reader);
return parser.parseWasm(gpa);
}
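// A minimal usage sketch. Hedged: the file name is illustrative, and the
// `&gpa.allocator` pattern assumes the same std version this file targets.
//
//   var gpa = std.heap.GeneralPurposeAllocator(.{}){};
//   defer _ = gpa.deinit();
//   const file = try std.fs.cwd().openFile("module.wasm", .{});
//   defer file.close();
//   var result = try parse(&gpa.allocator, file.reader());
//   defer result.deinit(&gpa.allocator);
//   std.log.info("parsed wasm version {d}", .{result.module.version});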
/// Error set containing parsing errors.
/// Merged with the reader's error set by `Parser`.
pub const ParseError = error{
/// The magic bytes are either missing or do not contain '\0asm'.
InvalidMagicByte,
/// The wasm version is either missing or does not match the supported version.
InvalidWasmVersion,
/// Expected the functype byte while parsing the Type section but did not find it.
ExpectedFuncType,
/// Missing an 'end' opcode when defining a constant expression.
MissingEndForExpression,
/// Missing an 'end' opcode at the end of a body expression.
MissingEndForBody,
/// The size defined in the section code mismatches with the actual payload size.
MalformedSection,
/// Stream has reached the end. Unreachable for caller and must be handled internally
/// by the parser.
EndOfStream,
/// Ran out of memory when allocating.
OutOfMemory,
};
const LebError = error{Overflow};
fn Parser(comptime ReaderType: type) type {
return struct {
const Self = @This();
const Error = ReaderType.Error || ParseError || LebError;
reader: std.io.CountingReader(ReaderType),
fn init(reader: ReaderType) Self {
return .{ .reader = std.io.countingReader(reader) };
}
fn parseWasm(self: *Self, gpa: *Allocator) Error!Result {
var arena = std.heap.ArenaAllocator.init(gpa);
errdefer arena.deinit();
return Result{
.module = try self.parseModule(&arena.allocator),
.arena = arena.state,
};
}
/// Verifies that the first 4 bytes contain '\0asm'
fn verifyMagicBytes(self: *Self) Error!void {
var magic_bytes: [4]u8 = undefined;
try self.reader.reader().readNoEof(&magic_bytes);
if (!std.mem.eql(u8, &magic_bytes, &std.wasm.magic)) return error.InvalidMagicByte;
}
fn parseModule(self: *Self, gpa: *Allocator) Error!wasm.Module {
try self.verifyMagicBytes();
const version = try self.reader.reader().readIntLittle(u32);
var module: wasm.Module = .{ .version = version };
// custom sections do not provide a count, as each custom section is its
// very own section that simply shares the same section ID. For this reason
// we use an ArrayList so we can append them individually.
var custom_sections = std.ArrayList(wasm.sections.Custom).init(gpa);
while (self.reader.reader().readByte()) |byte| {
const len = try readLeb(u32, self.reader.reader());
var reader = std.io.limitedReader(self.reader.reader(), len).reader();
switch (@intToEnum(wasm.Section, byte)) {
.custom => {
const start = self.reader.bytes_read;
const custom = try custom_sections.addOne();
const name_len = try readLeb(u32, reader);
const name = try gpa.alloc(u8, name_len);
try reader.readNoEof(name);
const data = try gpa.alloc(u8, reader.context.bytes_left);
try reader.readNoEof(data);
custom.* = .{ .name = name, .data = data, .start = start, .end = self.reader.bytes_read };
},
.type => {
module.types.start = self.reader.bytes_read;
for (try readVec(&module.types.data, reader, gpa)) |*type_val| {
if ((try reader.readByte()) != std.wasm.function_type) return error.ExpectedFuncType;
for (try readVec(&type_val.params, reader, gpa)) |*param| {
param.* = try readEnum(wasm.ValueType, reader);
}
for (try readVec(&type_val.returns, reader, gpa)) |*result| {
result.* = try readEnum(wasm.ValueType, reader);
}
}
module.types.end = self.reader.bytes_read;
try assertEnd(reader);
},
.import => {
module.imports.start = self.reader.bytes_read;
for (try readVec(&module.imports.data, reader, gpa)) |*import| {
const module_len = try readLeb(u32, reader);
const module_name = try gpa.alloc(u8, module_len);
import.module = module_name;
try reader.readNoEof(module_name);
const name_len = try readLeb(u32, reader);
const name = try gpa.alloc(u8, name_len);
import.name = name;
try reader.readNoEof(name);
const kind = try readEnum(wasm.ExternalType, reader);
import.kind = switch (kind) {
.function => .{ .function = try readEnum(wasm.indexes.Type, reader) },
.memory => .{ .memory = try readLimits(reader) },
.global => .{ .global = .{
.valtype = try readEnum(wasm.ValueType, reader),
.mutable = (try reader.readByte()) == 0x01,
} },
.table => .{ .table = .{
.reftype = try readEnum(wasm.RefType, reader),
.limits = try readLimits(reader),
} },
};
}
module.imports.end = self.reader.bytes_read;
try assertEnd(reader);
},
.function => {
module.functions.start = self.reader.bytes_read;
for (try readVec(&module.functions.data, reader, gpa)) |*func| {
func.type_idx = try readEnum(wasm.indexes.Type, reader);
}
module.functions.end = self.reader.bytes_read;
try assertEnd(reader);
},
.table => {
module.tables.start = self.reader.bytes_read;
for (try readVec(&module.tables.data, reader, gpa)) |*table| {
table.* = .{
.reftype = try readEnum(wasm.RefType, reader),
.limits = try readLimits(reader),
};
}
module.tables.end = self.reader.bytes_read;
try assertEnd(reader);
},
.memory => {
module.memories.start = self.reader.bytes_read;
for (try readVec(&module.memories.data, reader, gpa)) |*memory| {
memory.* = .{ .limits = try readLimits(reader) };
}
module.memories.end = self.reader.bytes_read;
try assertEnd(reader);
},
.global => {
module.globals.start = self.reader.bytes_read;
for (try readVec(&module.globals.data, reader, gpa)) |*global| {
global.* = .{
.valtype = try readEnum(wasm.ValueType, reader),
.mutable = (try reader.readByte()) == 0x01,
.init = try readInit(reader),
};
}
module.globals.end = self.reader.bytes_read;
try assertEnd(reader);
},
.@"export" => {
module.exports.start = self.reader.bytes_read;
for (try readVec(&module.exports.data, reader, gpa)) |*exp| {
const name_len = try readLeb(u32, reader);
const name = try gpa.alloc(u8, name_len);
try reader.readNoEof(name);
exp.* = .{
.name = name,
.kind = try readEnum(wasm.ExternalType, reader),
.index = try readLeb(u32, reader),
};
}
module.exports.end = self.reader.bytes_read;
try assertEnd(reader);
},
.start => {
module.start = try readEnum(wasm.indexes.Func, reader);
try assertEnd(reader);
},
.element => {
module.elements.start = self.reader.bytes_read;
for (try readVec(&module.elements.data, reader, gpa)) |*elem| {
elem.table_idx = try readEnum(wasm.indexes.Table, reader);
elem.offset = try readInit(reader);
for (try readVec(&elem.func_idxs, reader, gpa)) |*idx| {
idx.* = try readEnum(wasm.indexes.Func, reader);
}
}
module.elements.end = self.reader.bytes_read;
try assertEnd(reader);
},
.code => {
module.code.start = self.reader.bytes_read;
for (try readVec(&module.code.data, reader, gpa)) |*code| {
const body_len = try readLeb(u32, reader);
var code_reader = std.io.limitedReader(reader, body_len).reader();
// first parse the local declarations
{
const locals_len = try readLeb(u32, code_reader);
const locals = try gpa.alloc(wasm.sections.Code.Local, locals_len);
for (locals) |*local| {
local.* = .{
.count = try readLeb(u32, code_reader),
.valtype = try readEnum(wasm.ValueType, code_reader),
};
}
code.locals = locals;
}
{
var instructions = std.ArrayList(wasm.Instruction).init(gpa);
defer instructions.deinit();
while (readEnum(std.wasm.Opcode, code_reader)) |opcode| {
const instr = try buildInstruction(opcode, gpa, code_reader);
try instructions.append(instr);
} else |err| switch (err) {
error.EndOfStream => {
const maybe_end = instructions.popOrNull() orelse return error.MissingEndForBody;
if (maybe_end.opcode != .end) return error.MissingEndForBody;
},
else => |e| return e,
}
code.body = instructions.toOwnedSlice();
}
try assertEnd(code_reader);
}
module.code.end = self.reader.bytes_read;
try assertEnd(reader);
},
.data => {
module.data.start = self.reader.bytes_read;
for (try readVec(&module.data.data, reader, gpa)) |*data| {
data.index = try readEnum(wasm.indexes.Mem, reader);
data.offset = try readInit(reader);
const init_len = try readLeb(u32, reader);
const init_data = try gpa.alloc(u8, init_len);
data.data = init_data;
try reader.readNoEof(init_data);
}
module.data.end = self.reader.bytes_read;
try assertEnd(reader);
},
.module => @panic("TODO: Implement 'module' section"),
.instance => @panic("TODO: Implement 'instance' section"),
.alias => @panic("TODO: Implement 'alias' section"),
_ => |id| std.log.scoped(.wasmparser).debug("Found unimplemented section with id '{d}'", .{id}),
}
} else |err| switch (err) {
error.EndOfStream => {},
else => |e| return e,
}
module.custom = custom_sections.toOwnedSlice();
return module;
}
};
}
/// First reads the count from the reader and then allocates
/// a slice of the pointer child's element type.
fn readVec(ptr: anytype, reader: anytype, gpa: *Allocator) ![]ElementType(@TypeOf(ptr)) {
const len = try readLeb(u32, reader);
const slice = try gpa.alloc(ElementType(@TypeOf(ptr)), len);
ptr.* = slice;
return slice;
}
fn ElementType(comptime ptr: type) type {
return meta.Elem(meta.Child(ptr));
}
/// Uses either `readILEB128` or `readULEB128` depending on the
/// signedness of the given type `T`.
/// Asserts `T` is an integer.
fn readLeb(comptime T: type, reader: anytype) !T {
if (comptime std.meta.trait.isSignedInt(T)) {
return try leb.readILEB128(T, reader);
} else {
return try leb.readULEB128(T, reader);
}
}
/// Reads an enum type from the given reader.
/// Asserts `T` is an enum
fn readEnum(comptime T: type, reader: anytype) !T {
switch (@typeInfo(T)) {
.Enum => |enum_type| return @intToEnum(T, try readLeb(enum_type.tag_type, reader)),
else => @compileError("T must be an enum. Instead was given type " ++ @typeName(T)),
}
}
fn readLimits(reader: anytype) !wasm.Limits {
const flags = try readLeb(u1, reader);
const min = try readLeb(u32, reader);
return wasm.Limits{
.min = min,
.max = if (flags == 0) null else try readLeb(u32, reader),
};
}
fn readInit(reader: anytype) !wasm.InitExpression {
const opcode = try reader.readByte();
const init: wasm.InitExpression = switch (@intToEnum(std.wasm.Opcode, opcode)) {
.i32_const => .{ .i32_const = try readLeb(i32, reader) },
.global_get => .{ .global_get = try readLeb(u32, reader) },
else => unreachable,
};
if ((try readEnum(std.wasm.Opcode, reader)) != .end) return error.MissingEndForExpression;
return init;
}
fn assertEnd(reader: anytype) !void {
var buf: [1]u8 = undefined;
const len = try reader.read(&buf);
if (len != 0) return error.MalformedSection;
if (reader.context.bytes_left != 0) return error.MalformedSection;
}
fn buildInstruction(opcode: std.wasm.Opcode, gpa: *Allocator, reader: anytype) !wasm.Instruction {
var instr: wasm.Instruction = .{
.opcode = opcode,
.value = undefined,
};
instr.value = switch (opcode) {
.block,
.loop,
.@"if",
=> .{ .blocktype = try readEnum(wasm.BlockType, reader) },
.br,
.br_if,
.call,
// ref.func 'x'
@intToEnum(std.wasm.Opcode, 0xD2),
.local_get,
.local_set,
.local_tee,
.global_get,
.global_set,
wasm.table_get,
wasm.table_set,
.memory_size,
.memory_grow,
=> .{ .u32 = try readLeb(u32, reader) },
.call_indirect,
.i32_load,
.i64_load,
.f32_load,
.f64_load,
.i32_load8_s,
.i32_load8_u,
.i32_load16_s,
.i32_load16_u,
.i64_load8_s,
.i64_load8_u,
.i64_load16_s,
.i64_load16_u,
.i64_load32_s,
.i64_load32_u,
.i32_store,
.i64_store,
.f32_store,
.f64_store,
.i32_store8,
.i32_store16,
.i64_store8,
.i64_store16,
.i64_store32,
=> .{ .multi = .{
.x = try readLeb(u32, reader),
.y = try readLeb(u32, reader),
} },
.br_table => blk: {
const len = try readLeb(u32, reader);
const list = try gpa.alloc(u32, len);
for (list) |*item| {
item.* = try readLeb(u32, reader);
}
break :blk .{ .list = .{ .data = list.ptr, .len = len } };
},
// ref.null 't'
@intToEnum(std.wasm.Opcode, 0xD0) => .{ .reftype = try readEnum(wasm.RefType, reader) },
// select 'vec(t)'
@intToEnum(std.wasm.Opcode, 0x1C) => blk: {
const len = try readLeb(u32, reader);
const list = try gpa.alloc(wasm.ValueType, len);
errdefer gpa.free(list);
for (list) |*item| {
item.* = try readEnum(wasm.ValueType, reader);
}
break :blk .{ .multi_valtype = .{ .data = list.ptr, .len = len } };
},
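// Opcodes that carry a secondary opcode (presumably the 0xFC-prefixed
// "miscellaneous" family: saturating truncations and bulk-memory ops)
// encode that secondary opcode as a trailing LEB128 value.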
wasm.need_secondary => @as(wasm.Instruction.InstrValue, blk: {
const secondary = try readEnum(wasm.SecondaryOpcode, reader);
instr.secondary = secondary;
switch (secondary) {
.i32_trunc_sat_f32_s,
.i32_trunc_sat_f32_u,
.i32_trunc_sat_f64_s,
.i32_trunc_sat_f64_u,
.i64_trunc_sat_f32_s,
.i64_trunc_sat_f32_u,
.i64_trunc_sat_f64_s,
.i64_trunc_sat_f64_u,
=> break :blk .{ .none = {} },
.table_init,
.table_copy,
.memory_init,
.data_drop,
.memory_copy,
=> break :blk .{ .multi = .{
.x = try readLeb(u32, reader),
.y = try readLeb(u32, reader),
} },
else => break :blk .{ .u32 = try readLeb(u32, reader) },
}
}),
.i32_const => .{ .i32 = try readLeb(i32, reader) },
.i64_const => .{ .i64 = try readLeb(i64, reader) },
.f32_const => .{ .f32 = @bitCast(f32, try readLeb(u32, reader)) },
.f64_const => .{ .f64 = @bitCast(f64, try readLeb(u64, reader)) },
else => .{ .none = {} },
};
return instr;
} | src/parser.zig |
const std = @import("std");
const zp = @import("zplay");
const Camera = zp.graphics.@"3d".Camera;
const Light = zp.graphics.@"3d".Light;
const Mesh = zp.graphics.@"3d".Mesh;
const Material = zp.graphics.@"3d".Material;
const Texture2D = zp.graphics.texture.Texture2D;
const SimpleRenderer = zp.graphics.@"3d".SimpleRenderer;
const PhongRenderer = zp.graphics.@"3d".PhongRenderer;
const alg = zp.deps.alg;
const Vec2 = alg.Vec2;
const Vec3 = alg.Vec3;
const Vec4 = alg.Vec4;
const Mat4 = alg.Mat4;
var simple_renderer: SimpleRenderer = undefined;
var phong_renderer: PhongRenderer = undefined;
var cube: Mesh = undefined;
var light_material: Material = undefined;
var phong_material: Material = undefined;
var camera = Camera.fromPositionAndTarget(
Vec3.new(1, 2, 3),
Vec3.zero(),
null,
);
const cube_positions = [_]Vec3{
Vec3.new(0.0, 0.0, 0.0),
Vec3.new(2.0, 5.0, -15.0),
Vec3.new(-1.5, -2.2, -2.5),
Vec3.new(-3.8, -2.0, -12.3),
Vec3.new(2.4, -0.4, -3.5),
Vec3.new(-1.7, 3.0, -7.5),
Vec3.new(1.3, -2.0, -2.5),
Vec3.new(1.5, 2.0, -2.5),
Vec3.new(1.5, 0.2, -1.5),
Vec3.new(-1.3, 1.0, -1.5),
};
fn init(ctx: *zp.Context) anyerror!void {
std.log.info("game init", .{});
// simple renderer
simple_renderer = SimpleRenderer.init();
// phong renderer
phong_renderer = PhongRenderer.init(std.testing.allocator);
phong_renderer.setDirLight(Light.init(.{
.directional = .{
.ambient = Vec3.new(0.1, 0.1, 0.1),
.diffuse = Vec3.new(0.1, 0.1, 0.1),
.specular = Vec3.new(0.1, 0.1, 0.1),
.direction = Vec3.down(),
},
}));
_ = try phong_renderer.addLight(Light.init(.{
.point = .{
.ambient = Vec3.new(0.2, 0.2, 0.2),
.diffuse = Vec3.new(0.5, 0.5, 0.5),
.position = Vec3.new(1.2, 1, -2),
.linear = 0.09,
.quadratic = 0.032,
},
}));
_ = try phong_renderer.addLight(Light.init(.{
.spot = .{
.ambient = Vec3.new(0.2, 0.2, 0.2),
.diffuse = Vec3.new(0.8, 0.1, 0.1),
.position = Vec3.new(1.2, 1, 2),
.direction = Vec3.new(1.2, 1, 2).negate(),
.linear = 0.09,
.quadratic = 0.032,
.cutoff = 12.5,
.outer_cutoff = 14.5,
},
}));
// generate a cube
cube = try Mesh.genCube(std.testing.allocator, 1, 1, 1);
// material init
var diffuse_texture = try Texture2D.fromFilePath(
std.testing.allocator,
"assets/container2.png",
false,
.{},
);
var specular_texture = try Texture2D.fromFilePath(
std.testing.allocator,
"assets/container2_specular.png",
false,
.{},
);
light_material = Material.init(.{
.single_texture = try Texture2D.fromPixelData(
std.testing.allocator,
&.{ 255, 255, 255, 255 },
1,
1,
.{},
),
});
phong_material = Material.init(.{
.phong = .{
.diffuse_map = diffuse_texture,
.specular_map = specular_texture,
.shiness = 32,
},
});
var unit = phong_material.allocTextureUnit(0);
_ = light_material.allocTextureUnit(unit);
// enable depth test
ctx.graphics.toggleCapability(.depth_test, true);
}
fn loop(ctx: *zp.Context) void {
// camera movement
const distance = ctx.delta_tick * camera.move_speed;
if (ctx.isKeyPressed(.w)) {
camera.move(.forward, distance);
}
if (ctx.isKeyPressed(.s)) {
camera.move(.backward, distance);
}
if (ctx.isKeyPressed(.a)) {
camera.move(.left, distance);
}
if (ctx.isKeyPressed(.d)) {
camera.move(.right, distance);
}
while (ctx.pollEvent()) |e| {
switch (e) {
.window_event => |we| {
switch (we.data) {
.resized => |size| {
ctx.graphics.setViewport(0, 0, size.width, size.height);
},
else => {},
}
},
.keyboard_event => |key| {
if (key.trigger_type == .up) {
switch (key.scan_code) {
.escape => ctx.kill(),
.f1 => ctx.toggleFullscreeen(null),
else => {},
}
}
},
.mouse_event => |me| {
switch (me.data) {
.motion => |motion| {
// camera rotation
camera.rotate(
camera.mouse_sensitivity * @intToFloat(f32, -motion.yrel),
camera.mouse_sensitivity * @intToFloat(f32, motion.xrel),
);
},
else => {},
}
},
.quit_event => ctx.kill(),
else => {},
}
}
var width: u32 = undefined;
var height: u32 = undefined;
ctx.getWindowSize(&width, &height);
// clear frame
ctx.graphics.clear(true, true, false, [_]f32{ 0.2, 0.2, 0.2, 1.0 });
// lighting scene
const projection = Mat4.perspective(
camera.zoom,
@intToFloat(f32, width) / @intToFloat(f32, height),
0.1,
100,
);
var renderer = phong_renderer.renderer();
renderer.begin();
for (cube_positions) |pos, i| {
const model = Mat4.fromRotation(
20 * @intToFloat(f32, i),
Vec3.new(1, 0.3, 0.5),
).translate(pos);
renderer.renderMesh(
cube,
model,
projection,
camera,
phong_material,
null,
) catch unreachable;
}
renderer.end();
// draw lights
renderer = simple_renderer.renderer();
renderer.begin();
for (phong_renderer.point_lights.items) |light| {
const model = Mat4.fromScale(Vec3.set(0.1)).translate(light.getPosition().?);
renderer.renderMesh(
cube,
model,
projection,
camera,
light_material,
null,
) catch unreachable;
}
for (phong_renderer.spot_lights.items) |light| {
const model = Mat4.fromScale(Vec3.set(0.1)).translate(light.getPosition().?);
renderer.renderMesh(
cube,
model,
projection,
camera,
light_material,
null,
) catch unreachable;
}
renderer.end();
}
fn quit(ctx: *zp.Context) void {
_ = ctx;
std.log.info("game quit", .{});
}
pub fn main() anyerror!void {
try zp.run(.{
.initFn = init,
.loopFn = loop,
.quitFn = quit,
.enable_relative_mouse_mode = true,
});
} | examples/phong_lighting.zig |
const std = @import("std");
const sqlite = @import("sqlite");
const manage_main = @import("main.zig");
const Context = manage_main.Context;
const log = std.log.scoped(.awtfdb_watcher);
const VERSION = "0.0.1";
const HELPTEXT =
\\ awtfdb-watcher: watch the entire operating system for renames,
\\ updating the database accordingly.
\\
\\ currently only supports linux with bpftrace installed.
\\
\\ MUST be run as root.
\\
\\ usage:
\\ awtfdb-watcher [options...] path_to_home_directory
\\
\\ options:
\\ -h prints this help and exits
\\ -V prints version and exits
;
const PidTid = struct { pid: std.os.pid_t, tid: std.os.pid_t };
const StringAsList = std.ArrayList(u8);
const ChunkedName = struct { state: enum { NeedMore, Complete }, data: StringAsList };
const ChunkedNameMap = std.AutoHashMap(PidTid, ChunkedName);
const NameMap = std.AutoHashMap(PidTid, []const u8);
const RenameContext = struct {
allocator: std.mem.Allocator,
oldnames: *ChunkedNameMap,
newnames: *ChunkedNameMap,
cwds: *NameMap,
ctx: *Context,
keep_running: bool = true,
const Self = @This();
pub fn deinit(self: *Self) void {
self.cwds.deinit();
self.oldnames.deinit();
self.newnames.deinit();
}
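// processLine consumes the colon-separated "v1" messages produced by the
// bpftrace script; illustrative examples (pid/tid values made up):
//   v1:execve:1234:1234
//   v1:oldname:1234:1234:/home/luna/old_name
//   v1:newname:1234:1234:/home/luna/new_name
//   v1:exit_rename:1234:1234:0
// Paths close to BPFTRACE_STRLEN (200 bytes) may arrive split across several
// oldname/newname messages and are reassembled via the chunk maps above.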
pub fn processLine(self: *Self, line: []const u8) !void {
var line_it = std.mem.split(u8, line, ":");
const version_string = line_it.next().?;
const is_v1_message = std.mem.eql(u8, version_string, "v1");
if (!is_v1_message) return;
const message_type = line_it.next().?;
const pid_string = line_it.next().?;
const tid_string = line_it.next().?;
const pid = try std.fmt.parseInt(std.os.pid_t, pid_string, 10);
const tid = try std.fmt.parseInt(std.os.pid_t, tid_string, 10);
const pid_tid_key = PidTid{ .pid = pid, .tid = tid };
const is_oldname_message = std.mem.eql(u8, message_type, "oldname");
const is_newname_message = std.mem.eql(u8, message_type, "newname");
if (std.mem.eql(u8, message_type, "execve")) {
var cwd_proc_path = try std.fmt.allocPrint(self.allocator, "/proc/{d}/cwd", .{pid});
defer self.allocator.free(cwd_proc_path);
var cwd_path = std.fs.realpathAlloc(self.allocator, cwd_proc_path) catch |err| switch (err) {
error.AccessDenied, error.FileNotFound => {
log.debug("can't access cwd for {d}, ignoring rename", .{pid});
return;
},
else => return err,
};
try self.cwds.put(pid_tid_key, cwd_path);
} else if (std.mem.eql(u8, message_type, "exit_execve")) {
const return_value_as_string = line_it.next().?;
// if unsuccessful execve, remove cwd
if (!std.mem.eql(u8, return_value_as_string, "0")) {
const cwd_path = self.cwds.get(pid_tid_key);
defer if (cwd_path) |unpacked| self.allocator.free(unpacked);
_ = self.cwds.remove(pid_tid_key);
}
} else if (std.mem.eql(u8, message_type, "exit_process")) {
const cwd_path = self.cwds.get(pid_tid_key);
defer if (cwd_path) |unpacked| self.allocator.free(unpacked);
_ = self.cwds.remove(pid_tid_key);
} else if (is_oldname_message or is_newname_message) {
// i do this to account for paths that have the ':' character
// in them. do not use line_it after this, or use it very cautiously
const chunk_data = line[(version_string.len + 1 + message_type.len + 1 + pid_string.len + 1 + tid_string.len + 1)..line.len];
var map_to_put_in: *ChunkedNameMap =
if (is_oldname_message) self.oldnames else self.newnames;
var maybe_chunk = map_to_put_in.getPtr(pid_tid_key);
if (maybe_chunk) |chunk| {
switch (chunk.state) {
.NeedMore => {
const written_bytes = try chunk.data.writer().write(chunk_data);
std.debug.assert(written_bytes == chunk_data.len);
if (chunk_data.len < 200) {
chunk.state = .Complete;
}
},
.Complete => {},
}
} else {
var chunk = ChunkedName{
.state = .NeedMore,
.data = StringAsList.init(self.allocator),
};
const written_bytes = try chunk.data.writer().write(chunk_data);
std.debug.assert(written_bytes == chunk_data.len);
if (chunk_data.len < 200) {
chunk.state = .Complete;
}
try map_to_put_in.put(
pid_tid_key,
chunk,
);
}
std.debug.assert(map_to_put_in.count() > 0);
} else if (std.mem.eql(u8, message_type, "exit_rename")) {
// got a return from one of the rename syscalls,
// we must (try to) resolve it
const return_value_as_string = line_it.next().?;
const old_name = self.oldnames.get(pid_tid_key);
const new_name = self.newnames.get(pid_tid_key);
const maybe_cwd = self.cwds.get(pid_tid_key);
std.debug.assert(self.oldnames.count() > 0);
std.debug.assert(self.newnames.count() > 0);
if (std.mem.eql(u8, return_value_as_string, "0")) {
defer _ = self.oldnames.remove(pid_tid_key);
defer _ = self.newnames.remove(pid_tid_key);
defer _ = self.cwds.remove(pid_tid_key);
defer old_name.?.data.deinit();
defer new_name.?.data.deinit();
defer if (maybe_cwd) |cwd| self.allocator.free(cwd);
try self.handleSucessfulRename(pid_tid_key, old_name.?.data.items, new_name.?.data.items, maybe_cwd);
}
}
}
fn handleSucessfulRename(
self: *Self,
pidtid_pair: PidTid,
relative_old_name: []const u8,
relative_new_name: []const u8,
maybe_cwd: ?[]const u8,
) !void {
const pid = pidtid_pair.pid;
const is_oldname_absolute = std.fs.path.isAbsolute(relative_old_name);
const is_newname_absolute = std.fs.path.isAbsolute(relative_new_name);
var cwd_path: ?[]const u8 = null;
if (maybe_cwd) |unpacked| {
cwd_path = unpacked;
// if any of them is relative, construct cwd_path and use it later
} else if (!(is_oldname_absolute and is_newname_absolute)) {
// if we don't have it already, try to fetch it from procfs
// as this might be a process we didn't know about before
var cwd_proc_path = try std.fmt.allocPrint(self.allocator, "/proc/{d}/cwd", .{pid});
defer self.allocator.free(cwd_proc_path);
cwd_path = std.fs.realpathAlloc(self.allocator, cwd_proc_path) catch |err| switch (err) {
error.AccessDenied, error.FileNotFound => {
log.debug("can't access cwd for {d}, ignoring rename", .{pid});
return;
},
else => return err,
};
}
// if we didn't receive maybe_cwd, that means we had to allocate
// cwd_path ourselves by reading from /proc. so we own the lifetime here
defer if (maybe_cwd == null and cwd_path != null)
self.allocator.free(cwd_path.?);
// applying cwd_path if the path is already absolute is incorrect behavior.
var oldpath = if (!is_oldname_absolute)
try std.fs.path.resolve(self.allocator, &[_][]const u8{
cwd_path.?,
relative_old_name,
})
else
try std.fs.path.resolve(self.allocator, &[_][]const u8{relative_old_name});
defer self.allocator.free(oldpath);
var newpath = if (!is_newname_absolute)
try std.fs.path.resolve(self.allocator, &[_][]const u8{
cwd_path.?,
relative_new_name,
})
else
try std.fs.path.resolve(self.allocator, &[_][]const u8{relative_new_name});
defer self.allocator.free(newpath);
const is_old_in_home = std.mem.startsWith(u8, oldpath, self.ctx.home_path.?);
const is_new_in_home = std.mem.startsWith(u8, newpath, self.ctx.home_path.?);
if (!(is_new_in_home or is_old_in_home)) {
log.debug("{d}: neither {s} or {s} are in home", .{ pid, oldpath, newpath });
return;
}
log.info("{d}: relevant rename: {s} -> {s}", .{ pid, oldpath, newpath });
// find out if this is a folder or not by sql count(*)
// with (local_path LIKE ? || '%')
// if it's 1, we need to compare paths to see if newpath is a folder
// that only has 1 indexed file or not
// if it's more than 1, it's 100% a folder, and we don't need to openDir
var stmt = try self.ctx.db.?.prepare(
\\ select file_hash, hashes.hash_data, local_path
\\ from files
\\ join hashes
\\ on files.file_hash = hashes.id
\\ where local_path LIKE ? || '%'
);
defer stmt.deinit();
const raw_files = try stmt.all(
struct {
file_hash: i64,
hash_data: sqlite.Blob,
local_path: []const u8,
},
self.allocator,
.{},
.{ .local_path = oldpath },
);
defer {
for (raw_files) |*raw_file| self.allocator.free(raw_file.hash_data.data);
self.allocator.free(raw_files);
}
// find out if the target newpath is a folder or not by searching
// if there are multiple entries with it already
var newpath_count = (try self.ctx.db.?.one(
i64,
\\ select count(*)
\\ from files
\\ where local_path LIKE ? || '%'
,
.{},
.{newpath},
)).?;
var is_newpath_dir: ?bool = null;
if (newpath_count > 1) is_newpath_dir = true;
if (raw_files.len >= 1) {
// consider the following folder structure:
//
// /home/luna/b
// /home/luna/abc/d
//
// if /home/luna/b gets renamed to /home/luna/a we would get
// two elements in raw_files, so we have to disambiguate
//
// we do not want to access the filesystem as that can crash us
// due to race conditions, so we must try to infer as much as
// possible from db data
// fact 1: if we have a file that has an exact match with newpath,
// then we have a single file rather than folder (as folders cant
// be indexed themselves)
var starts_with_count: usize = 0;
for (raw_files) |raw_file| {
if (std.mem.eql(u8, raw_file.local_path, oldpath)) {
const real_hash = (Context.HashWithBlob{
.id = raw_file.file_hash,
.hash_data = raw_file.hash_data,
}).toRealHash();
var file = Context.File{
.ctx = self.ctx,
.local_path = raw_file.local_path,
.hash = real_hash,
};
// since setLocalPath copies ownership, deinit afterwards
defer file.deinit();
// if we couldn't find out from the db, try to find out from the fs
if (is_newpath_dir == null) {
log.debug("newpath:{s}", .{newpath});
var maybe_newpath_dir: ?std.fs.Dir = std.fs.openDirAbsolute(newpath, .{}) catch |err| switch (err) {
error.FileNotFound, error.NotDir => blk: {
is_newpath_dir = false;
break :blk null;
},
else => return err,
};
if (maybe_newpath_dir) |*newpath_dir| {
newpath_dir.close();
is_newpath_dir = true;
}
}
if (is_newpath_dir == true) {
const old_newpath = newpath;
const local_basename = std.fs.path.basename(raw_file.local_path);
// free the old one, create a new one that's freed
// later on the defer block.
newpath = try std.fs.path.resolve(self.allocator, &[_][]const u8{
old_newpath,
local_basename,
});
self.allocator.free(old_newpath);
}
// confirmed single file
log.info(
"single File {s} was renamed from {s} to {s}",
.{ real_hash, oldpath, newpath },
);
try file.setLocalPath(newpath);
return;
} else if (std.mem.startsWith(u8, oldpath, raw_file.local_path)) {
starts_with_count += 1;
}
}
var is_directory_move = false;
// fact 2: if we had more than one path that starts with
// the newpath, it's definitely a folder. rename it accordingly
if (starts_with_count > 0) is_directory_move = true;
// fact 3: if neither 1 or 2 are true, go to the filesystem and
// find out
if (!is_directory_move) {
var dir: ?std.fs.Dir = std.fs.cwd().openDir(newpath, .{}) catch |err| switch (err) {
error.NotDir => null,
else => return err,
};
defer if (dir) |*unpacked_dir| unpacked_dir.close();
is_directory_move = dir != null;
}
if (is_directory_move) {
var oldpath_assumed_folder_buffer: [std.os.PATH_MAX]u8 = undefined;
const oldpath_assumed_folder = try std.fmt.bufPrint(
&oldpath_assumed_folder_buffer,
"{s}{s}",
.{ oldpath, std.fs.path.sep_str },
);
for (raw_files) |raw_file| {
var replace_buffer: [std.os.PATH_MAX]u8 = undefined;
if (std.mem.startsWith(u8, raw_file.local_path, oldpath_assumed_folder)) {
// this is a file in a folder, update it accordingly
// to do this, we need to replace oldpath by newpath
// since we know it starts with oldpath, we just need
// to slice oldpath out of local_path
//
// then construct it back together by prepending
// newpath_assumed_folder into this
const path_after_oldpath = raw_file.local_path[oldpath.len + 1 ..];
const replaced_path = try std.fmt.bufPrint(
&replace_buffer,
"{s}{s}{s}",
.{ newpath, std.fs.path.sep_str, path_after_oldpath },
);
log.info(
"(direcotry move) File {s} was renamed from {s} to {s}",
.{ &raw_file.file_hash, raw_file.local_path, replaced_path },
);
const real_hash = (Context.HashWithBlob{
.id = raw_file.file_hash,
.hash_data = raw_file.hash_data,
}).toRealHash();
var file = Context.File{
.ctx = self.ctx,
.local_path = raw_file.local_path,
.hash = real_hash,
};
// since setLocalPath copies ownership, deinit here
defer file.deinit();
try file.setLocalPath(replaced_path);
}
}
} else {
// if not, then we don't update anything (we already should
// have updated from fact 1).
}
} else {
// nothing about this path is in the database, so, don't give a fuck.
}
}
pub fn handleNewSignals(self: *Self) !void {
while (true) {
const signal_data = maybe_self_pipe.?.reader.reader().readStruct(SignalData) catch |err| switch (err) {
error.EndOfStream => break,
else => return err,
};
log.info("exiting! with signal {d}", .{signal_data.signal});
self.keep_running = false;
return;
}
}
};
const Pipe = struct {
reader: std.fs.File,
writer: std.fs.File,
};
var maybe_self_pipe: ?Pipe = null;
const SignalData = extern struct {
signal: c_int,
info: std.os.siginfo_t,
uctx: ?*const anyopaque,
};
const SignalList = std.ArrayList(SignalData);
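// Classic self-pipe trick: the signal handler below only serializes the
// signal data into a pipe; the main poll() loop reads it back later, so no
// non-async-signal-safe work happens inside the handler itself.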
fn signalHandler(
signal: c_int,
info: *const std.os.siginfo_t,
uctx: ?*const anyopaque,
) callconv(.C) void {
if (maybe_self_pipe) |self_pipe| {
const signal_data = SignalData{
.signal = signal,
.info = info.*,
.uctx = uctx,
};
self_pipe.writer.writer().writeStruct(signal_data) catch return;
}
}
pub fn main() anyerror!void {
const rc = sqlite.c.sqlite3_config(sqlite.c.SQLITE_CONFIG_LOG, manage_main.sqliteLog, @as(?*anyopaque, null));
if (rc != sqlite.c.SQLITE_OK) {
std.log.err("failed to configure: {d} '{s}'", .{
rc, sqlite.c.sqlite3_errstr(rc),
});
return error.ConfigFail;
}
const self_pipe_fds = try std.os.pipe();
maybe_self_pipe = .{
.reader = .{ .handle = self_pipe_fds[0] },
.writer = .{ .handle = self_pipe_fds[1] },
};
defer {
maybe_self_pipe.?.reader.close();
maybe_self_pipe.?.writer.close();
}
var mask = std.os.empty_sigset;
// only linux and darwin implement sigaddset() on zig stdlib. huh.
std.os.linux.sigaddset(&mask, std.os.SIG.TERM);
std.os.linux.sigaddset(&mask, std.os.SIG.INT);
var sa = std.os.Sigaction{
.handler = .{ .sigaction = signalHandler },
.mask = mask,
.flags = 0,
};
try std.os.sigaction(std.os.SIG.TERM, &sa, null);
try std.os.sigaction(std.os.SIG.INT, &sa, null);
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
var allocator = gpa.allocator();
var args_it = std.process.args();
_ = args_it.skip();
const Args = struct {
help: bool = false,
version: bool = false,
home_path: ?[]const u8 = null,
};
var given_args = Args{};
while (args_it.next()) |arg| {
if (std.mem.eql(u8, arg, "-h")) {
given_args.help = true;
} else if (std.mem.eql(u8, arg, "-V")) {
given_args.version = true;
} else {
given_args.home_path = arg;
}
}
if (given_args.help) {
std.debug.print(HELPTEXT, .{});
return;
} else if (given_args.version) {
std.debug.print("awtfdb-watcher {s}\n", .{VERSION});
return;
}
if (given_args.home_path == null) {
std.debug.print("home path is a required argument", .{});
return;
}
var ctx = Context{
.home_path = given_args.home_path,
.args_it = undefined,
.stdout = undefined,
.db = null,
.allocator = allocator,
};
defer ctx.deinit();
try ctx.loadDatabase(.{});
std.log.info("args: {}", .{given_args});
const bpftrace_program = @embedFile("./rename_trace.bt");
var proc = try std.ChildProcess.init(
&[_][]const u8{ "bpftrace", "-e", bpftrace_program },
allocator,
);
var envmap = std.BufMap.init(allocator);
defer envmap.deinit();
try envmap.put("BPFTRACE_STRLEN", "200");
proc.env_map = &envmap;
defer proc.deinit();
proc.stdout_behavior = .Pipe;
proc.stderr_behavior = .Pipe;
try proc.spawn();
var wait_pipe = try std.os.pipe();
defer std.os.close(wait_pipe[0]);
defer std.os.close(wait_pipe[1]);
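// pidfd_open() yields a pollable file descriptor that becomes readable when
// the bpftrace child exits, so child termination can be multiplexed with its
// stdout/stderr in the single poll() loop below.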
var pidfd: ?std.os.fd_t = null;
const pidfd_rc = std.os.linux.pidfd_open(proc.pid, 0);
switch (std.os.errno(pidfd_rc)) {
.SUCCESS => pidfd = @intCast(std.os.fd_t, pidfd_rc),
.INVAL => unreachable,
.NFILE, .MFILE => return error.TooManyFileDescriptors,
.NODEV => return error.NoDevice,
.NOMEM => return error.SystemResources,
.SRCH => unreachable, // race condition
else => |err| return std.os.unexpectedErrno(err),
}
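    // pidfd_open() gives us a pollable file descriptor that becomes readable once the
    // bpftrace child exits, so child-exit detection shares the same poll() set as the
    // stdout/stderr pipes and the signal self-pipe.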
var sockets = [_]std.os.pollfd{
.{ .fd = proc.stdout.?.handle, .events = std.os.POLL.IN, .revents = 0 },
.{ .fd = proc.stderr.?.handle, .events = std.os.POLL.IN, .revents = 0 },
.{ .fd = pidfd orelse return error.InvalidPidFd, .events = std.os.POLL.IN, .revents = 0 },
.{ .fd = maybe_self_pipe.?.reader.handle, .events = std.os.POLL.IN, .revents = 0 },
};
var oldnames = ChunkedNameMap.init(allocator);
var newnames = ChunkedNameMap.init(allocator);
var cwds = NameMap.init(allocator);
var rename_ctx = RenameContext{
.allocator = allocator,
.oldnames = &oldnames,
.newnames = &newnames,
.cwds = &cwds,
.ctx = &ctx,
};
defer rename_ctx.deinit();
while (rename_ctx.keep_running) {
const available = try std.os.poll(&sockets, -1);
if (available == 0) {
log.info("timed out, retrying", .{});
continue;
}
// have a max of 16kb per thing given by bpftrace
var line_buffer: [16 * 1024]u8 = undefined;
for (sockets) |pollfd| {
if (pollfd.revents == 0) continue;
if (pollfd.fd == maybe_self_pipe.?.reader.handle) {
try rename_ctx.handleNewSignals();
_ = try proc.kill();
} else if (proc.stdout != null and pollfd.fd == proc.stdout.?.handle) {
const line = proc.stdout.?.reader().readUntilDelimiter(&line_buffer, '\n') catch |err| {
log.err("error reading from stdout {s}", .{@errorName(err)});
switch (err) {
// process might have died while we're in the middle of a read
error.NotOpenForReading, error.EndOfStream => {
proc.stdout = null;
continue;
},
else => return err,
}
};
//log.info("got out: {s}", .{line});
try rename_ctx.processLine(line);
} else if (proc.stderr != null and pollfd.fd == proc.stderr.?.handle) {
const buffer_offset = proc.stderr.?.reader().readAll(&line_buffer) catch |err| {
log.err("error reading from stderr {s}", .{@errorName(err)});
switch (err) {
// process might have died while we're in the middle of a read
error.NotOpenForReading => {
proc.stderr = null;
continue;
},
else => return err,
}
};
const line = line_buffer[0..buffer_offset];
log.warn("got stderr: {s}", .{line});
} else if (pollfd.fd == pidfd) {
var siginfo: std.os.siginfo_t = undefined;
const waitid_rc = std.os.linux.waitid(.PIDFD, pidfd.?, &siginfo, 0);
switch (std.os.errno(waitid_rc)) {
.SUCCESS => {},
.CHILD => unreachable, // unknown process. race condition
.INVAL => unreachable, // programming error
else => |err| {
log.err("wtf {}", .{err});
return std.os.unexpectedErrno(err);
},
}
log.err("bpftrace exited with {d}", .{siginfo.signo});
return;
}
}
}
log.info("exiting main loop", .{});
}
test "rename syscalls trigger db rename" {
const allocator = std.testing.allocator;
var ctx = try manage_main.makeTestContext();
defer ctx.deinit();
var oldnames = ChunkedNameMap.init(allocator);
var newnames = ChunkedNameMap.init(allocator);
var cwds = NameMap.init(allocator);
var rename_ctx = RenameContext{
.allocator = allocator,
.oldnames = &oldnames,
.newnames = &newnames,
.cwds = &cwds,
.ctx = &ctx,
};
defer rename_ctx.deinit();
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
var file = try tmp.dir.createFile("test_file", .{});
defer file.close();
_ = try file.write("awooga");
var indexed_file = try ctx.createFileFromDir(tmp.dir, "test_file");
defer indexed_file.deinit();
// TODO system layer so we can attach a test procfs and test filesystem too
// also should help if we think about going beyond bpftrace
// (dtrace for macos and bsds maybe?)
var full_tmp_dir_path = try tmp.dir.realpathAlloc(allocator, ".");
defer allocator.free(full_tmp_dir_path);
var oldname = try std.fs.path.resolve(allocator, &[_][]const u8{
full_tmp_dir_path,
"test_file",
});
defer allocator.free(oldname);
var newname = try std.fs.path.resolve(allocator, &[_][]const u8{
full_tmp_dir_path,
"test_file2",
});
defer allocator.free(newname);
const lines_preprint =
\\v1:oldname:6969:6969:{s}
\\v1:newname:6969:6969:{s}
\\v1:exit_rename:6969:6969:0
;
var buf: [8192]u8 = undefined;
const lines = try std.fmt.bufPrint(
&buf,
lines_preprint,
.{ oldname, newname },
);
// give those lines to context
var it = std.mem.split(u8, lines, "\n");
while (it.next()) |line|
try rename_ctx.processLine(line);
const oldname_count = (try ctx.db.?.one(
usize,
"select count(*) from files where local_path = ?",
.{},
.{oldname},
)).?;
try std.testing.expectEqual(@as(usize, 0), oldname_count);
const newname_count = (try ctx.db.?.one(
usize,
"select count(*) from files where local_path = ?",
.{},
.{newname},
)).?;
try std.testing.expectEqual(@as(usize, 1), newname_count);
}
test "rename syscalls trigger db rename (target being a folder)" {
const allocator = std.testing.allocator;
var ctx = try manage_main.makeTestContext();
defer ctx.deinit();
var oldnames = ChunkedNameMap.init(allocator);
var newnames = ChunkedNameMap.init(allocator);
var cwds = NameMap.init(allocator);
var rename_ctx = RenameContext{
.allocator = allocator,
.oldnames = &oldnames,
.newnames = &newnames,
.cwds = &cwds,
.ctx = &ctx,
};
defer rename_ctx.deinit();
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
var target_tmp = std.testing.tmpDir(.{});
defer target_tmp.cleanup();
var file = try tmp.dir.createFile("test_file", .{});
defer file.close();
_ = try file.write("awooga");
var indexed_file = try ctx.createFileFromDir(tmp.dir, "test_file");
defer indexed_file.deinit();
// TODO system layer so we can attach a test procfs and test filesystem too
// also should help if we think about going beyond bpftrace
// (dtrace for macos and bsds maybe?)
var full_tmp_dir_path = try tmp.dir.realpathAlloc(allocator, ".");
defer allocator.free(full_tmp_dir_path);
var full_target_tmp_dir_path = try target_tmp.dir.realpathAlloc(allocator, ".");
defer allocator.free(full_target_tmp_dir_path);
var oldname = try std.fs.path.resolve(allocator, &[_][]const u8{
full_tmp_dir_path,
"test_file",
});
defer allocator.free(oldname);
var newname = full_target_tmp_dir_path;
var actual_newname = try std.fs.path.resolve(allocator, &[_][]const u8{
full_target_tmp_dir_path,
"test_file",
});
defer allocator.free(actual_newname);
const lines_preprint =
\\v1:oldname:6969:6969:{s}
\\v1:newname:6969:6969:{s}
\\v1:exit_rename:6969:6969:0
;
var buf: [8192]u8 = undefined;
const lines = try std.fmt.bufPrint(
&buf,
lines_preprint,
.{ oldname, newname },
);
// give those lines to context
var it = std.mem.split(u8, lines, "\n");
while (it.next()) |line|
try rename_ctx.processLine(line);
const oldname_count = (try ctx.db.?.one(
usize,
"select count(*) from files where local_path = ?",
.{},
.{oldname},
)).?;
try std.testing.expectEqual(@as(usize, 0), oldname_count);
const newname_count = (try ctx.db.?.one(
usize,
"select count(*) from files where local_path = ?",
.{},
.{actual_newname},
)).?;
try std.testing.expectEqual(@as(usize, 1), newname_count);
} | src/rename_watcher_main.zig |
const std = @import("std");
const stdx = @import("../stdx.zig");
const t = stdx.testing;
const log = stdx.log.scoped(.dynamic_array_list);
/// ArrayList with variable sized items of multiple of @sizeOf(T).
/// TODO: Allow items that are not a multiple of @sizeOf(T).
pub fn DynamicArrayList(comptime Id: type, comptime T: type) type {
return struct {
const Self = @This();
pub const SizedPtr = struct {
id: Id,
len: usize,
pub fn init(id: Id, len: usize) @This() {
return .{ .id = id, .len = len };
}
};
buf: std.ArrayList(T),
pub fn init(alloc: std.mem.Allocator) Self {
return .{
.buf = std.ArrayList(T).init(alloc),
};
}
pub fn toOwnedSlice(self: *Self) []const T {
return self.buf.toOwnedSlice();
}
pub fn clearRetainingCapacity(self: *Self) void {
self.buf.clearRetainingCapacity();
}
pub fn shrinkRetainingCapacity(self: *Self, len: usize) void {
self.buf.shrinkRetainingCapacity(len);
}
        /// Appends a variable sized item and returns its index and size.
/// Item size must be a multiple of @sizeOf(T).
pub fn append(self: *Self, item: anytype) !SizedPtr {
// const Item = @TypeOf(item);
const id = @intCast(Id, self.buf.items.len);
const slice = @ptrCast([*]const T, &item)[0..@sizeOf(@TypeOf(item))];
try self.buf.appendSlice(slice);
return SizedPtr.init(id, slice.len);
}
pub fn get(self: *Self, comptime Child: type, ptr: SizedPtr) Child {
return std.mem.bytesToValue(Child, self.buf.items[ptr.id..ptr.id+ptr.len][0..@sizeOf(Child)]);
}
pub fn getPtr(self: *Self, comptime ChildPtr: type, ptr: SizedPtr) *align(@alignOf(T)) std.meta.Child(ChildPtr) {
const Child = std.meta.Child(ChildPtr);
return std.mem.bytesAsValue(Child, self.buf.items[ptr.id..ptr.id+ptr.len][0..@sizeOf(Child)]);
}
pub fn getBytesPtr(self: *Self, ptr: SizedPtr) [*]const T {
return @ptrCast([*]const u8, &self.buf.items[ptr.id]);
}
pub fn getBytes(self: *Self, ptr: SizedPtr) []const T {
const end_idx = ptr.id + ptr.len;
return self.buf.items[ptr.id..end_idx];
}
pub fn deinit(self: Self) void {
self.buf.deinit();
}
};
}
test "DynamicArrayList" {
var arr = DynamicArrayList(u32, u8).init(t.alloc);
defer arr.deinit();
var ptr = try arr.append(true);
try t.eq(ptr, .{ .id = 0, .len = 1 });
try t.eq(arr.get(bool, ptr), true);
try t.eq(arr.getPtr(*bool, ptr).*, true);
ptr = try arr.append(@as(u32, 100));
try t.eq(ptr, .{ .id = 1, .len = 4 });
try t.eq(arr.get(u32, ptr), 100);
try t.eq(arr.getPtr(*u32, ptr).*, 100);
} | stdx/ds/dynamic_array_list.zig |
const std = @import("std");
const fs = std.fs;
const lang = @import("lang.zig");
const mdown = @import("mdown.zig");
const util = @import("util.zig");
const str = []const u8;
const testing = std.testing;
const process = std.process;
const Dir = std.fs.Dir;
const EntryKind = Dir.Entry.Kind;
const Thread = std.Thread;
const Walker = std.fs.Dir.Walker;
const event = std.event;
const mt = std.meta;
const asset_exts = [_]str{ ".html", ".mdx", ".svx", ".xml", ".jpeg", ".png", ".jpg" };
pub fn main() !void {
    var al = std.heap.GeneralPurposeAllocator(.{}){};
    var gpa = al.allocator();
    var walker = try WalkExt.initAbs(gpa, "/Users/clp/wiki/", ".md", asset_exts[0..]);
    const wwik = try walker.walk();
    const mdf = if (wwik.map) |md| md else std.StringArrayHashMap(str).init(gpa);
    const scf = if (wwik.sec_map) |md| md else std.StringArrayHashMap(str).init(gpa);
    const stdo = std.io.getStdOut();
    for (mdf.keys()) |md_path, i| {
        const val = mdf.get(md_path) orelse continue;
        try stdo.writer().print("\n\x1b[33m [{d}] \x1b[32;1m MARKDOWN FILE @ {s}: {s}", .{ i, md_path, val });
    }
    for (scf.keys()) |sc_path, i| {
        const val = scf.get(sc_path) orelse continue;
        try stdo.writer().print("\n\x1b[33m [{d}] \x1b[32;1m SECONDARY FILE @ {s}: {s}", .{ i, sc_path, val });
    }
}
pub fn relDir(d: str) !Dir {
return std.fs.cwd().openDir(d, .{ .iterate = true });
}
pub fn walkRel(a: std.mem.Allocator, dir: str) !std.fs.Dir.Walker {
    var d = try std.fs.cwd().openDir(dir, .{ .iterate = true });
    return d.walk(a);
}
pub const WalkExt = struct {
ext: str,
secondary_exts: std.ArrayList(str),
a: std.mem.Allocator,
abs_dir: Dir = std.fs.cwd(),
map: ?std.StringArrayHashMap(str) = null,
sec_map: ?std.StringArrayHashMap(str) = null,
const Self = @This();
    pub fn initAbs(a: std.mem.Allocator, dir: str, ext: str, sec_ext: ?[]const str) !Self {
        var smap = std.ArrayList(str).init(a);
        if (sec_ext) |se| try smap.appendSlice(se);
        return Self{
            .ext = ext,
            .a = a,
            .abs_dir = try std.fs.openDirAbsolute(dir, .{ .iterate = true }),
            .secondary_exts = smap,
        };
    }
    pub fn initRel(a: std.mem.Allocator, rel_dir: str, ext: str, sec_ext: ?[]const str) !Self {
        var smap = std.ArrayList(str).init(a);
        if (sec_ext) |se| try smap.appendSlice(se);
        return Self{
            .ext = ext,
            .a = a,
            .abs_dir = try std.fs.cwd().openDir(rel_dir, .{ .iterate = true }),
            .secondary_exts = smap,
        };
    }
fn hasExt(p1: str, p2: str) bool {
return std.mem.endsWith(u8, p1, p2);
}
    pub fn walk(self: *Self) !*Self {
        var wk = try self.abs_dir.walk(self.a);
        defer wk.deinit();
        var paths = std.StringArrayHashMap(str).init(self.a);
        var sec_paths = std.StringArrayHashMap(str).init(self.a);
        while (try wk.next()) |ent| {
            if (ent.kind != .File) continue;
            // the walker reuses its path buffer, so duplicate before storing
            const p = ent.path;
            if (hasExt(p, self.ext)) {
                try paths.put(try self.a.dupe(u8, p), self.ext);
                continue;
            }
            for (self.secondary_exts.items) |se| {
                if (hasExt(p, se)) try sec_paths.put(try self.a.dupe(u8, p), se);
            }
        }
        self.*.map = paths;
        self.*.sec_map = sec_paths;
        return self;
    }
}; | lib/idot/src/main.zig |
const std = @import("std");
const c = @import("../c.zig");
const zupnp = @import("../lib.zig");
const Allocator = std.mem.Allocator;
const Headers = @This();
const HeaderItems = std.StringHashMap([:0]const u8);
const logger = std.log.scoped(.@"zupnp.web.Headers");
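// Walks the C linked list of UpnpExtraHeaders: starts at the list head and advances
// with UpnpListNext() until UpnpListEnd() reports the end of the list.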
const ExtraHeadersIterator = struct {
header_list: ?*c.UpnpListHead,
pos: c.UpnpListIter,
fn init(header_list: ?*c.UpnpListHead) ExtraHeadersIterator {
return .{
.header_list = header_list,
.pos = if (header_list) |hl| @ptrCast(c.UpnpListIter, hl) else undefined,
};
}
fn next(self: *ExtraHeadersIterator) ?*c.UpnpExtraHeaders {
if (self.header_list == null or self.pos == c.UpnpListEnd(self.header_list, self.pos)) {
return null;
}
var res = @ptrCast(*c.UpnpExtraHeaders, self.pos);
self.pos = c.UpnpListNext(self.header_list, self.pos);
return res;
}
};
allocator: Allocator,
items: HeaderItems,
pub fn init(allocator: Allocator) Headers {
return .{
.allocator = allocator,
.items = HeaderItems.init(allocator),
};
}
pub fn fromHeaderList(allocator: Allocator, header_list: ?*c.UpnpListHead) !Headers {
var items = HeaderItems.init(allocator);
errdefer items.deinit();
var iter = ExtraHeadersIterator.init(header_list);
    while (iter.next()) |header| {
        const name = c.UpnpExtraHeaders_get_name_cstr(header);
        const value = c.UpnpExtraHeaders_get_value_cstr(header);
        try items.put(
            name[0..c.UpnpExtraHeaders_get_name_Length(header)],
            value[0..c.UpnpExtraHeaders_get_value_Length(header) :0],
        );
    }
    return Headers{
        .allocator = allocator,
        .items = items,
    };
}
pub fn deinit(self: *Headers) void {
self.items.deinit();
}
pub fn addHeadersToList(self: *const Headers, list: *const c.UpnpListHead) !void {
var mut_list = c.mutate(*c.UpnpListHead, list);
var last: ?*c.UpnpExtraHeaders = null;
var iter = self.items.iterator();
while (iter.next()) |kv| {
var header_str_tmp = try std.fmt.allocPrintZ(self.allocator, "{s}: {s}", .{kv.key_ptr.*, kv.value_ptr.*});
defer self.allocator.free(header_str_tmp);
var header = c.UpnpExtraHeaders_new();
logger.debug("resp err {d}", .{
c.UpnpExtraHeaders_set_resp(header, c.ixmlCloneDOMString(header_str_tmp)),
});
if (last) |l| {
c.UpnpExtraHeaders_add_to_list_node(l, c.mutate(*c.UpnpListHead, c.UpnpExtraHeaders_get_node(header)));
}
last = header;
}
if (last) |l| {
c.UpnpExtraHeaders_add_to_list_node(l, mut_list);
}
}
pub fn toString(self: *const Headers, url: []const u8) !std.ArrayList(u8) {
var buf = std.ArrayList(u8).init(self.allocator);
errdefer buf.deinit();
try appendHeader(&buf, "HOST", try getHostFromUrl(url));
var iter = self.items.iterator();
while (iter.next()) |kv| {
try appendHeader(&buf, kv.key_ptr.*, kv.value_ptr.*);
}
return buf;
}
fn appendHeader(buf: *std.ArrayList(u8), name: []const u8, value: []const u8) !void {
const CRLF = "\r\n";
try buf.appendSlice(name);
try buf.appendSlice(": ");
try buf.appendSlice(value);
try buf.appendSlice(CRLF);
}
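// Added sketch: exercises getHostFromUrl (defined below) on plain HTTP URLs; not part
// of the upstream zupnp test suite.
test "getHostFromUrl extracts the authority component" {
    try std.testing.expectEqualStrings("example.com:8080", try getHostFromUrl("http://example.com:8080/desc.xml"));
    try std.testing.expectEqualStrings("example.com", try getHostFromUrl("http://example.com"));
}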
// Port of httpreadwrite.c :: get_hoststr()
fn getHostFromUrl(url: []const u8) ![]const u8 {
const double_slash = (std.mem.indexOf(u8, url, "//") orelse {
logger.warn("Invalid URL {s}", .{url});
return zupnp.Error;
}) + 2;
const next_slash = std.mem.indexOfScalarPos(u8, url, double_slash, '/') orelse url.len;
return url[double_slash..next_slash];
} | src/web/headers.zig |
const std = @import("std");
const dtb = @import("dtb.zig");
const pl011 = @import("pl011.zig");
const mailbox = @import("mailbox.zig");
const gpio = @import("gpio.zig");
const log = @import("log.zig");
const arm = @import("arm.zig");
const interrupt = @import("interrupt.zig");
const panic_ = @import("panic.zig");
const system_timer = @import("system_timer.zig");
const intc = @import("intc.zig");
pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
_ = error_return_trace;
log.println("PANIC (0x{x}): {s}", .{ @returnAddress(), msg });
panic_.hang();
}
export fn kernelMain(dtb_ptr32: u64) callconv(.C) noreturn {
const dtb_header = @intToPtr(*dtb.Header, dtb_ptr32 & 0xffffffff);
var dt_buffer: [1 << 16]u8 = undefined;
var dt = dtb.DeviceTree.parse(dt_buffer[0..], dtb_header) catch unreachable;
// TODO: Don't hard-code stuff, walk the device tree instead.
const mailbox_node = dt.getNodeByPath("/soc/mailbox@7e00b880") orelse unreachable;
const pl011_node = dt.getNodeByPath("/soc/serial@7e201000") orelse unreachable;
const gpio_node = dt.getNodeByPath("/soc/gpio@7e200000") orelse unreachable;
const gpio_dev = gpio.GPIO.probe(gpio_node.*) catch unreachable;
const mbox_dev = mailbox.Mailbox.probe(mailbox_node.*) catch unreachable;
const uart_dev = pl011.pl011.probe(pl011_node.*) catch unreachable;
// Enable the UART.
{
const freq = 3_000_000;
// Disable pull-down on the UART GPIO pins (14, 15).
gpio_dev.controlPull(.GPPUDCLK0, .Disable, 1 << 14 | 1 << 15);
// Set the UART clock frequency to a constant rate so that we can set
// the baud rate properly.
mbox_dev.setClockRate(2, freq, false);
// Enable it.
uart_dev.enable(freq, 115200);
// Redirect all logs there.
log.setSerialConsoleDevice(&uart_dev);
log.puts("Serial console enabled.\r\n");
}
// TODO: Don't hard-code stuff, walk the device tree instead.
const st_node = dt.getNodeByPath("/soc/timer@7e003000") orelse unreachable;
const intc_node = dt.getNodeByPath("/soc/interrupt-controller@7e00b200") orelse unreachable;
var st_dev = system_timer.SystemTimer.probe(st_node.*) catch unreachable;
var intc_dev = intc.Intc.probe(intc_node.*) catch unreachable;
interrupt.init(&intc_dev);
st_dev.installIrqHandlers();
// Enable IRQs and FIQs.
arm.msr("DAIF", 0b0011_0000_0000);
// Test system timer and IRQs:
st_dev.dev.writeReg(.C1, 0x20000);
st_dev.dev.writeReg(.C3, 0x200000);
intc_dev.enableIrqs(0, 1 << 1 | 1 << 3);
log.puts("Entering infinite loop.\r\n");
panic_.hang();
} | kernel.zig |
const sabaton = @import("root").sabaton;
const pte = u64;
const table_ptr = [*]pte;
pub const Perms = enum {
none = 0,
x = 1,
w = 2,
r = 4,
rw = 6,
rwx = 7,
rx = 5,
};
pub const MemoryType = enum {
memory,
mmio,
};
pub const Root = struct {
ttbr0: table_ptr,
ttbr1: table_ptr,
};
fn make_table_at(e: *pte) table_ptr {
switch(decode(e.*, false)) {
.Mapping => unreachable,
.Table => {
return @intToPtr(table_ptr, e.* & 0x0000FFFFFFFFF000);
},
.Empty => {
const page_size = sabaton.platform.get_page_size();
const ret = sabaton.pmm.alloc_aligned(page_size, .PageTable);
//sabaton.log_hex("Allocated new table at ", ret.ptr);
e.* = @ptrToInt(ret.ptr) | 1 << 63 | 3;
return @ptrCast(table_ptr, ret.ptr);
},
}
}
fn get_index(vaddr: u64, base_bits: u6, level: u64) usize {
const shift_bits = @intCast(u6, base_bits + (base_bits - 3) * level);
return (vaddr >> shift_bits) & ((@as(u64, 1) << (base_bits - 3)) - 1);
}
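/// Builds the descriptor bits common to every mapping: valid + access flag + non-secure,
/// read-only (AP[2]) when the permissions lack write, execute-never (UXN) when they lack
/// execute, plus the MAIR attribute index and shareability for the chosen memory type.
/// This summary is a best-effort reading of the constants below, not an authoritative
/// ARMv8 reference.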
fn extra_bits(perm: Perms, mt: MemoryType, page_size: usize, botlevel: bool) u64 {
var bits: u64 = 0x1 | (1 << 5) | (1 << 10);
// Set the walk bit
if(page_size < 0x10000 and botlevel) bits |= 2;
if(@enumToInt(perm) & @enumToInt(Perms.w) == 0) bits |= 1 << 7;
if(@enumToInt(perm) & @enumToInt(Perms.x) == 0) bits |= 1 << 54;
bits |= switch(mt) {
.memory => @as(u64, 0 << 2 | 2 << 8 | 1 << 11),
.mmio => @as(u64, 1 << 2 | 2 << 8),
};
return bits;
}
fn make_mapping_at(ent: *pte, paddr: u64, bits: u64) void {
ent.* = paddr | bits;
}
pub fn detect_page_size() u64 {
var aa64mmfr0 = asm volatile(
"MRS %[reg], ID_AA64MMFR0_EL1\n\t"
: [reg] "=r" (-> u64)
);
var psz: u64 = undefined;
if(((aa64mmfr0 >> 28) & 0x0F) == 0b0000) {
psz = 0x1000;
}
else if(((aa64mmfr0 >> 20) & 0x0F) == 0b0001) {
psz = 0x4000;
}
else if(((aa64mmfr0 >> 24) & 0x0F) == 0b0000) {
psz = 0x10000;
}
else if(sabaton.safety) {
@panic("Unknown page size!");
} else {
unreachable;
}
return psz;
}
pub fn init_paging() Root {
const page_size = sabaton.platform.get_page_size();
return .{
.ttbr0 = @ptrCast(table_ptr, sabaton.pmm.alloc_aligned(page_size, .PageTable)),
.ttbr1 = @ptrCast(table_ptr, sabaton.pmm.alloc_aligned(page_size, .PageTable)),
};
}
fn can_map(size: u64, vaddr: u64, paddr: u64, large_step: u64) bool {
if(large_step > 0x40000000)
return false;
if(size < large_step)
return false;
const mask = large_step - 1;
if(vaddr & mask != 0)
return false;
if(paddr & mask != 0)
return false;
return true;
}
fn choose_root(r: *const Root, vaddr: u64) table_ptr {
if(sabaton.util.upper_half(vaddr)) {
return r.ttbr1;
} else {
return r.ttbr0;
}
}
pub fn current_root() Root {
return .{
.ttbr0 = asm("MRS %[br0], TTBR0_EL1": [br0] "=r" (-> table_ptr)),
.ttbr1 = asm("MRS %[br1], TTBR1_EL1": [br1] "=r" (-> table_ptr)),
};
}
const Decoded = enum {
Mapping,
Table,
Empty,
};
pub fn decode(e: pte, bottomlevel: bool) Decoded {
if(e & 1 == 0)
return .Empty;
if(bottomlevel or e & 2 == 0)
return .Mapping;
return .Table;
}
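/// Maps the range [vaddr, vaddr + size) to [paddr, paddr + size) with the given
/// permissions and memory type, rounding size up to the page size, allocating page
/// tables on demand, and using the largest block mapping (up to 1 GiB) that the
/// remaining size and alignment allow.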
pub fn map(vaddr_c: u64, paddr_c: u64, size_c: u64, perm: Perms, mt: MemoryType, in_root: ?*Root) void {
const page_size = sabaton.platform.get_page_size();
var vaddr = vaddr_c;
var paddr = paddr_c;
var size = size_c;
size += page_size - 1;
size &= ~(page_size - 1);
var root: table_ptr = undefined;
if(in_root) |r| {
root = choose_root(r, vaddr);
}
else {
const roots = current_root();
root = choose_root(&roots, vaddr);
}
const levels: usize =
switch(page_size) {
0x1000, 0x4000 => 4,
0x10000 => 3,
else => unreachable,
};
const base_bits = @intCast(u6, @ctz(u64, page_size));
const small_bits = extra_bits(perm, mt, page_size, true);
const large_bits = extra_bits(perm, mt, page_size, false);
while(size != 0) {
var current_step_size = page_size << @intCast(u6, (base_bits - 3) * (levels - 1));
var level = levels - 1;
var current_table = root;
while(true) {
const ind = get_index(vaddr, base_bits, level);
// We can only map at this level if it's not a table
switch(decode(current_table[ind], level == 0)) {
.Mapping => {
sabaton.log_hex("Overlapping mapping at ", vaddr);
sabaton.log_hex("PTE is ", current_table[ind]);
unreachable;
},
.Table => { }, // Just iterate to the next level
.Empty => {
// If we can map at this level, do so
if(can_map(size, vaddr, paddr, current_step_size)) {
const bits = if(level == 0) small_bits else large_bits;
make_mapping_at(¤t_table[ind], paddr, bits);
break;
}
// Otherwise, just iterate to the next level
}
}
if(level == 0)
unreachable;
current_table = make_table_at(¤t_table[ind]);
current_step_size >>= (base_bits - 3);
level -= 1;
}
vaddr += current_step_size;
paddr += current_step_size;
size -= current_step_size;
}
}
pub fn apply_paging(r: *Root) void {
var sctlr = asm(
\\MRS %[sctlr], SCTLR_EL1
: [sctlr] "=r" (-> u64)
);
var aa64mmfr0 = asm(
\\MRS %[id], ID_AA64MMFR0_EL1
: [id] "=r" (-> u64)
);
// Documentation? Nah, be a professional guesser.
sctlr |= 1;
aa64mmfr0 &= 0x0F;
if(aa64mmfr0 > 5)
aa64mmfr0 = 5;
var paging_granule_br0: u64 = undefined;
var paging_granule_br1: u64 = undefined;
var region_size_offset: u64 = undefined;
switch(sabaton.platform.get_page_size()) {
0x1000 => {
paging_granule_br0 = 0b00;
paging_granule_br1 = 0b10;
region_size_offset = 16;
},
0x4000 => {
paging_granule_br0 = 0b10;
paging_granule_br1 = 0b01;
region_size_offset = 8;
},
0x10000 => {
paging_granule_br0 = 0b01;
paging_granule_br1 = 0b11;
region_size_offset = 0;
},
else => unreachable,
}
const tcr: u64 = 0
| (region_size_offset << 0) // T0SZ
| (region_size_offset << 16) // T1SZ
| (1 << 8) // TTBR0 Inner WB RW-Allocate
| (1 << 10) // TTBR0 Outer WB RW-Allocate
| (1 << 24) // TTBR1 Inner WB RW-Allocate
| (1 << 26) // TTBR1 Outer WB RW-Allocate
| (2 << 12) // TTBR0 Inner shareable
| (2 << 28) // TTBR1 Inner shareable
| (aa64mmfr0 << 32) // intermediate address size
| (paging_granule_br0 << 14) // TTBR0 granule
| (paging_granule_br1 << 30) // TTBR1 granule
| (1 << 56) // Fault on TTBR1 access from EL0
| (0 << 55) // Don't fault on TTBR0 access from EL0
;
const mair: u64 = 0
| (0b11111111 << 0) // Normal, Write-back RW-Allocate non-transient
| (0b00000000 << 8) // Device, nGnRnE
;
if(sabaton.debug) {
sabaton.log("Enabling paging... ", .{});
}
asm volatile(
\\MSR TTBR0_EL1, %[ttbr0]
\\MSR TTBR1_EL1, %[ttbr1]
\\MSR MAIR_EL1, %[mair]
\\MSR TCR_EL1, %[tcr]
\\MSR SCTLR_EL1, %[sctlr]
\\DSB SY
\\ISB SY
:
: [ttbr0] "r" (r.ttbr0)
, [ttbr1] "r" (r.ttbr1)
, [sctlr] "r" (sctlr)
, [tcr] "r" (tcr)
, [mair] "r" (mair)
: "memory"
);
if(sabaton.debug) {
sabaton.log("Paging enabled!\n", .{});
}
} | src/platform/paging.zig |
const std = @import("std");
const Allocator = std.mem.Allocator;
const List = std.ArrayList;
const Map = std.AutoHashMap;
const StrMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const Str = []const u8;
const util = @import("util.zig");
const gpa = util.gpa;
const data = @embedFile("../data/day15.txt");
const num_rows = 100;
const num_cols = 100;
//const data = "1163751742\n1381373672\n2136511328\n3694931569\n7463417111\n1319128137\n1359912421\n3125421639\n1293138521\n2311944581";
//const num_cols = 10;
//const num_rows = 10;
const Size = struct {
height: usize,
width: usize,
};
const Location = struct {
row: usize,
col: usize,
};
fn locationToIndex(location: Location, size: Size) usize {
assert(location.row < size.height);
assert(location.col < size.width);
    const index = location.row * size.width + location.col;
return index;
}
fn indexToLocation(index: usize, size: Size) Location {
const col = index % size.width;
const row = index / size.width;
return .{ .col = col, .row = row };
}
fn taxiLocation(a: Location, b: Location) usize {
const row_delta = max(a.row, b.row) - min(a.row, b.row);
const col_delta = max(a.col, b.col) - min(a.col, b.col);
return row_delta + col_delta;
}
fn taxiIndex(a: usize, b: usize, size: Size) usize {
const a_loc = indexToLocation(a, size);
const b_loc = indexToLocation(b, size);
assert(a_loc.row < size.height and a_loc.col < size.width);
assert(b_loc.row < size.height and b_loc.col < size.width);
return taxiLocation(a_loc, b_loc);
}
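// Added sanity test for the index/location helpers above (not part of the original solution).
test "location/index helpers round-trip and taxi distance" {
    const size = Size{ .height = 10, .width = 10 };
    const loc = Location{ .row = 3, .col = 7 };
    const idx = locationToIndex(loc, size);
    try std.testing.expectEqual(@as(usize, 37), idx);
    try std.testing.expectEqual(loc, indexToLocation(idx, size));
    try std.testing.expectEqual(@as(usize, 4), taxiLocation(loc, .{ .row = 1, .col = 9 }));
}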
fn getNeighbors(buf: []usize, index: usize, size: Size) []usize {
var buffer = buf;
assert(buffer.len >= 4);
const loc = indexToLocation(index, size);
assert(loc.row < size.height);
assert(loc.col < size.width);
const up_exists = loc.row > 0;
const left_exists = loc.col > 0;
const down_exists = loc.row + 1 < size.height;
const right_exists = loc.col + 1 < size.width;
var i: usize = 0;
if (up_exists) {
const up = locationToIndex(.{ .row = loc.row - 1, .col = loc.col }, size);
buffer[i] = up;
i += 1;
}
if (left_exists) {
const left = locationToIndex(.{ .row = loc.row, .col = loc.col - 1 }, size);
buffer[i] = left;
i += 1;
}
if (down_exists) {
const down = locationToIndex(.{ .row = loc.row + 1, .col = loc.col }, size);
buffer[i] = down;
i += 1;
}
if (right_exists) {
const left = locationToIndex(.{ .row = loc.row, .col = loc.col + 1 }, size);
buffer[i] = left;
i += 1;
}
return buffer[0..i];
}
// Incomplete part-specific neighborhood helpers from the original, kept as thin
// wrappers around getNeighbors; aStar below takes the grid size directly instead of
// a neighborhood function, so these are currently unused.
fn getNeighborhood1(buf: []usize, index: usize, size: Size) []usize {
    return getNeighbors(buf, index, size);
}
fn getNeighborhood2(buf: []usize, index: usize, size: Size) []usize {
    return getNeighbors(buf, index, size);
}
fn aStar(input: []const u8, start: usize, end: usize, size: Size) !usize {
assert(start < input.len);
assert(end < input.len);
// set of nodes that need to be expanded
var open_set = try BitSet.initEmpty(input.len, gpa);
open_set.set(start); // start index
// value is preceding node on cheapest path from start to key
var came_from = Map(usize, usize).init(gpa);
defer came_from.deinit();
// value is cost of cheapest path from start to key
var g_score = Map(usize, usize).init(gpa);
defer g_score.deinit();
try g_score.put(start, 0);
// value is best guess of cheapest total cost from start to finish which goes through key
var f_score = Map(usize, usize).init(gpa);
defer f_score.deinit();
try f_score.put(start, taxiIndex(start, end, size));
while (open_set.count() > 0) {
// node in open_set with lowest f_score
// O(N) - but would be O(1) if open_set were min-heap or priority queue
const current: usize = blk: {
var iter = open_set.iterator(.{});
var min_f_score: usize = std.math.maxInt(usize);
var ret: usize = undefined; // ret must be set at least once in while loop
while (iter.next()) |index| {
if (min_f_score >= f_score.get(index).?) {
min_f_score = f_score.get(index).?;
ret = index;
}
}
break :blk ret;
};
if (current == end) {
break;
}
// print("{x}\n", .{current});
open_set.unset(current);
var buf: [4]usize = undefined;
        const neighbors: []usize = getNeighbors(&buf, current, size);
for (neighbors) |neighbor| {
// cost of edge from current to neighbor
// same as value in cell of neighbor
const d = input[neighbor];
// cost of path from start to neighbor through current
const tentative_g_score = if (g_score.get(current)) |g| d + g else std.math.maxInt(usize);
if (tentative_g_score < if (g_score.get(neighbor)) |g| g else std.math.maxInt(usize)) {
// This path to neighbor is cheaper than previously recorded paths to neighbor
try came_from.put(neighbor, current);
try g_score.put(neighbor, tentative_g_score);
try f_score.put(neighbor, tentative_g_score + taxiIndex(neighbor, end, size));
open_set.set(neighbor);
// print("{d:0>2}: set gs to {d}\n", .{neighbor, tentative_g_score});
}
// print("neighbor: {d:0>2} :: tgs: {d}\n", .{neighbor, tentative_g_score});
}
}
// print("end: {d}\n", .{end});
// cost is the g_score of the end node
const cost = g_score.get(end).?;
return cost;
}
pub fn main() !void {
const input: [num_rows][num_cols]u8 = blk: { // parse data
var rows = try std.BoundedArray([num_cols]u8, num_rows).init(0);
var lines = tokenize(u8, data, "\r\n");
while (lines.next()) |line| {
var cols = try std.BoundedArray(u8, num_rows).init(0);
for (line) |char| {
const n = try parseInt(u8, &[1]u8{char}, 10);
try cols.append(n);
}
assert(cols.len == num_cols);
try rows.append(cols.buffer);
}
assert(rows.len == num_rows);
break :blk rows.buffer;
};
const input_1: [num_rows * num_cols]u8 = blk: {
var list = try std.BoundedArray(u8, num_cols * num_rows).init(0);
for (input) |rows| {
for (rows) |cell| {
try list.append(cell);
}
}
break :blk list.buffer;
};
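    // Part 2 input: tile the original grid 5x5; each tile adds its tile-row and
    // tile-column offsets to every cell, with values above 9 wrapping back to 1.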
const input_2: [num_rows * num_cols * 25]u8 = blk: {
var ret: [num_rows * num_cols * 25]u8 = undefined;
for (ret) |*cell, index| {
const row = index / (num_rows * 5);
const col = index % (num_cols * 5);
const row_in = row % num_rows;
const col_in = col % num_cols;
const row_add = row / num_rows;
const col_add = col / num_cols;
cell.* = @truncate(u8, (input[row_in][col_in] + row_add + col_add - 1) % 9 + 1);
}
break :blk ret;
};
const part1 = try aStar(&input_1, 0, input_1.len - 1, Size{ .height = num_rows, .width = num_cols });
assert(part1 == 415);
print("{d}\n", .{part1});
const part2 = try aStar(&input_2, 0, input_2.len - 1, Size{ .height = num_rows * 5, .width = num_cols * 5 });
assert(part2 == 2864);
print("{d}\n", .{part2});
if (false) {
for (input_2) |n, index| {
print("{d}", .{n});
if ((index + 1) % (num_cols * 5) == 0) print("\n", .{});
}
}
}
// Useful stdlib functions
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;
const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;
const min = std.math.min;
const min3 = std.math.min3;
const max = std.math.max;
const max3 = std.math.max3;
const print = std.debug.print;
const assert = std.debug.assert;
const sort = std.sort.sort;
const asc = std.sort.asc;
const desc = std.sort.desc; | src/day15.zig |
const std = @import("std");
const sf = @import("sfml");
const allocator = std.heap.page_allocator;
// I only use things I've wrapped here, but the other csfml functions seem to work, just need to wrap them
pub fn main() anyerror!void {
// Create a window
var window = try sf.RenderWindow.init(.{ .x = 800, .y = 600 }, 32, "This is zig!");
defer window.deinit();
//window.setVerticalSyncEnabled(false);
window.setFramerateLimit(60);
// Shapes creation
var circle = try sf.CircleShape.init(100);
defer circle.deinit();
circle.setFillColor(sf.Color.Green);
circle.setPosition(.{ .x = 0, .y = 0 });
circle.setOrigin(.{ .x = 100, .y = 100 });
circle.setTexture(null);
var bob = try sf.CircleShape.init(10);
defer bob.deinit();
bob.setFillColor(sf.Color.Red);
bob.setOrigin(.{ .x = 10, .y = 10 });
var tex = try sf.Texture.init(.{ .x = 12, .y = 10 });
defer tex.deinit();
std.debug.print("{} * {} = ", .{ tex.getSize().x, tex.getSize().y });
std.debug.print("{}\n", .{tex.getPixelCount()});
var pixel_data = try allocator.alloc(sf.Color, 120);
defer allocator.free(pixel_data);
for (pixel_data) |c, i| {
pixel_data[i] = sf.Color.fromHSVA(@intToFloat(f32, i) / 144 * 360, 100, 100, 1);
}
try tex.updateFromPixels(pixel_data, null);
var rect = try sf.RectangleShape.init(.{ .x = 50, .y = 70 });
defer rect.deinit();
rect.setPosition(.{ .x = 100, .y = 100 });
rect.setTexture(tex);
// Clock
var clock = try sf.Clock.init();
defer clock.deinit();
var view = window.getDefaultView();
// Game loop
while (window.isOpen()) {
//Event polling
while (window.pollEvent()) |event| {
switch (event) {
.closed => window.close(),
.mouseButtonPressed => |e| {
var coords = window.mapPixelToCoords(e.pos, null);
rect.setPosition(coords);
},
else => {},
}
}
//Updating
var total = clock.getElapsedTime().asSeconds();
rect.setRotation(total * 12);
view.center = bob.getPosition();
if (sf.Keyboard.isKeyPressed(.A))
window.setView(view);
bob.setPosition(window.mapPixelToCoords(sf.Mouse.getPosition(window), null));
//std.debug.print("{}\n", .{sf.Mouse.getPosition(window)});
//Drawing
window.clear(sf.Color.Black);
window.draw(circle, null);
window.draw(bob, null);
window.draw(rect, null);
window.display();
}
} | src/examples/my_tests.zig |
const std = @import("std");
const zlm = @import("zlm");
const Renderer = @import("renderer.zig").Renderer;
const ROM = @import("../rom.zig").ROM;
const Track = @import("../track/track.zig").Track;
const Window = @import("window.zig").Window;
const CameraMoveMode = enum {
None,
Pan,
Rotate,
};
const PAN_FACTOR = 0.4;
const ZOOM_FACTOR = 5;
const ROTATE_FACTOR = 0.01;
pub const Viewer = struct {
// window for doing window things
window: *Window,
// renderer to render stuff on
renderer: *Renderer,
// track to display
track: Track,
// previous mouse position, undefined if not needed
prev_mouse_pos: zlm.Vec2,
// mode of moving camera
move_mode: CameraMoveMode,
// frame to resume after each frame, to allow concurrent execution
resumer: ?anyframe,
// set to false when the viewer should be closed
running: bool,
pub fn init(allocator: *std.mem.Allocator) !Viewer {
const window = try Window.init(allocator, 1024, 768);
errdefer window.deinit(allocator);
const renderer = try allocator.create(Renderer);
errdefer allocator.destroy(renderer);
renderer.* = try Renderer.init(allocator);
errdefer renderer.deinit(allocator);
window.renderer = renderer;
try window.updateViewport();
return Viewer{
.window = window,
.renderer = renderer,
.track = Track.init(),
.prev_mouse_pos = undefined,
.move_mode = .None,
.resumer = null,
.running = true,
};
}
pub fn deinit(self: *Viewer, allocator: *std.mem.Allocator) void {
self.renderer.deinit(allocator);
allocator.destroy(self.renderer);
self.window.deinit(allocator);
self.track.deinit(allocator);
}
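    // Composes the camera transform: pitch about X, then yaw about Y, then translation
    // to the camera position. update() uses it to move pan/zoom deltas into world space.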
fn camMatrix(self: Viewer) zlm.Mat4 {
return zlm.Mat4.createAngleAxis(zlm.Vec3.unitX, self.renderer.camera_rot.x)
.mul(zlm.Mat4.createAngleAxis(zlm.Vec3.unitY, self.renderer.camera_rot.y))
.mul(zlm.Mat4.createTranslation(self.renderer.camera_pos));
}
pub fn update(self: *Viewer) void {
Window.pollEvents();
const mouse_pos = self.window.getCursor();
defer self.prev_mouse_pos = mouse_pos;
// mouse delta is flipped, feels better imo
const dx = self.prev_mouse_pos.x - mouse_pos.x;
const dy = self.prev_mouse_pos.y - mouse_pos.y;
const is_mouse_down = self.window.isMouseDown(.middle);
if (self.move_mode != .None and !is_mouse_down) {
self.move_mode = .None;
return;
}
switch (self.move_mode) {
.None => {
if (is_mouse_down) {
if (self.window.isKeyDown(.left_shift)) {
self.move_mode = .Pan;
} else {
self.move_mode = .Rotate;
}
} else {
const scroll = @atomicRmw(f32, &self.window.scroll, .Xchg, 0, .SeqCst);
if (scroll != 0) {
self.renderer.camera_pos = zlm.Vec3.unitZ.scale(scroll * ZOOM_FACTOR)
.transformPosition(self.camMatrix());
}
}
},
.Pan => {
self.renderer.camera_pos = zlm.Vec3.unitX.scale(dx * PAN_FACTOR)
.sub(zlm.Vec3.unitY.scale(dy * PAN_FACTOR))
.transformPosition(self.camMatrix());
},
.Rotate => {
self.renderer.camera_rot.x = @rem(self.renderer.camera_rot.x + dy * ROTATE_FACTOR, 2 * std.math.pi);
self.renderer.camera_rot.y = @rem(self.renderer.camera_rot.y + dx * ROTATE_FACTOR, 2 * std.math.pi);
},
}
}
pub fn run(self: *Viewer, allocator: *std.mem.Allocator, rom: ROM) !void {
defer while (self.resumer) |r| {
// this loop makes sure the resumer returns before we do, so resources are cleaned up gracefully
self.resumer = null;
resume r;
};
// we expect that this is running on the main thread
while (self.running) {
self.update();
// TODO animation
self.track.renderIfNeeded(allocator, rom, self.renderer, 0);
try self.renderer.draw();
self.window.swapBuffers();
if (self.window.shouldClose()) {
self.running = false;
break;
}
if (self.resumer) |r| {
self.resumer = null;
resume r;
}
}
}
}; | src/render/viewer.zig |
use @import("./4_0_tokenization.zig");
// https://drafts.csswg.org/css-syntax/#parsing
// 5. Parsing
//
// The input to the parsing stage is a stream or list of tokens from the tokenization stage. The output depends on how the parser is invoked, as defined by the entry points listed later in this section. The parser output can consist of at-rules, qualified rules, and/or declarations.
//
// The parser’s output is constructed according to the fundamental syntax of CSS, without regards for the validity of any specific item. Implementations may check the validity of items as they are returned by the various parser algorithms and treat the algorithm as returning nothing if the item was invalid according to the implementation’s own grammar knowledge, or may construct a full tree as specified and "clean up" afterwards by removing any invalid items.
const EntryData = enum {
Tokenizer: Tokenizer,
CVs: []ComponentValue,
fn nextToken(self: EntryData) -> Token {
comptime switch(self) {
Tokenizer => |tok| nextToken(tok),
CVs => |c| c.shift() %% Token.EOF,
}
}
};
export fn NewParser(comptime T: type, data: &T) -> type {
return Parser {
.source = switch(T) {
[]u8 => |d| EntryData.Tokenizer(Tokenizer(d)),
Tokenizer => |d| EntryData.Tokenizer(d),
[]ComponentValue => |d| EntryData.CVs(d),
},
.curr = undefined,
.next = undefined,
.reconsumed = false,
};
}
pub const AtRule = struct {
name: []u32,
prelude: []ComponentValue,
block: ?SimpleBlock,
};
pub const QualifiedRule = struct {
prelude: []ComponentValue,
block: SimpleBlock,
};
pub const Declaration = struct {
name: []u32,
value: []ComponentValue,
is_important: bool,
};
pub const ComponentValue = enum {
Preserved: PreservedToken,
Function: struct{name: []u32, value: []ComponentValue},
SimpleBlock: struct{assoc_token: AssocToken, value: []ComponentValue},
};
pub const Parser = struct {
source: EntryData,
curr: Token,
next: Token,
reconsumed: bool,
// https://drafts.csswg.org/css-syntax/#parser-definitions
// 5.2. Definitions
// current input token
// The token or component value currently being operated on, from the list of tokens produced by the tokenizer.
fn currToken(self: &const Parser) -> Token {
self.curr;
}
// next input token
// The token or component value following the current input token in the list of tokens produced by the tokenizer. If there isn’t a token following the current input token, the next input token is an <EOF-token>.
fn nextToken(self: &const Parser) -> Token {
if (self.reconsumed) {
return self.curr;
}
return self.next;
}
// <EOF-token>
// A conceptual token representing the end of the list of tokens. Whenever the list of tokens is empty, the next input token is always an <EOF-token>.
// consume the next input token
// Let the current input token be the current next input token, adjusting the next input token accordingly.
fn consumeToken(self: &Parser) -> Token {
if (self.reconsumed) {
self.reconsumed = false;
} else {
self.curr = self.next;
self.next = nextToken(self.source);
}
self.curr;
}
// reconsume the current input token
// The next time an algorithm instructs you to consume the next input token, instead do nothing (retain the current input token unchanged).
fn reconsume(self: &Parser) {
self.reconsumed = true;
}
// https://drafts.csswg.org/css-syntax/#parser-entry-points
// 5.3. Parser Entry Points
// The algorithms defined in this section produce high-level CSS objects from lower-level objects. They assume that they are invoked on a token stream, but they may also be invoked on a string; if so, first perform input preprocessing to produce a code point stream, then perform tokenization to produce a token stream.
//
// "Parse a stylesheet" can also be invoked on a byte stream, in which case The input byte stream defines how to decode it into Unicode.
//
// All of the algorithms defined in this spec may be called with either a list of tokens or of component values. Either way produces an identical result.
fn skipWhitespace(self: &Parser) {
while (true) {
switch (self.nextToken()) {
Token.Whitespace => self.consumeToken(),
else => return
}
}
}
// https://drafts.csswg.org/css-syntax/#parse-stylesheet
// 5.3.2. Parse a stylesheet
pub fn ParseStylesheet(comptime T: type, data: T) -> Stylesheet {
        var p = ParserFromEntryData(data);
var ss = Stylesheet{};
const rules = consumeListOfRules(p, true);
if (rules.len != 0) {
switch (rules[0]) {
Token.AtRule => |ar|
if (ar.name == "charset") {
rules.shift();
}
}
}
ss.value = rules;
return ss;
}
// https://drafts.csswg.org/css-syntax/#parse-list-of-rules
// 5.3.3. Parse a list of rules
pub fn ParseListOfRules(d: &EntryData) -> []Rule {
var p = ParserFromEntryData(d);
return consumeListOfRules(p, false);
}
// https://drafts.csswg.org/css-syntax/#parse-rule
// 5.3.4. Parse a rule
pub fn ParseRule(d: &EntryData) -> %Rule {
var p = ParserFromEntryData(d);
skipWhitespace(p);
const rule = switch (nextToken(p)) {
Token.EOF => null,
Token.AtRule => return consumeAtRule(p),
else => return consumeQualifiedRule(p),
};
if (rule == null) {
// TODO: Handle syntax error
}
skipWhitespace(p);
switch (nextToken(p)) {
Token.EOF => return rule,
else => return null, // TODO: Handle syntax error
}
}
// https://drafts.csswg.org/css-syntax/#parse-declaration
// 5.3.5. Parse a declaration
pub fn ParseDeclaration(d: EntryData) -> %Declaration {
var p = ParserFromEntryData(d);
skipWhitespace(p);
        const decl = switch (nextToken(p)) {
            Token.Ident => consumeDeclaration(p),
            else => null,
        };
        if (decl) {
            return decl
        } else {
            // TODO: Handle syntax error
        }
}
// https://drafts.csswg.org/css-syntax/#parse-list-of-declarations
// 5.3.6. Parse a list of declarations
pub fn ParseListOfDeclarations(d: EntryData) -> []Declaration {
var p = ParserFromEntryData(d);
consumeListOfDeclarations(p);
}
// https://drafts.csswg.org/css-syntax/#parse-component-value
// 5.3.7. Parse a component value
pub fn ParseComponentValue(d: EntryData) -> ComponentValue {
var p = ParserFromEntryData(d);
skipWhitespace(p);
switch (nextToken(p)) {
Token.EOF => return {}, // TODO: Handle syntax error
}
const value = consumeComponentValue(p);
skipWhitespace(p);
switch (value) {
Token.EOF => return value,
else => return {}, // TODO: Handle syntax error
}
}
// https://drafts.csswg.org/css-syntax/#parse-list-of-component-values
// 5.3.8. Parse a list of component values
pub fn ParseListOfComponentValues(d: EntryData) -> []ComponentValue {
var p = ParserFromEntryData(d);
const list = []ComponentValue{};
var cv = consumeComponentValue(p);
while (cv != Token.EOF) {
list.append();
cv = consumeComponentValue(p);
}
list;
}
//https://drafts.csswg.org/css-syntax/#parse-comma-separated-list-of-component-values
// 5.3.9. Parse a comma-separated list of component values
pub fn ParseCommaSeparatedComponentValues(d: EntryData,) -> [][]ComponentValue {
var p = ParserFromEntryData(d);
const cvls = [][]ComponentValue{};
var list = []ComponentValue{};
var cv: ComponentValue;
while (true) {
cv = consumeComponentValue(p);
switch (cv) {
Token.EOF => {
cvls.append(list);
return cvls;
},
Token.Comma => {
cvls.append(list);
list = []ComponentValue{};
},
else => list.append(cv),
}
}
}
// https://drafts.csswg.org/css-syntax/#parser-algorithms
// 5.4. Parser Algorithms
// The following algorithms comprise the parser. They are called by the parser entry points above.
//
// These algorithms may be called with a list of either tokens or of component values. (The difference being that some tokens are replaced by functions and simple blocks in a list of component values.) Similar to how the input stream returned EOF code points to represent when it was empty during the tokenization stage, the lists in this stage must return an <EOF-token> when the next token is requested but they are empty.
//
// An algorithm may be invoked with a specific list, in which case it consumes only that list (and when that list is exhausted, it begins returning <EOF-token>s). Otherwise, it is implicitly invoked with the same list as the invoking algorithm.
// https://drafts.csswg.org/css-syntax/#consume-list-of-rules
// 5.4.1. Consume a list of rules
fn consumeListOfRules(p: Parser, top_level: bool) -> []Rule {
const rules = []Rule{};
var tok = consumeToken(p);
while (true) : (tok = consumeToken(p)) {
switch (tok) {
Token.Whitespace => {}, // do nothing
Token.EOF => return rules,
Token.AtKeyword => {
reconsume(p);
// ISSUE: https://github.com/w3c/csswg-drafts/issues/1839
rules.append(consumeAtRule(p));
},
Token.CDO, Token.CDC => if (!top_level) {
reconsume(p);
if (consumeQualifiedRule(p)) |qr| {
rules.append(qr);
}
},
else => {
reconsume(p);
if (consumeQualifiedRule(p)) |qr| {
rules.append(qr);
}
},
}
}
}
// https://drafts.csswg.org/css-syntax/#consume-at-rule
// 5.4.2. Consume an at-rule
fn consumeAtRule(p: Parser) -> AtRule {
var tok = consumeToken(p);
var ar = AtRule{
name = tok.data,
prelude = []ComponentValue{},
block = null,
};
while (true) {
tok = consumeToken(p);
switch (tok) {
Token.Semicolon => return ar,
Token.EOF => return ar, // TODO: Handle parser error
Token.LBrace => {
ar.block = consumeSimpleBlock(p);
return ar
},
ComponentValue.SimpleBlock => |block| {
ar.block = block;
return ar
},
else => {
reconsume(p);
ar.prelude.append(consumeComponentValue(p));
},
}
}
}
// https://drafts.csswg.org/css-syntax/#consume-qualified-rule
// 5.4.3. Consume a qualified rule
fn consumeQualifiedRule(p: Parser) -> ?QualifiedRule {
var qr = QualifiedRule{
prelude = []ComponentValue{},
};
var tok = consumeToken(p);
while (true) : (tok = consumeToken(p)) {
switch (tok) {
Token.EOF => return null, // TODO: Handle parse error
Token.LBrace => {
qr.block = consumeSimpleBlock(p);
return qr;
},
ComponentValue.SimpleBlock => |block| {
qr.block = block;
return qr;
},
else => {
reconsume(p);
qr.prelude.append(consumeComponentValue(p));
},
}
}
}
// https://drafts.csswg.org/css-syntax/#consume-list-of-declarations
// 5.4.4. Consume a list of declarations
fn consumeListOfDeclarations(p: Parser) -> []Declaration {
const list = []Declaration{};
var tok = consumeToken(p);
while (true) : (tok = consumeToken(p)) {
switch (tok) {
Token.Whitespace, Token.Semicolon => {}, // do nothing
Token.EOF => return list,
Token.AtKeyword => {
reconsume(p);
list.append(consumeAtRule(p));
},
Token.Ident => {
var temp_list = []Token{tok};
while (true) {
switch (nextToken(p)) {
Token.Semicolon, Token.EOF => {}, // do nothing
else => {
temp_list.append(consumeComponentValue(p));
const d = consumeDeclaration(temp_list);
if (d) |decl| {
list.append(decl);
}
}
}
}
},
else => {
// TODO: Handle parser error
reconsume(p);
while (true) { // Discard until semicolon or end
switch (nextToken(p)) {
Token.Semicolon, Token.EOF => break,
else => consumeComponentValue(p), // discard
}
}
}
}
}
}
// https://drafts.csswg.org/css-syntax/#consume-declaration
// 5.4.5. Consume a declaration
//
// Note: This algorithm assumes that the next input token has already been checked to be an <ident-token>.
fn consumeDeclaration(p: Parser) -> ?Declaration {
const decl = Declaration{
name = consumeToken(p),
value = []ComponentValue{},
};
skipWhitespace(p);
var tok = nextToken(p);
switch (tok) {
':' => _ = consumeToken(p),
else => return null, // TODO: Handle parser error
}
tok = nextToken(p);
while (tok != Token.EOF) : (tok = nextToken(p)) {
decl.value.append(consumeComponentValue(p));
}
// 4. If the last two non-<whitespace-token>s in the declaration’s value are a <delim-token> with the value "!" followed by an <ident-token> with a value that is an ASCII case-insensitive match for "important", remove them from the declaration’s value and set the declaration’s important flag to true.
var i = decl.value.len-1;
while ((decl.value[i] %% Token.EOF) == Token.Whitespace) : (i -= 1) {
}
switch (decl.value[i] %% Token.EOF) {
Token.Ident => |ident| if (ident == "important") {// TODO: Case-insensitive
i -= 1;
while ((decl.value[i] %% Token.EOF) == Token.Whitespace) : (i -= 1) {
}
switch (decl.value[i] %% Token.EOF) {
Token.Delim => |delim| if (delim == '!') {
// Though the spec does not instruct to remove whitespace tokens
// encountered, it's harmless and improves performance and simplicity.
decl.value = decl.value.slice(0, i); // TODO: Use the proper method
decl.is_important = true;
},
}
}
}
return decl;
}
// https://drafts.csswg.org/css-syntax/#consume-component-value
// 5.4.6. Consume a component value
fn consumeComponentValue(p: Parser) -> ComponentValue {
switch (consumeToken(p)) {
Token.Assoc => |opening| consumeSimpleBlock(p, opening),
            Token.Function => |f| consumeFunction(p, f),
Token.Preserved => |tok| ComponentValue.Preserved(tok),
}
}
// https://drafts.csswg.org/css-syntax/#consume-simple-block
// 5.4.7. Consume a simple block
//
// Note: This algorithm assumes that the current input token has already been checked to be an <{-token>, <[-token>, or <(-token>.
fn consumeSimpleBlock(p: Parser, o: AssocToken) -> ComponentValue.SimpleBlock {
const ending = getEnding(o);
const block = ComponentValue.SimpleBlock{
            assoc_token = o,
value = []ComponentValue{},
};
while (true) {
switch (consumeToken(p)) {
ending => return block,
Token.EOF => return block, // TODO: Handle parse error
else => {
reconsume(p);
block.value.append(consumeComponentValue(p));
},
}
}
}
// https://drafts.csswg.org/css-syntax/#consume-function
// 5.4.8. Consume a function
//
// Note: This algorithm assumes that the current input token has already been checked to be a <function-token>.
fn consumeFunction(p: Parser, f: Token.Function) -> ComponentValue.Function {
const fn_cv = ComponentValue.Function{
name = f,
value = []ComponentValue{},
};
while (true) {
switch (consumeToken(p)) {
AssocEnding.RParen => return fn_cv,
                Token.EOF => return fn_cv, // TODO: Handle parse error
else => {
reconsume(p);
fn_cv.value.append(consumeComponentValue(p));
}
}
}
}
} | parser/5_0_parsing.zig |
const std = @import("std");
/// Uppermost four bits of a word. Can address all memory.
pub const GlobalOpcode = enum(u4) {
/// noopr — Skips instruction.
NoOp = 0x0,
/// pgjmp — Selects the highest four bits of the address as page and unconditionally continues execution at address.
PageAndJump = 0xa,
/// fftch — Dereference address and copy content into ACC.
FarFetch = 0xb,
/// fwrte — Copy ACC into address.
FarWrite = 0xc,
/// Interprets the entirety of the word as an extended opcode.
Extend = 0xf,
};
/// Uppermost eight bits of a word. Can only use addresses within the current page.
pub const PagedOpcode = enum(u8) {
/// incby — Dereference address and add it to ACC. Overflow gets silently truncated to 65535.
IncrementBy = 0x11,
/// minus — Dereference address and subtract it from ACC. Underflow gets silently truncated to 0.
Minus = 0x12,
/// fetch — Dereference address and copy it into ACC.
Fetch = 0x20,
/// write — Copy ACC into address.
Write = 0x21,
/// jmpto — Unconditionally continue execution at address.
Jump = 0x30,
/// jmpez — Continue execution at address if ACC is 0, otherwise skip instruction.
JumpEZ = 0x31,
};
/// The lower twelve bits of a word as used in conjunction with GlobalOpcode.Extend (0xf). Can't address memory.
pub const ExtendedOpcode = enum(u12) {
/// cease — Halts execution.
Halt = 0x00f,
/// outnm — Writes the content of ACC to stderr.
OutputNumeric = 0x010,
/// outch — Writes the lower eight bits of ACC interpreted as ASCII to stderr.
OutputChar = 0x011,
/// outlf — Writes \n to stderr.
OutputLinefeed = 0x012,
    /// outhx — Writes the content of ACC formatted as hexadecimal to stderr.
OutputHex = 0x013,
/// inacc — Awaits one-word input from user and writes it into ACC.
InputACC = 0x020,
/// rando — Randomizes ACC using the default PRNG.
Randomize = 0x030,
/// augmt — Increase ACC by one. Overflow gets silently truncated to 65535.
Augment = 0x040,
/// dimin — Diminish ACC by one. Underflow gets silently truncated to 0.
Diminish = 0x041,
/// shfl4 — Shift ACC left by four bits.
ShiftLeftFour = 0x042,
    /// shfr4 — Shift ACC right by four bits.
ShiftRightFour = 0x043,
/// shfl1 — Shift ACC left by one bit.
ShiftLeftOne = 0x044,
/// shfr1 — Shift ACC right by one bit.
ShiftRightOne = 0x045,
};
pub const OidaVm = struct {
memory: [4096]u16 = [_]u16{0} ** 4096,
accumulator: u16 = 0,
instruction_ptr: u12 = 0,
page: u4 = 0,
rng: std.rand.Random,
/// Executes a single instruction
fn exec(this: *OidaVm, word: u16) void {
if (word >> 12 > 0 and word >> 12 < 0xa) {
// Paged Opcodes: 0x01..0x99
const global_address = (@as(u12, this.page) << 8) + @truncate(u8, word);
const opcode = @intCast(u8, word >> 8);
if (!enum_check(PagedOpcode, opcode)) {
this.vm_panic("Encountered invalid opcode 0x{X:0^2} at address 0x{X:0^3}.", .{ opcode, this.instruction_ptr });
}
switch (@intToEnum(PagedOpcode, opcode)) {
.IncrementBy => if (@as(usize, this.accumulator) + this.memory[global_address] >= 65535) {
this.accumulator = 65535;
} else {
this.accumulator += this.memory[global_address];
this.instruction_ptr += 1;
},
.Minus => if (this.accumulator >= this.memory[global_address]) {
this.accumulator -= this.memory[global_address];
this.instruction_ptr += 1;
} else {
this.accumulator = 0;
},
.Fetch => this.accumulator = this.memory[global_address],
.Write => this.memory[global_address] = this.accumulator,
.Jump => this.instruction_ptr = global_address - 1,
.JumpEZ => if (this.accumulator == 0) {
this.instruction_ptr = global_address - 1;
} else {
return;
},
}
} else {
// Unpaged Opcodes: 0x0000, 0xa..0xf
const global_address = @truncate(u12, word);
const opcode = @intCast(u4, word >> 12);
if (!enum_check(GlobalOpcode, opcode)) {
this.vm_panic("Encountered invalid opcode 0x{X} at address 0x{X:0^3}.", .{ opcode, this.instruction_ptr });
}
switch (@intToEnum(GlobalOpcode, opcode)) {
.NoOp => return,
.FarFetch => this.accumulator = this.memory[global_address],
.FarWrite => this.memory[global_address] = this.accumulator,
.PageAndJump => {
this.instruction_ptr = global_address - 1;
this.page = @intCast(u4, global_address >> 8);
},
.Extend => {
if (!enum_check(ExtendedOpcode, global_address)) {
this.vm_panic("Encountered invalid opcode 0xF{X:0^3} at address 0x{X:0^3}.", .{ global_address, this.instruction_ptr });
}
switch (@intToEnum(ExtendedOpcode, global_address)) {
.Halt => return, // Handled by eval()
.OutputNumeric => std.debug.warn("{}", .{this.accumulator}),
.OutputChar => std.debug.warn("{c}", .{@truncate(u8, this.accumulator)}),
.OutputHex => std.debug.warn("{X:0^4}", .{this.accumulator}),
.OutputLinefeed => std.debug.warn("\n", .{}),
.InputACC => {
var in_buffer = std.ArrayList(u8).init(std.heap.page_allocator);
defer in_buffer.deinit();
this.accumulator = while (true) : (std.debug.warn("Please use hex format: 0000-ffff\n", .{})) {
std.debug.warn("Instruction at 0x{X:0^3} requests one word input: ", .{this.instruction_ptr});
std.io.getStdIn().inStream().readUntilDelimiterArrayList(&in_buffer, '\n', 1024) catch this.vm_panic("Failed to read from stdin", .{});
break std.fmt.parseInt(u16, in_buffer.items, 16) catch continue;
} else unreachable;
},
.Randomize => {
this.accumulator = this.rng.int(u16);
},
.Augment => {
if (this.accumulator < 65535) {
this.accumulator += 1;
this.instruction_ptr += 1;
}
},
.Diminish => {
if (this.accumulator > 0) {
this.accumulator -= 1;
this.instruction_ptr += 1;
}
},
.ShiftLeftFour => {
this.accumulator = this.accumulator << 4;
},
.ShiftRightFour => {
this.accumulator = this.accumulator >> 4;
},
.ShiftLeftOne => {
this.accumulator = this.accumulator << 1;
},
.ShiftRightOne => {
this.accumulator = this.accumulator >> 1;
},
}
},
}
}
}
    /// Evaluates memory as instructions, starting at `addr`.
/// If the VM encounters invalid opcodes, it will exit with status 1
/// and dump its memory to stderr.
fn eval(this: *OidaVm, addr: u12) void {
this.instruction_ptr = addr;
while (this.instruction_ptr < 4095) : (this.instruction_ptr += 1) {
if (this.memory[this.instruction_ptr] == 0xf00f) return; // Extend-Halt opcode
this.step();
}
}
fn step(this: *OidaVm) void {
this.exec(this.memory[this.instruction_ptr]);
}
/// Writes `value` to `addr` in the VM's memory.
fn load(this: *OidaVm, addr: u12, value: u16) void {
this.memory[addr] = value;
}
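    // A minimal usage sketch (not part of the original source): two `augmt`
    // words followed by an Extend-Halt word leave 2 in the accumulator.
    //
    //     vm.load(0, 0xF040); // augmt: ACC += 1
    //     vm.load(1, 0xF040); // augmt: ACC += 1
    //     vm.load(2, 0xF00F); // extended halt, eval() stops here
    //     vm.eval(0);         // afterwards vm.accumulator == 2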
/// Dumps the VM's state to stderr.
fn dump(this: OidaVm) void {
std.debug.warn(
\\== OidaVM dump
\\Instruction Pointer: 0x{X:0^3}
\\Accumulator: 0x{X:0^4}
\\Page {X} [{X:0^3}..{X:0^3}]
\\
, .{
this.instruction_ptr,
this.accumulator,
this.page,
this.page * @as(u16, 256),
this.page * @as(u16, 256) + 255,
});
var elided = false;
std.debug.warn("Memory: \n", .{});
for (this.memory) |val, addr| {
// Check if this row is entirely made up of zeroes, if yes, skip it
const row_start = addr - addr % 8;
if (std.mem.eql(u16, this.memory[row_start .. row_start + 8], &[_]u16{0} ** 8)) {
elided = true;
continue;
}
if (elided) {
std.debug.warn(" [elided]\n", .{});
elided = false;
}
// Format instruction pointer location inverted
const fmt_iptr_inverted = if (addr == this.instruction_ptr) "\x1b[7m" else "";
// Format non-null values bold
const fmt_nonnull_bold = if (val != 0) "\x1b[1m" else "";
// Reset formatting
const fmt_reset = if (addr == this.instruction_ptr or val != 0) "\x1b[0m" else "";
// If next entry is 8, 16, … print newline
const fmt_cond_newline = if ((addr + 1) % 8 == 0) "\n" else "| ";
std.debug.warn("{}{}0x{X:0^3}: 0x{X:0^4}{} {}", .{
fmt_iptr_inverted,
fmt_nonnull_bold,
addr,
val,
fmt_reset,
fmt_cond_newline,
});
}
if (elided) std.debug.warn(" [elided]\n", .{});
std.debug.warn("== end dump ==\n", .{});
}
/// Resets the VM to starting conditions.
fn flush(this: *OidaVm) void {
this.instruction_ptr = 0;
this.accumulator = 0;
this.memory = [_]u16{0} ** 4096;
}
fn vm_panic(this: *OidaVm, comptime format: []const u8, args: var) noreturn {
std.debug.warn("\n== VM PANIC ==\n" ++ format ++ "\n", args);
this.dump();
std.process.exit(1);
}
};
const builtin = @import("builtin");
/// Checks if the supplied `enum_type` has a field with the value `tag`.
fn enum_check(comptime enum_type: type, tag: usize) bool {
inline for (@typeInfo(enum_type).Enum.fields) |field| {
if (field.value == tag) {
return true;
}
}
return false;
} | src/oidavm.zig |
const std = @import("std");
const assert = std.debug.assert;
// ----------------------- Public API -------------------------
/// Sorts the array in-place
pub fn sort(
comptime T: type,
array: []T,
context: anytype,
comptime lessThan: fn(context: @TypeOf(context), lhs: T, rhs: T) bool,
) void {
sortCommon(T, array, context, lessThan, emptySlice(T));
}
/// Sorts with an allocated buffer. If allocation of the optimal buffer
/// size fails, performs an in-place sort instead.
pub fn sortWithAllocatedBuffer(
comptime T: type,
array: []T,
context: anytype,
comptime lessThan: fn(context: @TypeOf(context), lhs: T, rhs: T) bool,
allocator: *std.mem.Allocator,
) void {
const len = findOptimalBufferLength(array.len);
var allocated = true;
const buffer = allocator.alloc(T, len) catch blk: {
allocated = false;
break :blk emptySlice(T);
};
    defer if (allocated) allocator.free(buffer);
sortCommon(T, array, context, lessThan, buffer);
}
/// Returns the optimal buffer length for a given array size.
/// Meant for use with sortExternal.
pub fn findOptimalBufferLength(array_len: usize) usize {
// find the smallest power of two that is at least
// the sqrt of the length of the array. This method
// handles overflow when the length is close to MAX_USIZE.
var block_len: usize = 4;
var block_len_sq: usize = 16;
while (block_len_sq + block_len < array_len and block_len_sq != 0) {
block_len_sq <<= 2;
block_len <<= 1;
}
return block_len;
}
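// For example, the loop above maps an array length of 100 to a buffer
// length of 16 (4 -> 8 -> 16) and an array length of 1000 to 32.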
/// Sorts using an external buffer to help make things faster.
/// Use findOptimalBufferLength to find the best buffer length for a given size.
/// Buffers longer than findOptimalBufferLength are still optimal.
/// Buffers smaller than findOptimalBufferLength may still be helpful.
pub fn sortExternal(
comptime T: type,
array: []T,
context: anytype,
comptime lessThan: fn(context:@TypeOf(context), lhs: T, rhs: T) bool,
buffer: []T,
) void {
sortCommon(T, array, context, lessThan, buffer);
}
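// A minimal usage sketch of the public API (not part of the original file).
// It assumes a plain ascending comparator over u32 with a void context, and
// checks the result with `assert` to stay independent of the std.testing API.
fn sketchAscU32(context: void, lhs: u32, rhs: u32) bool {
    _ = context;
    return lhs < rhs;
}

test "public api usage sketch" {
    // In-place sort with no scratch buffer at all.
    var small = [_]u32{ 9, 3, 7, 1, 8, 2, 6, 0, 5, 4 };
    sort(u32, &small, {}, sketchAscU32);
    var i: usize = 1;
    while (i < small.len) : (i += 1) assert(small[i - 1] <= small[i]);

    // Sort with an external scratch buffer. Any length helps;
    // findOptimalBufferLength gives the sweet spot for a given array size.
    var scratch: [16]u32 = undefined;
    var big = [_]u32{ 4, 17, 2, 9, 19, 1, 0, 7, 3, 5, 8, 6, 12, 11, 10, 18, 15, 13, 16, 14 };
    sortExternal(u32, &big, {}, sketchAscU32, &scratch);
    i = 1;
    while (i < big.len) : (i += 1) assert(big[i - 1] <= big[i]);
}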
// ---------------------------- Implementation -----------------------------
const tc = @import("tracy.zig");
// TODO-OPT: There are many places where this implementation could specify
// noalias but does not. There may be a perf benefit to doing so, especially
// when keys are passed separately from data, or if the comparator is opaque.
//
// TODO-OPT: There are many places where this implementation divides by a
// power of two. We could consider passing around the log2 value and using
// shifts instead, but these divisions are not usually on critical paths so
// it may not be worth very much.
//
// TODO-ZEN: This implementation is not very consistent between using slices
// and using ptr + len parameters. It may be worth using pointers in more
// places to make the indexing nicer, especially in places where there is
// a junk buffer before the valid data.
/// This sort can handle three different methods for buffer management.
const BufferMode = enum {
/// There are not enough unique values in the array to make an
/// inline buffer. Fall back to in place merge sorts with no
/// buffer.
no_buffer,
/// There is an inline buffer which may be used to accelerate
/// merge sorts, but must be preserved.
inline_buffer,
/// An external buffer has been provided, don't worry about
/// preserving values in the internal buffer.
external_buffer,
};
/// Arrays smaller than this will do insertion sort instead
/// TODO-OPT: Tune this value.
const TUNE_TOO_SMALL = 16;
/// If the array has fewer than this many unique elements, use a sort that
/// works better on arrays with few unique values.
/// TODO-OPT: Tune this value.
const TUNE_TOO_FEW_KEYS = 4;
/// If the external buffer is smaller than this many items,
/// it's not worth the extra overhead of the out of place merge.
/// This must always be at least 2, otherwise the out of place
/// merge will crash. If the buffer is less than 4 items,
/// the out of place merge degrades to the in place merge plus
/// some checks, so it's not worth it.
/// TODO-OPT: Tune this value.
const TUNE_MIN_BUILD_BLOCKS_BUFFER = 4;
/// If this is true, when running with an external buffer,
/// the junk buffer will be filled with undefined.
const DEBUG_SET_UNDEFINED = false;
/// If this is true, the sort will validate that pieces are sorted
/// at key points during the algorithm.
const DEBUG_VALIDATE = false;
/// Root sorting function, used by all public api points.
fn sortCommon(
comptime T: type,
array: []T,
context: anytype,
comptime lessThanFn: fn(context:@TypeOf(context), lhs: T, rhs: T) bool,
buffer: []T,
) void {
const tcc = tc.Zone(@src());
defer tcc.End();
const Comparator = struct {
//! This struct just cleans up the parameter lists of inner functions.
context: @TypeOf(context),
pub inline fn lessThan(self: @This(), lhs: T, rhs: T) bool {
// TODO-API: do some tests to see if a compare function
// can actually be performant here
return lessThanFn(self.context, lhs, rhs);
}
pub inline fn compare(self: @This(), lhs: T, rhs: T) std.math.Order {
// TODO-OPT: use an actual compare function here
if (lessThanFn(self.context, lhs, rhs)) return .lt;
if (lessThanFn(self.context, rhs, lhs)) return .gt;
return .eq;
}
};
const cmp = Comparator{ .context = context };
// If the array is too small, just do insertion sort.
if (array.len < TUNE_TOO_SMALL) {
insertSort(T, array, cmp);
return;
}
// This sort works by building a "junk buffer" at the beginning
// of the array. The junk buffer will be used as scratch space
// to accelerate other parts of the sort. We will need to rearrange
// the items in the junk buffer freely. In order to do that
// and maintain stability, we require that all items in the junk
// buffer are unique.
// The rest of the list will be split up into fixed-size blocks.
// These blocks must be a power of two in size, so that we can
// use a fast scrolling merge sort to create them. As shown in
// the paper, the best size for this list is the square root of
// the length of the array.
// This calculates the smallest power of two that is greater
// than or equal to sqrt(array.len).
var block_len = findOptimalBufferLength(array.len);
// We will also need a key item at the beginning of the array
// for each block, which we will use to maintain stability in
// the sort.
// key_len = ceil(array.len / block_len)
// this is safe because array.len >= 16 (and therefore > 0).
// This could be a floor division instead if the use is updated.
// TODO-OPT: Test a bunch of cases with and without this.
// Does it make a meaningful difference?
var key_len = ((array.len - 1) / block_len) + 1;
const ideal_external_buffer = buffer.len >= block_len;
// So the total number of unique items needed at the start of
// the list is the sum of the number of keys and the block length.
// We only need to include the junk buffer in the keys if we
// don't have a large enough external buffer. Otherwise the
// junk buffer is stable so it doesn't need to be unique.
var ideal_keys = if (ideal_external_buffer) key_len else (key_len + block_len);
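    // Worked example: for a 100-element array with no external buffer,
    // block_len is 16, key_len is ((100 - 1) / 16) + 1 = 7, and we will
    // try to collect ideal_keys = 7 + 16 = 23 unique values up front.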
// Now we can go find those keys, and move them to the front of
// the array.
// TODO-OPT: We can probably use the external buffer to speed this
// up if present.
const keys_found = collectKeys(T, array, ideal_keys, cmp);
verifyKeys(T, array[0..keys_found], cmp);
// In the event that the array contains very many duplicate elements,
// we may not be able to find enough unique keys. In that case, use
// one of two fallback strategies.
const ideal_buffer = keys_found >= ideal_keys;
if (!ideal_buffer) {
// If we didn't find enough keys, use one of two fallback strategies.
if (keys_found < TUNE_TOO_FEW_KEYS) {
// There are very few unique items, use a different sort.
// TODO-OPT: Should this value be a percentage of the array length instead?
// Or some more complicated calculation?
// TODO-OPT: We can probably use the external buffer to speed this up if present.
lazyStableSort(T, array, cmp);
return;
} else {
// Strategy 2: Block swaps with small scrolling buffer and/or lazy merges
// In this case, we will use the buffer we have to try to optimize
// things for a bit, and then fall back to the keyed selection +
// in place merge sort. Start by reducing our initial
// block length to be the size of our actual buffer.
// TODO-OPT: key_len is a power of two, consider using @clz instead.
// TODO-OPT: Without an external buffer, the optimal assignment is
// to use half of the keys as keys and half as junk, but with an
// external buffer it is better to use the external buffer as
// junk and all of the keys as keys. Later code assumes that
// these two values are related by the in place optimal relation,
// but we could pass separate parameters for these two values in
// the external case to speed up this calculation.
key_len = std.math.floorPowerOfTwo(usize, keys_found);
block_len = 0;
}
}
    // In the normal case, this is the end of the keys buffer and
    // is equal to keys_found. In strategy 2, block_len is zero, so
    // this is just the reduced key_len. With an ideal external
    // buffer it covers only the junk buffer (block_len items), even
    // though we may have found only key_len unique keys.
var first_data_idx = if (ideal_external_buffer and ideal_buffer) block_len else (block_len + key_len);
// Our initial blocks are normally the block length.
// If we are in strategy 2, we will use our shortened
// key length instead, to account for our limited buffer.
// TODO-OPT: When we use all available space with an external
// buffer in strategy 2, we will also need to update this.
var subarray_len = if (ideal_buffer) block_len else key_len;
// First, we need to sort each block. This algorithm uses either
// the junk buffer or an external buffer to do an in place merge
// sort, displacing the junk buffer and moving it through the array.
// When all is said and done, the junk buffer will be back where
// it started.
// TODO-OPT: With an ideal buffer, we don't even need to write the
// junk buffer back to the array, we can keep it external forever.
// TODO-ZEN: This `- subarray_len` is awkward, maybe use an array
// pointer and data length instead?
if (buffer.len >= TUNE_MIN_BUILD_BLOCKS_BUFFER) {
buildBlocksExternal(T, array[first_data_idx - subarray_len..], subarray_len, buffer, cmp);
} else {
buildBlocksInPlace(T, array[first_data_idx - subarray_len..], subarray_len, cmp);
}
verifySorted(T, array[first_data_idx..], 2*subarray_len, cmp);
// At this point, pairs of blocks are sorted, so double the length.
subarray_len *= 2;
// save off our data length for future use
const data_len = array.len - first_data_idx;
// At the start of this loop, the array always looks as follows:
// the keys and junk buffer are at the start of the array, unsorted.
// They are followed by subarrays of length subarray_len, which are
// each sorted. In each iteration, we will combine and sort each
// pair of subarrays, to yield larger subarrays.
while (subarray_len < data_len) : (subarray_len *= 2) {
var curr_block_len = block_len;
var mode: BufferMode = .inline_buffer;
if (!ideal_buffer) {
// Even if we don't have the ideal buffer, we might still
// be able to use a junk buffer for this pass. We just
// need to make sure that we have enough keys for both the
// selection sort and the junk buffer. We need to divvy up
// our keys for use as block keys vs junk buffer.
// since the junk buffer is the size of a block, and a
// subarray is the remaining number of keys times the
// block size, the maximum subarray size we can support
// is (junk_keys * block_keys) / 2. The maximum of this
// function happens when junk_keys == block_keys, so our
// check is (subarray_len * 2) <= (key_len / 2)^2.
// TODO-OPT: We can get away with a slightly smaller key
// buffer for the last iteration, because we don't use
// the full subarray_len.
const key_buffer = key_len / 2;
if (key_buffer * key_buffer >= subarray_len * 2) {
curr_block_len = key_buffer;
} else {
// If we can't use the scrolling buffer, we are going
// to be doing in-place merging. Since that's expensive,
// we want to use the smallest block size possible,
// and put more work on the selection sort.
curr_block_len = (subarray_len * 2) / key_len;
mode = .no_buffer;
}
}
// If we have enough external buffer to put the junk buffer
// in it, (and to hold all needed keys), use that to optimize
// combineBlocks.
// TODO-OPT: The second check here can only be different from
// the first in strategy 2, in all other cases they are equal.
if (buffer.len >= curr_block_len and buffer.len >= (subarray_len * 2 / curr_block_len)) {
mode = .external_buffer;
}
// TODO-OPT: We don't need to do this repeatedly if we have an ideal buffer.
if (mode == .external_buffer) {
copyNoAlias(T, buffer.ptr, array.ptr, curr_block_len);
setUndefined(T, array[0..curr_block_len]);
moveBackToClobberFront(T, array[0..first_data_idx], first_data_idx - curr_block_len);
}
combineBlocks(T, array, first_data_idx, subarray_len, curr_block_len, mode, buffer, cmp);
verifySorted(T, array[first_data_idx..], 2*subarray_len, cmp);
verifyKeys(T, array[0..key_len], cmp);
// TODO-OPT: We don't need to do this repeatedly if we have an ideal buffer.
if (mode == .external_buffer) {
moveFrontToClobberBack(T, array[0..first_data_idx], first_data_idx - curr_block_len);
copyNoAlias(T, array.ptr, buffer.ptr, curr_block_len);
setUndefined(T, buffer[0..curr_block_len]);
}
}
// When all is said and done, we just have the keys at the beginning
// of the array, and one sorted subarray of everything else that follows
// it. We'll finish by sorting the keys, and then performing an in-place
// merge sort of the keys and everything else.
// TODO-OPT: We could maybe do a better merge sort taking advantage of the
// external buffer. Even though it's not big enough to hold the entire
// keys array, it can still help reduce swaps (or let us do them as blocks).
insertSort(T, array[0..first_data_idx], cmp);
verifySorted(T, array[0..first_data_idx], 0, cmp);
verifySorted(T, array[first_data_idx..], 0, cmp);
lazyMerge(T, array, first_data_idx, cmp);
verifySorted(T, array, 0, cmp);
}
/// This function searches the list for the first ideal_keys unique elements.
/// It puts those items at the beginning of the array in sorted order, and
/// returns the number of unique values that it found to put in that buffer.
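/// For example, collecting 4 keys from [2, 7, 2, 9, 4, 7, 1] leaves the
/// array as [2, 4, 7, 9, 2, 7, 1] and returns 4: the first occurrences of
/// four distinct values sit sorted at the front, and the remaining elements
/// keep their original relative order.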
/// TODO-OPT: The keys at the beginning do not actually need to be in sorted order.
/// There might be a way to optimize this that clobbers order.
/// TODO-OPT: With an external buffer, we don't need to rewind or preserve keys,
/// we just need to move any not-chosen keys up past the key area.
fn collectKeys(comptime T: type, array: []T, ideal_keys: usize, cmp: anytype) usize {
const tcc = tc.Zone(@src());
defer tcc.End();
// The number of keys we have found so far. Since the first
// item in the array is always unique, we always count it.
var keys_found: usize = 1;
// The index of the array of the start of the key buffer.
// The sorted buffer of keys will move around in the array,
// but it starts at the very beginning.
// This rotation is only necessary to preserve stability.
// Without that, we could keep the list at the beginning
// and swap displaced elements with keys. But since we
// need stability, we will move the keys list instead.
// Moving the small keys list is a lot faster in the worst
// case than moving the non-keys to preserve order.
var first_key_idx: usize = 0;
// The index in the array of the item we are currently considering
// adding as a key. The key buffer is always somewhere on the left
// of this, but there may be non-key elements between the buffer and
// this index.
var curr_key_idx: usize = 1;
// We will check every item in the list in order to see if it can be a key.
while (curr_key_idx < array.len) : (curr_key_idx += 1) {
// this is the current set of unique keys. It moves over the course of
// this search, which is why we have first_key_idx. But it is always
// sorted in ascending order.
const keys = array[first_key_idx..][0..keys_found];
// determine the index in the key buffer where we would insert this key.
// TODO-OPT: If we use a compare function that can recognize equals,
// this search can early out on equals, because we reject duplicate keys.
const insert_key_idx = binarySearchLeft(T, keys, array[curr_key_idx], cmp);
// at this point we know that array[curr_key_idx] <= keys[insert_key_idx].
// So we can check if the key we are considering is unique by checking
// if array[curr_key_idx] < keys[insert_key_idx].
if (insert_key_idx == keys_found or
cmp.lessThan(array[curr_key_idx], keys[insert_key_idx])) {
// Move the keys list to butt up against the current key.
// Moving the keys list like this instead of inserting at
// the beginning of the array saves a ton of copies.
// Note that this invalidates `keys`.
rotate(T, array[first_key_idx..curr_key_idx], keys_found);
first_key_idx = curr_key_idx - keys_found;
// Now use rotate to insert the new key at the right position.
const array_insert_idx = first_key_idx + insert_key_idx;
const keys_after_insert = keys_found - insert_key_idx;
rotate(T, array[array_insert_idx..][0..keys_after_insert + 1], keys_after_insert);
// If we've found enough keys, we don't need to keep looking. Break immediately.
keys_found += 1;
if (keys_found >= ideal_keys) break;
}
}
// Ok, now we need to move the keys buffer back to the beginning of the array.
rotate(T, array[0..first_key_idx + keys_found], first_key_idx);
return keys_found;
}
/// This function sorts every block of block_len elements, starting at index block_len.
/// It uses a junk buffer of block_len elements at the beginning to accelerate the sort.
/// This buffer may be reordered, but will be back at the beginning of the array when
/// this function returns.
/// TODO-OPT: We should probably use a small stack buffer of 8 or 16 elements
/// and always call buildBlocksExternal, to optimize the first few merges.
/// As a bonus, this would let us delete the pairwiseSwaps function.
fn buildBlocksInPlace(comptime T: type, array: []T, block_len: usize, cmp: anytype) void {
const tcc = tc.Zone(@src());
defer tcc.End();
// We'll use iterative merge sorts to perform this sort.
// Start with an optimized version of merge sort with a size of 1.
// This function will also move two items of the junk buffer to the
// end of the array, not just one.
const use_pairwise_swaps = false;
if (use_pairwise_swaps) {
pairwiseSwaps(T, array[block_len-2..], cmp);
buildInPlace(T, array, 2, block_len, cmp);
} else {
const merge_len = 2*block_len;
if (merge_len <= 16) {
var start = block_len;
while (start + merge_len <= array.len) : (start += merge_len) {
insertSort(T, array[start..][0..merge_len], cmp);
}
if (start < array.len) {
insertSort(T, array[start..], cmp);
}
verifyKeys(T, array[0..block_len], cmp);
} else {
var start = block_len;
while (start + 16 <= array.len) : (start += 16) {
// TODO-OPT: See if an in place insert sort and swap
// backwards is faster.
// TODO-OPT: Also do this with an external buffer.
// TODO-OPT: We could optimize the crap out of this at
// the assembly level.
blockSwap(T, array.ptr + (start - 16), array.ptr + start, 16);
insertSort(T, array[start-16..start], cmp);
}
if (start < array.len) {
moveBackToFront(T, array[start - 16..], array.len - start);
insertSort(T, array[start - 16..][0..array.len - start], cmp);
}
// Then continue doing larger merge sorts and moving items to the end
// of the array. The last merge sort is done in reverse to move the
// junk buffer back to the beginning.
buildInPlace(T, array, 16, block_len, cmp);
verifyKeys(T, array[0..block_len], cmp);
}
}
}
/// Builds merge chunks in-place. This function accepts an array that consists of
/// a set of keys followed by data items. It must start with exactly
/// `block_len - already_merged_size` keys, followed by all the data items, followed by
/// `already_merged_size` more keys. In other words, there are exactly block_len keys,
/// but already_merged_size of them have been moved to the end of the array already.
/// Additionally, before calling this function, every `already_merged_size`
/// items after the starting keys must be sorted. The last partial block must also be sorted.
/// After calling this function, all of the keys will be at the beginning of the array,
/// and every chunk of 2*block_len items after the keys will be sorted.
/// The keys may be reordered during this process. block_len and already_merged_size
/// must both be powers of two.
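/// For instance, with block_len = 16 and already_merged_size = 2 the array
/// must start with 14 keys, then the data (every adjacent pair already
/// sorted), then the remaining 2 keys; afterwards all 16 keys are back at
/// the front and every run of 32 data items is sorted.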
fn buildInPlace(comptime T: type, array: []T, already_merged_size: usize, block_len: usize, cmp: anytype) void {
const tcc = tc.Zone(@src());
defer tcc.End();
assert(std.math.isPowerOfTwo(block_len));
assert(std.math.isPowerOfTwo(already_merged_size));
assert(already_merged_size <= block_len);
// start_position marks the beginning of the data elements.
// It starts at block_len, moved back by the number of keys
// that have already been merged to the back of the array.
// As we move keys to the back, this will move left.
var start_position = block_len - already_merged_size;
const data_len = array.len - block_len;
// We'll start with a merge size of smallest_merged_size items, and increase that until
// it contains the whole list. For each pass, we will merge pairs of chunks,
// and also move some keys to the end.
var merge_len = already_merged_size;
while (merge_len < block_len) : (merge_len *= 2) {
const tc_merge = tc.ZoneN(@src(), "merge");
defer tc_merge.End();
tc_merge.Value(merge_len);
const full_merge_len = 2 * merge_len;
// Our buffer will be the same size as our merge chunk. That's how many
// keys we will move in one pass over the array.
const buffer_len = merge_len;
// We'll consider each pair of merge chunks, and merge them together into one chunk.
// While we do that, we'll propagate some keys from the front to the back of the list.
var merge_index = start_position;
var remaining_unmerged = data_len;
while (remaining_unmerged >= full_merge_len) : ({
merge_index += full_merge_len;
remaining_unmerged -= full_merge_len;
}) {
mergeForwards(T, array.ptr + merge_index, buffer_len, merge_len, merge_len, cmp);
}
// We need special handling for partial blocks at the end
if (remaining_unmerged > merge_len) {
// If we have more than one block, do a merge with the partial last block.
mergeForwards(T, array.ptr + merge_index, buffer_len, merge_len, remaining_unmerged - merge_len, cmp);
} else {
// Otherwise, rotate the buffer past the sorted chunk of remaining array.
// TODO-OPT: Rotate preserves the order of both halves, but we only care about
// the order of the right half. The left half can be completely unordered. So
// there's room here for a faster implementation.
rotate(T, array[merge_index-buffer_len..][0..merge_len + remaining_unmerged], merge_len);
}
// Finally, move the start position back to get new keys next iteration
start_position -= merge_len;
}
assert(start_position == 0);
// Now that we've created sorted chunks of size block_len, we need
// to do the last chunk. For this one, we'll go backwards through the
// array, moving the keys back to the beginning.
const full_merge_len = 2 * block_len;
// TODO-OPT: Pretty sure this is a power of two, we can use a mask here instead.
const last_block_len = data_len % full_merge_len;
var remaining_full_blocks = data_len / full_merge_len;
const final_offset = data_len - last_block_len;
// First we have to consider the special case of the partially sorted block
// at the end of the array. If it's smaller than a block, we can just rotate it.
// Otherwise, we need to do a merge.
if (last_block_len <= block_len) {
rotate(T, array[final_offset..], last_block_len);
} else {
mergeBackwards(T, array.ptr + final_offset, block_len, last_block_len - block_len, block_len, cmp);
}
// Now continue to merge backwards through the rest of the array, back to the beginning.
var merge_index = final_offset;
while (remaining_full_blocks > 0) : (remaining_full_blocks -= 1) {
merge_index -= full_merge_len;
mergeBackwards(T, array.ptr + merge_index, block_len, block_len, block_len, cmp);
}
}
/// This is similar to buildBlocksInPlace, but uses an external storage buffer
/// to reduce swaps. It will copy junk items into the external array, and then
/// use a variant of the merge operation which clobbers the junk buffer. Once
/// the merge size exceeds the size of the external buffer, it falls back to the
/// in-place version.
/// TODO-ZEN: Combine this with buildBlocksInPlace
fn buildBlocksExternal(comptime T: type, array: []T, block_len: usize, buffer: []T, cmp: anytype) void {
const tcc = tc.Zone(@src());
defer tcc.End();
// round the buffer length down to a power of two
var pow2_buffer_len = if (buffer.len >= block_len) block_len
else std.math.floorPowerOfTwo(usize, buffer.len);
// copy as many keys as we can into the external block
copyNoAlias(T, buffer.ptr, array.ptr + (block_len - pow2_buffer_len), pow2_buffer_len);
setUndefined(T, (array.ptr + block_len - pow2_buffer_len)[0..pow2_buffer_len]);
// Use a separate code path for pairs because it's much faster
assert(pow2_buffer_len >= 2);
var start_position = block_len - 2;
pairwiseWrites(T, array[start_position..], cmp);
// Then do forward merges just like in the in place version,
// but clobber the junk buffer instead of preserving it.
// TODO-ZEN: There is probably a better expression of this indexing,
// see lazyStableSort for inspiration.
var merge_len: usize = 2;
while (merge_len < pow2_buffer_len) : (merge_len *= 2) {
const full_merge_len = merge_len * 2;
var remaining = array.len - block_len;
var merge_position = start_position;
while (remaining >= full_merge_len) : ({
remaining -= full_merge_len;
merge_position += full_merge_len;
}) {
mergeForwardExternal(T, array.ptr + merge_position, merge_len, merge_len, merge_len, cmp);
}
if (remaining > merge_len) {
mergeForwardExternal(T, array.ptr + merge_position, merge_len, merge_len, remaining - merge_len, cmp);
} else {
copyNoAlias(T, array.ptr + merge_position - merge_len, array.ptr + merge_position, remaining);
setUndefined(T, (array.ptr + merge_position)[0..remaining]);
}
start_position -= merge_len;
}
assert(start_position + pow2_buffer_len == block_len);
// If our buffer can hold the whole block size, we can
// do an out of place reverse merge as well.
if (pow2_buffer_len == block_len) {
const full_merge_len = 2 * block_len;
const data_len = array.len - block_len;
// TODO-OPT: Pretty sure this is a power of two, we can use a mask here instead.
const last_block_len = data_len % full_merge_len;
var remaining_full_blocks = data_len / full_merge_len;
const final_offset = data_len - last_block_len;
// First we have to consider the special case of the partially sorted block
// at the end of the array. If it's smaller than a block, we can just rotate it.
// Otherwise, we need to do a merge.
if (last_block_len <= block_len) {
rotate(T, array[final_offset..], last_block_len);
} else {
mergeBackwardExternal(T, array.ptr + final_offset, block_len, last_block_len - block_len, block_len, cmp);
}
// Now continue to merge backwards through the rest of the array, back to the beginning.
var merge_index = final_offset;
while (remaining_full_blocks > 0) : (remaining_full_blocks -= 1) {
merge_index -= full_merge_len;
mergeBackwardExternal(T, array.ptr + merge_index, block_len, block_len, block_len, cmp);
}
// Then restore the buffer.
// TODO-OPT: This isn't necessary, we can keep using the buffer.
copyNoAlias(T, array.ptr + (block_len - pow2_buffer_len), buffer.ptr, pow2_buffer_len);
setUndefined(T, buffer[0..pow2_buffer_len]);
} else {
copyNoAlias(T, array.ptr + (array.len - pow2_buffer_len), buffer.ptr, pow2_buffer_len);
setUndefined(T, buffer[0..pow2_buffer_len]);
// TODO-OPT: If the external buffer is large enough, we could do an
// out-of-place reverse merge for even more speed.
buildInPlace(T, array, merge_len, block_len, cmp);
}
}
/// This function inspects every pair of elements in the given array,
/// starting with the third and fourth element, and moving forward by
/// two each time. It puts each pair in the correct order, and moves
/// it back two elements. The first two displaced elements are moved
/// to the end, but may not necessarily be in the correct order.
/// The caller should ensure that the first two elements are not equal,
/// otherwise the sort may not be stable.
/// The passed array must contain at least two elements.
/// So with this array:
/// [1 2 6 5 3 4 8 7 9]
/// It will sort it to
/// [5 6 3 4 7 8 9 2 1].
fn pairwiseSwaps(comptime T: type, array: []T, cmp: anytype) void {
// save the keys to the stack
const first_key = array[0];
const second_key = array[1];
// move all the items down two, while sorting them
pairwiseWrites(T, array, cmp);
// then stamp the saved keys on the end
array[array.len-2] = first_key;
array[array.len-1] = second_key;
}
/// This function inspects every pair of elements in the given array,
/// starting with the third and fourth element, and moving forward by
/// two each time. It puts each pair in the correct order, and moves
/// it back two elements. The first two displaced elements are clobbered.
/// So with this array:
/// [1 2 6 5 3 4 8 7 9]
/// It will sort it to
/// [5 6 3 4 7 8 9 X X]
fn pairwiseWrites(comptime T: type, array: []T, cmp: anytype) void {
const tcc = tc.Zone(@src());
defer tcc.End();
var index: usize = 3;
while (index < array.len) : (index += 2) {
// check if the items are out of order, ensuring that equal items
// are considered to be in order.
// TODO-OPT: This wouldn't be difficult to write in a branchless way,
// see if that makes a difference.
if (cmp.lessThan(array[index], array[index-1])) {
// here they are out of order, swap them as we move them back.
array[index - 3] = array[index];
array[index - 2] = array[index - 1];
} else {
// here they are in order, copy them back preserving order.
array[index - 3] = array[index - 1];
array[index - 2] = array[index];
}
}
// check if there's one extra item on the end
if (index == array.len) {
array[index - 3] = array[index-1];
}
}
/// This function iterates pairs of subarrays of length subarray_len,
/// starting at first_block_idx, and including the partial subarray
/// at the end of the list. Each subarray must be sorted, and after
/// this call the pairs will be combined into larger sorted subarrays.
/// There must be at least one item in the array before first_block_idx
/// for each block in the array, excluding the final partial block.
/// If the mode uses a junk buffer (.inline_buffer or .external_buffer),
/// there must also be at least block_len more elements for the junk
/// buffer, which will be used to accelerate the merge operation.
fn combineBlocks(
comptime T: type,
array: []T,
first_block_idx: usize,
subarray_len: usize,
block_len: usize,
mode: BufferMode,
external_buffer: []T,
cmp: anytype,
) void {
const tcc = tc.Zone(@src());
defer tcc.End();
tcc.Value(subarray_len);
// The total number of data items, excluding keys and the junk buffer
const data_len = array.len - first_block_idx;
// The length of a merged subarray
const full_merge_len = 2 * subarray_len;
// The number of full merges we will do
var merge_count = data_len / full_merge_len;
// The number of items that do not fit into a full merge
const leftover_count = data_len % full_merge_len;
// The number of blocks in a full merge
const block_count = full_merge_len / block_len;
// The number of blocks in a subarray
const half_block_count = @divExact(block_count, 2);
// The block index of the first block of the second subarray
const initial_median = half_block_count;
// The length of our inline junk buffer.
const junk_len = if (mode == .no_buffer) 0 else block_len;
// The array from which we will pull keys.
const keys_base = if (mode == .external_buffer) external_buffer
else array[0..first_block_idx - junk_len];
// The index of the first subarray to be merged
// TODO-ZEN: Consider making merge_start a pointer
var merge_start: usize = first_block_idx;
// Iterate through pairs of subarrays, merging them.
// If we are using a junk buffer, this operation also
// transfers it from the left to the right.
while (merge_count > 0) : (merge_count -= 1) {
// The keys are used to preserve stability in blockSelectSort.
const keys = keys_base[0..block_count];
// We need to make sure they are in order.
insertSort(T, keys, cmp);
verifySortedStrict(T, keys, cmp);
// blockSelectSort will do a selection sort on the blocks in the array.
// It also permutes the keys to match. This puts blocks in closer to the
// right order, allowing us to combine them with mergeBlocks.
const median_key = blockSelectSort(T, keys.ptr, array.ptr + merge_start, initial_median, block_count, block_len, cmp);
// TODO-ZIG: Replace this with inline switch when that's implemented
// TODO-ZEN: Make the parameters the same for all of these in preparation
switch (mode) {
.no_buffer => mergeBlocks(T, keys.ptr, median_key, array.ptr + merge_start, block_count, block_len, 0, 0, .no_buffer, cmp),
.inline_buffer => mergeBlocks(T, keys.ptr, median_key, array.ptr + merge_start, block_count, block_len, 0, 0, .inline_buffer, cmp),
.external_buffer => mergeBlocks(T, keys.ptr, median_key, array.ptr + merge_start, block_count, block_len, 0, 0, .external_buffer, cmp),
}
verifySorted(T, array[merge_start - junk_len..][0..full_merge_len], 0, cmp);
merge_start += full_merge_len;
}
// If the number of left over items is greater than the size of a subarray,
// we need to merge them. This is a more difficult merge because the last
// subarray cannot participate in the selection sort. Because of that,
// we cannot necessarily merge all full blocks, because some of them are
// not ordered correctly with regard to the trailer.
if (leftover_count > subarray_len) {
const trailer_blocks = leftover_count / block_len;
const trailer_items = leftover_count % block_len;
// The +1 here adds an extra key which tracks the trailer items.
// TODO-OPT: I don't think we actually need a key for the trailer.
// hmm, we do always need one for the median though.
const keys = keys_base[0..trailer_blocks+1];
insertSort(T, keys, cmp);
verifySortedStrict(T, keys, cmp);
// perform our selection sort as usual, including even blocks that
// may not end up being part of the standard sort. Only do this if
// we have full blocks on the right side. If it's just a partial block,
// everything except the partial block is already sorted (because it's
// all the left side).
const median_key = if (trailer_blocks <= half_block_count) initial_median
else blockSelectSort(T, keys.ptr, array.ptr + merge_start, initial_median, trailer_blocks, block_len, cmp);
// This counts the number of full blocks at the end of the trailer
// that cannot participate in the standard merge because they come
// after the start of the trailer in sorted order.
// TODO-OPT: We know that any unfit blocks must come from the left
// array. This means we could find the unfit blocks before doing the
// selection sort, and copy them directly to the end of the array.
// This allows for a smaller selection sort, and means that the two
// largest items (which will likely be swapped multiple times during
// the sort) no longer participate. It also means we need to insertion
// sort three fewer keys :P
const unfit_trailer_blocks: usize = if (trailer_items > 0) countLastMergeBlocks(T, array[merge_start..], trailer_blocks, block_len, cmp) else 0;
// The number of blocks that do participate in the normal merge follows immediately.
const normal_blocks = trailer_blocks - unfit_trailer_blocks;
// If there are no normal blocks, the trailer comes first.
if (normal_blocks == 0) {
// Note that this can only happen if there are no trailer blocks
// after the last full subarray. If there were, those blocks
// would be normal blocks.
assert(trailer_blocks == half_block_count);
// In this case, the selection sort did nothing, and the blocks
// are all in sorted order. So we just need to merge the trailer
// items with the sorted left half.
const left_len = trailer_blocks * block_len;
const right_len = trailer_items;
// TODO-ZIG: Change mode to an inline parameter once those are
// implemented, to avoid this switch.
switch (mode) {
.no_buffer => mergeIntoPrecedingJunk(T, array.ptr + merge_start, junk_len, left_len, trailer_items, .no_buffer, cmp),
.inline_buffer => mergeIntoPrecedingJunk(T, array.ptr + merge_start, junk_len, left_len, trailer_items, .inline_buffer, cmp),
.external_buffer => mergeIntoPrecedingJunk(T, array.ptr + merge_start, junk_len, left_len, trailer_items, .external_buffer, cmp),
}
verifySorted(T, array.ptr[merge_start - junk_len..array.len - junk_len], 0, cmp);
} else {
const unfit_items = block_len * unfit_trailer_blocks + trailer_items;
// Common case, some blocks participate in the merge.
// TODO-ZIG: Replace this with inline switch when that's implemented
switch (mode) {
.no_buffer => mergeBlocks(T, keys.ptr, median_key, array.ptr + merge_start, normal_blocks, block_len, unfit_trailer_blocks, unfit_items, .no_buffer, cmp),
.inline_buffer => mergeBlocks(T, keys.ptr, median_key, array.ptr + merge_start, normal_blocks, block_len, unfit_trailer_blocks, unfit_items, .inline_buffer, cmp),
.external_buffer => mergeBlocks(T, keys.ptr, median_key, array.ptr + merge_start, normal_blocks, block_len, unfit_trailer_blocks, unfit_items, .external_buffer, cmp),
}
verifySorted(T, array.ptr[merge_start - junk_len..array.len - junk_len], 0, cmp);
}
merge_start += leftover_count;
assert(merge_start == array.len);
}
const reset_tcc = tc.ZoneN(@src(), "BufferReset");
defer reset_tcc.End();
// If we have a junk buffer, we need to move it back to the beginning of the list.
// TODO-ZIG: Replace this with an inline switch or parameter when those are in.
switch (mode) {
.no_buffer => moveFrontPastJunk(T, array[first_block_idx - junk_len..merge_start], merge_start - first_block_idx, .no_buffer),
.inline_buffer => moveFrontPastJunk(T, array[first_block_idx - junk_len..merge_start], merge_start - first_block_idx, .inline_buffer),
.external_buffer => moveFrontPastJunk(T, array[first_block_idx - junk_len..merge_start], merge_start - first_block_idx, .external_buffer),
}
verifySorted(T, array[first_block_idx..], full_merge_len, cmp);
}
/// Performs a selection sort on the blocks in the array. Only the first item
/// in each sorted block is used for comparison. If the first items in two blocks
/// tie, the keys are used to break the tie. This keeps the sort stable. But
/// it also means we need a full comparison function, not just a less than function,
/// so that we can detect ties. Swaps made to the block data are also made to the keys.
/// The initial_median parameter is the index of a particular block. That index is tracked
/// through swaps made by this function, and the sorted index of that block is returned.
/// This function does not handle the partial block on the end of the array. That must be
/// done externally.
/// Note that the blocks_ptr parameter points to the first valid block. It does not include
/// the junk buffer.
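/// For example, with four blocks whose first items are [3][5][1][3] (the
/// first two blocks from the left subarray, the last two from the right),
/// the blocks end up ordered [1][3][3][5]; the two blocks that start with 3
/// keep their left-before-right order because the left one's key sorts
/// first (the caller pre-sorts the keys ascending).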
fn blockSelectSort(comptime T: type, noalias keys_ptr: [*]T, noalias blocks_ptr: [*]T, initial_median: usize, block_count: usize, block_len: usize, cmp: anytype) usize {
const tcc = tc.Zone(@src());
defer tcc.End();
const keys = keys_ptr[0..block_count];
const blocks = blocks_ptr[0..block_count * block_len];
assert(initial_median < block_count);
// track the index of this block through the sort
var median_key = initial_median;
const debug_median_value = if (DEBUG_VALIDATE) keys[initial_median] else {};
// blocks to the left of left_block are sorted.
var left_block: usize = 0;
while (left_block < block_count) : (left_block += 1) {
// Search for the smallest block after or including this block
var smallest_block = left_block;
var right_block = left_block + 1;
while (right_block < block_count) : (right_block += 1) {
// Compare blocks by comparing the first item in each block.
// Individual blocks are sorted, so this is comparing the smallest
// item in each block.
const order = cmp.compare(blocks[block_len * right_block],
blocks[block_len * smallest_block]);
// If the blocks tie, use the keys to break the tie.
// This keeps the sort stable, ensuring that the original
// order of the input array is preserved. It works because
// keys are guaranteed to be unique, so they cannot be equal.
if (order == .lt or
(order == .eq and
cmp.lessThan(keys[right_block], keys[smallest_block]))
) {
smallest_block = right_block;
}
}
// If the left block is the smallest, nothing needs to be swapped.
// It's already in the correct position.
if (smallest_block != left_block) {
// Swap the block contents
blockSwap(
T,
blocks.ptr + block_len * left_block,
blocks.ptr + block_len * smallest_block,
block_len,
);
// Also swap the keys, to preserve stability.
// TODO-OPT: If we have an external buffer and there is room,
// we could store block indexes in it instead. Those are faster
// to compare than keys, and faster to swap. They also don't
// need to be sorted back afterwards.
const tmp = keys[left_block];
keys[left_block] = keys[smallest_block];
keys[smallest_block] = tmp;
// If one of the blocks we swapped was the one referenced by,
// the median key, update the median key to track it.
if (median_key == left_block) {
median_key = smallest_block;
} else if (median_key == smallest_block) {
median_key = left_block;
}
}
}
if (DEBUG_VALIDATE) {
assert(std.meta.eql(@as(T, debug_median_value), keys[median_key]));
var block_index: usize = 1;
while (block_index < block_count) : (block_index += 1) {
const rel = cmp.compare(blocks[(block_index-1) * block_len], blocks[block_index * block_len]);
assert(rel == .lt or (rel == .eq and cmp.lessThan(keys[block_index-1], keys[block_index])));
}
}
return median_key;
}
/// When handling the trailing elements at the end of the array,
/// we cannot necessarily sort all complete blocks as normal, and
/// then merge in the trailer. The reason is that the trailer does
/// not undergo the selection sort before merging, so it is not
/// necessarily true that the first element of the trailer would be
/// greater than the last sorted element. Because of this, we
/// can't actually merge all trailer blocks as normal. This function
/// counts how many trailer blocks we need to merge manually in order
/// to make sure the sort works.
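/// For example, if the first item after the full trailer blocks is 10 and
/// the last full block starts with 12, that block is counted and merged
/// manually; scanning backwards stops at the first block whose first item
/// is less than or equal to 10.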
fn countLastMergeBlocks(
comptime T: type,
trailer: []T,
trailer_blocks: usize,
block_len: usize,
cmp: anytype,
) usize {
var blocks_to_merge_manually: usize = 0;
var first_item_after_blocks = trailer_blocks * block_len;
var curr_full_block = first_item_after_blocks;
// We can include a block in the normal sort as long as that
// block's first item is less than or equal to the trailer.
    // If it's greater than the first item in the trailer,
// we can't sort it normally because we may end up with items
// greater than the start of the trailer in the "sorted" part
// of the list. In the case that the start of a block is
// equal to the start of the trailer, the block's starting
// value is considered to be less than the trailer, because
// the trailer consists of the items that were at the end of the
// array when the sort began.
while (blocks_to_merge_manually < trailer_blocks and
cmp.lessThan(trailer[first_item_after_blocks],
trailer[curr_full_block - block_len])) {
blocks_to_merge_manually += 1;
curr_full_block -= block_len;
}
return blocks_to_merge_manually;
}
var debug_count: usize = 0;
/// This function accepts a list of keys and a list of blocks.
/// After calling it, the scratch buffer at the beginning of the
/// array will be at the end (reordered), and all other items
/// will be sorted on the left. If the mode needs a junk buffer,
/// the list of blocks must be preceded by block_len items of junk,
/// and be followed by block_count * block_len items of data.
/// For that data, within each block, the items must be sorted.
/// The blocks themselves must be sorted based on the first item
/// in each block. The data may optionally be followed by a set
/// of "trailer blocks", which must also be sorted in the same way.
/// There may be yet more items in a partial block after the
/// trailer blocks. trailer_len is the total of the items in the
/// trailer blocks and the items afterwards.
/// Trailer blocks must match the criteria in countLastMergeBlocks.
fn mergeBlocks(
comptime T : type,
noalias keys_ptr : [*]const T,
median_key : usize,
noalias blocks_ptr : [*]T,
block_count : usize,
block_len : usize,
trailer_blocks : usize,
trailer_len : usize,
comptime mode : BufferMode,
cmp : anytype,
) void {
const tcc = tc.Zone(@src());
defer tcc.End();
const dbg = debug_count;
debug_count += 1;
const junk_len = if (mode == .no_buffer) 0 else block_len;
// if this is the last segment, we may need one extra item for the median key.
const keys = keys_ptr[0..block_count + trailer_blocks + 1];
const blocks = (blocks_ptr - junk_len)[0..junk_len + block_count * block_len + trailer_len];
// When we start, our first block is after the junk buffer
// (at index block_len), and the next block is one block
// after that.
var next_block = junk_len + block_len;
var curr_block_len = block_len;
// This origin tells us which subarray the current block
// came from, before we did the selection sort on blocks.
// true means left, false means right. The subarrays were
// originally sorted, so if we find two adjacent blocks
// from the same subarray, we know they are already sorted,
// and we can skip forward a block without any work.
// TODO-OPT: I think we can just check median_key to see if it's
// equal to zero here, keys[0] must be either zero or block_count/2.
var curr_block_from_left_subarray = cmp.lessThan(keys[0], keys[median_key]);
var key_idx: usize = 1; // TODO-ZEN: rename to next_block
while (key_idx < block_count) : (key_idx += 1) {
// This is the beginning of one of the two arrays to be merged.
// The other array starts at index next_block. At this point
// in the loop, the junk buffer is in one single block immediately
// before this index.
const curr_block = next_block - curr_block_len;
const next_block_from_left_subarray = cmp.lessThan(keys[key_idx], keys[median_key]);
// If both blocks came from the same sub-array, they are already sorted.
// We just need to move the junk buffer past the current block, so that
// we can grab a new block and check again.
if (next_block_from_left_subarray == curr_block_from_left_subarray) {
// We know that everything to the left of the buffer is sorted,
// and that everything in the current block is less than anything
// in the next block. The current block is also sorted, so we
// can just move it to the sorted part of the list, and swap it
// with the displaced junk buffer.
swapWithPrecedingJunk(T, blocks.ptr + curr_block, junk_len, curr_block_len, mode);
verifySorted(T, blocks[0..next_block - junk_len], 0, cmp);
// After doing that, we know that the next block we grab will be a
// full block, so set the length accordingly.
curr_block_len = block_len;
} else {
// const tc_merge = tc.ZoneN(@src(), "Smart Merge");
// defer tc_merge.End();
            // If the next two blocks don't come from the same sub-array, they need
// to be merged. We know that the last item in the left array is smaller
// than the first item in the following block from the left array.
// Similarly, the last item in the right array is smaller than the first
// item in the following block of the right array. Therefore, the smaller
// of the last items from the two blocks we are merging is smaller than both
// of the next blocks we might encounter. So we don't need to look at any
// blocks after these two until we've reached the end of one of the blocks.
// At that point, we will stop merging, change our current block to be
// the remaining list, and then grab the next unchecked block as the new second list.
if (mode != .no_buffer) {
// Iterators for our two arrays and output buffer
var buffer_idx = curr_block - junk_len;
// For stability, when items are equal, we need to prefer items
// from the left subarray. For this merge, we are always merging
// between separate sub-arrays. `left` will always be items from
// the `left` subarray, and `right` will always be from the right.
// TODO-ZEN: I think we can keep these iterators consistent
// and avoid all this ugliness by making them persistent outside
// of this scope. It would also let us get rid of the
// curr_block_from_left_subarray variable.
var left = if (curr_block_from_left_subarray) curr_block
else next_block;
const left_end = left + if (curr_block_from_left_subarray) curr_block_len
else block_len;
var right = if (curr_block_from_left_subarray) next_block
else curr_block;
const right_end = right + if (curr_block_from_left_subarray) block_len
else curr_block_len;
// Merge sort until we reach the end of one of the merge arrays.
// TODO-ZEN: Can we combine this with other merge sort code?
while (left < left_end and right < right_end) {
if (cmp.lessThan(blocks[right], blocks[left])) {
if (comptime mode == .inline_buffer) {
const tmp = blocks[right];
blocks[right] = blocks[buffer_idx];
blocks[buffer_idx] = tmp;
} else {
comptime assert(mode == .external_buffer);
blocks[buffer_idx] = blocks[right];
if (DEBUG_SET_UNDEFINED) blocks[right] = undefined;
}
right += 1;
} else {
if (comptime mode == .inline_buffer) {
const tmp = blocks[left];
blocks[left] = blocks[buffer_idx];
blocks[buffer_idx] = tmp;
} else {
comptime assert(mode == .external_buffer);
blocks[buffer_idx] = blocks[left];
if (DEBUG_SET_UNDEFINED) blocks[left] = undefined;
}
left += 1;
}
buffer_idx += 1;
}
// TODO-ZEN: I think we can get rid of all of this cleanup,
// but I'm not totally sure how yet.
// At this point, only one of the arrays is empty. Check if
// it's the current one. If so, we have junk buffer that moved into
// the right array, which is now on the right of the current
// array. We need to move the remaining parts of the current
// array to the right of the junk buffer.
// TODO-OPT: I don't think we actually need to do this if we
// use smarter iterators.
if (left < left_end and curr_block_from_left_subarray) {
moveFrontPastJunk(T, blocks[left..right_end], left_end - left, mode);
} else if (right < right_end and !curr_block_from_left_subarray) {
moveFrontPastJunk(T, blocks[right..left_end], right_end - right, mode);
} else {
// Our "current" block is now the block that was "next", so
// update the subarray to match.
curr_block_from_left_subarray = next_block_from_left_subarray;
}
// Update the current block to point to the remaining items.
// One of the following two terms must be zero.
curr_block_len = (right_end - right) + (left_end - left);
verifySorted(T, blocks[0..next_block + block_len - curr_block_len - junk_len], 0, cmp);
} else {
// mode == .no_buffer
// In place merge sort. This is pretty expensive, so make
// sure that the blocks aren't already sorted, just in case.
// TODO-ZEN: There must be a way to merge these cases. They
// are EXACTLY the same except one calls binarySearchLeft
// and the other calls binarySearchRight.
// ... and the lessThan check is different.
if (curr_block_from_left_subarray) {
if (cmp.lessThan(blocks[next_block], blocks[next_block-1])) {
var rest = blocks[next_block - curr_block_len .. next_block + block_len];
while (curr_block_len > 0) {
// find the insert index in the right list
const insert_idx = curr_block_len + binarySearchLeft(T, rest[curr_block_len..], rest[0], cmp);
// move the left list to that index
rotate(T, rest[0..insert_idx], curr_block_len);
// If we moved it to the end of the right list, we need
// to grab the next chunk. Stop merging here.
if (insert_idx == rest.len) {
curr_block_len = rest.len;
break;
}
// leave one item here
curr_block_len -= 1;
// TODO-OPT: Probably worth doing a greedy check
// here to see if we can immediately cement more items.
rest = rest[insert_idx - curr_block_len..];
} else {
// the loop terminated normally, meaning the left list ran out.
// leave the rest of the right list as the curr block.
curr_block_len = rest.len;
curr_block_from_left_subarray = next_block_from_left_subarray;
}
} else {
curr_block_len = block_len;
curr_block_from_left_subarray = next_block_from_left_subarray;
}
verifySorted(T, blocks[0..next_block + block_len - curr_block_len - junk_len], 0, cmp);
} else {
if (!cmp.lessThan(blocks[next_block-1], blocks[next_block])) {
var rest = blocks[next_block - curr_block_len .. next_block + block_len];
while (curr_block_len > 0) {
// find the insert index in the right list
const insert_idx = curr_block_len + binarySearchRight(T, rest[curr_block_len..], rest[0], cmp);
// move the left list to that index
rotate(T, rest[0..insert_idx], curr_block_len);
// If we moved it to the end of the right list, we need
// to grab the next chunk. Stop merging here.
if (insert_idx == rest.len) {
curr_block_len = rest.len;
break;
}
// leave one item here
curr_block_len -= 1;
// TODO-OPT: Probably worth doing a greedy check
// here to see if we can immediately cement more items.
rest = rest[insert_idx - curr_block_len..];
} else {
// the loop terminated normally, meaning the left list ran out.
// leave the rest of the right list as the curr block.
curr_block_len = rest.len;
curr_block_from_left_subarray = next_block_from_left_subarray;
}
} else {
curr_block_len = block_len;
curr_block_from_left_subarray = next_block_from_left_subarray;
}
verifySorted(T, blocks[0..next_block + block_len - curr_block_len - junk_len], 0, cmp);
} // curr_block_from_left_subarray
} // mode != .no_buffer
}
// After all that, move to the next block.
next_block += block_len;
}
// At this point, we still have one partial array left to the right of the junk buffer.
// curr_block here points to the start of that partial array.
var curr_block = next_block - curr_block_len;
verifySorted(T, blocks[0..next_block - curr_block_len - junk_len], 0, cmp);
// The trailer is the special case set of items at the very end of the array
// that cannot be part of the normal merge process. This is because they are
// either part of the elements at the very end of the array that do not fit
// into a block, or they are blocks from the left array which conflict with
// the elements at the very end (see countLastMergeBlocks).
if (trailer_len != 0) {
if (curr_block_from_left_subarray) {
// If the current block is from the left subarray, then everything
// that remains except the final items is from the left subarray,
// and is already sorted. So we can safely extend the current
// block all the way up to the last block, and then merge.
curr_block_len += block_len * trailer_blocks;
} else {
// If the current block is from the right subarray, we know that
// the remaining items are less than the first item after the blocks.
// But we also know that the smallest item in each remaining block
// is greater than the first item after the blocks (because that's
// the condition required for the block to be part of the trailer).
// So we know that everything remaining in our current buffer is
// sorted, and no items to the right of it will be part of it.
// This means we can move the remaining items directly into the
// sorted part of the array.
verifySorted(T, blocks[0..curr_block - junk_len], 0, cmp);
verifySorted(T, blocks[curr_block..next_block], 0, cmp);
if (DEBUG_VALIDATE) assert(!cmp.lessThan(blocks[curr_block], blocks[curr_block-junk_len-1]));
swapWithPrecedingJunk(T, blocks.ptr + curr_block, junk_len, curr_block_len, mode);
curr_block = next_block;
verifySorted(T, blocks[0..curr_block - junk_len], 0, cmp);
// Everything after this point must come from the left array, for
// the same reasons as in the other case. We can fall back to a merge.
curr_block_len = block_len * trailer_blocks;
curr_block_from_left_subarray = true;
}
// In either case above, there are only two sorted arrays left - the remaining items from the left
// side blocks that were incompatible with the trailer items, and the trailer items.
// Use a merge sort. We can use the buffer here because the trailer is shorter
// than a block.
mergeIntoPrecedingJunk(T, blocks.ptr + curr_block, junk_len, curr_block_len, trailer_len - block_len * trailer_blocks, mode, cmp);
verifySorted(T, blocks[0..blocks.len - junk_len], 0, cmp);
} else {
// If there's no trailer (the common case), any remaining items are
// sorted. We just need to move the buffer across them, so that the
// next iteration can use it.
swapWithPrecedingJunk(T, blocks.ptr + curr_block, junk_len, curr_block_len, mode);
verifySorted(T, blocks[0..blocks.len - junk_len], 0, cmp);
}
}
/// Merges two arrays, using the preceding junk buffer to accelerate the operation.
/// Picks an implementation based on the buffer mode.
inline fn mergeIntoPrecedingJunk(comptime T: type, data_ptr: [*]T, junk_len: usize, left_len: usize, right_len: usize, comptime mode: BufferMode, cmp: anytype) void {
switch (comptime mode) {
.no_buffer => lazyMerge(T, data_ptr[0..left_len + right_len], left_len, cmp),
.inline_buffer => mergeForwards (T, data_ptr, junk_len, left_len, right_len, cmp),
.external_buffer => mergeForwardExternal(T, data_ptr, junk_len, left_len, right_len, cmp),
}
}
/// Swaps a data block with part of the junk buffer. Uses one of several
/// implementations, depending on the buffer mode. The segment of junk
/// and the segment of data must not overlap. After this call the data
/// at data_ptr will be moved to data_ptr - junk_len, and will be the same
/// length. The mode determines what happens to the data in the junk buffer.
inline fn swapWithPrecedingJunk(comptime T: type, data_ptr: [*]T, junk_len: usize, swap_len: usize, comptime mode: BufferMode) void {
switch (comptime mode) {
.no_buffer => assert(junk_len == 0), // no junk buffer, everything is where it needs to be.
// TODO-OPT: This preserves the order of the junk buffer.
// There may be an opportunity to speed it up by giving up
// that constraint.
.inline_buffer => blockSwap(T, data_ptr - junk_len, data_ptr, swap_len),
.external_buffer => {
copyNoAlias(T, data_ptr - junk_len, data_ptr, swap_len);
setUndefined(T, data_ptr[0..swap_len]);
},
}
}
/// Moves data past the junk that follows it, moving the junk back to
/// the beginning. Optimized for the buffer mode.
inline fn moveFrontPastJunk(comptime T: type, array: []T, data_len: usize, comptime mode: BufferMode) void {
switch (mode) {
.no_buffer => {}, // everything is where it needs to be
.inline_buffer => moveFrontToBack (T, array, data_len),
.external_buffer => moveFrontToClobberBack(T, array, data_len),
}
}
/// This function accepts an array made up of three parts:
/// a sliding buffer, a sorted left half, and a sorted right half, in that order.
/// It merges the left and right halves into a single sorted buffer at the beginning,
/// while rotating the sliding buffer to the end. The sliding buffer may be reordered
/// during this process.
fn mergeForwards(comptime T: type, data_start: [*]T, buffer_len: usize, left_len: usize, right_len: usize, cmp: anytype) void {
// The buffer must be at least as large as the right array,
// to prevent overwriting the left array.
assert(buffer_len >= right_len);
// we will do everything relative to the start of the buffer.
const array = data_start - buffer_len;
// tracking iterator pointers for our three sections
var buffer = array;
var left = buffer + buffer_len;
const left_end = left + left_len;
var right = left_end;
const right_end = right + right_len;
// Pretty standard merge sort, but merge into the buffer.
// While doing this, move the buffer data into the merged out slots.
// Loop until the entire right side is consumed.
while (@ptrToInt(right) < @ptrToInt(right_end)) {
// TODO-OPT: Branchless is much better for sorting random inputs,
// but much worse for inputs that are already mostly sorted.
// Consider allowing the caller to pass a hint parameter which
// selects one or the other.
const branchless = false;
if (branchless) {
if (@ptrToInt(left) < @ptrToInt(left_end)) {
const from_right = mask(cmp.lessThan(right[0], left[0]));
const read_ptr_usize = (@ptrToInt(left) & ~from_right) | (@ptrToInt(right) & from_right);
const read_ptr = @intToPtr([*]T, read_ptr_usize);
const tmp = buffer[0];
buffer[0] = read_ptr[0];
read_ptr[0] = tmp;
right += from_right & 1;
left += ~from_right & 1;
} else {
const tmp = buffer[0];
buffer[0] = right[0];
right[0] = tmp;
right += 1;
}
} else {
if (@ptrToInt(left) >= @ptrToInt(left_end) or cmp.lessThan(right[0], left[0])) {
const tmp = buffer[0];
buffer[0] = right[0];
right[0] = tmp;
right += 1;
} else {
const tmp = buffer[0];
buffer[0] = left[0];
left[0] = tmp;
left += 1;
}
}
buffer += 1;
}
// If anything remains on the left side, move it directly to the end of the list.
// We only need to do this if there is more buffer in the way, otherwise it is
// already at the end of the list.
if (buffer != left) {
// TODO-OPT: This can alias if right_len < buffer_len.
// We usually know statically at the call site whether that
// is the case; is it worth bifurcating codegen?
moveBackToFront(T, ptrSlice(T, buffer, left_end), ptrDiff(T, left, left_end));
}
}
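// Illustrative sketch, not part of the original test suite (assumes `std` is
// imported at the top of this file): a minimal [junk | left | right] layout
// merged with mergeForwards. The merged output lands where the junk buffer
// was, and the (possibly reordered) junk values end up at the back.
test "mergeForwards sketch" {
    const Cmp = struct {
        pub fn lessThan(self: @This(), a: u32, b: u32) bool {
            return a < b;
        }
    };
    var data = [_]u32{ 9, 9, 1, 3, 2, 4 }; // junk {9,9}, left {1,3}, right {2,4}
    mergeForwards(u32, data[2..], 2, 2, 2, Cmp{});
    std.testing.expectEqualSlices(u32, &[_]u32{ 1, 2, 3, 4 }, data[0..4]);
    // both junk values are equal, so any reordering cannot be observed here
    std.testing.expectEqualSlices(u32, &[_]u32{ 9, 9 }, data[4..]);
}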
inline fn ptrSlice(comptime T: type, start: [*]T, end: [*]T) []T {
return start[0..ptrDiff(T, start, end)];
}
inline fn ptrDiff(comptime T: type, left: [*]T, right: [*]T) usize {
return @divExact(@ptrToInt(right) - @ptrToInt(left), @sizeOf(T));
}
inline fn mask(bit: bool) usize {
return @bitCast(usize, -@intCast(isize, @boolToInt(bit)));
}
/// Like mergeForwards, this function accepts an array made up of three parts:
/// a sorted left half, a sorted right half, and a sliding buffer, in that order.
/// It merges the left and right halves into a single sorted buffer at the end,
/// while rotating the sliding buffer to the beginning. The sliding buffer may be
/// reordered during this process.
fn mergeBackwards(comptime T: type, array: [*]T, left_len: usize, right_len: usize, buffer_len: usize, cmp: anytype) void {
// The buffer must be at least as large as the left array,
// to prevent overwriting the right array. At the end we also
// assume that the remainder of the right array does not alias
// the final junk buffer, which holds when the left array is at
// least as large as the junk buffer. Together, the two lengths
// must be exactly equal.
assert(buffer_len == left_len);
// Iterators. These are exclusive: each points one past the next element to be consumed.
const left_end: usize = 0;
var left = left_len;
const right_end = left;
var right = right_end + right_len;
var buffer = right + buffer_len;
while (left > left_end) {
buffer -= 1;
// TODO-OPT: This could be made branchless, test that and see if it's faster.
if (right == right_end or cmp.lessThan(array[right-1], array[left-1])) {
left -= 1;
const tmp = array[left];
array[left] = array[buffer];
array[buffer] = tmp;
} else {
right -= 1;
const tmp = array[right];
array[right] = array[buffer];
array[buffer] = tmp;
}
}
// If anything remains on the right side, move it directly to the beginning of the list.
// We only need to do this if there is more buffer in the way, otherwise it is
// already at the beginning of the list.
// TODO-OPT: This preserves the order of the junk buffer, but we don't actually need that.
if (right != buffer) {
const remain = right - right_end;
blockSwap(T, array + right_end, array + (buffer - remain), remain);
}
}
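// Illustrative sketch, not part of the original test suite: the mirror-image
// [left | right | junk] layout merged with mergeBackwards. The merged output
// lands at the back and the (possibly reordered) junk values end up in front.
test "mergeBackwards sketch" {
    const Cmp = struct {
        pub fn lessThan(self: @This(), a: u32, b: u32) bool {
            return a < b;
        }
    };
    var data = [_]u32{ 1, 3, 2, 4, 9, 9 }; // left {1,3}, right {2,4}, junk {9,9}
    mergeBackwards(u32, &data, 2, 2, 2, Cmp{});
    std.testing.expectEqualSlices(u32, &[_]u32{ 1, 2, 3, 4 }, data[2..]);
}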
/// This function accepts an array made up of three parts:
/// a scratch buffer, a sorted left half, and a sorted right half, in that order.
/// It merges the left and right halves into a single sorted buffer at the beginning,
/// leaving the memory at the end undefined.
fn mergeForwardExternal(comptime T: type, data_ptr: [*]T, buffer_len: usize, left_len: usize, right_len: usize, cmp: anytype) void {
// The buffer must be at least as large as the right array,
// to prevent overwriting the left array.
assert(buffer_len >= right_len);
// We will do everything relative to the start of the buffer
const array = data_ptr - buffer_len;
// tracking iterators for our three sections
var buffer: usize = 0;
var left = buffer_len;
const left_end = left + left_len;
var right = left_end;
const right_end = right + right_len;
// Pretty standard merge sort, but merge into the buffer.
// While doing this, move the buffer data into the merged out slots.
// Loop until the entire right side is consumed.
while (right < right_end) {
// TODO-OPT: This could be made branchless, test that and see if it's faster.
if (left >= left_end or cmp.lessThan(array[right], array[left])) {
array[buffer] = array[right];
if (DEBUG_SET_UNDEFINED) array[right] = undefined;
right += 1;
} else {
array[buffer] = array[left];
if (DEBUG_SET_UNDEFINED) array[left] = undefined;
left += 1;
}
buffer += 1;
}
// If anything remains on the left side, move it directly to the end of the list.
// We only need to do this if there is more buffer in the way, otherwise it is
// already at the end of the list.
if (buffer != left) {
// TODO-OPT: This can alias if right_len < buffer_len.
// We usually know statically at the call site whether that
// is the case; is it worth bifurcating codegen?
moveBackToClobberFront(T, array[buffer..left_end], left_end - left);
}
}
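// Illustrative sketch, not part of the original test suite: same layout as the
// mergeForwards sketch above, but using the external variant, which copies
// instead of swapping. Only the merged prefix is meaningful afterwards; the
// trailing slots are treated as scratch.
test "mergeForwardExternal sketch" {
    const Cmp = struct {
        pub fn lessThan(self: @This(), a: u32, b: u32) bool {
            return a < b;
        }
    };
    var data = [_]u32{ 0, 0, 1, 3, 2, 4 }; // scratch {0,0}, left {1,3}, right {2,4}
    mergeForwardExternal(u32, data[2..], 2, 2, 2, Cmp{});
    std.testing.expectEqualSlices(u32, &[_]u32{ 1, 2, 3, 4 }, data[0..4]);
}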
/// Like mergeForwards, this function accepts an array made up of three parts:
/// a sorted left half, a sorted right half, and a sliding buffer, in that order.
/// It merges the left and right halves into a single sorted buffer at the end,
/// clobbering the data at the beginning.
fn mergeBackwardExternal(comptime T: type, array: [*]T, left_len: usize, right_len: usize, buffer_len: usize, cmp: anytype) void {
// The buffer must be at least as large as the left array,
// to prevent overwriting the right array. At the end we also
// assume that the remainder of the right array does not alias
// the final junk buffer, which holds when the left array is at
// least as large as the junk buffer. Together, the two lengths
// must be exactly equal.
assert(buffer_len == left_len);
// Iterators. These are exclusive: each points one past the next element to be consumed.
const left_end: usize = 0;
var left = left_len;
const right_end = left;
var right = right_end + right_len;
var buffer = right + buffer_len;
while (left > left_end) {
buffer -= 1;
// TODO-OPT: This could be made branchless, test that and see if it's faster.
if (right == right_end or cmp.lessThan(array[right-1], array[left-1])) {
left -= 1;
array[buffer] = array[left];
} else {
right -= 1;
array[buffer] = array[right];
}
}
// If anything remains on the right side, move it directly to the beginning of the list.
// We only need to do this if there is more buffer in the way, otherwise it is
// already at the beginning of the list.
// TODO-OPT: This preserves the order of the junk buffer, but we don't actually need that.
if (right != buffer) {
const remain = right - right_end;
copyNoAlias(T, array + (buffer - remain), array + right_end, remain);
}
setUndefined(T, array[0..buffer_len]);
}
/// This function performs an iterative, fully in-place merge sort of the array,
/// starting from a fully unsorted list and working its way up. We use it
/// when the array has very few unique items (specifically, fewer than four).
/// TODO-OPT: There might be better sorts for this case. Counting sort
/// would need extra storage though, and quicksort is not stable, so
/// those are out. But we could at least use external storage maybe.
fn lazyStableSort(comptime T: type, array: []T, cmp: anytype) void {
const tcc = tc.Zone(@src());
defer tcc.End();
// sort pairs in-place
var i: usize = 1;
while (i < array.len) : (i += 2) {
if (cmp.lessThan(array[i], array[i-1])) {
const tmp = array[i-1];
array[i-1] = array[i];
array[i] = tmp;
}
}
// iteratively do merge sort with larger sets of items.
var merge_size: usize = 2;
while (merge_size < array.len) : (merge_size *= 2) {
var full_merge = merge_size * 2;
// Merge pairs of sorted blocks
var merge_index: usize = 0;
while (merge_index + full_merge <= array.len) : (merge_index += full_merge) {
lazyMerge(T, array[merge_index..][0..full_merge], merge_size, cmp);
}
// Check for a partial block at the end
if (merge_index + merge_size < array.len) {
lazyMerge(T, array[merge_index..], merge_size, cmp);
}
}
}
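// Illustrative sketch, not part of the original test suite: lazyStableSort is
// self-contained, so it can be exercised directly on any small array with an
// ordinary less-than comparator.
test "lazyStableSort sketch" {
    const Cmp = struct {
        pub fn lessThan(self: @This(), a: u32, b: u32) bool {
            return a < b;
        }
    };
    var data = [_]u32{ 3, 1, 2 };
    lazyStableSort(u32, &data, Cmp{});
    std.testing.expectEqualSlices(u32, &[_]u32{ 1, 2, 3 }, &data);
}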
/// This function performs an in-place merge sort, without using a junk buffer.
/// It is much slower than mergeForwards and mergeForwardExternal, so those should
/// be preferred if possible.
/// TODO-OPT: This can absolutely use external storage if available.
fn lazyMerge(comptime T: type, array: []T, left_len: usize, cmp: anytype) void {
// For performance, always sort the shorter array into the longer one.
// TODO-OPT: The reference implementation has a tight loop to recognize
// when multiple keys are already in order and avoid unnecessary binary
// search calls. This is probably worth doing, since the relevant items
// are cached and it's a quick check.
const right_len = array.len - left_len;
if (left_len <= right_len) {
// If left is smaller, iteratively find insertion points in the right
// half and move the left array to that index with `rotate`.
var rest = array;
var left_remain = left_len;
while (rest.len > left_remain and left_remain > 0) {
// find the insertion index
const insert_idx = binarySearchLeft(T, rest[left_remain..], rest[0], cmp) + left_remain;
// move the left array to that index
rotate(T, rest[0..insert_idx], left_remain);
// leave the smallest element here
left_remain -= 1;
rest = rest[insert_idx - left_remain..];
}
} else {
// If right is smaller, iteratively find insertion points in the left
// half and move the right array to that index with `rotate`.
var rest = array;
var right_remain = right_len;
while (rest.len > right_remain and right_remain > 0) {
// find the insertion index
const left_remain = rest.len - right_remain;
const insert_idx = binarySearchRight(T, rest[0..left_remain], rest[rest.len-1], cmp);
// move the right array to that index
const insert_len = left_remain - insert_idx;
rotate(T, rest[insert_idx..], insert_len);
// leave the largest element here
right_remain -= 1;
rest = rest[0..insert_idx + right_remain];
}
}
}
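// Illustrative sketch, not part of the original test suite: lazyMerge on a tiny
// array whose sorted left half {2, 8} is shorter than its sorted right half
// {1, 3, 9}, exercising the rotate-based insertion path above.
test "lazyMerge sketch" {
    const Cmp = struct {
        pub fn lessThan(self: @This(), a: u32, b: u32) bool {
            return a < b;
        }
    };
    var data = [_]u32{ 2, 8, 1, 3, 9 };
    lazyMerge(u32, &data, 2, Cmp{});
    std.testing.expectEqualSlices(u32, &[_]u32{ 1, 2, 3, 8, 9 }, &data);
}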
/// A standard linear scan insertion sort. Picks items from the
/// unsorted right half and moves them to the sorted left half
/// until the entire array is sorted.
fn insertSort(comptime T: type, array: []T, cmp: anytype) void {
// items to the left of i are sorted
var i: usize = 1;
while (i < array.len) : (i += 1) {
var left = i;
const item = array[i];
// scan left to find the insertion point,
// copying greater elements to the right.
while (left > 0 and cmp.lessThan(item, array[left-1])) {
array[left] = array[left-1];
left -= 1;
}
// insert the item.
array[left] = item;
}
}
/// Finds the index of the left-most value in the sorted array
/// that is greater than or equal to the target. If there is no
/// such element, the length of the array is returned.
fn binarySearchLeft(comptime T: type, array: []const T, target: T, cmp: anytype) usize {
var left: usize = 0;
var right = array.len;
// TODO-OPT: consider using a duffs device branchless search.
while (left < right) {
// this is equivalent to (left + right)/2, but avoids overflow.
// TODO-OPT: may not be worth it though? analyze the possibility
// for overflow here and consider not having it.
const middle = left + (right - left) / 2;
if (cmp.lessThan(array[middle], target)) {
left = middle + 1;
} else {
right = middle;
}
}
return left;
}
/// Finds the index of the left-most value in the sorted array
/// that is strictly greater than the target. If there is no
/// such element, the length of the array is returned.
fn binarySearchRight(comptime T: type, array: []const T, target: T, cmp: anytype) usize {
var left: usize = 0;
var right = array.len;
// TODO-OPT: consider using a duffs device branchless search.
while (left < right) {
// this is equivalent to (left + right)/2, but avoids overflow.
// TODO-OPT: may not be worth it though? analyze the possibility
// for overflow here and consider not having it.
const middle = left + (right - left) / 2;
if (cmp.lessThan(target, array[middle])) {
right = middle;
} else {
left = middle + 1;
}
}
return right;
}
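// Illustrative sketch, not part of the original test suite: on a run of equal
// keys, binarySearchLeft returns the first index of the run while
// binarySearchRight returns the index just past it. That asymmetry is what
// keeps the block merges stable.
test "binarySearch left vs right sketch" {
    const Cmp = struct {
        pub fn lessThan(self: @This(), a: u32, b: u32) bool {
            return a < b;
        }
    };
    const sorted = [_]u32{ 1, 2, 2, 2, 3 };
    std.testing.expectEqual(@as(usize, 1), binarySearchLeft(u32, &sorted, 2, Cmp{}));
    std.testing.expectEqual(@as(usize, 4), binarySearchRight(u32, &sorted, 2, Cmp{}));
}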
/// Moves a set of buffer_len items from the beginning of the passed array
/// to the end. The order of the moved items is preserved. Displaced
/// items are moved to the front, but their order is not preserved.
fn moveFrontToBack(
comptime T: type,
array: []T,
buffer_len: usize,
) void {
// TODO-OPT: Use a stack buffer to accelerate this, or use SSE
var front = buffer_len;
var back = array.len;
while (front > 0) {
front -= 1; back -= 1;
const tmp = array[front];
array[front] = array[back];
array[back] = tmp;
}
}
/// Moves buffer_len items from the start of the array to the back,
/// clobbering any displaced items at the back.
fn moveFrontToClobberBack(
comptime T: type,
array: []T,
buffer_len: usize,
) void {
// TODO-OPT: Use a stack buffer to accelerate this, or use SSE
var front = buffer_len;
var back = array.len;
while (front > 0) {
front -= 1; back -= 1;
array[back] = array[front];
}
setUndefined(T, array[0..back]);
}
/// Moves a set of buffer_len items from the end of the passed array
/// to the beginning. The order of the moved items is preserved. Displaced
/// items are moved to the back, but their order is not preserved.
fn moveBackToFront(
comptime T: type,
array: []T,
buffer_len: usize,
) void {
// TODO-OPT: Use a stack buffer to accelerate this, or use SSE
var front: usize = 0;
var back = array.len - buffer_len;
while (back < array.len) : ({front += 1; back += 1;}) {
const tmp = array[front];
array[front] = array[back];
array[back] = tmp;
}
}
/// Moves buffer_len items from the back of the array to the start,
/// clobbering any displaced items at the start.
fn moveBackToClobberFront(
comptime T: type,
array: []T,
buffer_len: usize,
) void {
// TODO-OPT: Use a stack buffer to accelerate this, or use SSE
var front: usize = 0;
var back = array.len - buffer_len;
while (back < array.len) : ({front += 1; back += 1;}) {
array[front] = array[back];
}
setUndefined(T, array[front..]);
}
/// Rotates the array around a pivot point: the elements before the pivot and
/// the elements after it exchange places, each keeping its internal order.
/// Example:
/// [0123 S 456789]
/// [4567 S 012389] // block swap L
/// 4567[0123 S 89] // move forward 4
/// 4567[0189 S 23] // block swap R
/// 4567[01 S 89]23 // move back 2
/// 4567[89 S 01]23 // block swap L
/// 456789[01 S ]23 // move forward 2
/// left_len == remain.len, exit loop
/// 4567890123
fn rotate(comptime T: type, array: []T, pivot: usize) void {
// TODO-OPT: This is optimized for rotations where the pivot is
// far from either end of the array. If the pivot is within a
// certain threshold of the end of the array (say, 16 elements),
// we should instead copy the elements onto the stack and use a memmove.
// This is not purely theoretical. In some cases, this function is
// used to rotate exactly one element. Those cases are pathologically
// slow with this implementation.
var remain = array;
var left_len = pivot;
while (left_len > 0 and left_len < remain.len) {
if (left_len + left_len <= remain.len) {
blockSwap(T, remain.ptr, remain.ptr + left_len, left_len);
remain = remain[left_len..];
} else {
const right_len = remain.len - left_len;
blockSwap(T, remain.ptr + (left_len - right_len), remain.ptr + left_len, right_len);
remain = remain[0..left_len];
left_len -= right_len;
}
}
}
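// Illustrative sketch, not part of the original test suite: rotating around
// pivot 2 moves the first two elements to the back while preserving the
// relative order of both halves.
test "rotate sketch" {
    var data = [_]u32{ 1, 2, 3, 4, 5 };
    rotate(u32, &data, 2);
    std.testing.expectEqualSlices(u32, &[_]u32{ 3, 4, 5, 1, 2 }, &data);
}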
/// Swaps left[0..block_len] with right[0..block_len]. The two arrays must not overlap.
fn blockSwap(comptime T: type, noalias left: [*]T, noalias right: [*]T, block_len: usize) void {
assertNoAlias(T, left, block_len, right, block_len);
var idx: usize = 0;
while (idx < block_len) : (idx += 1) {
// TODO-OPT: optimization of this loop is highly dependent on the size of T.
// The left and right halves of the array are guaranteed not to alias,
// but the compiler may not realize this. We could explicitly use a
// vectorized memswap here instead to go faster.
const tmp = right[idx];
right[idx] = left[idx];
left[idx] = tmp;
}
}
/// Copies count items from src to dest. src and dest must not alias.
fn copyNoAlias(comptime T: type, noalias dest: [*]T, noalias src: [*]const T, count: usize) void {
assertNoAlias(T, dest, count, src, count);
@memcpy(@ptrCast([*]u8, dest), @ptrCast([*]const u8, src), count * @sizeOf(T));
}
/// Asserts that two regions of memory do not overlap.
inline fn assertNoAlias(comptime T: type, noalias a: [*]const T, a_len: usize, noalias b: [*]const T, b_len: usize) void {
// TODO-ZIG: when arePointersComparable is implemented, do this.
// if (comptime @arePointersComparable(a, b)) {
assert(a_len == 0 or b_len == 0 or
@ptrToInt(a + a_len) <= @ptrToInt(b) or
@ptrToInt(b + b_len) <= @ptrToInt(a));
// }
}
/// Returns an empty mutable slice of T.
/// The pointer is not undefined, but dereferencing it is UB.
/// TODO-ZIG: In the current compiler implementation, the pointer
/// actually may be undefined. This is a problem because it breaks
/// optionals. But hopefully that's irrelevant here.
fn emptySlice(comptime T: type) []T {
var x = [_]T{};
return &x;
}
inline fn setUndefined(comptime T: type, data: []T) void {
if (DEBUG_SET_UNDEFINED) {
for (data) |*v| v.* = undefined;
}
}
inline fn verifyKeys(comptime T: type, keys: []T, cmp: anytype) void {
if (DEBUG_VALIDATE) {
insertSort(T, keys, cmp);
verifySortedStrict(T, keys, cmp);
}
}
inline fn verifySorted(comptime T: type, data: []T, sorted_len: usize, cmp: anytype) void {
if (DEBUG_VALIDATE) {
if (data.len < 2) return;
var block_len = if (sorted_len == 0) data.len else sorted_len;
var start: usize = 0;
while (start + block_len <= data.len) : (start += block_len) {
var part = data[start..][0..block_len];
for (part[1..]) |v, i| {
assert(!cmp.lessThan(v, part[i]));
}
}
if (start < data.len) {
var part2 = data[start..];
for (part2[1..]) |v2, j| {
assert(!cmp.lessThan(v2, part2[j]));
}
}
}
}
inline fn verifySortedStrict(comptime T: type, data: []T, cmp: anytype) void {
if (DEBUG_VALIDATE) {
if (data.len < 2) return;
for (data[1..]) |v, i| {
assert(cmp.lessThan(data[i], v));
}
}
}
// --------------------------------- Tests ---------------------------------
// mark Tests as referenced so its tests get compiled.
comptime { _ = Tests; }
pub const runAllTests = Tests.runAll;
const Tests = struct {
const testing = std.testing;
const print = std.debug.print;
fn runAll() void {
const tests = .{
"emptySlice",
"copyNoAlias",
"blockSwap",
"rotate",
"moveBackToFront",
"moveFrontToBack",
"binarySearchLeft",
"insertSort",
"mergeForwards",
"mergeBackwards",
"mergeForwardExternal",
"lazyMerge",
"lazyStableSort",
"collectKeys",
"pairwiseSwaps",
"pairwiseWrites",
"buildInPlace",
"buildBlocksExternal",
"mergeBlocks_noTrailer",
"mergeBlocks_withTrailer",
"mergeBlocks_noTrailerLazy",
"mergeBlocks_withTrailerLazy",
"mergeBlocks_noTrailerExternal",
"mergeBlocks_withTrailerExternal",
"combineBlocksInPlace",
"combineBlocksInPlace_lazy",
"sort_inPlace_enoughKeys",
"sort_inPlace_notEnoughKeys",
"sort_external_enoughKeys",
"sort_external_notEnoughKeys",
};
print("Running tests...\n", .{});
inline for (tests) |fn_name| {
print("{}...\n", .{fn_name});
@field(@This(), "test_"++fn_name)();
}
print("All {} tests passed.\n", .{tests.len});
}
test "sort_inPlace_enoughKeys" { test_sort_inPlace_enoughKeys(); }
fn test_sort_inPlace_enoughKeys() void {
var array = [_]u32{
17, 30, 8, 14, 11, 3, 12, 24,
33, 2, 1, 36, 37, 23, 6, 38,
5, 13, 26, 16, 31, 15, 0, 20,
35, 19, 18, 28, 34, 32, 25, 7,
21, 9, 29, 4, 10, 27, 40, 39,
22,
};
sort(u32, &array, {}, asc_ignoreBottomBitFn);
const sorted = [_]u32{
1, 0, 3, 2, 5, 4, 6, 7,
8, 9, 11, 10, 12, 13, 14, 15,
17, 16, 19, 18, 20, 21, 23, 22,
24, 25, 26, 27, 28, 29, 30, 31,
33, 32, 35, 34, 36, 37, 38, 39,
40,
};
testing.expectEqualSlices(u32, &sorted, &array);
}
test "sort_inPlace_notEnoughKeys" { test_sort_inPlace_notEnoughKeys(); }
fn test_sort_inPlace_notEnoughKeys() void {
var array = [_]u32{
17, 0, 8, 14, 11, 3, 12, 14,
3, 2, 1, 6, 7, 13, 6, 8,
5, 13, 16, 16, 1, 15, 0, 20,
5, 19, 18, 18, 4, 2, 15, 7,
11, 9, 19, 4, 10, 17, 10, 9,
12,
};
sort(u32, &array, {}, asc_ignoreBottomBitFn);
const sorted = [_]u32{
0, 1, 1, 0, 3, 3, 2, 2,
5, 5, 4, 4, 6, 7, 6, 7,
8, 8, 9, 9, 11, 11, 10, 10,
12, 13, 13, 12, 14, 14, 15, 15,
17, 16, 16, 17, 19, 18, 18, 19,
20,
};
testing.expectEqualSlices(u32, &sorted, &array);
}
test "sort_external_enoughKeys" { test_sort_external_enoughKeys(); }
fn test_sort_external_enoughKeys() void {
var buffer: [44]u32 = undefined;
for (buffer[1..]) |_, i| {
std.mem.set(u32, &buffer, 0xFFFFFFFF);
var array = [_]u32{
17, 30, 8, 14, 11, 3, 12, 24,
33, 2, 1, 36, 37, 23, 6, 38,
5, 13, 26, 16, 31, 15, 0, 20,
35, 19, 18, 28, 34, 32, 25, 7,
21, 9, 29, 4, 10, 27, 40, 39,
22,
};
// use canaries that will be sorted to the wrong place
// if they are observed.
buffer[i+1] = 1;
buffer[0] = 0xEA7F00D5;
sortExternal(u32, &array, {}, asc_ignoreBottomBitFn, buffer[1..i+1]);
const sorted = [_]u32{
1, 0, 3, 2, 5, 4, 6, 7,
8, 9, 11, 10, 12, 13, 14, 15,
17, 16, 19, 18, 20, 21, 23, 22,
24, 25, 26, 27, 28, 29, 30, 31,
33, 32, 35, 34, 36, 37, 38, 39,
40,
};
testing.expectEqual(@as(u32, 1), buffer[i+1]);
testing.expectEqual(@as(u32, 0xEA7F00D5), buffer[0]);
testing.expectEqualSlices(u32, &sorted, &array);
}
}
test "sort_external_notEnoughKeys" { test_sort_external_notEnoughKeys(); }
fn test_sort_external_notEnoughKeys() void {
var buffer: [44]u32 = undefined;
buffer[0] = 0xEA7F00D5;
for (buffer[1..]) |_, i| {
std.mem.set(u32, &buffer, 0xFFFFFFFF);
var array = [_]u32{
17, 0, 8, 14, 11, 3, 12, 14,
3, 2, 1, 6, 7, 13, 6, 8,
5, 13, 16, 16, 1, 15, 0, 20,
5, 19, 18, 18, 4, 2, 15, 7,
11, 9, 19, 4, 10, 17, 10, 9,
12,
};
// use canaries that will be sorted to the wrong place
// if they are observed.
buffer[i+1] = 1;
buffer[0] = 0xEA7F00D5;
sortExternal(u32, &array, {}, asc_ignoreBottomBitFn, buffer[1..i+1]);
const sorted = [_]u32{
0, 1, 1, 0, 3, 3, 2, 2,
5, 5, 4, 4, 6, 7, 6, 7,
8, 8, 9, 9, 11, 11, 10, 10,
12, 13, 13, 12, 14, 14, 15, 15,
17, 16, 16, 17, 19, 18, 18, 19,
20,
};
testing.expectEqual(@as(u32, 1), buffer[i+1]);
testing.expectEqual(@as(u32, 0xEA7F00D5), buffer[0]);
testing.expectEqualSlices(u32, &sorted, &array);
}
}
test "collectKeys" { test_collectKeys(); }
fn test_collectKeys() void {
// test with enough keys
var test1 = [_]u32{
17, 30, 8, 14, 11, 3, 12, 24,
33, 2, 1, 36, 37, 23, 6, 38,
5, 13, 26, 16, 31, 15, 0, 20,
35, 19, 18, 28, 34, 32, 25, 7,
21, 9, 29, 4, 10, 27, 40, 39,
22,
};
const found_keys_1 = collectKeys(u32, &test1, 14, asc_ignoreBottomBit);
// We should find all 14 keys in this array.
testing.expectEqual(@as(usize, 14), found_keys_1);
// Keys are not necessarily ordered, but are stable.
// order them to check.
insertSort(u32, test1[0..found_keys_1], asc_ignoreBottomBit);
const test1_result = [_]u32{
1, 3, 6, 8, 11, 12, 14, 17,
23, 24, 30, 33, 36, 38,
2, 37,
5, 13, 26, 16, 31, 15, 0, 20,
35, 19, 18, 28, 34, 32, 25, 7,
21, 9, 29, 4, 10, 27, 40, 39,
22,
};
testing.expectEqualSlices(u32, &test1_result, &test1);
// test with not enough keys
var test2 = [_]u32{
5, 4, 3, 4, 7, 4, 4, 2, 6, 2, 11, 6, 10, 9,
};
const found_keys_2 = collectKeys(u32, &test2, 10, asc_ignoreBottomBit);
testing.expectEqual(@as(usize, 5), found_keys_2);
insertSort(u32, test2[0..found_keys_2], asc_ignoreBottomBit);
const test2_result = [_]u32{
3, 5, 7, 9, 11,
4, 4, 4, 4, 2, 6, 2, 6, 10,
};
testing.expectEqualSlices(u32, &test2_result, &test2);
}
test "buildInPlace" { test_buildInPlace(); }
fn test_buildInPlace() void {
// We're going to go from sorted blocks of two to blocks of 8.
// This will do one forward merge pass and one reverse merge pass.
var buffer = [_]u32{
// two keys at the beginning
1<<20, 1<<21,
// in order
2, 4, 6, 8,
10, 12, 14, 16,
// reversed
14, 16, 10, 12,
6, 8, 2, 4,
// mixed 1
2, 4, 10, 12,
6, 8, 14, 16,
// mixed 2
14, 16, 6, 8,
10, 12, 2, 4,
// mixed 3
6, 16, 8, 14,
2, 12, 4, 10,
// stability
8, 10, 2, 4,
3, 5, 7, 9,
// stability 2
7, 9, 3, 5,
2, 4, 8, 10,
// stability 3
4, 5, 2, 3,
8, 9, 6, 7,
// trailer
6, 8, 2, 4,
5, 10, 7,
// two keys at the end from the first pass
1<<22, 1<<23,
};
const key_sum = sum(u32, buffer[0..2]) + sum(u32, buffer[buffer.len-2..]);
// N.B. block size is 4, to produce chunks of size 8.
buildInPlace(u32, &buffer, 2, 4, asc_ignoreBottomBit);
const sorted = [_]u32{
2, 4, 6, 8, 10, 12, 14, 16, // 0
2, 4, 6, 8, 10, 12, 14, 16, // 8
2, 4, 6, 8, 10, 12, 14, 16, // 16
2, 4, 6, 8, 10, 12, 14, 16, // 24
2, 4, 6, 8, 10, 12, 14, 16, // 32
2, 3, 4, 5, 7, 8, 9, 10, // 40
3, 2, 5, 4, 7, 9, 8, 10, // 48
2, 3, 4, 5, 6, 7, 8, 9, // 56
2, 4, 5, 6, 7, 8, 10, // 64
};
testing.expectEqual(key_sum, sum(u32, buffer[0..4]));
testing.expectEqualSlices(u32, &sorted, buffer[4..]);
}
test "buildBlocksExternal" { test_buildBlocksExternal(); }
fn test_buildBlocksExternal() void {
var external_buffer: [10]u32 = undefined;
for (external_buffer[1..9]) |_, i| {
if (i < 2) continue;
// We're going to go from sorted blocks of two to blocks of 8.
// This will do one forward merge pass and one reverse merge pass.
var buffer = [_]u32{
// four keys at the beginning
1<<20, 1<<21, 1<<22, 1<<23,
// in order
2, 4, 6, 8,
10, 12, 14, 16,
// reversed
16, 14, 12, 10,
8, 6, 4, 2,
// mixed 1
2, 4, 10, 12,
6, 8, 14, 16,
// mixed 2
16, 14, 6, 8,
10, 12, 4, 2,
// mixed 3
16, 6, 8, 14,
12, 2, 4, 10,
// stability
8, 10, 2, 4,
3, 5, 9, 7,
// stability 2
7, 9, 5, 3,
2, 4, 8, 10,
// stability 3
5, 4, 3, 2,
9, 8, 7, 6,
// trailer
6, 8, 4, 2,
5, 10, 7,
};
external_buffer[0] = 0xBADF00D5;
external_buffer[i+1] = 0;
// N.B. block size is 4, to produce chunks of size 8.
buildBlocksExternal(u32, &buffer, 4, external_buffer[1..i+1], asc_ignoreBottomBit);
// verify canaries
testing.expectEqual(@as(u32, 0xBADF00D5), external_buffer[0]);
testing.expectEqual(@as(u32, 0), external_buffer[i+1]);
const sorted = [_]u32{
// to preserve stability, the keys must be
// unmodified at the beginning.
1<<20, 1<<21, 1<<22, 1<<23,
2, 4, 6, 8, 10, 12, 14, 16, // 0
2, 4, 6, 8, 10, 12, 14, 16, // 8
2, 4, 6, 8, 10, 12, 14, 16, // 16
2, 4, 6, 8, 10, 12, 14, 16, // 24
2, 4, 6, 8, 10, 12, 14, 16, // 32
2, 3, 4, 5, 7, 8, 9, 10, // 40
3, 2, 5, 4, 7, 9, 8, 10, // 48
3, 2, 5, 4, 7, 6, 9, 8, // 56
2, 4, 5, 6, 7, 8, 10, // 64
};
if (i >= 4) {
// keys must be stable if we have an ideal buffer
testing.expectEqualSlices(u32, &sorted, &buffer);
} else {
// otherwise we just need the data to be preserved,
// order is not important.
testing.expectEqual(sum(u32, sorted[0..4]), sum(u32, buffer[0..4]));
testing.expectEqualSlices(u32, sorted[4..], buffer[4..]);
}
}
}
test "pairwiseSwaps" { test_pairwiseSwaps(); }
fn test_pairwiseSwaps() void {
var buffer = [_]u32{
// two keys at the beginning
1<<20, 1<<21,
// odd number of keys
2, 3, // stability 1
3, 2, // stability 2
2, 4, // in order
4, 2, // out of order
0, // last item
};
const key_sum = sum(u32, buffer[0..2]);
pairwiseSwaps(u32, &buffer, asc_ignoreBottomBit);
const sorted = [_]u32 {
2, 3, // stability preserved
3, 2, // stability preserved
2, 4, // kept in order
2, 4, // swapped into order
0, // last item preserved
};
testing.expectEqualSlices(u32, &sorted, buffer[0..buffer.len-2]);
testing.expectEqual(key_sum, sum(u32, buffer[buffer.len-2..]));
}
test "pairwiseWrites" { test_pairwiseWrites(); }
fn test_pairwiseWrites() void {
var buffer = [_]u32{
// two keys at the beginning
1<<20, 1<<21,
// odd number of keys
2, 3, // stability 1
3, 2, // stability 2
2, 4, // in order
4, 2, // out of order
0, // last item
};
const key_sum = sum(u32, buffer[0..2]);
pairwiseWrites(u32, &buffer, asc_ignoreBottomBit);
const sorted = [_]u32 {
2, 3, // stability preserved
3, 2, // stability preserved
2, 4, // kept in order
2, 4, // swapped into order
0, // last item preserved
};
testing.expectEqualSlices(u32, &sorted, buffer[0..buffer.len-2]);
// we don't care what's at the end of the array
}
test "combineBlocksInPlace" { test_combineBlocksInPlace(); }
fn test_combineBlocksInPlace() void {
// for this test, the block size is 4.
// we will merge four subarrays of four blocks into two subarrays of eight blocks.
// this is the initial state:
var buffer = [73]u32{
// canary 0
0xdeadb0a7,
// 8 keys for our max 8 blocks
1<<20, 1<<21, 1<<22, 1<<23,
1<<24, 1<<25, 1<<26, 1<<27,
// canary 1
0xdeadbeef,
// one block of junk
1<<12, 1<<13, 1<<14, 1<<15,
// subarray 1
// four blocks of left list, sorted
2, 4, 6, 8,
10, 10, 10, 10,
10, 12, 12, 20,
22, 30, 60, 100,
// four blocks of right list, sorted
3, 3, 7, 7,
7, 9, 9, 11,
11, 15, 19, 25,
35, 37, 39, 41,
// subarray 2, partial
// four blocks of left list, sorted
2, 4, 6, 8,
10, 10, 10, 10,
12, 12, 12, 20,
22, 30, 60, 100,
// two and a half blocks of right list, sorted
1, 3, 3, 5,
7, 7, 7, 7,
11, 50,
// end canary
0xcafebabe,
};
// references to chunks of the buffer
const canary0 = &buffer[0];
const keys: []u32 = buffer[1..9];
const canary1 = &buffer[9];
const data: []u32 = buffer[10..72];
const canary2 = &buffer[72];
const key_sum = sum(u32, keys);
const junk_sum = sum(u32, data[0..4]);
// do the combine
combineBlocks(u32, buffer[1..72], 13, 4 * 4, 4, .inline_buffer, emptySlice(u32), asc_ignoreBottomBit);
const sorted_data = [_]u32{
// subarray 0
2, 3, 3, 4,
6, 7, 7, 7,
8, 9, 9, 10,
10, 10, 10, 10,
11, 11, 12, 12,
15, 19, 20, 22,
25, 30, 35, 37,
39, 41, 60, 100,
// subarray 1
1, 2, 3, 3,
4, 5, 6, 7,
7, 7, 7, 8,
10, 10, 10, 10,
11, 12, 12, 12,
20, 22, 30, 50,
60, 100,
};
// the canaries should be untouched
testing.expectEqual(@as(u32, 0xdeadb0a7), canary0.*);
testing.expectEqual(@as(u32, 0xdeadbeef), canary1.*);
testing.expectEqual(@as(u32, 0xcafebabe), canary2.*);
// the keys must all exist but may be in any order
testing.expectEqual(key_sum, sum(u32, keys));
// the junk buffer should be before the data, but may be reordered.
testing.expectEqual(junk_sum, sum(u32, data[0..4]));
// the data should be sorted
testing.expectEqualSlices(u32, &sorted_data, data[4..]);
}
test "combineBlocksInPlace_lazy" { test_combineBlocksInPlace_lazy(); }
fn test_combineBlocksInPlace_lazy() void {
// for this test, the block size is 4.
// we will merge four subarrays of four blocks into two subarrays of eight blocks.
// this is the initial state:
var buffer = [69]u32{
// canary 0
0xdeadb0a7,
// 8 keys for our max 8 blocks
1<<20, 1<<21, 1<<22, 1<<23,
1<<24, 1<<25, 1<<26, 1<<27,
// canary 1
0xdeadbeef,
// subarray 1
// four blocks of left list, sorted
2, 4, 6, 8,
10, 10, 10, 10,
10, 12, 12, 20,
22, 30, 60, 100,
// four blocks of right list, sorted
3, 3, 7, 7,
7, 9, 9, 11,
11, 15, 19, 25,
35, 37, 39, 41,
// subarray 2, partial
// four blocks of left list, sorted
2, 4, 6, 8,
10, 10, 10, 10,
12, 12, 12, 20,
22, 30, 60, 100,
// two and a half blocks of right list, sorted
1, 3, 3, 5,
7, 7, 7, 7,
11, 50,
// end canary
0xcafebabe,
};
// references to chunks of the buffer
const canary0 = &buffer[0];
const keys: []u32 = buffer[1..9];
const canary1 = &buffer[9];
const data: []u32 = buffer[10..68];
const canary2 = &buffer[68];
const key_sum = sum(u32, keys);
// do the combine
combineBlocks(u32, buffer[1..68], 9, 4 * 4, 4, .no_buffer, emptySlice(u32), asc_ignoreBottomBit);
const sorted_data = [_]u32{
// subarray 0
2, 3, 3, 4,
6, 7, 7, 7,
8, 9, 9, 10,
10, 10, 10, 10,
11, 11, 12, 12,
15, 19, 20, 22,
25, 30, 35, 37,
39, 41, 60, 100,
// subarray 1
1, 2, 3, 3,
4, 5, 6, 7,
7, 7, 7, 8,
10, 10, 10, 10,
11, 12, 12, 12,
20, 22, 30, 50,
60, 100,
};
// the canaries should be untouched
testing.expectEqual(@as(u32, 0xdeadb0a7), canary0.*);
testing.expectEqual(@as(u32, 0xdeadbeef), canary1.*);
testing.expectEqual(@as(u32, 0xcafebabe), canary2.*);
// the keys must all exist but may be in any order
testing.expectEqual(key_sum, sum(u32, keys));
// the data should be sorted
testing.expectEqualSlices(u32, &sorted_data, data);
}
test "mergeBlocks_noTrailer" { test_mergeBlocks_noTrailer(); }
fn test_mergeBlocks_noTrailer() void {
test_mergeBlocks_noTrailerMode(.inline_buffer);
}
test "mergeBlocks_withTrailer" { test_mergeBlocks_withTrailer(); }
fn test_mergeBlocks_withTrailer() void {
test_mergeBlocks_withTrailerMode(.inline_buffer);
}
test "mergeBlocks_noTrailerLazy" { test_mergeBlocks_noTrailerLazy(); }
fn test_mergeBlocks_noTrailerLazy() void {
test_mergeBlocks_noTrailerMode(.no_buffer);
}
test "mergeBlocks_withTrailerLazy" { test_mergeBlocks_withTrailerLazy(); }
fn test_mergeBlocks_withTrailerLazy() void {
test_mergeBlocks_withTrailerMode(.no_buffer);
}
test "mergeBlocks_noTrailerExternal" { test_mergeBlocks_noTrailerExternal(); }
fn test_mergeBlocks_noTrailerExternal() void {
test_mergeBlocks_noTrailerMode(.external_buffer);
}
test "mergeBlocks_withTrailerExternal" { test_mergeBlocks_withTrailerExternal(); }
fn test_mergeBlocks_withTrailerExternal() void {
test_mergeBlocks_withTrailerMode(.external_buffer);
}
fn test_mergeBlocks_noTrailerMode(comptime mode: BufferMode) void {
// for this test, the block size is 4.
// we have 8 blocks total from two lists of 4.
// this is the initial state:
var buffer = [47]u32{
// canary 0
0xdeadb0a7,
// 8 keys for our 8 blocks
1000, 1001, 1002, 1003,
1004, 1005, 1006, 1007,
// canary 1
0xdeadbeef,
// one block of junk
1<<12, 1<<13, 1<<14, 1<<15,
// four blocks of left list, sorted
2, 4, 6, 8,
10, 10, 10, 10,
10, 12, 12, 20,
22, 30, 60, 100,
// four blocks of right list, sorted
3, 3, 7, 7,
7, 9, 9, 11,
11, 15, 19, 25,
35, 37, 39, 41,
// end canary
0xcafebabe,
};
const junk_sum = sum(u32, buffer[10..14]);
// references to chunks of the buffer
const canary0 = &buffer[0];
const keys: []u32 = buffer[1..9];
const canary1 = &buffer[9];
const data: []u32 = buffer[10..46];
const canary2 = &buffer[46];
// First, selection sort the blocks (not including the junk buffer)
const median_key = blockSelectSort(u32, keys.ptr, data.ptr + 4, 4, 8, 4, asc_ignoreBottomBit);
// Now the buffer should look like this:
const selected_buffer = [47]u32{
// canary 0
0xdeadb0a7,
// 8 keys, permuted in the same way as the blocks
1000, 1004, 1005, 1001,
1002, 1006, 1003, 1007,
// canary 1
0xdeadbeef,
// one block of junk
1<<12, 1<<13, 1<<14, 1<<15,
// eight blocks from the two lists, sorted by starting value
2, 4, 6, 8,
3, 3, 7, 7,
7, 9, 9, 11,
10, 10, 10, 10,
10, 12, 12, 20,
11, 15, 19, 25,
22, 30, 60, 100,
35, 37, 39, 41,
// end canary
0xcafebabe,
};
testing.expectEqualSlices(u32, &selected_buffer, &buffer);
// The median key has moved to index 1.
testing.expectEqual(@as(usize, 1), median_key);
// merge the blocks together
mergeBlocks(u32, keys.ptr, median_key, data.ptr + 4, 8, 4, 0, 0, mode, asc_ignoreBottomBit);
const sorted_data = [32]u32{
2, 3, 3, 4,
6, 7, 7, 7,
8, 9, 9, 10,
10, 10, 10, 10,
11, 11, 12, 12,
15, 19, 20, 22,
25, 30, 35, 37,
39, 41, 60, 100,
};
// the canaries and keys should be untouched
testing.expectEqual(@as(u32, 0xdeadb0a7), canary0.*);
testing.expectEqual(@as(u32, 0xdeadbeef), canary1.*);
testing.expectEqual(@as(u32, 0xcafebabe), canary2.*);
testing.expectEqualSlices(u32, selected_buffer[1..9], keys);
switch (mode) {
.no_buffer => {
// the junk should be untouched
testing.expectEqualSlices(u32, &[_]u32{ 1<<12, 1<<13, 1<<14, 1<<15 }, data[0..4]);
// the data should be sorted at the end
testing.expectEqualSlices(u32, &sorted_data, data[4..36]);
},
.inline_buffer => {
// the data should be sorted at the beginning
testing.expectEqualSlices(u32, &sorted_data, data[0..32]);
// the junk buffer should be after the data, but may be reordered.
testing.expectEqual(junk_sum, sum(u32, data[32..]));
},
.external_buffer => {
// the data should be sorted at the beginning
testing.expectEqualSlices(u32, &sorted_data, data[0..32]);
// we don't care about the contents of the junk buffer
}
}
}
fn test_mergeBlocks_withTrailerMode(comptime mode: BufferMode) void {
// for this test, the block size is 4.
// we have 6 and a half blocks total,
// this is the initial state:
var buffer = [39]u32{
// canary 0
0xdeadb0a7,
// 6 keys for our 6 blocks
1000, 1001, 1002, 1003,
1004, 1005,
// canary 1
0xdeadbeef,
// one block of junk
1<<12, 1<<13, 1<<14, 1<<15,
// four blocks of left list, sorted
2, 4, 6, 8,
10, 10, 10, 10,
12, 12, 12, 20,
22, 30, 60, 100,
// two and a half blocks of right list, sorted
1, 3, 3, 5,
7, 7, 7, 7,
11, 50,
// end canary
0xcafebabe,
};
const junk_sum = sum(u32, buffer[8..12]);
// references to chunks of the buffer
const canary0 = &buffer[0];
const keys: []u32 = buffer[1..7];
const canary1 = &buffer[7];
const data: []u32 = buffer[8..38];
const canary2 = &buffer[38];
// First, selection sort the blocks (not including the junk buffer)
const median_key = blockSelectSort(u32, keys.ptr, data.ptr + 4, 4, 6, 4, asc_ignoreBottomBit);
// Now this is the buffer:
const selected_buffer = [_]u32{
// canary 0
0xdeadb0a7,
// 6 keys for our 6 blocks, permuted
1004, 1000, 1005, 1001, 1002, 1003,
// canary 1
0xdeadbeef,
// one block of junk
1<<12, 1<<13, 1<<14, 1<<15,
// all six blocks, sorted by first value
1, 3, 3, 5,
2, 4, 6, 8,
7, 7, 7, 7,
10, 10, 10, 10,
12, 12, 12, 20, // these two are unfit to participate in the normal sort
22, 30, 60, 100,
// trailers, not sorted
11, 50,
// end canary
0xcafebabe,
};
testing.expectEqualSlices(u32, &selected_buffer, &buffer);
// 1004 is now in index 0
testing.expectEqual(@as(usize, 0), median_key);
const unfit_blocks = countLastMergeBlocks(u32, data[4..], 6, 4, asc_ignoreBottomBit);
testing.expectEqual(@as(usize, 2), unfit_blocks);
// merge the blocks together
const trailer_len = unfit_blocks * 4 + 2;
mergeBlocks(u32, keys.ptr, median_key, data.ptr + 4, 6 - unfit_blocks, 4, unfit_blocks, trailer_len, mode, asc_ignoreBottomBit);
const sorted_data = [_]u32{
1, 2, 3, 3,
4, 5, 6, 7,
7, 7, 7, 8,
10, 10, 10, 10,
11, 12, 12, 12,
20, 22, 30, 50,
60, 100,
};
// the canaries and keys should be untouched
testing.expectEqual(@as(u32, 0xdeadb0a7), canary0.*);
testing.expectEqual(@as(u32, 0xdeadbeef), canary1.*);
testing.expectEqual(@as(u32, 0xcafebabe), canary2.*);
testing.expectEqualSlices(u32, selected_buffer[1..7], keys);
switch (mode) {
.no_buffer => {
// the junk should be untouched
testing.expectEqualSlices(u32, &[_]u32{ 1<<12, 1<<13, 1<<14, 1<<15 }, data[0..4]);
// the data should be sorted at the end
testing.expectEqualSlices(u32, &sorted_data, data[4..30]);
},
.inline_buffer => {
// the data should be sorted at the beginning
testing.expectEqualSlices(u32, &sorted_data, data[0..26]);
// the junk buffer should be after the data, but may be reordered.
testing.expectEqual(junk_sum, sum(u32, data[26..]));
},
.external_buffer => {
// the data should be sorted at the beginning
testing.expectEqualSlices(u32, &sorted_data, data[0..26]);
// we don't care about the contents of the junk buffer
}
}
}
test "mergeForwards" { test_mergeForwards(); }
fn test_mergeForwards() void {
var buffer = [18]u32 {
// start canary
0xdeadbeef,
// 5 item junk buffer
1<<10, 1<<11, 1<<12, 1<<13, 1<<14,
// 6 items in left list
2, 4, 4, 6, 32, 32,
// 5 items in right list
3, 5, 9, 11, 15,
// end canary
0xcafebabe,
};
const junk_total = sum(u32, buffer[1..6]);
mergeForwards(u32, buffer[6..17], 5, 6, 5, asc_ignoreBottomBit);
testing.expectEqual(@as(u32, 0xdeadbeef), buffer[0]);
testing.expectEqualSlices(u32, &[_]u32{ 2, 3, 4, 4, 5, 6, 9, 11, 15, 32, 32 }, buffer[1..12]);
testing.expectEqual(junk_total, sum(u32, buffer[12..17]));
testing.expectEqual(@as(u32, 0xcafebabe), buffer[17]);
}
test "mergeBackwards" { test_mergeBackwards(); }
fn test_mergeBackwards() void {
var buffer = [18]u32 {
// start canary
0xdeadbeef,
// 5 items in left list
3, 5, 9, 11, 15,
// 6 items in right list
2, 4, 4, 6, 32, 32,
// 5 item junk buffer
1<<10, 1<<11, 1<<12, 1<<13, 1<<14,
// end canary
0xcafebabe,
};
const junk_total = sum(u32, buffer[12..17]);
mergeBackwards(u32, buffer[1..17], 5, 6, 5, asc_ignoreBottomBit);
testing.expectEqual(@as(u32, 0xdeadbeef), buffer[0]);
testing.expectEqual(junk_total, sum(u32, buffer[1..6]));
testing.expectEqualSlices(u32, &[_]u32{ 3, 2, 5, 4, 4, 6, 9, 11, 15, 32, 32 }, buffer[6..17]);
testing.expectEqual(@as(u32, 0xcafebabe), buffer[17]);
}
test "mergeForwardExternal" { test_mergeForwardExternal(); }
fn test_mergeForwardExternal() void {
var buffer = [18]u32 {
// start canary
0xdeadbeef,
// 5 item junk buffer
1<<10, 1<<11, 1<<12, 1<<13, 1<<14,
// 6 items in left list
2, 4, 4, 6, 32, 32,
// 5 items in right list
3, 5, 9, 11, 15,
// end canary
0xcafebabe,
};
mergeForwardExternal(u32, buffer[6..17], 5, 6, 5, asc_ignoreBottomBit);
testing.expectEqual(@as(u32, 0xdeadbeef), buffer[0]);
testing.expectEqualSlices(u32, &[_]u32{ 2, 3, 4, 4, 5, 6, 9, 11, 15, 32, 32 }, buffer[1..12]);
// we don't care what's in the new junk buffer
testing.expectEqual(@as(u32, 0xcafebabe), buffer[17]);
}
test "lazyStableSort" { test_lazyStableSort(); }
fn test_lazyStableSort() void {
var array = [_]u32{
17, 30, 8, 14, 11, 3, 12, 24,
33, 2, 1, 36, 37, 23, 6, 38,
5, 13, 26, 16, 31, 15, 0, 20,
35, 19, 18, 28, 34, 32, 25, 7,
21, 9, 29, 4, 10, 27, 40, 39,
22,
};
lazyStableSort(u32, &array, asc_ignoreBottomBit);
const sorted = [_]u32{
1, 0, 3, 2, 5, 4, 6, 7,
8, 9, 11, 10, 12, 13, 14, 15,
17, 16, 19, 18, 20, 21, 23, 22,
24, 25, 26, 27, 28, 29, 30, 31,
33, 32, 35, 34, 36, 37, 38, 39,
40,
};
testing.expectEqualSlices(u32, &sorted, &array);
}
test "lazyMerge" { test_lazyMerge(); }
fn test_lazyMerge() void {
// lazyMerge has separate implementations depending on
// which half is smaller, make sure we test both.
var left_smaller = [_]u32{
2, 8, 8, 16,
5, 7, 9, 11, 13, 15,
};
lazyMerge(u32, &left_smaller, 4, asc_ignoreBottomBit);
testing.expectEqualSlices(u32, &[_]u32{ 2, 5, 7, 8, 8, 9, 11, 13, 15, 16 }, &left_smaller);
var right_smaller = [_]u32{
5, 7, 9, 11, 13, 15,
2, 8, 8, 16,
};
lazyMerge(u32, &right_smaller, 6, asc_ignoreBottomBit);
testing.expectEqualSlices(u32, &[_]u32{ 2, 5, 7, 9, 8, 8, 11, 13, 15, 16 }, &right_smaller);
}
test "insertSort" { test_insertSort(); }
fn test_insertSort() void {
var test_1 = [_]u32{ 8, 7, 6, 4, 2, 3, 1 };
insertSort(u32, &test_1, asc_ignoreBottomBit);
testing.expectEqualSlices(u32, &[_]u32{ 1, 2, 3, 4, 7, 6, 8 }, &test_1);
// just make sure this doesn't crash
insertSort(u32, emptySlice(u32), asc_ignoreBottomBit);
}
test "binarySearchLeft" { test_binarySearchLeft(); }
fn test_binarySearchLeft() void {
const sorted = [_]u32{ 2, 3, 4, 5, 4, 6, 11 };
testing.expectEqual(@as(usize, 0), binarySearchLeft(u32, &sorted, 0, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 0), binarySearchLeft(u32, &sorted, 1, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 0), binarySearchLeft(u32, &sorted, 2, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 0), binarySearchLeft(u32, &sorted, 3, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 2), binarySearchLeft(u32, &sorted, 4, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 2), binarySearchLeft(u32, &sorted, 5, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 5), binarySearchLeft(u32, &sorted, 6, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 5), binarySearchLeft(u32, &sorted, 7, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 6), binarySearchLeft(u32, &sorted, 8, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 6), binarySearchLeft(u32, &sorted, 9, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 6), binarySearchLeft(u32, &sorted, 10, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 6), binarySearchLeft(u32, &sorted, 11, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 7), binarySearchLeft(u32, &sorted, 12, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 7), binarySearchLeft(u32, &sorted, 200, asc_ignoreBottomBit));
testing.expectEqual(@as(usize, 0), binarySearchLeft(u32, emptySlice(u32), 200, asc_ignoreBottomBit));
}
test "moveFrontToBack" { test_moveFrontToBack(); }
fn test_moveFrontToBack() void {
// use unique bits so we can sum the items to find if they exist.
const init_array = [_]u32{ 1, 2, 4, 8, 16, 32, 64, 128, 256 };
// test with buffer smaller than half
var array = init_array;
moveFrontToBack(u32, array[1..8], 3);
testing.expectEqual(@as(u32, 1), array[0]);
// we don't care about the order of these items but they all need to be there.
testing.expectEqual(sum(u32, init_array[4..8]), sum(u32, array[1..5]));
testing.expectEqualSlices(u32, &[_]u32{ 2, 4, 8, 256 }, array[5..]);
// test with buffer larger than half
array = init_array;
moveFrontToBack(u32, array[1..8], 5);
testing.expectEqual(@as(u32, 1), array[0]);
// we don't care about the order of these items but they all need to be there.
testing.expectEqual(sum(u32, init_array[6..8]), sum(u32, array[1..3]));
testing.expectEqualSlices(u32, &[_]u32{ 2, 4, 8, 16, 32, 256 }, array[3..]);
// test with buffer length zero
array = init_array;
moveFrontToBack(u32, array[1..8], 0);
testing.expectEqualSlices(u32, &init_array, &array);
// test with full length buffer
array = init_array;
moveFrontToBack(u32, array[1..8], 7);
testing.expectEqualSlices(u32, &init_array, &array);
// test with empty slice
moveFrontToBack(u32, emptySlice(u32), 0);
}
test "moveBackToFront" { test_moveBackToFront(); }
fn test_moveBackToFront() void {
const init_array = [_]u32{ 1, 2, 4, 8, 16, 32, 64, 128, 256 };
// test with buffer smaller than half
var array = init_array;
moveBackToFront(u32, array[1..8], 3);
testing.expectEqualSlices(u32, &[_]u32{ 1, 32, 64, 128 }, array[0..4]);
// we don't care about the order of these items but they all need to be there.
testing.expectEqual(sum(u32, init_array[1..5]), sum(u32, array[4..8]));
testing.expectEqual(@as(u32, 256), array[8]);
// test with buffer larger than half
array = init_array;
moveBackToFront(u32, array[1..8], 5);
testing.expectEqualSlices(u32, &[_]u32{ 1, 8, 16, 32, 64, 128 }, array[0..6]);
// we don't care about the order of these items but they all need to be there.
testing.expectEqual(sum(u32, init_array[1..3]), sum(u32, array[6..8]));
testing.expectEqual(@as(u32, 256), array[8]);
// test with buffer length zero
array = init_array;
moveBackToFront(u32, array[1..8], 0);
testing.expectEqualSlices(u32, &init_array, &array);
// test with full length buffer
array = init_array;
moveBackToFront(u32, array[1..8], 7);
testing.expectEqualSlices(u32, &init_array, &array);
// test with empty slice
moveBackToFront(u32, emptySlice(u32), 0);
}
test "rotate" { test_rotate(); }
fn test_rotate() void {
var array = [_]u32{ 0, 1, 2, 3, 4, 5, 6, 7, 8 };
rotate(u32, array[1..8], 3);
testing.expectEqualSlices(u32, &[_]u32{ 0, 4, 5, 6, 7, 1, 2, 3, 8 }, &array);
}
test "blockSwap" { test_blockSwap(); }
fn test_blockSwap() void {
var array = [_]u32{ 0, 1, 2, 3, 4, 5, 6, 7, 8 };
blockSwap(u32, @as([*]u32, &array) + 1, @as([*]u32, &array) + 5, 3);
testing.expectEqualSlices(u32, &[_]u32{ 0, 5, 6, 7, 4, 1, 2, 3, 8 }, &array);
}
test "copyNoAlias" { test_copyNoAlias(); }
fn test_copyNoAlias() void {
var array = [_]u32{ 0, 1, 2, 3, 4, 5, 6, 7, 8 };
copyNoAlias(u32, @as([*]u32, &array) + 1, @as([*]u32, &array) + 5, 3);
testing.expectEqualSlices(u32, &[_]u32{ 0, 5, 6, 7, 4, 5, 6, 7, 8 }, &array);
}
test "emptySlice" { test_emptySlice(); }
fn test_emptySlice() void {
test_emptySliceType(u32);
test_emptySliceType(struct{x: u32, y: f32});
test_emptySliceType(void);
}
fn test_emptySliceType(comptime T: type) void {
const a = emptySlice(T);
testing.expectEqual([]T, @TypeOf(a));
testing.expectEqual(@as(usize, 0), a.len);
// TODO-ZIG: The compiler currently doesn't do this properly.
//testing.expect(@ptrToInt(a.ptr) != 0);
}
fn sum(comptime T: type, slice: []const T) T {
var total: T = 0;
for (slice) |item| total += item;
return total;
}
// A comparison function for sorting in ascending order,
// ignoring the bottom bit of both operands. The bottom
// bit can be used to check stability.
fn asc_ignoreBottomBitFn(ctx: void, a: u32, b: u32) bool {
return (a >> 1) < (b >> 1);
}
const asc_ignoreBottomBit = struct {
//! This struct just cleans up the parameter lists of inner functions.
context: void,
pub inline fn lessThan(self: @This(), lhs: u32, rhs: u32) bool {
// TODO-API: do some tests to see if a compare function
// can actually be performant here
return asc_ignoreBottomBitFn(self.context, lhs, rhs);
}
pub inline fn compare(self: @This(), lhs: u32, rhs: u32) std.math.Order {
// TODO-OPT: use an actual compare function here
if (asc_ignoreBottomBitFn(self.context, lhs, rhs)) return .lt;
if (asc_ignoreBottomBitFn(self.context, rhs, lhs)) return .gt;
return .eq;
}
} { .context = {} };
}; | src/grailsort.zig |
const std = @import("std");
const Server = @import("server.zig").Server;
const Config = @import("config.zig").Config;
const mimes = @import("mimes.zig");
pub const io_mode = .evented;
const Handler = struct {
const MAX_FILE_SIZE = 10 * 1024 * 1024;
config: *const Config,
context: *Server.Context,
pub fn handle(config: *const Config, context: *Server.Context) !void {
var handler = Handler{
.config = config,
.context = context,
};
try handler.handleEntry();
}
fn handleEntry(self: *Handler) !void {
const cwd = std.fs.cwd();
if (!std.ascii.eqlIgnoreCase(self.context.request.url.scheme, "gemini"))
return self.context.status(Server.ResponseStatus.NO_NON_GEMINI);
var it = self.config.vhosts.iterator();
while (it.next()) |entry| {
if (std.ascii.eqlIgnoreCase(entry.key, self.context.request.url.host)) {
var root = try cwd.openDir(entry.value.root, .{ .iterate = true });
defer root.close();
if (self.context.request.url.path.len == 0) return self.handleDir(root, true, &entry.value);
if (root.openDir(self.context.request.url.path, .{ .iterate = true })) |*subdir| {
defer subdir.close();
return self.handleDir(subdir.*, false, &entry.value);
} else |_| {}
if (try self.maybeReadFile(root, self.context.request.url.path)) return;
return self.context.status(Server.ResponseStatus.NOT_FOUND);
}
}
return self.context.status(Server.ResponseStatus.NO_MATCHING_VHOST);
}
fn handleDir(self: *Handler, dir: std.fs.Dir, at_root: bool, vhost: *const Config.VHost) !void {
if (try self.maybeReadFile(dir, vhost.index)) return;
var dirs_al = std.ArrayList([]u8).init(&self.context.arena.allocator);
var files_al = std.ArrayList([]u8).init(&self.context.arena.allocator);
var it = dir.iterate();
while (try it.next()) |entry| {
if (std.mem.startsWith(u8, entry.name, ".")) continue;
try (switch (entry.kind) {
.Directory => dirs_al,
else => files_al,
}).append(try self.context.arena.allocator.dupe(u8, entry.name));
}
var dirs = dirs_al.toOwnedSlice();
std.sort.sort([]u8, dirs, {}, sortFn);
var files = files_al.toOwnedSlice();
std.sort.sort([]u8, files, {}, sortFn);
self.context.status(.{ .code = .Success, .meta = "text/gemini" });
var writer = self.context.writer();
if (!at_root) try writer.writeAll("=> .. ../\r\n");
for (dirs) |name| {
try writer.writeAll("=> ");
try writer.writeAll(name);
try writer.writeAll("/\r\n");
}
for (files) |name| {
try writer.writeAll("=> ");
try writer.writeAll(name);
try writer.writeAll("\r\n");
}
}
fn sortFn(_: void, lhs: []const u8, rhs: []const u8) bool {
return std.mem.lessThan(u8, lhs, rhs);
}
fn maybeReadFile(self: *Handler, dir: std.fs.Dir, path: []const u8) !bool {
if (dir.readFileAlloc(&self.context.arena.allocator, path, MAX_FILE_SIZE)) |s| {
const basename = std.fs.path.basename(path);
const mime_type = if (std.mem.lastIndexOfScalar(u8, basename, '.')) |ix|
mimes.lookup(basename[ix + 1 ..])
else
null;
self.context.status(.{ .code = .Success, .meta = mime_type orelse "text/plain" });
try self.context.writer().writeAll(s);
return true;
} else |err| switch (err) {
error.FileNotFound => return false,
else => return err,
}
}
};
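// The block below only contributes declarations when the standard library is
// built in evented mode (io_mode = .evented above): the async client
// bookkeeping (AsyncClient plus the clients/finished_clients registries).
// In blocking builds the else branch supplies an empty struct, so none of
// these names are pulled in.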
usingnamespace if (std.io.is_async)
struct {
pub const AsyncClient = struct {
config: *Config,
connection: std.net.StreamServer.Connection,
handle_frame: @Frame(handle) = undefined,
timeout_frame: @Frame(timeout) = undefined,
status: enum { started, finished, hit_timeout, finish_pushed } = .started,
pub fn create(allocator: *std.mem.Allocator, config: *Config, connection: std.net.StreamServer.Connection) !*AsyncClient {
var self = try allocator.create(AsyncClient);
self.* = .{ .config = config, .connection = connection };
self.handle_frame = async self.handle();
self.timeout_frame = async self.timeout();
return self;
}
fn handle(self: *AsyncClient) void {
handleConnection(self.config, self.connection);
// State transitions for post-handle:
// * started -> finished (We completed work here; signal timeout frame to exit)
// * finished -X (Only we should set this here)
// * hit_timeout -> finish_pushed (Timeout was hit and connection force-closed for us; exit)
// * finish_pushed -X (We should never observe this; implies our frame was dealloced)
switch (self.status) {
.started => self.status = .finished,
.hit_timeout => return self.finished(),
else => unreachable,
}
}
fn timeout(self: *AsyncClient) void {
var seconds_remaining: usize = self.config.client_timeout_seconds;
while (seconds_remaining > 0) : (seconds_remaining -= 1) {
std.event.Loop.instance.?.sleep(1 * std.time.ns_per_s);
// State transitions for mid-timeout loop:
// * started -> started (Still working ...)
// * finished -> finish_pushed (Normal completion, exit)
// * hit_timeout -X (Only we should set this later)
// * finish_pushed -X (We should never observe this; implies our frame was dealloced)
switch (self.status) {
.started => {},
.finished => return self.finished(),
else => unreachable,
}
}
// State transitions for post-timeout:
// * started -> hit_timeout (Signal handle frame that we've exited here; kill connection)
// * finished -X (This should've been handled above, as execution proceeds straight down)
// * hit_timeout -X (Only we should set this here)
// * finish_pushed -X (We should never observe this; implies our frame was dealloced)
switch (self.status) {
.started => {
std.debug.print("timeout handling request\n", .{});
self.status = .hit_timeout;
std.os.shutdown(self.connection.stream.handle, .both) catch {};
},
else => unreachable,
}
}
fn finished(self: *AsyncClient) void {
self.status = .finish_pushed;
finished_clients.append(self) catch |err| {
std.debug.print("{*}: finished(): error appending to finished_clients: {}\n", .{ self, err });
};
}
};
pub var clients: std.AutoHashMap(*AsyncClient, void) = undefined;
pub var finished_clients: std.ArrayList(*AsyncClient) = undefined;
pub fn cleanupFinished(allocator: *std.mem.Allocator) void {
for (finished_clients.items) |fin| {
_ = clients.remove(fin);
allocator.destroy(fin);
}
finished_clients.items.len = 0;
}
}
else
struct {};
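// A self-contained sketch of the AsyncClient.status transitions documented in
// the comments inside handle() and timeout() above. It only models the enum
// transitions; the real coordination happens across the two async frames.
// The names Status, afterHandle, afterTimeoutTick and afterTimeoutExpiry are
// illustrative and not part of the server itself.
const Status = enum { started, finished, hit_timeout, finish_pushed };

fn afterHandle(status: Status) Status {
    return switch (status) {
        .started => .finished, // handle() completed first; timeout frame will push finish
        .hit_timeout => .finish_pushed, // timeout already fired; handle() pushes finish itself
        else => unreachable,
    };
}

fn afterTimeoutTick(status: Status) Status {
    return switch (status) {
        .started => .started, // still working, keep sleeping
        .finished => .finish_pushed, // normal completion observed by the timeout frame
        else => unreachable,
    };
}

fn afterTimeoutExpiry(status: Status) Status {
    return switch (status) {
        .started => .hit_timeout, // timeout wins; the connection gets shut down
        else => unreachable,
    };
}

test "AsyncClient status transitions (sketch)" {
    std.debug.assert(afterTimeoutTick(afterHandle(.started)) == .finish_pushed);
    std.debug.assert(afterHandle(afterTimeoutExpiry(.started)) == .finish_pushed);
}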
fn handleConnection(config: *Config, connection: std.net.StreamServer.Connection) void {
var context = Server.readRequest(connection) catch |err| {
std.debug.print("readRequest failed: {}\n", .{err});
connection.stream.close();
return;
};
defer context.deinit();
Handler.handle(config, &context) catch |err| {
std.debug.print("{s} -> {}\n", .{ context.request.original_url, err });
return;
};
std.debug.print("{s} -> {s} {s}\n", .{ context.request.original_url, @tagName(context.response_status.code), context.response_status.meta });
}
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = gpa.deinit();
var allocator = &gpa.allocator;
try mimes.init(allocator);
defer mimes.deinit(allocator);
var config = blk: {
var raw_config = try std.fs.cwd().readFileAlloc(allocator, "config.zzz", 1024 * 100);
errdefer gpa.allocator.free(raw_config);
break :blk try Config.init(allocator, raw_config);
};
defer config.deinit();
var server = try Server.init(config.bind, config.port);
defer server.deinit();
try std.io.getStdOut().writer().print("kaksikud listening on {s}:{}\n", .{ config.bind, config.port });
if (std.io.is_async) {
clients = std.AutoHashMap(*AsyncClient, void).init(allocator);
finished_clients = std.ArrayList(*AsyncClient).init(allocator);
}
defer if (std.io.is_async) {
var it = clients.iterator();
while (it.next()) |entry| {
await entry.key.handle_frame;
await entry.key.timeout_frame;
}
cleanupFinished(allocator);
finished_clients.deinit();
clients.deinit();
};
while (true) {
var connection = server.getConnection() catch |err| {
std.debug.print("getConnection failed: {}\n", .{err});
continue;
};
if (std.io.is_async) {
cleanupFinished(allocator);
if (clients.count() >= config.max_concurrent_clients) {
connection.stream.close();
continue;
}
var client = try AsyncClient.create(allocator, &config, connection);
try clients.putNoClobber(client, {});
} else {
handleConnection(&config, connection);
}
}
} | src/main.zig |
const std = @import("std");
const debug = std.debug;
const mem = std.mem;
const fmt = std.fmt;
const testing = std.testing;
const heap = std.heap;
const fs = std.fs;
const log = std.log;
const network = @import("network");
const parsing = @import("./parsing.zig");
const example_data = @import("./example_data.zig");
const ArrayList = std.ArrayList;
const Socket = network.Socket;
const EndPoint = network.EndPoint;
const SocketSet = network.SocketSet;
const Etag = u64;
pub const Connection = union(enum) {
idle: void,
receiving: ReceivingState,
sending: SendingState,
pub fn sendingFile(
file: fs.File,
file_length: usize,
socket: Socket,
endpoint: EndPoint,
etag: Etag,
arena: *heap.ArenaAllocator,
static_path: []const u8,
request: parsing.Request,
longlived_allocator: *mem.Allocator,
) Connection {
return Connection{
.sending = SendingState{
.socket = socket,
.payload = Payload{
.file = FileInformation{
.file = file,
.file_length = file_length,
.etag = etag,
.static_path = static_path,
},
},
.endpoint = endpoint,
.arena = arena,
.request = request,
.start_timestamp = std.time.nanoTimestamp(),
.longlived_allocator = longlived_allocator,
.headers_sent = false,
},
};
}
pub fn sendingData(
data: []const u8,
content_type: []const u8,
allocator: *mem.Allocator,
socket: Socket,
endpoint: EndPoint,
arena: *heap.ArenaAllocator,
request: parsing.Request,
longlived_allocator: *mem.Allocator,
) Connection {
return Connection{
.sending = SendingState{
.socket = socket,
.payload = Payload{
.data = DataInformation{
.data = data,
.content_type = content_type,
.position = 0,
.allocator = allocator,
},
},
.endpoint = endpoint,
.arena = arena,
.start_timestamp = std.time.nanoTimestamp(),
.request = request,
.headers_sent = false,
.longlived_allocator = longlived_allocator,
},
};
}
pub fn receiving(socket: Socket, endpoint: EndPoint) Connection {
return Connection{
.receiving = ReceivingState{
.socket = socket,
.endpoint = endpoint,
.start_timestamp = std.time.nanoTimestamp(),
},
};
}
};
pub const ReceivingState = struct {
socket: Socket,
endpoint: EndPoint,
start_timestamp: i128,
};
const FileInformation = struct {
file: fs.File,
file_length: usize,
etag: Etag,
static_path: []const u8,
};
const DataInformation = struct {
data: []const u8,
position: usize,
allocator: *mem.Allocator,
content_type: []const u8,
};
const Payload = union(enum) {
file: FileInformation,
data: DataInformation,
};
pub const SendingState = struct {
const Self = @This();
payload: Payload,
socket: Socket,
endpoint: EndPoint,
arena: *heap.ArenaAllocator,
request: parsing.Request,
start_timestamp: i128,
longlived_allocator: *mem.Allocator,
headers_sent: bool = false,
pub fn sendChunk(
self: *Self,
allocator: *mem.Allocator,
socket_set: *SocketSet,
chunk_size: usize,
) !Connection {
const send_buffer = send_buffer: {
switch (self.payload) {
.file => |file_information| {
var file_buffer = try allocator.alloc(u8, chunk_size);
const read_bytes = try file_information.file.read(file_buffer);
break :send_buffer file_buffer[0..read_bytes];
},
.data => |*data_information| {
const position = data_information.position;
const data = data_information.data;
const data_buffer = if (position + chunk_size < data.len)
data[position..(position + chunk_size)]
else
data[position..];
data_information.position += data_buffer.len;
break :send_buffer data_buffer;
},
}
};
        // NOTE: partial sends are not handled; send() may write fewer bytes
        // than requested, in which case the remainder of this chunk is dropped.
        _ = try self.socket.send(send_buffer);
if (send_buffer.len < chunk_size) {
const end_timestamp = std.time.nanoTimestamp();
const time_difference = end_timestamp - self.start_timestamp;
const timestamp_in_ms = @intToFloat(f64, time_difference) / 1_000_000.0;
log.info(
"{} <== {} ({d:.3} ms)",
.{ self.endpoint, self.request.request_line.resource, timestamp_in_ms },
);
self.deinit(socket_set);
return Connection.idle;
} else {
return Connection{ .sending = self.* };
}
}
pub fn deinit(self: *Self, socket_set: *SocketSet) void {
self.arena.deinit();
self.longlived_allocator.destroy(self.arena);
self.socket.close();
switch (self.payload) {
.file => |file_information| file_information.file.close(),
.data => |data_information| data_information.allocator.free(data_information.data),
}
socket_set.remove(self.socket);
}
};
pub fn handleConnection(
connection: *Connection,
stack_allocator: *mem.Allocator,
longlived_allocator: *mem.Allocator,
local_endpoint: EndPoint,
socket_set: *SocketSet,
send_chunk_size: usize,
memory_debug: bool,
connections: ArrayList(Connection),
static_root: []const u8,
running: *bool,
) !Connection {
const socket_is_faulted = switch (connection.*) {
.receiving => |receiving| removeFaultedReceivingSocket(receiving, socket_set),
.sending => |*sending| removeFaultedSendingSocket(sending, socket_set),
.idle => false,
};
if (socket_is_faulted) return Connection.idle;
return switch (connection.*) {
.receiving => |receiving| try handleReceiving(
receiving,
connection,
longlived_allocator,
stack_allocator,
socket_set,
connections,
static_root,
running,
memory_debug,
),
.sending => |*sending| try handleSending(
sending,
connection,
socket_set,
longlived_allocator,
stack_allocator,
send_chunk_size,
),
.idle => Connection.idle,
};
}
fn handleSending(
sending: *SendingState,
connection: *Connection,
socket_set: *SocketSet,
longlived_allocator: *mem.Allocator,
stack_allocator: *mem.Allocator,
send_chunk_size: usize,
) !Connection {
const socket = sending.socket;
if (socket_set.isReadyWrite(socket)) {
if (!sending.headers_sent) {
_ = socket.send("HTTP/1.1 200 OK\n") catch unreachable;
var header_buffer = try stack_allocator.alloc(u8, 128);
const etag_header = switch (sending.payload) {
.file => |file_information| try fmt.bufPrint(
header_buffer,
"ETag: {}\n",
.{file_information.etag},
),
.data => "",
};
_ = socket.send(etag_header) catch unreachable;
const content_type = switch (sending.payload) {
.file => |file_information| determineContentType(file_information.static_path),
.data => |data_information| data_information.content_type,
};
const content_type_header = try fmt.bufPrint(
header_buffer,
"Content-type: {}\n",
.{content_type},
);
_ = socket.send(content_type_header) catch unreachable;
const content_length = switch (sending.payload) {
.file => |file_information| file_information.file_length,
.data => |data_information| data_information.data.len,
};
const content_length_header = try fmt.bufPrint(
header_buffer,
"Content-length: {}\n",
.{content_length},
);
_ = socket.send(content_length_header) catch unreachable;
_ = socket.send("\n") catch unreachable;
sending.headers_sent = true;
}
const next_state = sending.sendChunk(
stack_allocator,
socket_set,
send_chunk_size,
) catch |e| {
switch (e) {
error.OutOfMemory => {
log.err("OOM!", .{});
},
error.ConnectionTimedOut,
error.ConnectionResetByPeer,
error.BrokenPipe,
error.OperationAborted,
=> {
log.err(
"Broken pipe / ConnectionResetByPeer sending to {}",
.{sending.endpoint},
);
},
error.FileDescriptorNotASocket,
error.IsDir,
error.AccessDenied,
error.WouldBlock,
error.FastOpenAlreadyInProgress,
error.MessageTooBig,
error.SystemResources,
error.InputOutput,
error.Unexpected,
error.NotOpenForReading,
=> {
debug.panic("odd error: {}\n", .{e});
},
}
sending.deinit(socket_set);
return Connection.idle;
};
return next_state;
} else {
return connection.*;
}
}
fn handleReceiving(
receiving: ReceivingState,
connection: *Connection,
longlived_allocator: *mem.Allocator,
stack_allocator: *mem.Allocator,
socket_set: *SocketSet,
connections: ArrayList(Connection),
static_root: []const u8,
running: *bool,
memory_debug: bool,
) !Connection {
const timestamp = std.time.nanoTimestamp();
if ((timestamp - receiving.start_timestamp) > receiving_state_timeout) {
socket_set.remove(receiving.socket);
receiving.socket.close();
return Connection.idle;
} else if (socket_set.isReadyRead(receiving.socket)) {
var arena = try longlived_allocator.create(heap.ArenaAllocator);
arena.* = heap.ArenaAllocator.init(longlived_allocator);
errdefer arena.deinit();
var request_arena_allocator = &arena.allocator;
const remote_endpoint = receiving.endpoint;
const socket = receiving.socket;
var buffer = try stack_allocator.alloc(u8, 2056);
        var received = socket.receive(buffer[0..]) catch |e| {
            log.err("=== receive error: {} ===", .{e});
socket.close();
socket_set.remove(socket);
return Connection.idle;
};
const request = parsing.Request.fromSlice(
request_arena_allocator,
buffer[0..received],
) catch |parsing_error| {
arena.deinit();
longlived_allocator.destroy(arena);
socket_set.remove(socket);
switch (parsing_error) {
error.OutOfMemory => {
_ = socket.send(high_load_response) catch |send_error| {
log.err(
"{} <== OOM error send error: {}",
.{ remote_endpoint, send_error },
);
};
socket.close();
return Connection.idle;
},
error.InvalidCharacter,
error.UnableToParseConnectionStatus,
error.UnableToParseCacheControlValue,
error.UnableToParseCacheControlHeader,
error.UnableToParseWeakETagValue,
error.UnableToParseNormalETagValue,
error.UnableToParseETag,
error.UnableToParseCrossOriginResourcePolicy,
error.UnableToParseMethod,
error.UnableToParseAllowCredentials,
error.UnableToParseScheme,
error.UnableToParseOriginScheme,
error.UnableToFindHeaderSeparator,
error.UnableToParseVersion,
error.NoVersionGiven,
error.NoResourceGiven,
error.NoMethodGiven,
=> {
log.err(
"{} <== 400 Bad Request: {}",
.{ remote_endpoint, parsing_error },
);
_ = socket.send(bad_request_response) catch |send_error| {
log.err(
"{} <== 400 Bad Request send error",
.{remote_endpoint},
);
};
socket.close();
return Connection.idle;
},
error.Overflow => {
log.err(
"{} <== 500 Internal error: Overflow",
.{remote_endpoint},
);
_ = socket.send(internal_error_response) catch |send_error| {
log.err(
"{} <== 500 Internal error send error: {}",
.{ remote_endpoint, send_error },
);
};
socket.close();
return Connection.idle;
},
}
};
const resource = if (mem.eql(u8, request.request_line.resource, ""))
"index.html"
else
request.request_line.resource;
if (request.request_line.method == .get and mem.eql(u8, resource, "diagnostics")) {
const content_format =
\\Connections: {}
\\
;
var content = fmt.allocPrint(
stack_allocator,
content_format,
.{connections.items.len},
) catch |alloc_print_error| {
switch (alloc_print_error) {
error.OutOfMemory => {
log.err(
"Unable to allocate memory for diagnostics content.",
.{},
);
socket.close();
socket_set.remove(socket);
arena.deinit();
longlived_allocator.destroy(arena);
return Connection.idle;
},
}
};
for (connections.items) |c| {
const connection_info = switch (c) {
.receiving => |r| try fmt.allocPrint(
stack_allocator,
"R: {}\n",
.{r.endpoint},
),
.sending => |s| connection_info: {
var string = try fmt.allocPrint(
stack_allocator,
"S: {} => {}\n",
.{ s.request.request_line.resource, s.endpoint },
);
for (s.request.headers.items) |h| {
string = try mem.concat(
stack_allocator,
u8,
&[_][]const u8{
string,
try fmt.allocPrint(stack_allocator, "\t{}\n", .{h}),
},
);
}
string = try mem.concat(stack_allocator, u8, &[_][]const u8{
string,
try fmt.allocPrint(stack_allocator, "\t{}\n", .{s.request.body}),
});
break :connection_info string;
},
.idle => "Idle\n",
};
content = mem.concat(
stack_allocator,
u8,
&[_][]const u8{ content, connection_info },
) catch |concat_error| content: {
log.err(
"Concat error while adding '{}'",
.{connection_info},
);
break :content content;
};
}
const format =
\\HTTP/1.1 200 OK
\\Content-length: {}
\\Content-type: text/plain
\\
\\{}
;
const response = try fmt.allocPrint(
stack_allocator,
format,
.{ content.len, content },
);
_ = socket.send(response) catch |send_error| {
log.err("=== Diagnostics send error: {}", .{send_error});
};
socket.close();
socket_set.remove(socket);
arena.deinit();
longlived_allocator.destroy(arena);
return Connection.idle;
} else if (request.request_line.method == .get) {
// I think it's reasonable to error out completely here, given that the static directory
// is pretty central to the whole thing. If it can't be opened it's closing time.
var static_directory = try fs.cwd().openDir(static_root, .{});
defer static_directory.close();
const resource_is_directory = isDirectory(
static_directory,
request.request_line.resource,
);
// @TODO: Should probably redirect here to force trailing slash, so relative links work
// properly?
// `http://.../sub-directory` won't handle a relative link to `style.css` such that it
// loads `http://.../sub-directory/style.css` for the implicitly loaded `index.html`.
// This also needs to interact with any sanitation that the parser does for the resource
// slice, which likely means it needs to use `trimLeft` instead of `trim`.
const static_path_components = if (resource_is_directory)
&[_][]const u8{ static_root, resource, "/index.html" }
else
&[_][]const u8{ static_root, resource };
const static_path = mem.concat(
request_arena_allocator,
u8,
static_path_components,
) catch |concat_error| {
switch (concat_error) {
error.OutOfMemory => {
log.err(
"=== OOM while concatenating static path: {}",
.{resource},
);
_ = socket.send(high_load_response) catch |send_error| {
log.err(
"=== High load / OOM send error: {}\n",
.{send_error},
);
};
socket.close();
socket_set.remove(socket);
arena.deinit();
longlived_allocator.destroy(arena);
return Connection.idle;
},
}
};
errdefer request_arena_allocator.free(static_path);
log.info(
"{} ==> {} {}",
.{ remote_endpoint, request.request_line.method.toSlice(), static_path },
);
const file = fs.cwd().openFile(static_path, .{}) catch |e| {
switch (e) {
error.FileNotFound => {
_ = socket.send(not_found_response) catch |send_error| {
log.err("=== send error 404 {} ===", .{send_error});
};
log.err(
"{} <== 404 ({})",
.{ remote_endpoint, static_path },
);
socket.close();
socket_set.remove(socket);
arena.deinit();
longlived_allocator.destroy(arena);
return Connection.idle;
},
error.NameTooLong => {
_ = socket.send(name_too_long_response) catch |send_error| {
log.err("=== send error 500 {} ===", .{send_error});
};
log.err(
"{} <== 400 (Name too long, {})",
.{ remote_endpoint, static_path },
);
socket.close();
socket_set.remove(socket);
arena.deinit();
longlived_allocator.destroy(arena);
return Connection.idle;
},
error.IsDir,
error.SystemResources,
error.WouldBlock,
error.FileTooBig,
error.AccessDenied,
error.Unexpected,
error.SharingViolation,
error.PathAlreadyExists,
error.PipeBusy,
error.InvalidUtf8,
error.BadPathName,
error.SymLinkLoop,
error.ProcessFdQuotaExceeded,
error.SystemFdQuotaExceeded,
error.NoDevice,
error.NoSpaceLeft,
error.NotDir,
error.DeviceBusy,
error.FileLocksNotSupported,
=> {
_ = socket.send(internal_error_response) catch |send_error| {
log.err("=== send error 500: {} ===", .{send_error});
};
log.err(
"{} <== 500 ({}) ({})",
.{ remote_endpoint, static_path, e },
);
socket.close();
socket_set.remove(socket);
arena.deinit();
longlived_allocator.destroy(arena);
return Connection.idle;
},
}
};
const stat = file.stat() catch |stat_error| {
switch (stat_error) {
error.AccessDenied => {
_ = socket.send(not_found_response) catch |send_error| {
log.err("=== send error 404 {} ===", .{send_error});
};
log.err(
"{} <== 404 ({})",
.{ remote_endpoint, static_path },
);
socket.close();
socket_set.remove(socket);
arena.deinit();
longlived_allocator.destroy(arena);
return Connection.idle;
},
error.SystemResources, error.Unexpected => {
_ = socket.send(internal_error_response) catch |send_error| {
log.err("=== send error 500: {} ===", .{send_error});
};
log.err(
"{} <== 500 ({}) ({})",
.{ remote_endpoint, static_path, stat_error },
);
socket.close();
socket_set.remove(socket);
arena.deinit();
longlived_allocator.destroy(arena);
return Connection.idle;
},
}
};
const last_modification_time = stat.mtime;
const expected_file_size = stat.size;
const hash_function = std.hash_map.getAutoHashFn(@TypeOf(last_modification_time));
const etag = hash_function(last_modification_time);
var if_none_match_request_header: ?parsing.Header = null;
for (request.headers.items) |h| {
switch (h) {
.if_none_match => |d| if_none_match_request_header = h,
else => {},
}
}
if (if_none_match_request_header) |h| {
const etag_value = fmt.parseInt(
Etag,
h.if_none_match,
10,
) catch |e| etag_value: {
log.err(
"|== Unable to hash incoming etag value: {}",
.{h.if_none_match},
);
break :etag_value 0;
};
if (etag_value == etag) {
log.info(
"{} <== {} (304 via ETag)",
.{ remote_endpoint, static_path },
);
_ = socket.send(not_modified_response) catch |send_error| {
log.err(
"{} <== 304 not modified send error: {}",
.{ remote_endpoint, send_error },
);
};
socket.close();
socket_set.remove(socket);
arena.deinit();
longlived_allocator.destroy(arena);
return Connection.idle;
}
}
return Connection.sendingFile(
file,
expected_file_size,
socket,
remote_endpoint,
etag,
arena,
static_path,
request,
longlived_allocator,
);
} else {
_ = socket.send(method_not_allowed_response) catch |send_error| {
log.err(
"{} <== Method not allowed send error: {}",
.{ remote_endpoint, send_error },
);
};
log.info(
"{} <== 405 Method Not Allowed: {}",
.{ remote_endpoint, request.request_line.method },
);
socket_set.remove(socket);
socket.close();
arena.deinit();
longlived_allocator.destroy(arena);
return Connection.idle;
}
}
return connection.*;
}
fn removeFaultedReceivingSocket(receiving: ReceivingState, socket_set: *SocketSet) bool {
if (socket_set.isFaulted(receiving.socket)) {
socket_set.remove(receiving.socket);
receiving.socket.close();
return true;
}
return false;
}
fn removeFaultedSendingSocket(sending: *SendingState, socket_set: *SocketSet) bool {
if (socket_set.isFaulted(sending.socket)) {
sending.deinit(socket_set);
return true;
}
return false;
}
const receiving_state_timeout = 30_000_000_000;
const not_found_response =
\\HTTP/1.1 404 Not found
\\Content-length: 14
\\
\\File not found
;
const not_modified_response =
\\HTTP/1.1 304 Not modified
\\
\\
;
const high_load_response =
\\HTTP/1.1 503 Busy
\\Content-length: 13
\\
\\Load too high
;
const bad_request_response =
\\HTTP/1.1 400 Bad Request
\\Content-length: 11
\\
\\Bad request
;
const method_not_allowed_response =
\\HTTP/1.1 405 Method Not Allowed
\\Content-length: 18
\\
\\Method not allowed
;
const name_too_long_response =
\\HTTP/1.1 400 Bad Request
\\Content-length: 45
\\
\\Bad request, name too long for this server :(
;
const internal_error_response =
\\HTTP/1.1 500 Internal Server Error
\\Content-length: 21
\\
\\Internal Server Error
;
fn temporaryRedirect(allocator: *mem.Allocator, location: []const u8) ![]const u8 {
const response_format =
\\HTTP/1.1 307 Temporary Redirect
\\Location: {}
\\
\\
;
return try fmt.allocPrint(allocator, response_format, .{location});
}
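// A hedged usage sketch for temporaryRedirect, e.g. as a building block for
// the trailing-slash redirect discussed in handleReceiving above. Only the
// status line and Location header are checked; the buffer size is arbitrary.
test "temporaryRedirect builds a 307 with a Location header" {
    var buffer: [256]u8 = undefined;
    var fba = heap.FixedBufferAllocator.init(&buffer);
    const response = try temporaryRedirect(&fba.allocator, "/sub-directory/");
    debug.assert(mem.startsWith(u8, response, "HTTP/1.1 307 Temporary Redirect"));
    debug.assert(mem.indexOf(u8, response, "Location: /sub-directory/") != null);
}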
fn determineContentType(path: []const u8) []const u8 {
return if (endsWithAny(
u8,
path,
&[_][]const u8{ ".zig", ".txt", ".h", ".c", ".md", ".cpp", ".cc", ".hh" },
))
"text/plain"
else if (mem.endsWith(u8, path, ".html") or mem.endsWith(u8, path, ".htm"))
"text/html"
else if (mem.endsWith(u8, path, ".css"))
"text/css"
else if (mem.endsWith(u8, path, ".jpg") or mem.endsWith(u8, path, ".jpeg"))
"image/jpeg"
else if (mem.endsWith(u8, path, ".mp4"))
"video/mp4"
else if (mem.endsWith(u8, path, ".mkv"))
"video/x-matroska"
else if (mem.endsWith(u8, path, ".png"))
"image/png"
else if (mem.endsWith(u8, path, ".json"))
"application/json"
else
"application/octet-stream";
}
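// A hedged usage sketch for determineContentType above: the MIME type is
// chosen purely from the file extension, with "application/octet-stream" as
// the fallback. The paths below are illustrative.
test "determineContentType picks a MIME type from the extension" {
    debug.assert(mem.eql(u8, determineContentType("notes/readme.md"), "text/plain"));
    debug.assert(mem.eql(u8, determineContentType("index.html"), "text/html"));
    debug.assert(mem.eql(u8, determineContentType("logo.png"), "image/png"));
    debug.assert(mem.eql(u8, determineContentType("archive.tar.gz"), "application/octet-stream"));
}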
fn endsWithAny(comptime T: type, slice: []const T, comptime suffixes: []const []const T) bool {
inline for (suffixes) |suffix| {
if (mem.endsWith(T, slice, suffix)) return true;
}
return false;
}
fn removeLeadingSlashes(string: []const u8) []const u8 {
return mem.trimLeft(u8, string, "/");
}
fn isDirectory(directory: fs.Dir, path: []const u8) bool {
var resource_as_directory: ?fs.Dir = directory.openDir(
path,
.{},
) catch |err| null;
if (resource_as_directory) |*d| {
d.close();
return true;
} else {
return false;
}
} | src/connection.zig |
const sf = @import("../sfml_import.zig");
const math = @import("std").math;
pub const Color = packed struct {
/// Converts a color from a csfml object
/// For inner workings
pub fn _fromCSFML(col: sf.c.sfColor) Color {
return @bitCast(Color, col);
}
/// Converts this color to a csfml one
/// For inner workings
pub fn _toCSFML(self: Color) sf.c.sfColor {
return @bitCast(sf.c.sfColor, self);
}
/// Inits a color with rgb components
pub fn fromRGB(red: u8, green: u8, blue: u8) Color {
return Color{
.r = red,
.g = green,
.b = blue,
.a = 0xff,
};
}
/// Inits a color with rgba components
pub fn fromRGBA(red: u8, green: u8, blue: u8, alpha: u8) Color {
return Color{
.r = red,
.g = green,
.b = blue,
.a = alpha,
};
}
/// Inits a color from a 32bits value (RGBA in that order)
pub fn fromInteger(int: u32) Color {
return Color{
.r = @truncate(u8, (int & 0xff000000) >> 24),
.g = @truncate(u8, (int & 0x00ff0000) >> 16),
.b = @truncate(u8, (int & 0x0000ff00) >> 8),
.a = @truncate(u8, (int & 0x000000ff) >> 0),
};
}
/// Gets a 32 bit integer representing the color
pub fn toInteger(self: Color) u32 {
return (@intCast(u32, self.r) << 24) |
(@intCast(u32, self.g) << 16) |
(@intCast(u32, self.b) << 8) |
(@intCast(u32, self.a) << 0);
}
/// Creates a color with rgba floats from 0 to 1
fn fromFloats(red: f32, green: f32, blue: f32, alpha: f32) Color {
return Color{
.r = @floatToInt(u8, math.clamp(red, 0.0, 1.0) * 255.0),
.g = @floatToInt(u8, math.clamp(green, 0.0, 1.0) * 255.0),
.b = @floatToInt(u8, math.clamp(blue, 0.0, 1.0) * 255.0),
.a = @floatToInt(u8, math.clamp(alpha, 0.0, 1.0) * 255.0),
};
}
/// Creates a color from HSV and transparency components (this is not part of the SFML)
/// hue is in degrees, saturation and value are in percents
pub fn fromHSVA(hue: f32, saturation: f32, value: f32, alpha: f32) Color {
const h = hue;
const s = saturation / 100;
const v = value / 100;
const a = alpha;
var hh: f32 = h;
if (v <= 0.0)
return fromFloats(0, 0, 0, a);
if (hh >= 360.0)
hh = 0;
hh /= 60.0;
var ff: f32 = hh - math.floor(hh);
var p: f32 = v * (1.0 - s);
var q: f32 = v * (1.0 - (s * ff));
var t: f32 = v * (1.0 - (s * (1.0 - ff)));
return switch (@floatToInt(usize, hh)) {
0 => fromFloats(v, t, p, a),
1 => fromFloats(q, v, p, a),
2 => fromFloats(p, v, t, a),
3 => fromFloats(p, q, v, a),
4 => fromFloats(t, p, v, a),
else => fromFloats(v, p, q, a),
};
}
/// Get a GLSL float vector for this color (for shaders)
pub fn toFVec4(self: Color) sf.graphics.glsl.FVec4 {
return .{
.x = @intToFloat(f32, self.r) / 255.0,
.y = @intToFloat(f32, self.g) / 255.0,
.z = @intToFloat(f32, self.b) / 255.0,
.w = @intToFloat(f32, self.a) / 255.0
};
}
/// Get a GLSL int vector for this color (for shaders)
    pub fn toIVec4(self: Color) sf.graphics.glsl.IVec4 {
return .{
.x = self.r,
.y = self.g,
.z = self.b,
.w = self.a
};
}
// Colors
/// Black color
pub const Black = Color.fromRGB(0, 0, 0);
/// White color
pub const White = Color.fromRGB(255, 255, 255);
/// Red color
pub const Red = Color.fromRGB(255, 0, 0);
/// Green color
pub const Green = Color.fromRGB(0, 255, 0);
/// Blue color
pub const Blue = Color.fromRGB(0, 0, 255);
/// Yellow color
pub const Yellow = Color.fromRGB(255, 255, 0);
/// Magenta color
pub const Magenta = Color.fromRGB(255, 0, 255);
/// Cyan color
pub const Cyan = Color.fromRGB(0, 255, 255);
/// Transparent color
pub const Transparent = Color.fromRGBA(0, 0, 0, 0);
/// Red component
r: u8,
/// Green component
g: u8,
/// Blue component
b: u8,
/// Alpha (opacity) component
a: u8,
};
test "color: conversions" {
const tst = @import("std").testing;
var code: u32 = 0x4BDA9CFF;
var col = Color.fromInteger(code);
try tst.expectEqual(Color.fromRGB(75, 218, 156), col);
try tst.expectEqual(code, col.toInteger());
var csfml_col = sf.c.sfColor_fromInteger(@as(c_uint, code));
try tst.expectEqual(Color._fromCSFML(csfml_col), col);
}
test "color: hsv to rgb" {
const tst = @import("std").testing;
var col = Color.fromHSVA(10, 20, 100, 255);
try tst.expectEqual(Color.fromRGB(255, 212, 204), col);
}
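// A hedged sketch exercising toFVec4 above: each 0-255 channel maps onto the
// 0.0-1.0 range expected by GLSL. The x/y/z/w field names follow the usage in
// toFVec4 and are assumed to match sf.graphics.glsl.FVec4.
test "color: to float vector" {
    const tst = @import("std").testing;
    const vec = Color.White.toFVec4();
    try tst.expectEqual(@as(f32, 1.0), vec.x);
    try tst.expectEqual(@as(f32, 1.0), vec.y);
    try tst.expectEqual(@as(f32, 1.0), vec.z);
    try tst.expectEqual(@as(f32, 1.0), vec.w);
}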
test "color: sane from/to CSFML color" {
const tst = @import("std").testing;
const col = Color.fromRGBA(5, 12, 28, 127);
const ccol = col._toCSFML();
try tst.expectEqual(col.r, ccol.r);
try tst.expectEqual(col.g, ccol.g);
try tst.expectEqual(col.b, ccol.b);
try tst.expectEqual(col.a, ccol.a);
const col2 = Color._fromCSFML(ccol);
try tst.expectEqual(col, col2);
} | src/sfml/graphics/color.zig |
const std = @import("std");
const argsParser = @import("args");
const ihex = @import("ihex");
const spu = @import("spu-mk2");
const FileFormat = enum { ihex, binary };
const DisasmError = error{EndOfStream} || std.os.WriteError || std.io.FixedBufferStream([]const u8).ReadError;
fn processRecord(out: *const std.io.Writer(std.fs.File, std.os.WriteError, std.fs.File.write), base: u32, data: []const u8) DisasmError!void {
    // Keep the stream in a named variable so the reader's context pointer stays valid.
    var stream = std.io.fixedBufferStream(data);
    const in = stream.reader();
var offset = base;
while (true) {
try out.print("{X:0>4} ", .{offset});
offset += 2;
if (in.readIntLittle(u16)) |instr_int| {
const instr = @bitCast(spu.Instruction, instr_int);
try out.print("{}", .{instr});
if (instr.input0 == .immediate) {
offset += 2;
const val = in.readIntLittle(u16) catch |err| switch (err) {
error.EndOfStream => {
try out.writeAll(" | ????\n");
return;
},
else => return err,
};
try out.print(" | {X:0>4}", .{val});
}
if (instr.input1 == .immediate) {
offset += 2;
const val = in.readIntLittle(u16) catch |err| switch (err) {
error.EndOfStream => {
try out.writeAll(" | ????\n");
return;
},
else => return err,
};
try out.print(" | {X:0>4}", .{val});
}
try out.writeAll("\n");
} else |err| {
switch (err) {
error.EndOfStream => break,
else => return err,
}
}
}
}
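// A hedged sketch of the length rule processRecord relies on above: every
// instruction word is two bytes, and each operand in immediate mode pulls two
// more bytes from the stream. The helper below is illustrative only and is not
// used by the disassembler itself.
fn encodedLength(immediate0: bool, immediate1: bool) u32 {
    var length: u32 = 2; // the instruction word itself
    if (immediate0) length += 2; // immediate value for input0
    if (immediate1) length += 2; // immediate value for input1
    return length;
}

test "instruction length grows with immediate operands" {
    std.debug.assert(encodedLength(false, false) == 2);
    std.debug.assert(encodedLength(true, false) == 4);
    std.debug.assert(encodedLength(true, true) == 6);
}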
pub fn main() !u8 {
const cli_args = argsParser.parseForCurrentProcess(struct {
help: bool = false,
format: ?FileFormat = null,
offset: ?u16 = null,
pub const shorthands = .{
.h = "help",
.f = "format",
};
}, std.heap.page_allocator, .print) catch return 1;
defer cli_args.deinit();
const out = std.io.getStdOut().writer();
if (cli_args.options.help or cli_args.positionals.len == 0) {
try out.writeAll(
\\disassembler --help [--format ihex|binary] [--offset XXXX] fileA fileB
\\Disassembles code for the SPU Mark II platform.
\\
\\-h, --help Displays this help text.
\\-f, --format Selects the input format (binary or ihex).
\\ If not given, the file extension will be used
\\ to guess the format.
\\--offset XXXX Defines the disassembly offset for binary files.
\\
);
return if (cli_args.options.help) @as(u8, 0) else @as(u8, 1);
}
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const hexParseMode = ihex.ParseMode{ .pedantic = true };
for (cli_args.positionals) |path| {
var file = try std.fs.cwd().openFile(path, .{ .read = true, .write = false });
defer file.close();
if (std.mem.endsWith(u8, path, ".hex")) {
// Emulator will always start at address 0x0000 or CLI given entry point.
_ = try ihex.parseData(file.reader(), hexParseMode, &out, DisasmError, processRecord);
} else {
const buffer = try file.reader().readAllAlloc(arena.allocator(), 65536);
defer arena.allocator().free(buffer);
try processRecord(&out, cli_args.options.offset orelse 0x0000, buffer);
}
}
return 0;
} | tools/disassembler/main.zig |
pub usingnamespace @import("std").zig.c_builtins;
pub const ptrdiff_t = c_long;
pub const wchar_t = c_int;
pub const max_align_t = c_longdouble;
pub const int_least8_t = i8;
pub const int_least16_t = i16;
pub const int_least32_t = i32;
pub const int_least64_t = i64;
pub const uint_least8_t = u8;
pub const uint_least16_t = u16;
pub const uint_least32_t = u32;
pub const uint_least64_t = u64;
pub const int_fast8_t = i8;
pub const int_fast16_t = i16;
pub const int_fast32_t = i32;
pub const int_fast64_t = i64;
pub const uint_fast8_t = u8;
pub const uint_fast16_t = u16;
pub const uint_fast32_t = u32;
pub const uint_fast64_t = u64;
pub const __int8_t = i8;
pub const __uint8_t = u8;
pub const __int16_t = c_short;
pub const __uint16_t = c_ushort;
pub const __int32_t = c_int;
pub const __uint32_t = c_uint;
pub const __int64_t = c_longlong;
pub const __uint64_t = c_ulonglong;
pub const __darwin_intptr_t = c_long;
pub const __darwin_natural_t = c_uint;
pub const __darwin_ct_rune_t = c_int;
pub const __mbstate_t = extern union {
__mbstate8: [128]u8,
_mbstateL: c_longlong,
};
pub const __darwin_mbstate_t = __mbstate_t;
pub const __darwin_ptrdiff_t = c_long;
pub const __darwin_size_t = c_ulong;
pub const struct___va_list_tag = extern struct {
gp_offset: c_uint,
fp_offset: c_uint,
overflow_arg_area: ?*c_void,
reg_save_area: ?*c_void,
};
pub const __builtin_va_list = [1]struct___va_list_tag;
pub const __darwin_va_list = __builtin_va_list;
pub const __darwin_wchar_t = c_int;
pub const __darwin_rune_t = __darwin_wchar_t;
pub const __darwin_wint_t = c_int;
pub const __darwin_clock_t = c_ulong;
pub const __darwin_socklen_t = __uint32_t;
pub const __darwin_ssize_t = c_long;
pub const __darwin_time_t = c_long;
pub const __darwin_blkcnt_t = __int64_t;
pub const __darwin_blksize_t = __int32_t;
pub const __darwin_dev_t = __int32_t;
pub const __darwin_fsblkcnt_t = c_uint;
pub const __darwin_fsfilcnt_t = c_uint;
pub const __darwin_gid_t = __uint32_t;
pub const __darwin_id_t = __uint32_t;
pub const __darwin_ino64_t = __uint64_t;
pub const __darwin_ino_t = __darwin_ino64_t;
pub const __darwin_mach_port_name_t = __darwin_natural_t;
pub const __darwin_mach_port_t = __darwin_mach_port_name_t;
pub const __darwin_mode_t = __uint16_t;
pub const __darwin_off_t = __int64_t;
pub const __darwin_pid_t = __int32_t;
pub const __darwin_sigset_t = __uint32_t;
pub const __darwin_suseconds_t = __int32_t;
pub const __darwin_uid_t = __uint32_t;
pub const __darwin_useconds_t = __uint32_t;
pub const __darwin_uuid_t = [16]u8;
pub const __darwin_uuid_string_t = [37]u8;
pub const struct___darwin_pthread_handler_rec = extern struct {
__routine: ?fn (?*c_void) callconv(.C) void,
__arg: ?*c_void,
__next: [*c]struct___darwin_pthread_handler_rec,
};
pub const struct__opaque_pthread_attr_t = extern struct {
__sig: c_long,
__opaque: [56]u8,
};
pub const struct__opaque_pthread_cond_t = extern struct {
__sig: c_long,
__opaque: [40]u8,
};
pub const struct__opaque_pthread_condattr_t = extern struct {
__sig: c_long,
__opaque: [8]u8,
};
pub const struct__opaque_pthread_mutex_t = extern struct {
__sig: c_long,
__opaque: [56]u8,
};
pub const struct__opaque_pthread_mutexattr_t = extern struct {
__sig: c_long,
__opaque: [8]u8,
};
pub const struct__opaque_pthread_once_t = extern struct {
__sig: c_long,
__opaque: [8]u8,
};
pub const struct__opaque_pthread_rwlock_t = extern struct {
__sig: c_long,
__opaque: [192]u8,
};
pub const struct__opaque_pthread_rwlockattr_t = extern struct {
__sig: c_long,
__opaque: [16]u8,
};
pub const struct__opaque_pthread_t = extern struct {
__sig: c_long,
__cleanup_stack: [*c]struct___darwin_pthread_handler_rec,
__opaque: [8176]u8,
};
pub const __darwin_pthread_attr_t = struct__opaque_pthread_attr_t;
pub const __darwin_pthread_cond_t = struct__opaque_pthread_cond_t;
pub const __darwin_pthread_condattr_t = struct__opaque_pthread_condattr_t;
pub const __darwin_pthread_key_t = c_ulong;
pub const __darwin_pthread_mutex_t = struct__opaque_pthread_mutex_t;
pub const __darwin_pthread_mutexattr_t = struct__opaque_pthread_mutexattr_t;
pub const __darwin_pthread_once_t = struct__opaque_pthread_once_t;
pub const __darwin_pthread_rwlock_t = struct__opaque_pthread_rwlock_t;
pub const __darwin_pthread_rwlockattr_t = struct__opaque_pthread_rwlockattr_t;
pub const __darwin_pthread_t = [*c]struct__opaque_pthread_t;
pub const u_int8_t = u8;
pub const u_int16_t = c_ushort;
pub const u_int32_t = c_uint;
pub const u_int64_t = c_ulonglong;
pub const register_t = i64;
pub const user_addr_t = u_int64_t;
pub const user_size_t = u_int64_t;
pub const user_ssize_t = i64;
pub const user_long_t = i64;
pub const user_ulong_t = u_int64_t;
pub const user_time_t = i64;
pub const user_off_t = i64;
pub const syscall_arg_t = u_int64_t;
pub const intmax_t = c_long;
pub const uintmax_t = c_ulong;
pub const shaderc_target_env_vulkan: c_int = 0;
pub const shaderc_target_env_opengl: c_int = 1;
pub const shaderc_target_env_opengl_compat: c_int = 2;
pub const shaderc_target_env_webgpu: c_int = 3;
pub const shaderc_target_env_default: c_int = 0;
pub const shaderc_target_env = c_uint;
pub const shaderc_env_version_vulkan_1_0: c_int = 4194304;
pub const shaderc_env_version_vulkan_1_1: c_int = 4198400;
pub const shaderc_env_version_vulkan_1_2: c_int = 4202496;
pub const shaderc_env_version_opengl_4_5: c_int = 450;
pub const shaderc_env_version_webgpu: c_int = 451;
pub const shaderc_env_version = c_uint;
pub const shaderc_spirv_version_1_0: c_int = 65536;
pub const shaderc_spirv_version_1_1: c_int = 65792;
pub const shaderc_spirv_version_1_2: c_int = 66048;
pub const shaderc_spirv_version_1_3: c_int = 66304;
pub const shaderc_spirv_version_1_4: c_int = 66560;
pub const shaderc_spirv_version_1_5: c_int = 66816;
pub const shaderc_spirv_version = c_uint;
pub const shaderc_compilation_status_success: c_int = 0;
pub const shaderc_compilation_status_invalid_stage: c_int = 1;
pub const shaderc_compilation_status_compilation_error: c_int = 2;
pub const shaderc_compilation_status_internal_error: c_int = 3;
pub const shaderc_compilation_status_null_result_object: c_int = 4;
pub const shaderc_compilation_status_invalid_assembly: c_int = 5;
pub const shaderc_compilation_status_validation_error: c_int = 6;
pub const shaderc_compilation_status_transformation_error: c_int = 7;
pub const shaderc_compilation_status_configuration_error: c_int = 8;
pub const shaderc_compilation_status = c_uint;
pub const shaderc_source_language_glsl: c_int = 0;
pub const shaderc_source_language_hlsl: c_int = 1;
pub const shaderc_source_language = c_uint;
pub const shaderc_vertex_shader: c_int = 0;
pub const shaderc_fragment_shader: c_int = 1;
pub const shaderc_compute_shader: c_int = 2;
pub const shaderc_geometry_shader: c_int = 3;
pub const shaderc_tess_control_shader: c_int = 4;
pub const shaderc_tess_evaluation_shader: c_int = 5;
pub const shaderc_glsl_vertex_shader: c_int = 0;
pub const shaderc_glsl_fragment_shader: c_int = 1;
pub const shaderc_glsl_compute_shader: c_int = 2;
pub const shaderc_glsl_geometry_shader: c_int = 3;
pub const shaderc_glsl_tess_control_shader: c_int = 4;
pub const shaderc_glsl_tess_evaluation_shader: c_int = 5;
pub const shaderc_glsl_infer_from_source: c_int = 6;
pub const shaderc_glsl_default_vertex_shader: c_int = 7;
pub const shaderc_glsl_default_fragment_shader: c_int = 8;
pub const shaderc_glsl_default_compute_shader: c_int = 9;
pub const shaderc_glsl_default_geometry_shader: c_int = 10;
pub const shaderc_glsl_default_tess_control_shader: c_int = 11;
pub const shaderc_glsl_default_tess_evaluation_shader: c_int = 12;
pub const shaderc_spirv_assembly: c_int = 13;
pub const shaderc_raygen_shader: c_int = 14;
pub const shaderc_anyhit_shader: c_int = 15;
pub const shaderc_closesthit_shader: c_int = 16;
pub const shaderc_miss_shader: c_int = 17;
pub const shaderc_intersection_shader: c_int = 18;
pub const shaderc_callable_shader: c_int = 19;
pub const shaderc_glsl_raygen_shader: c_int = 14;
pub const shaderc_glsl_anyhit_shader: c_int = 15;
pub const shaderc_glsl_closesthit_shader: c_int = 16;
pub const shaderc_glsl_miss_shader: c_int = 17;
pub const shaderc_glsl_intersection_shader: c_int = 18;
pub const shaderc_glsl_callable_shader: c_int = 19;
pub const shaderc_glsl_default_raygen_shader: c_int = 20;
pub const shaderc_glsl_default_anyhit_shader: c_int = 21;
pub const shaderc_glsl_default_closesthit_shader: c_int = 22;
pub const shaderc_glsl_default_miss_shader: c_int = 23;
pub const shaderc_glsl_default_intersection_shader: c_int = 24;
pub const shaderc_glsl_default_callable_shader: c_int = 25;
pub const shaderc_task_shader: c_int = 26;
pub const shaderc_mesh_shader: c_int = 27;
pub const shaderc_glsl_task_shader: c_int = 26;
pub const shaderc_glsl_mesh_shader: c_int = 27;
pub const shaderc_glsl_default_task_shader: c_int = 28;
pub const shaderc_glsl_default_mesh_shader: c_int = 29;
pub const shaderc_shader_kind = c_uint;
pub const shaderc_profile_none: c_int = 0;
pub const shaderc_profile_core: c_int = 1;
pub const shaderc_profile_compatibility: c_int = 2;
pub const shaderc_profile_es: c_int = 3;
pub const shaderc_profile = c_uint;
pub const shaderc_optimization_level_zero: c_int = 0;
pub const shaderc_optimization_level_size: c_int = 1;
pub const shaderc_optimization_level_performance: c_int = 2;
pub const shaderc_optimization_level = c_uint;
pub const shaderc_limit_max_lights: c_int = 0;
pub const shaderc_limit_max_clip_planes: c_int = 1;
pub const shaderc_limit_max_texture_units: c_int = 2;
pub const shaderc_limit_max_texture_coords: c_int = 3;
pub const shaderc_limit_max_vertex_attribs: c_int = 4;
pub const shaderc_limit_max_vertex_uniform_components: c_int = 5;
pub const shaderc_limit_max_varying_floats: c_int = 6;
pub const shaderc_limit_max_vertex_texture_image_units: c_int = 7;
pub const shaderc_limit_max_combined_texture_image_units: c_int = 8;
pub const shaderc_limit_max_texture_image_units: c_int = 9;
pub const shaderc_limit_max_fragment_uniform_components: c_int = 10;
pub const shaderc_limit_max_draw_buffers: c_int = 11;
pub const shaderc_limit_max_vertex_uniform_vectors: c_int = 12;
pub const shaderc_limit_max_varying_vectors: c_int = 13;
pub const shaderc_limit_max_fragment_uniform_vectors: c_int = 14;
pub const shaderc_limit_max_vertex_output_vectors: c_int = 15;
pub const shaderc_limit_max_fragment_input_vectors: c_int = 16;
pub const shaderc_limit_min_program_texel_offset: c_int = 17;
pub const shaderc_limit_max_program_texel_offset: c_int = 18;
pub const shaderc_limit_max_clip_distances: c_int = 19;
pub const shaderc_limit_max_compute_work_group_count_x: c_int = 20;
pub const shaderc_limit_max_compute_work_group_count_y: c_int = 21;
pub const shaderc_limit_max_compute_work_group_count_z: c_int = 22;
pub const shaderc_limit_max_compute_work_group_size_x: c_int = 23;
pub const shaderc_limit_max_compute_work_group_size_y: c_int = 24;
pub const shaderc_limit_max_compute_work_group_size_z: c_int = 25;
pub const shaderc_limit_max_compute_uniform_components: c_int = 26;
pub const shaderc_limit_max_compute_texture_image_units: c_int = 27;
pub const shaderc_limit_max_compute_image_uniforms: c_int = 28;
pub const shaderc_limit_max_compute_atomic_counters: c_int = 29;
pub const shaderc_limit_max_compute_atomic_counter_buffers: c_int = 30;
pub const shaderc_limit_max_varying_components: c_int = 31;
pub const shaderc_limit_max_vertex_output_components: c_int = 32;
pub const shaderc_limit_max_geometry_input_components: c_int = 33;
pub const shaderc_limit_max_geometry_output_components: c_int = 34;
pub const shaderc_limit_max_fragment_input_components: c_int = 35;
pub const shaderc_limit_max_image_units: c_int = 36;
pub const shaderc_limit_max_combined_image_units_and_fragment_outputs: c_int = 37;
pub const shaderc_limit_max_combined_shader_output_resources: c_int = 38;
pub const shaderc_limit_max_image_samples: c_int = 39;
pub const shaderc_limit_max_vertex_image_uniforms: c_int = 40;
pub const shaderc_limit_max_tess_control_image_uniforms: c_int = 41;
pub const shaderc_limit_max_tess_evaluation_image_uniforms: c_int = 42;
pub const shaderc_limit_max_geometry_image_uniforms: c_int = 43;
pub const shaderc_limit_max_fragment_image_uniforms: c_int = 44;
pub const shaderc_limit_max_combined_image_uniforms: c_int = 45;
pub const shaderc_limit_max_geometry_texture_image_units: c_int = 46;
pub const shaderc_limit_max_geometry_output_vertices: c_int = 47;
pub const shaderc_limit_max_geometry_total_output_components: c_int = 48;
pub const shaderc_limit_max_geometry_uniform_components: c_int = 49;
pub const shaderc_limit_max_geometry_varying_components: c_int = 50;
pub const shaderc_limit_max_tess_control_input_components: c_int = 51;
pub const shaderc_limit_max_tess_control_output_components: c_int = 52;
pub const shaderc_limit_max_tess_control_texture_image_units: c_int = 53;
pub const shaderc_limit_max_tess_control_uniform_components: c_int = 54;
pub const shaderc_limit_max_tess_control_total_output_components: c_int = 55;
pub const shaderc_limit_max_tess_evaluation_input_components: c_int = 56;
pub const shaderc_limit_max_tess_evaluation_output_components: c_int = 57;
pub const shaderc_limit_max_tess_evaluation_texture_image_units: c_int = 58;
pub const shaderc_limit_max_tess_evaluation_uniform_components: c_int = 59;
pub const shaderc_limit_max_tess_patch_components: c_int = 60;
pub const shaderc_limit_max_patch_vertices: c_int = 61;
pub const shaderc_limit_max_tess_gen_level: c_int = 62;
pub const shaderc_limit_max_viewports: c_int = 63;
pub const shaderc_limit_max_vertex_atomic_counters: c_int = 64;
pub const shaderc_limit_max_tess_control_atomic_counters: c_int = 65;
pub const shaderc_limit_max_tess_evaluation_atomic_counters: c_int = 66;
pub const shaderc_limit_max_geometry_atomic_counters: c_int = 67;
pub const shaderc_limit_max_fragment_atomic_counters: c_int = 68;
pub const shaderc_limit_max_combined_atomic_counters: c_int = 69;
pub const shaderc_limit_max_atomic_counter_bindings: c_int = 70;
pub const shaderc_limit_max_vertex_atomic_counter_buffers: c_int = 71;
pub const shaderc_limit_max_tess_control_atomic_counter_buffers: c_int = 72;
pub const shaderc_limit_max_tess_evaluation_atomic_counter_buffers: c_int = 73;
pub const shaderc_limit_max_geometry_atomic_counter_buffers: c_int = 74;
pub const shaderc_limit_max_fragment_atomic_counter_buffers: c_int = 75;
pub const shaderc_limit_max_combined_atomic_counter_buffers: c_int = 76;
pub const shaderc_limit_max_atomic_counter_buffer_size: c_int = 77;
pub const shaderc_limit_max_transform_feedback_buffers: c_int = 78;
pub const shaderc_limit_max_transform_feedback_interleaved_components: c_int = 79;
pub const shaderc_limit_max_cull_distances: c_int = 80;
pub const shaderc_limit_max_combined_clip_and_cull_distances: c_int = 81;
pub const shaderc_limit_max_samples: c_int = 82;
pub const shaderc_limit = c_uint;
pub const shaderc_uniform_kind_image: c_int = 0;
pub const shaderc_uniform_kind_sampler: c_int = 1;
pub const shaderc_uniform_kind_texture: c_int = 2;
pub const shaderc_uniform_kind_buffer: c_int = 3;
pub const shaderc_uniform_kind_storage_buffer: c_int = 4;
pub const shaderc_uniform_kind_unordered_access_view: c_int = 5;
pub const shaderc_uniform_kind = c_uint;
pub const struct_shaderc_compiler = opaque {};
pub const shaderc_compiler_t = ?*struct_shaderc_compiler;
pub extern fn shaderc_compiler_initialize() shaderc_compiler_t;
pub extern fn shaderc_compiler_release(shaderc_compiler_t) void;
pub const struct_shaderc_compile_options = opaque {};
pub const shaderc_compile_options_t = ?*struct_shaderc_compile_options;
pub extern fn shaderc_compile_options_initialize() shaderc_compile_options_t;
pub extern fn shaderc_compile_options_clone(options: shaderc_compile_options_t) shaderc_compile_options_t;
pub extern fn shaderc_compile_options_release(options: shaderc_compile_options_t) void;
pub extern fn shaderc_compile_options_add_macro_definition(options: shaderc_compile_options_t, name: [*c]const u8, name_length: usize, value: [*c]const u8, value_length: usize) void;
pub extern fn shaderc_compile_options_set_source_language(options: shaderc_compile_options_t, lang: shaderc_source_language) void;
pub extern fn shaderc_compile_options_set_generate_debug_info(options: shaderc_compile_options_t) void;
pub extern fn shaderc_compile_options_set_optimization_level(options: shaderc_compile_options_t, level: shaderc_optimization_level) void;
pub extern fn shaderc_compile_options_set_forced_version_profile(options: shaderc_compile_options_t, version: c_int, profile: shaderc_profile) void;
pub const struct_shaderc_include_result = extern struct {
source_name: [*c]const u8,
source_name_length: usize,
content: [*c]const u8,
content_length: usize,
user_data: ?*c_void,
};
pub const shaderc_include_result = struct_shaderc_include_result;
pub const shaderc_include_type_relative: c_int = 0;
pub const shaderc_include_type_standard: c_int = 1;
pub const enum_shaderc_include_type = c_uint;
pub const shaderc_include_resolve_fn = ?fn (?*c_void, [*c]const u8, c_int, [*c]const u8, usize) callconv(.C) [*c]shaderc_include_result;
pub const shaderc_include_result_release_fn = ?fn (?*c_void, [*c]shaderc_include_result) callconv(.C) void;
pub extern fn shaderc_compile_options_set_include_callbacks(options: shaderc_compile_options_t, resolver: shaderc_include_resolve_fn, result_releaser: shaderc_include_result_release_fn, user_data: ?*c_void) void;
pub extern fn shaderc_compile_options_set_suppress_warnings(options: shaderc_compile_options_t) void;
pub extern fn shaderc_compile_options_set_target_env(options: shaderc_compile_options_t, target: shaderc_target_env, version: u32) void;
pub extern fn shaderc_compile_options_set_target_spirv(options: shaderc_compile_options_t, version: shaderc_spirv_version) void;
pub extern fn shaderc_compile_options_set_warnings_as_errors(options: shaderc_compile_options_t) void;
pub extern fn shaderc_compile_options_set_limit(options: shaderc_compile_options_t, limit: shaderc_limit, value: c_int) void;
pub extern fn shaderc_compile_options_set_auto_bind_uniforms(options: shaderc_compile_options_t, auto_bind: bool) void;
pub extern fn shaderc_compile_options_set_hlsl_io_mapping(options: shaderc_compile_options_t, hlsl_iomap: bool) void;
pub extern fn shaderc_compile_options_set_hlsl_offsets(options: shaderc_compile_options_t, hlsl_offsets: bool) void;
pub extern fn shaderc_compile_options_set_binding_base(options: shaderc_compile_options_t, kind: shaderc_uniform_kind, base: u32) void;
pub extern fn shaderc_compile_options_set_binding_base_for_stage(options: shaderc_compile_options_t, shader_kind: shaderc_shader_kind, kind: shaderc_uniform_kind, base: u32) void;
pub extern fn shaderc_compile_options_set_auto_map_locations(options: shaderc_compile_options_t, auto_map: bool) void;
pub extern fn shaderc_compile_options_set_hlsl_register_set_and_binding_for_stage(options: shaderc_compile_options_t, shader_kind: shaderc_shader_kind, reg: [*c]const u8, set: [*c]const u8, binding: [*c]const u8) void;
pub extern fn shaderc_compile_options_set_hlsl_register_set_and_binding(options: shaderc_compile_options_t, reg: [*c]const u8, set: [*c]const u8, binding: [*c]const u8) void;
pub extern fn shaderc_compile_options_set_hlsl_functionality1(options: shaderc_compile_options_t, enable: bool) void;
pub extern fn shaderc_compile_options_set_invert_y(options: shaderc_compile_options_t, enable: bool) void;
pub extern fn shaderc_compile_options_set_nan_clamp(options: shaderc_compile_options_t, enable: bool) void;
pub const struct_shaderc_compilation_result = opaque {};
pub const shaderc_compilation_result_t = ?*struct_shaderc_compilation_result;
pub extern fn shaderc_compile_into_spv(compiler: shaderc_compiler_t, source_text: [*c]const u8, source_text_size: usize, shader_kind: shaderc_shader_kind, input_file_name: [*c]const u8, entry_point_name: [*c]const u8, additional_options: shaderc_compile_options_t) shaderc_compilation_result_t;
pub extern fn shaderc_compile_into_spv_assembly(compiler: shaderc_compiler_t, source_text: [*c]const u8, source_text_size: usize, shader_kind: shaderc_shader_kind, input_file_name: [*c]const u8, entry_point_name: [*c]const u8, additional_options: shaderc_compile_options_t) shaderc_compilation_result_t;
pub extern fn shaderc_compile_into_preprocessed_text(compiler: shaderc_compiler_t, source_text: [*c]const u8, source_text_size: usize, shader_kind: shaderc_shader_kind, input_file_name: [*c]const u8, entry_point_name: [*c]const u8, additional_options: shaderc_compile_options_t) shaderc_compilation_result_t;
pub extern fn shaderc_assemble_into_spv(compiler: shaderc_compiler_t, source_assembly: [*c]const u8, source_assembly_size: usize, additional_options: shaderc_compile_options_t) shaderc_compilation_result_t;
pub extern fn shaderc_result_release(result: shaderc_compilation_result_t) void;
pub extern fn shaderc_result_get_length(result: shaderc_compilation_result_t) usize;
pub extern fn shaderc_result_get_num_warnings(result: shaderc_compilation_result_t) usize;
pub extern fn shaderc_result_get_num_errors(result: shaderc_compilation_result_t) usize;
pub extern fn shaderc_result_get_compilation_status(shaderc_compilation_result_t) shaderc_compilation_status;
pub extern fn shaderc_result_get_bytes(result: shaderc_compilation_result_t) [*c]const u8;
pub extern fn shaderc_result_get_error_message(result: shaderc_compilation_result_t) [*c]const u8;
pub extern fn shaderc_get_spv_version(version: [*c]c_uint, revision: [*c]c_uint) void;
pub extern fn shaderc_parse_version_profile(str: [*c]const u8, version: [*c]c_int, profile: [*c]shaderc_profile) bool;
pub const offsetof = @compileError("TODO implement function '__builtin_offsetof' in std.c.builtins"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/include/stddef.h:104:9
pub const __CONCAT = @compileError("unable to translate C expr: unexpected token .HashHash"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:113:9
pub const __STRING = @compileError("unable to translate C expr: unexpected token .Hash"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:114:9
pub const __const = @compileError("unable to translate C expr: unexpected token .Keyword_const"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:116:9
pub const __volatile = @compileError("unable to translate C expr: unexpected token .Keyword_volatile"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:118:9
pub const __kpi_deprecated = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:202:9
pub const __restrict = @compileError("unable to translate C expr: unexpected token .Keyword_restrict"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:222:9
pub const __swift_unavailable = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:288:9
pub const __header_inline = @compileError("unable to translate C expr: unexpected token .Keyword_inline"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:322:10
pub const __unreachable_ok_push = @compileError("unable to translate C expr: unexpected token .Identifier"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:348:10
pub const __IDSTRING = @compileError("unable to translate C expr: unexpected token .Keyword_static"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:379:9
pub const __FBSDID = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:399:9
pub const __DECONST = @compileError("unable to translate C expr: unexpected token .Keyword_const"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:403:9
pub const __DEVOLATILE = @compileError("unable to translate C expr: unexpected token .Keyword_volatile"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:407:9
pub const __DEQUALIFY = @compileError("unable to translate C expr: unexpected token .Keyword_const"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:411:9
pub const __alloc_size = @compileError("unable to translate C expr: expected ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:429:9
pub const __DARWIN_ALIAS = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:612:9
pub const __DARWIN_ALIAS_C = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:613:9
pub const __DARWIN_ALIAS_I = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:614:9
pub const __DARWIN_NOCANCEL = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:615:9
pub const __DARWIN_INODE64 = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:616:9
pub const __DARWIN_1050 = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:618:9
pub const __DARWIN_1050ALIAS = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:619:9
pub const __DARWIN_1050ALIAS_C = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:620:9
pub const __DARWIN_1050ALIAS_I = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:621:9
pub const __DARWIN_1050INODE64 = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:622:9
pub const __DARWIN_EXTSN = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:624:9
pub const __DARWIN_EXTSN_C = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:625:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_2_0 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:35:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_2_1 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:41:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_2_2 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:47:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_3_0 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:53:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_3_1 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:59:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_3_2 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:65:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_4_0 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:71:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_4_1 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:77:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_4_2 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:83:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_4_3 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:89:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_5_0 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:95:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_5_1 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:101:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_6_0 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:107:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_6_1 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:113:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_7_0 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:119:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_7_1 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:125:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_8_0 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:131:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_8_1 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:137:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_8_2 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:143:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_8_3 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:149:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_8_4 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:155:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_9_0 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:161:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_9_1 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:167:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_9_2 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:173:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_9_3 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:179:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_10_0 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:185:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_10_1 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:191:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_10_2 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:197:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_10_3 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:203:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_11_0 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:209:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_11_1 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:215:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_11_2 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:221:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_11_3 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:227:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_11_4 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:233:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_12_0 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:239:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_12_1 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:245:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_12_2 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:251:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_12_3 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:257:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_12_4 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:263:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_13_0 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:269:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_13_1 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:275:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_13_2 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:281:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_13_3 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:287:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_13_4 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:293:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_13_5 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:299:9
pub const __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_13_6 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:305:9
pub const __DARWIN_ALIAS_STARTING_MAC___MAC_10_15 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:491:9
pub const __DARWIN_ALIAS_STARTING_MAC___MAC_10_15_1 = @compileError("unable to translate C expr: unexpected token .Eof"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/_symbol_aliasing.h:497:9
pub const __DARWIN_ALIAS_STARTING = @compileError("unable to translate C expr: unexpected token .HashHash"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:635:9
pub const __POSIX_C_DEPRECATED = @compileError("unable to translate C expr: unexpected token .HashHash"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:698:9
pub const __compiler_barrier = @compileError("unable to translate C expr: expected ',' or ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:812:9
pub const __enum_decl = @compileError("unable to translate C expr: expected ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:836:9
pub const __enum_closed_decl = @compileError("unable to translate C expr: expected ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:838:9
pub const __options_decl = @compileError("unable to translate C expr: expected ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:840:9
pub const __options_closed_decl = @compileError("unable to translate C expr: expected ')'"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/x86_64-macos-gnu/sys/cdefs.h:842:9
pub const __offsetof = @compileError("TODO implement function '__builtin_offsetof' in std.c.builtins"); // /usr/local/Cellar/zig/HEAD-eb010ce_1/lib/zig/libc/include/any-macos-any/sys/_types.h:83:9
pub const __llvm__ = @as(c_int, 1);
pub const __clang__ = @as(c_int, 1);
pub const __clang_major__ = @as(c_int, 12);
pub const __clang_minor__ = @as(c_int, 0);
pub const __clang_patchlevel__ = @as(c_int, 1);
pub const __clang_version__ = "12.0.1 ";
pub const __GNUC__ = @as(c_int, 4);
pub const __GNUC_MINOR__ = @as(c_int, 2);
pub const __GNUC_PATCHLEVEL__ = @as(c_int, 1);
pub const __GXX_ABI_VERSION = @as(c_int, 1002);
pub const __ATOMIC_RELAXED = @as(c_int, 0);
pub const __ATOMIC_CONSUME = @as(c_int, 1);
pub const __ATOMIC_ACQUIRE = @as(c_int, 2);
pub const __ATOMIC_RELEASE = @as(c_int, 3);
pub const __ATOMIC_ACQ_REL = @as(c_int, 4);
pub const __ATOMIC_SEQ_CST = @as(c_int, 5);
pub const __OPENCL_MEMORY_SCOPE_WORK_ITEM = @as(c_int, 0);
pub const __OPENCL_MEMORY_SCOPE_WORK_GROUP = @as(c_int, 1);
pub const __OPENCL_MEMORY_SCOPE_DEVICE = @as(c_int, 2);
pub const __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES = @as(c_int, 3);
pub const __OPENCL_MEMORY_SCOPE_SUB_GROUP = @as(c_int, 4);
pub const __PRAGMA_REDEFINE_EXTNAME = @as(c_int, 1);
pub const __VERSION__ = "Homebrew Clang 12.0.1";
pub const __OBJC_BOOL_IS_BOOL = @as(c_int, 0);
pub const __CONSTANT_CFSTRINGS__ = @as(c_int, 1);
pub const __block = __attribute__(__blocks__(byref));
pub const __BLOCKS__ = @as(c_int, 1);
pub const __OPTIMIZE__ = @as(c_int, 1);
pub const __ORDER_LITTLE_ENDIAN__ = @as(c_int, 1234);
pub const __ORDER_BIG_ENDIAN__ = @as(c_int, 4321);
pub const __ORDER_PDP_ENDIAN__ = @as(c_int, 3412);
pub const __BYTE_ORDER__ = __ORDER_LITTLE_ENDIAN__;
pub const __LITTLE_ENDIAN__ = @as(c_int, 1);
pub const _LP64 = @as(c_int, 1);
pub const __LP64__ = @as(c_int, 1);
pub const __CHAR_BIT__ = @as(c_int, 8);
pub const __SCHAR_MAX__ = @as(c_int, 127);
pub const __SHRT_MAX__ = @as(c_int, 32767);
pub const __INT_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const __LONG_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_long, 9223372036854775807, .decimal);
pub const __LONG_LONG_MAX__ = @as(c_longlong, 9223372036854775807);
pub const __WCHAR_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const __WINT_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const __INTMAX_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_long, 9223372036854775807, .decimal);
pub const __SIZE_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_ulong, 18446744073709551615, .decimal);
pub const __UINTMAX_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_ulong, 18446744073709551615, .decimal);
pub const __PTRDIFF_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_long, 9223372036854775807, .decimal);
pub const __INTPTR_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_long, 9223372036854775807, .decimal);
pub const __UINTPTR_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_ulong, 18446744073709551615, .decimal);
pub const __SIZEOF_DOUBLE__ = @as(c_int, 8);
pub const __SIZEOF_FLOAT__ = @as(c_int, 4);
pub const __SIZEOF_INT__ = @as(c_int, 4);
pub const __SIZEOF_LONG__ = @as(c_int, 8);
pub const __SIZEOF_LONG_DOUBLE__ = @as(c_int, 16);
pub const __SIZEOF_LONG_LONG__ = @as(c_int, 8);
pub const __SIZEOF_POINTER__ = @as(c_int, 8);
pub const __SIZEOF_SHORT__ = @as(c_int, 2);
pub const __SIZEOF_PTRDIFF_T__ = @as(c_int, 8);
pub const __SIZEOF_SIZE_T__ = @as(c_int, 8);
pub const __SIZEOF_WCHAR_T__ = @as(c_int, 4);
pub const __SIZEOF_WINT_T__ = @as(c_int, 4);
pub const __SIZEOF_INT128__ = @as(c_int, 16);
pub const __INTMAX_TYPE__ = c_long;
pub const __INTMAX_FMTd__ = "ld";
pub const __INTMAX_FMTi__ = "li";
pub const __INTMAX_C_SUFFIX__ = L;
pub const __UINTMAX_TYPE__ = c_ulong;
pub const __UINTMAX_FMTo__ = "lo";
pub const __UINTMAX_FMTu__ = "lu";
pub const __UINTMAX_FMTx__ = "lx";
pub const __UINTMAX_FMTX__ = "lX";
pub const __UINTMAX_C_SUFFIX__ = UL;
pub const __INTMAX_WIDTH__ = @as(c_int, 64);
pub const __PTRDIFF_TYPE__ = c_long;
pub const __PTRDIFF_FMTd__ = "ld";
pub const __PTRDIFF_FMTi__ = "li";
pub const __PTRDIFF_WIDTH__ = @as(c_int, 64);
pub const __INTPTR_TYPE__ = c_long;
pub const __INTPTR_FMTd__ = "ld";
pub const __INTPTR_FMTi__ = "li";
pub const __INTPTR_WIDTH__ = @as(c_int, 64);
pub const __SIZE_TYPE__ = c_ulong;
pub const __SIZE_FMTo__ = "lo";
pub const __SIZE_FMTu__ = "lu";
pub const __SIZE_FMTx__ = "lx";
pub const __SIZE_FMTX__ = "lX";
pub const __SIZE_WIDTH__ = @as(c_int, 64);
pub const __WCHAR_TYPE__ = c_int;
pub const __WCHAR_WIDTH__ = @as(c_int, 32);
pub const __WINT_TYPE__ = c_int;
pub const __WINT_WIDTH__ = @as(c_int, 32);
pub const __SIG_ATOMIC_WIDTH__ = @as(c_int, 32);
pub const __SIG_ATOMIC_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const __CHAR16_TYPE__ = c_ushort;
pub const __CHAR32_TYPE__ = c_uint;
pub const __UINTMAX_WIDTH__ = @as(c_int, 64);
pub const __UINTPTR_TYPE__ = c_ulong;
pub const __UINTPTR_FMTo__ = "lo";
pub const __UINTPTR_FMTu__ = "lu";
pub const __UINTPTR_FMTx__ = "lx";
pub const __UINTPTR_FMTX__ = "lX";
pub const __UINTPTR_WIDTH__ = @as(c_int, 64);
pub const __FLT_DENORM_MIN__ = @as(f32, 1.40129846e-45);
pub const __FLT_HAS_DENORM__ = @as(c_int, 1);
pub const __FLT_DIG__ = @as(c_int, 6);
pub const __FLT_DECIMAL_DIG__ = @as(c_int, 9);
pub const __FLT_EPSILON__ = @as(f32, 1.19209290e-7);
pub const __FLT_HAS_INFINITY__ = @as(c_int, 1);
pub const __FLT_HAS_QUIET_NAN__ = @as(c_int, 1);
pub const __FLT_MANT_DIG__ = @as(c_int, 24);
pub const __FLT_MAX_10_EXP__ = @as(c_int, 38);
pub const __FLT_MAX_EXP__ = @as(c_int, 128);
pub const __FLT_MAX__ = @as(f32, 3.40282347e+38);
pub const __FLT_MIN_10_EXP__ = -@as(c_int, 37);
pub const __FLT_MIN_EXP__ = -@as(c_int, 125);
pub const __FLT_MIN__ = @as(f32, 1.17549435e-38);
pub const __DBL_DENORM_MIN__ = 4.9406564584124654e-324;
pub const __DBL_HAS_DENORM__ = @as(c_int, 1);
pub const __DBL_DIG__ = @as(c_int, 15);
pub const __DBL_DECIMAL_DIG__ = @as(c_int, 17);
pub const __DBL_EPSILON__ = 2.2204460492503131e-16;
pub const __DBL_HAS_INFINITY__ = @as(c_int, 1);
pub const __DBL_HAS_QUIET_NAN__ = @as(c_int, 1);
pub const __DBL_MANT_DIG__ = @as(c_int, 53);
pub const __DBL_MAX_10_EXP__ = @as(c_int, 308);
pub const __DBL_MAX_EXP__ = @as(c_int, 1024);
pub const __DBL_MAX__ = 1.7976931348623157e+308;
pub const __DBL_MIN_10_EXP__ = -@as(c_int, 307);
pub const __DBL_MIN_EXP__ = -@as(c_int, 1021);
pub const __DBL_MIN__ = 2.2250738585072014e-308;
pub const __LDBL_DENORM_MIN__ = @as(c_longdouble, 3.64519953188247460253e-4951);
pub const __LDBL_HAS_DENORM__ = @as(c_int, 1);
pub const __LDBL_DIG__ = @as(c_int, 18);
pub const __LDBL_DECIMAL_DIG__ = @as(c_int, 21);
pub const __LDBL_EPSILON__ = @as(c_longdouble, 1.08420217248550443401e-19);
pub const __LDBL_HAS_INFINITY__ = @as(c_int, 1);
pub const __LDBL_HAS_QUIET_NAN__ = @as(c_int, 1);
pub const __LDBL_MANT_DIG__ = @as(c_int, 64);
pub const __LDBL_MAX_10_EXP__ = @as(c_int, 4932);
pub const __LDBL_MAX_EXP__ = @as(c_int, 16384);
pub const __LDBL_MAX__ = @as(c_longdouble, 1.18973149535723176502e+4932);
pub const __LDBL_MIN_10_EXP__ = -@as(c_int, 4931);
pub const __LDBL_MIN_EXP__ = -@as(c_int, 16381);
pub const __LDBL_MIN__ = @as(c_longdouble, 3.36210314311209350626e-4932);
pub const __POINTER_WIDTH__ = @as(c_int, 64);
pub const __BIGGEST_ALIGNMENT__ = @as(c_int, 16);
pub const __INT8_TYPE__ = i8;
pub const __INT8_FMTd__ = "hhd";
pub const __INT8_FMTi__ = "hhi";
pub const __INT16_TYPE__ = c_short;
pub const __INT16_FMTd__ = "hd";
pub const __INT16_FMTi__ = "hi";
pub const __INT32_TYPE__ = c_int;
pub const __INT32_FMTd__ = "d";
pub const __INT32_FMTi__ = "i";
pub const __INT64_TYPE__ = c_longlong;
pub const __INT64_FMTd__ = "lld";
pub const __INT64_FMTi__ = "lli";
pub const __INT64_C_SUFFIX__ = LL;
pub const __UINT8_TYPE__ = u8;
pub const __UINT8_FMTo__ = "hho";
pub const __UINT8_FMTu__ = "hhu";
pub const __UINT8_FMTx__ = "hhx";
pub const __UINT8_FMTX__ = "hhX";
pub const __UINT8_MAX__ = @as(c_int, 255);
pub const __INT8_MAX__ = @as(c_int, 127);
pub const __UINT16_TYPE__ = c_ushort;
pub const __UINT16_FMTo__ = "ho";
pub const __UINT16_FMTu__ = "hu";
pub const __UINT16_FMTx__ = "hx";
pub const __UINT16_FMTX__ = "hX";
pub const __UINT16_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 65535, .decimal);
pub const __INT16_MAX__ = @as(c_int, 32767);
pub const __UINT32_TYPE__ = c_uint;
pub const __UINT32_FMTo__ = "o";
pub const __UINT32_FMTu__ = "u";
pub const __UINT32_FMTx__ = "x";
pub const __UINT32_FMTX__ = "X";
pub const __UINT32_C_SUFFIX__ = U;
pub const __UINT32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_uint, 4294967295, .decimal);
pub const __INT32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const __UINT64_TYPE__ = c_ulonglong;
pub const __UINT64_FMTo__ = "llo";
pub const __UINT64_FMTu__ = "llu";
pub const __UINT64_FMTx__ = "llx";
pub const __UINT64_FMTX__ = "llX";
pub const __UINT64_C_SUFFIX__ = ULL;
pub const __UINT64_MAX__ = @as(c_ulonglong, 18446744073709551615);
pub const __INT64_MAX__ = @as(c_longlong, 9223372036854775807);
pub const __INT_LEAST8_TYPE__ = i8;
pub const __INT_LEAST8_MAX__ = @as(c_int, 127);
pub const __INT_LEAST8_FMTd__ = "hhd";
pub const __INT_LEAST8_FMTi__ = "hhi";
pub const __UINT_LEAST8_TYPE__ = u8;
pub const __UINT_LEAST8_MAX__ = @as(c_int, 255);
pub const __UINT_LEAST8_FMTo__ = "hho";
pub const __UINT_LEAST8_FMTu__ = "hhu";
pub const __UINT_LEAST8_FMTx__ = "hhx";
pub const __UINT_LEAST8_FMTX__ = "hhX";
pub const __INT_LEAST16_TYPE__ = c_short;
pub const __INT_LEAST16_MAX__ = @as(c_int, 32767);
pub const __INT_LEAST16_FMTd__ = "hd";
pub const __INT_LEAST16_FMTi__ = "hi";
pub const __UINT_LEAST16_TYPE__ = c_ushort;
pub const __UINT_LEAST16_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 65535, .decimal);
pub const __UINT_LEAST16_FMTo__ = "ho";
pub const __UINT_LEAST16_FMTu__ = "hu";
pub const __UINT_LEAST16_FMTx__ = "hx";
pub const __UINT_LEAST16_FMTX__ = "hX";
pub const __INT_LEAST32_TYPE__ = c_int;
pub const __INT_LEAST32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const __INT_LEAST32_FMTd__ = "d";
pub const __INT_LEAST32_FMTi__ = "i";
pub const __UINT_LEAST32_TYPE__ = c_uint;
pub const __UINT_LEAST32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_uint, 4294967295, .decimal);
pub const __UINT_LEAST32_FMTo__ = "o";
pub const __UINT_LEAST32_FMTu__ = "u";
pub const __UINT_LEAST32_FMTx__ = "x";
pub const __UINT_LEAST32_FMTX__ = "X";
pub const __INT_LEAST64_TYPE__ = c_longlong;
pub const __INT_LEAST64_MAX__ = @as(c_longlong, 9223372036854775807);
pub const __INT_LEAST64_FMTd__ = "lld";
pub const __INT_LEAST64_FMTi__ = "lli";
pub const __UINT_LEAST64_TYPE__ = c_ulonglong;
pub const __UINT_LEAST64_MAX__ = @as(c_ulonglong, 18446744073709551615);
pub const __UINT_LEAST64_FMTo__ = "llo";
pub const __UINT_LEAST64_FMTu__ = "llu";
pub const __UINT_LEAST64_FMTx__ = "llx";
pub const __UINT_LEAST64_FMTX__ = "llX";
pub const __INT_FAST8_TYPE__ = i8;
pub const __INT_FAST8_MAX__ = @as(c_int, 127);
pub const __INT_FAST8_FMTd__ = "hhd";
pub const __INT_FAST8_FMTi__ = "hhi";
pub const __UINT_FAST8_TYPE__ = u8;
pub const __UINT_FAST8_MAX__ = @as(c_int, 255);
pub const __UINT_FAST8_FMTo__ = "hho";
pub const __UINT_FAST8_FMTu__ = "hhu";
pub const __UINT_FAST8_FMTx__ = "hhx";
pub const __UINT_FAST8_FMTX__ = "hhX";
pub const __INT_FAST16_TYPE__ = c_short;
pub const __INT_FAST16_MAX__ = @as(c_int, 32767);
pub const __INT_FAST16_FMTd__ = "hd";
pub const __INT_FAST16_FMTi__ = "hi";
pub const __UINT_FAST16_TYPE__ = c_ushort;
pub const __UINT_FAST16_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 65535, .decimal);
pub const __UINT_FAST16_FMTo__ = "ho";
pub const __UINT_FAST16_FMTu__ = "hu";
pub const __UINT_FAST16_FMTx__ = "hx";
pub const __UINT_FAST16_FMTX__ = "hX";
pub const __INT_FAST32_TYPE__ = c_int;
pub const __INT_FAST32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const __INT_FAST32_FMTd__ = "d";
pub const __INT_FAST32_FMTi__ = "i";
pub const __UINT_FAST32_TYPE__ = c_uint;
pub const __UINT_FAST32_MAX__ = @import("std").zig.c_translation.promoteIntLiteral(c_uint, 4294967295, .decimal);
pub const __UINT_FAST32_FMTo__ = "o";
pub const __UINT_FAST32_FMTu__ = "u";
pub const __UINT_FAST32_FMTx__ = "x";
pub const __UINT_FAST32_FMTX__ = "X";
pub const __INT_FAST64_TYPE__ = c_longlong;
pub const __INT_FAST64_MAX__ = @as(c_longlong, 9223372036854775807);
pub const __INT_FAST64_FMTd__ = "lld";
pub const __INT_FAST64_FMTi__ = "lli";
pub const __UINT_FAST64_TYPE__ = c_ulonglong;
pub const __UINT_FAST64_MAX__ = @as(c_ulonglong, 18446744073709551615);
pub const __UINT_FAST64_FMTo__ = "llo";
pub const __UINT_FAST64_FMTu__ = "llu";
pub const __UINT_FAST64_FMTx__ = "llx";
pub const __UINT_FAST64_FMTX__ = "llX";
pub const __USER_LABEL_PREFIX__ = @"_";
pub const __FINITE_MATH_ONLY__ = @as(c_int, 0);
pub const __GNUC_STDC_INLINE__ = @as(c_int, 1);
pub const __GCC_ATOMIC_TEST_AND_SET_TRUEVAL = @as(c_int, 1);
pub const __CLANG_ATOMIC_BOOL_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_CHAR_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_CHAR16_T_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_CHAR32_T_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_WCHAR_T_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_SHORT_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_INT_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_LONG_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_LLONG_LOCK_FREE = @as(c_int, 2);
pub const __CLANG_ATOMIC_POINTER_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_BOOL_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_CHAR_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_CHAR16_T_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_CHAR32_T_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_WCHAR_T_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_SHORT_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_INT_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_LONG_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_LLONG_LOCK_FREE = @as(c_int, 2);
pub const __GCC_ATOMIC_POINTER_LOCK_FREE = @as(c_int, 2);
pub const __PIC__ = @as(c_int, 2);
pub const __pic__ = @as(c_int, 2);
pub const __FLT_EVAL_METHOD__ = @as(c_int, 0);
pub const __FLT_RADIX__ = @as(c_int, 2);
pub const __DECIMAL_DIG__ = __LDBL_DECIMAL_DIG__;
pub const __SSP_STRONG__ = @as(c_int, 2);
pub const __nonnull = _Nonnull;
pub const __null_unspecified = _Null_unspecified;
pub const __nullable = _Nullable;
pub const __GCC_ASM_FLAG_OUTPUTS__ = @as(c_int, 1);
pub const __code_model_small__ = @as(c_int, 1);
pub const __amd64__ = @as(c_int, 1);
pub const __amd64 = @as(c_int, 1);
pub const __x86_64 = @as(c_int, 1);
pub const __x86_64__ = @as(c_int, 1);
pub const __SEG_GS = @as(c_int, 1);
pub const __SEG_FS = @as(c_int, 1);
pub const __seg_gs = __attribute__(address_space(@as(c_int, 256)));
pub const __seg_fs = __attribute__(address_space(@as(c_int, 257)));
pub const __corei7 = @as(c_int, 1);
pub const __corei7__ = @as(c_int, 1);
pub const __tune_corei7__ = @as(c_int, 1);
pub const __NO_MATH_INLINES = @as(c_int, 1);
pub const __AES__ = @as(c_int, 1);
pub const __PCLMUL__ = @as(c_int, 1);
pub const __LAHF_SAHF__ = @as(c_int, 1);
pub const __LZCNT__ = @as(c_int, 1);
pub const __RDRND__ = @as(c_int, 1);
pub const __FSGSBASE__ = @as(c_int, 1);
pub const __BMI__ = @as(c_int, 1);
pub const __BMI2__ = @as(c_int, 1);
pub const __POPCNT__ = @as(c_int, 1);
pub const __RTM__ = @as(c_int, 1);
pub const __PRFCHW__ = @as(c_int, 1);
pub const __RDSEED__ = @as(c_int, 1);
pub const __ADX__ = @as(c_int, 1);
pub const __MOVBE__ = @as(c_int, 1);
pub const __FMA__ = @as(c_int, 1);
pub const __F16C__ = @as(c_int, 1);
pub const __FXSR__ = @as(c_int, 1);
pub const __XSAVE__ = @as(c_int, 1);
pub const __XSAVEOPT__ = @as(c_int, 1);
pub const __XSAVEC__ = @as(c_int, 1);
pub const __XSAVES__ = @as(c_int, 1);
pub const __CLFLUSHOPT__ = @as(c_int, 1);
pub const __SGX__ = @as(c_int, 1);
pub const __INVPCID__ = @as(c_int, 1);
pub const __AVX2__ = @as(c_int, 1);
pub const __AVX__ = @as(c_int, 1);
pub const __SSE4_2__ = @as(c_int, 1);
pub const __SSE4_1__ = @as(c_int, 1);
pub const __SSSE3__ = @as(c_int, 1);
pub const __SSE3__ = @as(c_int, 1);
pub const __SSE2__ = @as(c_int, 1);
pub const __SSE2_MATH__ = @as(c_int, 1);
pub const __SSE__ = @as(c_int, 1);
pub const __SSE_MATH__ = @as(c_int, 1);
pub const __MMX__ = @as(c_int, 1);
pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1 = @as(c_int, 1);
pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2 = @as(c_int, 1);
pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 = @as(c_int, 1);
pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 = @as(c_int, 1);
pub const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 = @as(c_int, 1);
pub const __APPLE_CC__ = @as(c_int, 6000);
pub const __APPLE__ = @as(c_int, 1);
pub const __STDC_NO_THREADS__ = @as(c_int, 1);
pub const __weak = __attribute__(objc_gc(weak));
pub const __DYNAMIC__ = @as(c_int, 1);
pub const __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ = @import("std").zig.c_translation.promoteIntLiteral(c_int, 101406, .decimal);
pub const __MACH__ = @as(c_int, 1);
pub const __STDC__ = @as(c_int, 1);
pub const __STDC_HOSTED__ = @as(c_int, 1);
pub const __STDC_VERSION__ = @as(c_long, 201710);
pub const __STDC_UTF_16__ = @as(c_int, 1);
pub const __STDC_UTF_32__ = @as(c_int, 1);
pub const _DEBUG = @as(c_int, 1);
pub const bool_1 = bool;
pub const true_2 = @as(c_int, 1);
pub const false_3 = @as(c_int, 0);
pub const __bool_true_false_are_defined = @as(c_int, 1);
pub const NULL = @import("std").zig.c_translation.cast(?*c_void, @as(c_int, 0));
pub const __WORDSIZE = @as(c_int, 64);
pub inline fn __P(protos: anytype) @TypeOf(protos) {
return protos;
}
pub const __signed = c_int;
pub const __dead2 = __attribute__(__noreturn__);
pub const __pure2 = __attribute__(__const__);
pub const __unused = __attribute__(__unused__);
pub const __used = __attribute__(__used__);
pub const __cold = __attribute__(__cold__);
pub const __deprecated = __attribute__(__deprecated__);
pub inline fn __deprecated_msg(_msg: anytype) @TypeOf(__attribute__(__deprecated__(_msg))) {
return __attribute__(__deprecated__(_msg));
}
pub inline fn __deprecated_enum_msg(_msg: anytype) @TypeOf(__deprecated_msg(_msg)) {
return __deprecated_msg(_msg);
}
pub const __unavailable = __attribute__(__unavailable__);
pub const __disable_tail_calls = __attribute__(__disable_tail_calls__);
pub const __not_tail_called = __attribute__(__not_tail_called__);
pub const __result_use_check = __attribute__(__warn_unused_result__);
pub const __abortlike = __dead2 ++ __cold ++ __not_tail_called;
pub const __header_always_inline = __header_inline ++ __attribute__(__always_inline__);
pub const __unreachable_ok_pop = _Pragma("clang diagnostic pop");
pub inline fn __printflike(fmtarg: anytype, firstvararg: anytype) @TypeOf(__attribute__(__format__(__printf__, fmtarg, firstvararg))) {
return __attribute__(__format__(__printf__, fmtarg, firstvararg));
}
pub inline fn __printf0like(fmtarg: anytype, firstvararg: anytype) @TypeOf(__attribute__(__format__(__printf0__, fmtarg, firstvararg))) {
return __attribute__(__format__(__printf0__, fmtarg, firstvararg));
}
pub inline fn __scanflike(fmtarg: anytype, firstvararg: anytype) @TypeOf(__attribute__(__format__(__scanf__, fmtarg, firstvararg))) {
return __attribute__(__format__(__scanf__, fmtarg, firstvararg));
}
pub inline fn __COPYRIGHT(s: anytype) @TypeOf(__IDSTRING(copyright, s)) {
return __IDSTRING(copyright, s);
}
pub inline fn __RCSID(s: anytype) @TypeOf(__IDSTRING(rcsid, s)) {
return __IDSTRING(rcsid, s);
}
pub inline fn __SCCSID(s: anytype) @TypeOf(__IDSTRING(sccsid, s)) {
return __IDSTRING(sccsid, s);
}
pub inline fn __PROJECT_VERSION(s: anytype) @TypeOf(__IDSTRING(project_version, s)) {
return __IDSTRING(project_version, s);
}
pub const __DARWIN_ONLY_64_BIT_INO_T = @as(c_int, 0);
pub const __DARWIN_ONLY_VERS_1050 = @as(c_int, 0);
pub const __DARWIN_ONLY_UNIX_CONFORMANCE = @as(c_int, 1);
pub const __DARWIN_UNIX03 = @as(c_int, 1);
pub const __DARWIN_64_BIT_INO_T = @as(c_int, 1);
pub const __DARWIN_VERS_1050 = @as(c_int, 1);
pub const __DARWIN_NON_CANCELABLE = @as(c_int, 0);
pub const __DARWIN_SUF_64_BIT_INO_T = "$INODE64";
pub const __DARWIN_SUF_1050 = "$1050";
pub const __DARWIN_SUF_EXTSN = "$DARWIN_EXTSN";
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_0(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_1(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_2(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_3(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_4(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_5(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_6(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_7(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_8(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_9(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_10(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_10_2(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_10_3(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_11(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_11_2(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_11_3(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_11_4(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_12(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_12_1(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_12_2(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_12_4(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_13(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_13_1(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_13_2(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_13_4(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_14(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_14_1(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_14_4(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_14_5(x: anytype) @TypeOf(x) {
return x;
}
pub inline fn __DARWIN_ALIAS_STARTING_MAC___MAC_10_14_6(x: anytype) @TypeOf(x) {
return x;
}
pub const __DARWIN_C_ANSI = @as(c_long, 0o10000);
pub const __DARWIN_C_FULL = @as(c_long, 900000);
pub const __DARWIN_C_LEVEL = __DARWIN_C_FULL;
pub const __STDC_WANT_LIB_EXT1__ = @as(c_int, 1);
pub const __DARWIN_NO_LONG_LONG = @as(c_int, 0);
pub const _DARWIN_FEATURE_64_BIT_INODE = @as(c_int, 1);
pub const _DARWIN_FEATURE_ONLY_UNIX_CONFORMANCE = @as(c_int, 1);
pub const _DARWIN_FEATURE_UNIX_CONFORMANCE = @as(c_int, 3);
pub inline fn __CAST_AWAY_QUALIFIER(variable: anytype, qualifier: anytype, type_1: anytype) @TypeOf(type_1(c_long)(variable)) {
_ = qualifier;
return type_1(c_long)(variable);
}
pub const __XNU_PRIVATE_EXTERN = __attribute__(visibility("hidden"));
pub const __enum_open = __attribute__(__enum_extensibility__(open));
pub const __enum_closed = __attribute__(__enum_extensibility__(closed));
pub const __enum_options = __attribute__(__flag_enum__);
pub const __DARWIN_NULL = @import("std").zig.c_translation.cast(?*c_void, @as(c_int, 0));
pub const __PTHREAD_SIZE__ = @as(c_int, 8176);
pub const __PTHREAD_ATTR_SIZE__ = @as(c_int, 56);
pub const __PTHREAD_MUTEXATTR_SIZE__ = @as(c_int, 8);
pub const __PTHREAD_MUTEX_SIZE__ = @as(c_int, 56);
pub const __PTHREAD_CONDATTR_SIZE__ = @as(c_int, 8);
pub const __PTHREAD_COND_SIZE__ = @as(c_int, 40);
pub const __PTHREAD_ONCE_SIZE__ = @as(c_int, 8);
pub const __PTHREAD_RWLOCK_SIZE__ = @as(c_int, 192);
pub const __PTHREAD_RWLOCKATTR_SIZE__ = @as(c_int, 16);
pub const USER_ADDR_NULL = @import("std").zig.c_translation.cast(user_addr_t, @as(c_int, 0));
pub inline fn CAST_USER_ADDR_T(a_ptr: anytype) user_addr_t {
return @import("std").zig.c_translation.cast(user_addr_t, @import("std").zig.c_translation.cast(usize, a_ptr));
}
pub inline fn INT8_C(v: anytype) @TypeOf(v) {
return v;
}
pub inline fn INT16_C(v: anytype) @TypeOf(v) {
return v;
}
pub inline fn INT32_C(v: anytype) @TypeOf(v) {
return v;
}
pub const INT64_C = @import("std").zig.c_translation.Macros.LL_SUFFIX;
pub inline fn UINT8_C(v: anytype) @TypeOf(v) {
return v;
}
pub inline fn UINT16_C(v: anytype) @TypeOf(v) {
return v;
}
pub const UINT32_C = @import("std").zig.c_translation.Macros.U_SUFFIX;
pub const UINT64_C = @import("std").zig.c_translation.Macros.ULL_SUFFIX;
pub const INTMAX_C = @import("std").zig.c_translation.Macros.L_SUFFIX;
pub const UINTMAX_C = @import("std").zig.c_translation.Macros.UL_SUFFIX;
pub const INT8_MAX = @as(c_int, 127);
pub const INT16_MAX = @as(c_int, 32767);
pub const INT32_MAX = @import("std").zig.c_translation.promoteIntLiteral(c_int, 2147483647, .decimal);
pub const INT64_MAX = @as(c_longlong, 9223372036854775807);
pub const INT8_MIN = -@as(c_int, 128);
pub const INT16_MIN = -@import("std").zig.c_translation.promoteIntLiteral(c_int, 32768, .decimal);
pub const INT32_MIN = -INT32_MAX - @as(c_int, 1);
pub const INT64_MIN = -INT64_MAX - @as(c_int, 1);
pub const UINT8_MAX = @as(c_int, 255);
pub const UINT16_MAX = @import("std").zig.c_translation.promoteIntLiteral(c_int, 65535, .decimal);
pub const UINT32_MAX = @import("std").zig.c_translation.promoteIntLiteral(c_uint, 4294967295, .decimal);
pub const UINT64_MAX = @as(c_ulonglong, 18446744073709551615);
pub const INT_LEAST8_MIN = INT8_MIN;
pub const INT_LEAST16_MIN = INT16_MIN;
pub const INT_LEAST32_MIN = INT32_MIN;
pub const INT_LEAST64_MIN = INT64_MIN;
pub const INT_LEAST8_MAX = INT8_MAX;
pub const INT_LEAST16_MAX = INT16_MAX;
pub const INT_LEAST32_MAX = INT32_MAX;
pub const INT_LEAST64_MAX = INT64_MAX;
pub const UINT_LEAST8_MAX = UINT8_MAX;
pub const UINT_LEAST16_MAX = UINT16_MAX;
pub const UINT_LEAST32_MAX = UINT32_MAX;
pub const UINT_LEAST64_MAX = UINT64_MAX;
pub const INT_FAST8_MIN = INT8_MIN;
pub const INT_FAST16_MIN = INT16_MIN;
pub const INT_FAST32_MIN = INT32_MIN;
pub const INT_FAST64_MIN = INT64_MIN;
pub const INT_FAST8_MAX = INT8_MAX;
pub const INT_FAST16_MAX = INT16_MAX;
pub const INT_FAST32_MAX = INT32_MAX;
pub const INT_FAST64_MAX = INT64_MAX;
pub const UINT_FAST8_MAX = UINT8_MAX;
pub const UINT_FAST16_MAX = UINT16_MAX;
pub const UINT_FAST32_MAX = UINT32_MAX;
pub const UINT_FAST64_MAX = UINT64_MAX;
pub const INTPTR_MAX = @import("std").zig.c_translation.promoteIntLiteral(c_long, 9223372036854775807, .decimal);
pub const INTPTR_MIN = -INTPTR_MAX - @as(c_int, 1);
pub const UINTPTR_MAX = @import("std").zig.c_translation.promoteIntLiteral(c_ulong, 18446744073709551615, .decimal);
pub const INTMAX_MAX = INTMAX_C(@import("std").zig.c_translation.promoteIntLiteral(c_int, 9223372036854775807, .decimal));
pub const UINTMAX_MAX = UINTMAX_C(@import("std").zig.c_translation.promoteIntLiteral(c_int, 18446744073709551615, .decimal));
pub const INTMAX_MIN = -INTMAX_MAX - @as(c_int, 1);
pub const PTRDIFF_MIN = INTMAX_MIN;
pub const PTRDIFF_MAX = INTMAX_MAX;
pub const SIZE_MAX = UINTPTR_MAX;
pub const RSIZE_MAX = SIZE_MAX >> @as(c_int, 1);
pub const WCHAR_MAX = __WCHAR_MAX__;
pub const WCHAR_MIN = -WCHAR_MAX - @as(c_int, 1);
pub const WINT_MIN = INT32_MIN;
pub const WINT_MAX = INT32_MAX;
pub const SIG_ATOMIC_MIN = INT32_MIN;
pub const SIG_ATOMIC_MAX = INT32_MAX;
pub const __va_list_tag = struct___va_list_tag;
pub const __darwin_pthread_handler_rec = struct___darwin_pthread_handler_rec;
pub const _opaque_pthread_attr_t = struct__opaque_pthread_attr_t;
pub const _opaque_pthread_cond_t = struct__opaque_pthread_cond_t;
pub const _opaque_pthread_condattr_t = struct__opaque_pthread_condattr_t;
pub const _opaque_pthread_mutex_t = struct__opaque_pthread_mutex_t;
pub const _opaque_pthread_mutexattr_t = struct__opaque_pthread_mutexattr_t;
pub const _opaque_pthread_once_t = struct__opaque_pthread_once_t;
pub const _opaque_pthread_rwlock_t = struct__opaque_pthread_rwlock_t;
pub const _opaque_pthread_rwlockattr_t = struct__opaque_pthread_rwlockattr_t;
pub const _opaque_pthread_t = struct__opaque_pthread_t;
pub const shaderc_compiler = struct_shaderc_compiler;
pub const shaderc_compile_options = struct_shaderc_compile_options;
pub const shaderc_include_type = enum_shaderc_include_type;
pub const shaderc_compilation_result = struct_shaderc_compilation_result; | vendor/shaderc.zig |
const std = @import("std");
const print = std.debug.print;
const util = @import("util.zig");
const gpa = util.gpa;
const data = @embedFile("../data/day11.txt");
const Octo = struct {
energy: usize = 0,
flashed: bool = false,
};
pub fn main() !void {
// init octo field (grown by 1 to make edges easier to handle)
var octos: [12][12]Octo = undefined;
for (octos) |row, i| {
for (row) |octo, j| {
octos[i][j] = .{ .energy = 0, .flashed = false };
}
}
// read in initial values
var lines = try util.toStrSlice(data, "\n");
defer gpa.free(lines);
for (lines) |line, i| {
for (line) |c, j| {
octos[i + 1][j + 1].energy = c - '0';
}
}
var num_flashes: usize = 0;
var step_all_flashed: usize = 0;
var step: usize = 0;
var all_flashed = false;
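    // Simulate steps: part 1 counts flashes during the first 100 steps, part 2
    // finds the first step on which all 100 octopuses flash at once, so keep
    // stepping until both answers are known.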
    while (step < 100 or step_all_flashed == 0) : (step += 1) {
// increase by one
for (octos[1..11]) |row, i| {
for (row[1..11]) |octo, j| {
octos[i + 1][j + 1].energy += 1;
}
}
// flash!
var flashed = true;
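        // Propagate flashes to a fixed point: each flash bumps all 8
        // neighbours by 1, which may push them over 9 and flash in turn.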
while (flashed) {
flashed = false;
for (octos[1..11]) |row, i| {
for (row[1..11]) |octo, j| {
if (octo.energy > 9 and !octo.flashed) {
if (step < 100) num_flashes += 1;
flashed = true;
// flash!
octos[i + 1][j + 1].flashed = true;
var r = i + 1;
var c = j + 1;
octos[r - 1][c - 1].energy += 1;
octos[r - 1][c].energy += 1;
octos[r - 1][c + 1].energy += 1;
octos[r][c + 1].energy += 1;
octos[r + 1][c + 1].energy += 1;
octos[r + 1][c].energy += 1;
octos[r + 1][c - 1].energy += 1;
octos[r][c - 1].energy += 1;
}
}
}
}
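        // Reset everyone that flashed back to 0 and check whether the whole
        // grid flashed this step.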
all_flashed = true;
for (octos[1..11]) |row, i| {
for (row[1..11]) |octo, j| {
if (octos[i + 1][j + 1].flashed) {
octos[i + 1][j + 1].energy = 0;
octos[i + 1][j + 1].flashed = false;
} else all_flashed = false;
}
}
        if (all_flashed and step_all_flashed == 0) step_all_flashed = step + 1;
}
print("{}\n", .{num_flashes});
print("{}\n", .{step_all_flashed});
} | 2021/src/day11.zig |
const std = @import("std");
const Builder = std.build.Builder;
const builtin = std.builtin;
const assert = std.debug.assert;
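// Directory containing this build.zig, used to form paths to the bootloader
// sources below.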
fn here() []const u8 {
return std.fs.path.dirname(@src().file) orelse ".";
}
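// Cross-compilation target for `arch`: on aarch64 the FP/SIMD features are
// stripped so generated code only uses general-purpose registers.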
fn cpu_features(arch: std.Target.Cpu.Arch, ctarget: std.zig.CrossTarget) std.zig.CrossTarget {
var disabled_features = std.Target.Cpu.Feature.Set.empty;
    var enabled_features = std.Target.Cpu.Feature.Set.empty;
if (arch == .aarch64) {
const features = std.Target.aarch64.Feature;
// This is equal to -mgeneral-regs-only
disabled_features.addFeature(@enumToInt(features.fp_armv8));
disabled_features.addFeature(@enumToInt(features.crypto));
disabled_features.addFeature(@enumToInt(features.neon));
}
return std.zig.CrossTarget{
.cpu_arch = arch,
.os_tag = ctarget.os_tag,
.abi = ctarget.abi,
.cpu_features_sub = disabled_features,
        .cpu_features_add = enabled_features,
};
}
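// Switches `elf` to a freestanding build for `arch`; aarch64 ELFs additionally
// get the tiny code model unless they are plain asm blobs.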
fn freestanding_target(elf: *std.build.LibExeObjStep, arch: std.Target.Cpu.Arch, do_code_model: bool) void {
if (arch == .aarch64) {
// We don't need the code model in asm blobs
if (do_code_model)
elf.code_model = .tiny;
}
elf.setTarget(cpu_features(arch, .{
.os_tag = std.Target.Os.Tag.freestanding,
.abi = std.Target.Abi.none,
}));
}
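// Settings shared by every Sabaton executable: ReleaseSmall, LTO and stack
// probing disabled, and a `build_options` package carrying the board name.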
fn executable_common(b: *Builder, exec: *std.build.LibExeObjStep, board_name: []const u8) void {
exec.setBuildMode(.ReleaseSmall);
var options = b.addOptions();
options.addOption([]const u8, "board_name", board_name);
exec.addOptions("build_options", options);
if (@hasField(@TypeOf(exec.*), "want_lto"))
exec.want_lto = false;
exec.setMainPkgPath(here() ++ "/src/");
exec.setOutputDir(b.cache_root);
exec.install();
exec.disable_stack_probing = true;
}
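// Builds the UEFI boot application (BOOTA64) for `arch` and drops it into
// uefidir/image/EFI/BOOT/, where the `uefi` run step picks it up.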
pub fn build_uefi(b: *Builder, arch: std.Target.Cpu.Arch) !*std.build.LibExeObjStep {
const filename = "BOOTA64";
const platform_path = b.fmt(here() ++ "/src/platform/uefi_{s}", .{@tagName(arch)});
const exec = b.addExecutable(filename, b.fmt("{s}/main.zig", .{platform_path}));
executable_common(b, exec, "UEFI");
exec.code_model = .small;
exec.setTarget(cpu_features(arch, .{
.os_tag = std.Target.Os.Tag.uefi,
.abi = std.Target.Abi.msvc,
}));
exec.setOutputDir(here() ++ "/uefidir/image/EFI/BOOT/");
return exec;
}
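// Builds the freestanding Sabaton ELF for src/platform/<target>_<arch>,
// linking the platform's linker.ld and entry.S.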
fn build_elf(b: *Builder, arch: std.Target.Cpu.Arch, target_name: []const u8) !*std.build.LibExeObjStep {
const elf_filename = b.fmt("Sabaton_{s}_{s}.elf", .{ target_name, @tagName(arch) });
const platform_path = b.fmt(here() ++ "/src/platform/{s}_{s}", .{ target_name, @tagName(arch) });
const elf = b.addExecutable(elf_filename, b.fmt("{s}/main.zig", .{platform_path}));
elf.setLinkerScriptPath(.{ .path = b.fmt("{s}/linker.ld", .{platform_path}) });
elf.addAssemblyFile(b.fmt("{s}/entry.S", .{platform_path}));
executable_common(b, elf, target_name);
freestanding_target(elf, arch, true);
return elf;
}
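// Assembles a standalone .S file and installs its .blob section as a raw
// binary.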
fn assembly_blob(b: *Builder, arch: std.Target.Cpu.Arch, name: []const u8, asm_file: []const u8) !*std.build.InstallRawStep {
const elf_filename = b.fmt("{s}_{s}.elf", .{ name, @tagName(arch) });
const elf = b.addExecutable(elf_filename, null);
elf.setLinkerScriptPath(.{ .path = "src/blob.ld" });
elf.addAssemblyFile(asm_file);
freestanding_target(elf, arch, false);
elf.setBuildMode(.ReleaseSafe);
elf.setMainPkgPath("src/");
elf.setOutputDir(b.cache_root);
elf.install();
return elf.installRaw(b.fmt("{s}.bin", .{elf_filename}), .{
.format = .bin,
.only_section_name = ".blob",
.pad_to_size = null,
});
}
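// Raw Sabaton image for the qemu virt board, padded to 64M so it can be
// attached as a pflash drive.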
pub fn aarch64VirtBlob(b: *Builder) !*std.build.InstallRawStep {
const elf = try build_elf(b, .aarch64, "virt");
return elf.installRaw(b.fmt("{s}.bin", .{elf.out_filename}), .{
.format = .bin,
.only_section_name = ".blob",
.pad_to_size = 64 * 1024 * 1024, // 64M
});
}
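// Adds a step that runs the virt blob under qemu-system-aarch64, passing the
// test kernel to Sabaton through fw_cfg.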
fn qemu_aarch64(b: *Builder, board_name: []const u8, desc: []const u8) !void {
const command_step = b.step(board_name, desc);
    const blob = try aarch64VirtBlob(b);
const blob_path = b.getInstallPath(blob.dest_dir, blob.dest_filename);
const run_step = b.addSystemCommand(&[_][]const u8{
// zig fmt: off
"qemu-system-aarch64",
"-M", board_name,
"-cpu", "cortex-a57",
"-m", "4G",
"-serial", "stdio",
//"-S", "-s",
"-d", "int",
"-smp", "8",
"-device", "ramfb",
"-fw_cfg", "opt/Sabaton/kernel,file=test/Flork_stivale2_aarch64",
"-drive", b.fmt("if=pflash,format=raw,file={s},readonly=on", .{blob_path}),
// zig fmt: on
});
run_step.step.dependOn(&blob.step);
command_step.dependOn(&run_step.step);
}
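// Adds the `pi3` step: runs the given ELF's .blob as a raspi3 kernel in QEMU,
// with the test kernel preloaded at 0x200000.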
fn qemu_pi3_aarch64(b: *Builder, desc: []const u8, elf: *std.build.LibExeObjStep) !void {
const command_step = b.step("pi3", desc);
const blob = elf.installRaw(b.fmt("{s}.bin", .{elf.out_filename}), .{
.format = .bin,
.only_section_name = ".blob",
.pad_to_size = null,
});
const blob_path = b.getInstallPath(blob.dest_dir, blob.dest_filename);
const run_step = b.addSystemCommand(&[_][]const u8{
// zig fmt: off
"qemu-system-aarch64",
"-M", "raspi3",
"-device", "loader,file=test/Flork_stivale2_aarch64,addr=0x200000,force-raw=on",
"-serial", "null",
"-serial", "stdio",
"-d", "int",
"-kernel", blob_path,
        // zig fmt: on
});
run_step.step.dependOn(&blob.step);
command_step.dependOn(&run_step.step);
}
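// Adds the `uefi` step: boots EDK2 firmware from $AARCH64_EDK_PATH in QEMU
// with uefidir/image exposed as a FAT drive.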
fn qemu_uefi_aarch64(b: *Builder, desc: []const u8, dep: *std.build.LibExeObjStep) !void {
const command_step = b.step("uefi", desc);
const run_step = b.addSystemCommand(&[_][]const u8{
// zig fmt: off
"qemu-system-aarch64",
"-M", "virt",
"-m", "4G",
"-cpu", "cortex-a57",
"-serial", "stdio",
"-device", "ramfb",
"-drive", b.fmt("if=pflash,format=raw,file={s}/QEMU_EFI.fd,readonly=on", .{std.os.getenv("AARCH64_EDK_PATH").?}),
"-drive", b.fmt("if=pflash,format=raw,file={s}/QEMU_VARS.fd", .{std.os.getenv("AARCH64_EDK_PATH").?}),
"-hdd", "fat:rw:uefidir/image",
"-usb",
"-device", "usb-ehci",
"-device", "usb-kbd",
        // zig fmt: on
});
run_step.step.dependOn(&dep.install_step.?.step);
command_step.dependOn(&run_step.step);
}
const Device = struct {
name: []const u8,
arch: std.Target.Cpu.Arch,
};
const AssemblyBlobSpec = struct {
name: []const u8,
arch: std.Target.Cpu.Arch,
path: []const u8,
};
pub fn build(b: *Builder) !void {
//make_source_blob(b);
try qemu_aarch64(
b,
"virt",
"Run aarch64 sabaton for the qemu virt board",
);
try qemu_pi3_aarch64(
b,
"Run aarch64 sabaton for the qemu raspi3 board",
try build_elf(b, .aarch64, "pi3"),
);
try qemu_uefi_aarch64(
b,
"Run aarch64 sabaton for UEFI",
try build_uefi(b, .aarch64),
);
{
const assembly_blobs = &[_]AssemblyBlobSpec{
.{ .path = "src/platform/pine_aarch64/identity.S", .name = "identity_pine", .arch = .aarch64 },
};
for (assembly_blobs) |spec| {
const blob_file = try assembly_blob(b, spec.arch, spec.name, spec.path);
b.default_step.dependOn(&blob_file.step);
}
}
{
const elf_devices = &[_]Device{};
for (elf_devices) |dev| {
const elf_file = try build_elf(b, .aarch64, dev.name);
const s = b.step(dev.name, b.fmt("Build the blob for {s}", .{dev.name}));
s.dependOn(&elf_file.step);
b.default_step.dependOn(s);
}
}
{
const blob_devices = &[_]Device{
.{ .name = "pine", .arch = .aarch64 },
};
for (blob_devices) |dev| {
const elf = try build_elf(b, dev.arch, dev.name);
const blob_file = elf.installRaw(b.fmt("{s}_{s}.bin", .{dev.name, @tagName(dev.arch)}), .{
.format = .bin,
.only_section_name = ".blob",
.pad_to_size = null,
});
const s = b.step(dev.name, b.fmt("Build the blob for {s}", .{dev.name}));
s.dependOn(&blob_file.step);
b.default_step.dependOn(s);
}
}
// qemu_riscv(b,
// "virt",
// "Run riscv64 sabaton on for the qemu virt board",
// build_elf(b, .riscv64, "virt"),
// );
} | build.zig |