code: string (lengths 38 to 801k)
repo_path: string (lengths 6 to 263)
const std = @import("std"); const builtin = @import("builtin"); const expect = std.testing.expect; const minInt = std.math.minInt; test "@bitReverse large exotic integer" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // Currently failing on stage1 for big-endian targets if (builtin.zig_backend == .stage1) return error.SkipZigTest; try expect(@bitReverse(u95, @as(u95, 0x123456789abcdef111213141)) == 0x4146424447bd9eac8f351624); } test "@bitReverse" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; comptime try testBitReverse(); try testBitReverse(); } fn testBitReverse() !void { // using comptime_ints, unsigned try expect(@bitReverse(u0, @as(u0, 0)) == 0); try expect(@bitReverse(u5, @as(u5, 0x12)) == 0x9); try expect(@bitReverse(u8, @as(u8, 0x12)) == 0x48); try expect(@bitReverse(u16, @as(u16, 0x1234)) == 0x2c48); try expect(@bitReverse(u24, @as(u24, 0x123456)) == 0x6a2c48); try expect(@bitReverse(u32, @as(u32, 0x12345678)) == 0x1e6a2c48); try expect(@bitReverse(u40, @as(u40, 0x123456789a)) == 0x591e6a2c48); try expect(@bitReverse(u48, @as(u48, 0x123456789abc)) == 0x3d591e6a2c48); try expect(@bitReverse(u56, @as(u56, 0x123456789abcde)) == 0x7b3d591e6a2c48); try expect(@bitReverse(u64, @as(u64, 0x123456789abcdef1)) == 0x8f7b3d591e6a2c48); try expect(@bitReverse(u96, @as(u96, 0x123456789abcdef111213141)) == 0x828c84888f7b3d591e6a2c48); try expect(@bitReverse(u128, @as(u128, 0x123456789abcdef11121314151617181)) == 0x818e868a828c84888f7b3d591e6a2c48); // using runtime uints, unsigned var num0: u0 = 0; try expect(@bitReverse(u0, num0) == 0); var num5: u5 = 0x12; try expect(@bitReverse(u5, num5) == 0x9); var num8: u8 = 0x12; try expect(@bitReverse(u8, num8) == 0x48); var num16: u16 = 0x1234; try expect(@bitReverse(u16, num16) == 0x2c48); var num24: u24 = 0x123456; try expect(@bitReverse(u24, num24) == 0x6a2c48); var num32: u32 = 0x12345678; try expect(@bitReverse(u32, num32) == 0x1e6a2c48); var num40: u40 = 0x123456789a; try expect(@bitReverse(u40, num40) == 0x591e6a2c48); var num48: u48 = 0x123456789abc; try expect(@bitReverse(u48, num48) == 0x3d591e6a2c48); var num56: u56 = 0x123456789abcde; try expect(@bitReverse(u56, num56) == 0x7b3d591e6a2c48); var num64: u64 = 0x123456789abcdef1; try expect(@bitReverse(u64, num64) == 0x8f7b3d591e6a2c48); var num128: u128 = 0x123456789abcdef11121314151617181; try expect(@bitReverse(u128, num128) == 0x818e868a828c84888f7b3d591e6a2c48); // using comptime_ints, signed, positive try expect(@bitReverse(u8, @as(u8, 0)) == 0); try expect(@bitReverse(i8, @bitCast(i8, @as(u8, 0x92))) == @bitCast(i8, @as(u8, 0x49))); try expect(@bitReverse(i16, @bitCast(i16, @as(u16, 0x1234))) == @bitCast(i16, @as(u16, 0x2c48))); try expect(@bitReverse(i24, @bitCast(i24, @as(u24, 0x123456))) == @bitCast(i24, @as(u24, 0x6a2c48))); try expect(@bitReverse(i24, @bitCast(i24, @as(u24, 0x12345f))) == @bitCast(i24, @as(u24, 0xfa2c48))); try expect(@bitReverse(i24, @bitCast(i24, @as(u24, 0xf23456))) == @bitCast(i24, 
@as(u24, 0x6a2c4f))); try expect(@bitReverse(i32, @bitCast(i32, @as(u32, 0x12345678))) == @bitCast(i32, @as(u32, 0x1e6a2c48))); try expect(@bitReverse(i32, @bitCast(i32, @as(u32, 0xf2345678))) == @bitCast(i32, @as(u32, 0x1e6a2c4f))); try expect(@bitReverse(i32, @bitCast(i32, @as(u32, 0x1234567f))) == @bitCast(i32, @as(u32, 0xfe6a2c48))); try expect(@bitReverse(i40, @bitCast(i40, @as(u40, 0x123456789a))) == @bitCast(i40, @as(u40, 0x591e6a2c48))); try expect(@bitReverse(i48, @bitCast(i48, @as(u48, 0x123456789abc))) == @bitCast(i48, @as(u48, 0x3d591e6a2c48))); try expect(@bitReverse(i56, @bitCast(i56, @as(u56, 0x123456789abcde))) == @bitCast(i56, @as(u56, 0x7b3d591e6a2c48))); try expect(@bitReverse(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1))) == @bitCast(i64, @as(u64, 0x8f7b3d591e6a2c48))); try expect(@bitReverse(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141))) == @bitCast(i96, @as(u96, 0x828c84888f7b3d591e6a2c48))); try expect(@bitReverse(i128, @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181))) == @bitCast(i128, @as(u128, 0x818e868a828c84888f7b3d591e6a2c48))); // using signed, negative. Compare to runtime ints returned from llvm. var neg8: i8 = -18; try expect(@bitReverse(i8, @as(i8, -18)) == @bitReverse(i8, neg8)); var neg16: i16 = -32694; try expect(@bitReverse(i16, @as(i16, -32694)) == @bitReverse(i16, neg16)); var neg24: i24 = -6773785; try expect(@bitReverse(i24, @as(i24, -6773785)) == @bitReverse(i24, neg24)); var neg32: i32 = -16773785; try expect(@bitReverse(i32, @as(i32, -16773785)) == @bitReverse(i32, neg32)); }
test/behavior/bitreverse.zig
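// Illustrative cross-check (an addition, not part of the test file above, and
// not how the compiler lowers the builtin): a hand-rolled u8 bit reversal that
// should agree with the two-argument @bitReverse form used in these tests.
const std = @import("std");

fn naiveReverseU8(x: u8) u8 {
    var in = x;
    var out: u8 = 0;
    var i: usize = 0;
    while (i < 8) : (i += 1) {
        // Shift the lowest bit of `in` into the low end of `out`.
        out = (out << 1) | (in & 1);
        in >>= 1;
    }
    return out;
}

test "naive u8 reversal agrees with @bitReverse" {
    var x: u8 = 0x12;
    try std.testing.expect(naiveReverseU8(x) == @bitReverse(u8, x));
    try std.testing.expect(naiveReverseU8(x) == 0x48);
}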
const std = @import("std"); const assertOrPanic = std.debug.assertOrPanic; const minInt = std.math.minInt; test "@bitreverse" { comptime testBitReverse(); testBitReverse(); } fn testBitReverse() void { // using comptime_ints, unsigned assertOrPanic(@bitreverse(u0, 0) == 0); assertOrPanic(@bitreverse(u5, 0x12) == 0x9); assertOrPanic(@bitreverse(u8, 0x12) == 0x48); assertOrPanic(@bitreverse(u16, 0x1234) == 0x2c48); assertOrPanic(@bitreverse(u24, 0x123456) == 0x6a2c48); assertOrPanic(@bitreverse(u32, 0x12345678) == 0x1e6a2c48); assertOrPanic(@bitreverse(u40, 0x123456789a) == 0x591e6a2c48); assertOrPanic(@bitreverse(u48, 0x123456789abc) == 0x3d591e6a2c48); assertOrPanic(@bitreverse(u56, 0x123456789abcde) == 0x7b3d591e6a2c48); assertOrPanic(@bitreverse(u64, 0x123456789abcdef1) == 0x8f7b3d591e6a2c48); assertOrPanic(@bitreverse(u128, 0x123456789abcdef11121314151617181) == 0x818e868a828c84888f7b3d591e6a2c48); // using runtime uints, unsigned var num0: u0 = 0; assertOrPanic(@bitreverse(u0, num0) == 0); var num5: u5 = 0x12; assertOrPanic(@bitreverse(u5, num5) == 0x9); var num8: u8 = 0x12; assertOrPanic(@bitreverse(u8, num8) == 0x48); var num16: u16 = 0x1234; assertOrPanic(@bitreverse(u16, num16) == 0x2c48); var num24: u24 = 0x123456; assertOrPanic(@bitreverse(u24, num24) == 0x6a2c48); var num32: u32 = 0x12345678; assertOrPanic(@bitreverse(u32, num32) == 0x1e6a2c48); var num40: u40 = 0x123456789a; assertOrPanic(@bitreverse(u40, num40) == 0x591e6a2c48); var num48: u48 = 0x123456789abc; assertOrPanic(@bitreverse(u48, num48) == 0x3d591e6a2c48); var num56: u56 = 0x123456789abcde; assertOrPanic(@bitreverse(u56, num56) == 0x7b3d591e6a2c48); var num64: u64 = 0x123456789abcdef1; assertOrPanic(@bitreverse(u64, num64) == 0x8f7b3d591e6a2c48); var num128: u128 = 0x123456789abcdef11121314151617181; assertOrPanic(@bitreverse(u128, num128) == 0x818e868a828c84888f7b3d591e6a2c48); // using comptime_ints, signed, positive assertOrPanic(@bitreverse(i0, 0) == 0); assertOrPanic(@bitreverse(i8, @bitCast(i8, u8(0x92))) == @bitCast(i8, u8(0x49))); assertOrPanic(@bitreverse(i16, @bitCast(i16, u16(0x1234))) == @bitCast(i16, u16(0x2c48))); assertOrPanic(@bitreverse(i24, @bitCast(i24, u24(0x123456))) == @bitCast(i24, u24(0x6a2c48))); assertOrPanic(@bitreverse(i32, @bitCast(i32, u32(0x12345678))) == @bitCast(i32, u32(0x1e6a2c48))); assertOrPanic(@bitreverse(i40, @bitCast(i40, u40(0x123456789a))) == @bitCast(i40, u40(0x591e6a2c48))); assertOrPanic(@bitreverse(i48, @bitCast(i48, u48(0x123456789abc))) == @bitCast(i48, u48(0x3d591e6a2c48))); assertOrPanic(@bitreverse(i56, @bitCast(i56, u56(0x123456789abcde))) == @bitCast(i56, u56(0x7b3d591e6a2c48))); assertOrPanic(@bitreverse(i64, @bitCast(i64, u64(0x123456789abcdef1))) == @bitCast(i64, u64(0x8f7b3d591e6a2c48))); assertOrPanic(@bitreverse(i128, @bitCast(i128, u128(0x123456789abcdef11121314151617181))) == @bitCast(i128, u128(0x818e868a828c84888f7b3d591e6a2c48))); // using comptime_ints, signed, negative. Compare to runtime ints returned from llvm. 
var neg5: i5 = minInt(i5) + 1; assertOrPanic(@bitreverse(i5, minInt(i5) + 1) == @bitreverse(i5, neg5)); var neg8: i8 = -18; assertOrPanic(@bitreverse(i8, -18) == @bitreverse(i8, neg8)); var neg16: i16 = -32694; assertOrPanic(@bitreverse(i16, -32694) == @bitreverse(i16, neg16)); var neg24: i24 = -6773785; assertOrPanic(@bitreverse(i24, -6773785) == @bitreverse(i24, neg24)); var neg32: i32 = -16773785; assertOrPanic(@bitreverse(i32, -16773785) == @bitreverse(i32, neg32)); var neg40: i40 = minInt(i40) + 12345; assertOrPanic(@bitreverse(i40, minInt(i40) + 12345) == @bitreverse(i40, neg40)); var neg48: i48 = minInt(i48) + 12345; assertOrPanic(@bitreverse(i48, minInt(i48) + 12345) == @bitreverse(i48, neg48)); var neg56: i56 = minInt(i56) + 12345; assertOrPanic(@bitreverse(i56, minInt(i56) + 12345) == @bitreverse(i56, neg56)); var neg64: i64 = minInt(i64) + 12345; assertOrPanic(@bitreverse(i64, minInt(i64) + 12345) == @bitreverse(i64, neg64)); var neg128: i128 = minInt(i128) + 12345; assertOrPanic(@bitreverse(i128, minInt(i128) + 12345) == @bitreverse(i128, neg128)); }
test/stage1/behavior/bitreverse.zig
const std = @import("std"); const winzigo = @import("winzigo"); var core: winzigo = undefined; var window: winzigo.Window = undefined; var gpa = std.heap.GeneralPurposeAllocator(.{}){}; pub fn init() !void { const allocator = gpa.allocator(); core = try winzigo.init(allocator); errdefer core.deinit(); window = try core.createWindow(.{}); errdefer window.deinit(); } pub fn update() !bool { while (core.pollEvent()) |event| { switch (event.ev) { .key_press => |ev| { if (ev.key == .escape) return false; std.log.info("key pressed {s}", .{@tagName(ev.key)}); }, .key_release => |ev| { std.log.info("key released {s}", .{@tagName(ev.key)}); }, .button_press => |ev| { std.log.info("button pressed {s}", .{@tagName(ev.button)}); }, .button_release => |ev| { std.log.info("button released {s}", .{@tagName(ev.button)}); }, .mouse_scroll => |ev| { std.log.info("mouse scroll x: {} y: {}", .{ ev.scroll_x, ev.scroll_y }); }, .mouse_motion => |ev| { std.log.info("mouse pos x: {} y: {}", .{ ev.x, ev.y }); }, .mouse_enter => |ev| { std.log.info("mouse entered window at x: {} y: {}", .{ ev.x, ev.y }); }, .mouse_leave => |ev| { std.log.info("mouse left window at x: {} y: {}", .{ ev.x, ev.y }); }, .focus_in => |_| { std.log.info("gained focus", .{}); }, .focus_out => |_| { std.log.info("lost focus", .{}); }, .window_resize => |ev| { std.log.info("window resized, width: {} height: {}", .{ ev.width, ev.height }); }, .quit => |_| { std.log.info("quit", .{}); return false; }, } } return true; } pub fn deinit() void { window.deinit(); core.deinit(); _ = gpa.deinit(); std.log.info("All your queued events are belong to us.", .{}); } test "basic test" { try std.testing.expectEqual(10, 3 + 7); }
examples/events.zig
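// A minimal sketch of a host program that could drive the init/update/deinit
// functions above. The import name "events.zig" and the build wiring for the
// winzigo dependency are assumptions, not something the example itself defines.
const events = @import("events.zig");

pub fn main() !void {
    try events.init();
    defer events.deinit();
    // Keep pumping events until update() reports that the app should quit.
    while (try events.update()) {}
}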
/// composite maps the code points `a` and `b` to their composite if any. pub fn composite(a: u21, b: u21) ?u21 { if (a == 0x0041 and b == 0x0300) return 0x00C0; if (a == 0x0041 and b == 0x0301) return 0x00C1; if (a == 0x0041 and b == 0x0302) return 0x00C2; if (a == 0x0041 and b == 0x0303) return 0x00C3; if (a == 0x0041 and b == 0x0308) return 0x00C4; if (a == 0x0041 and b == 0x030A) return 0x00C5; if (a == 0x0043 and b == 0x0327) return 0x00C7; if (a == 0x0045 and b == 0x0300) return 0x00C8; if (a == 0x0045 and b == 0x0301) return 0x00C9; if (a == 0x0045 and b == 0x0302) return 0x00CA; if (a == 0x0045 and b == 0x0308) return 0x00CB; if (a == 0x0049 and b == 0x0300) return 0x00CC; if (a == 0x0049 and b == 0x0301) return 0x00CD; if (a == 0x0049 and b == 0x0302) return 0x00CE; if (a == 0x0049 and b == 0x0308) return 0x00CF; if (a == 0x004E and b == 0x0303) return 0x00D1; if (a == 0x004F and b == 0x0300) return 0x00D2; if (a == 0x004F and b == 0x0301) return 0x00D3; if (a == 0x004F and b == 0x0302) return 0x00D4; if (a == 0x004F and b == 0x0303) return 0x00D5; if (a == 0x004F and b == 0x0308) return 0x00D6; if (a == 0x0055 and b == 0x0300) return 0x00D9; if (a == 0x0055 and b == 0x0301) return 0x00DA; if (a == 0x0055 and b == 0x0302) return 0x00DB; if (a == 0x0055 and b == 0x0308) return 0x00DC; if (a == 0x0059 and b == 0x0301) return 0x00DD; if (a == 0x0061 and b == 0x0300) return 0x00E0; if (a == 0x0061 and b == 0x0301) return 0x00E1; if (a == 0x0061 and b == 0x0302) return 0x00E2; if (a == 0x0061 and b == 0x0303) return 0x00E3; if (a == 0x0061 and b == 0x0308) return 0x00E4; if (a == 0x0061 and b == 0x030A) return 0x00E5; if (a == 0x0063 and b == 0x0327) return 0x00E7; if (a == 0x0065 and b == 0x0300) return 0x00E8; if (a == 0x0065 and b == 0x0301) return 0x00E9; if (a == 0x0065 and b == 0x0302) return 0x00EA; if (a == 0x0065 and b == 0x0308) return 0x00EB; if (a == 0x0069 and b == 0x0300) return 0x00EC; if (a == 0x0069 and b == 0x0301) return 0x00ED; if (a == 0x0069 and b == 0x0302) return 0x00EE; if (a == 0x0069 and b == 0x0308) return 0x00EF; if (a == 0x006E and b == 0x0303) return 0x00F1; if (a == 0x006F and b == 0x0300) return 0x00F2; if (a == 0x006F and b == 0x0301) return 0x00F3; if (a == 0x006F and b == 0x0302) return 0x00F4; if (a == 0x006F and b == 0x0303) return 0x00F5; if (a == 0x006F and b == 0x0308) return 0x00F6; if (a == 0x0075 and b == 0x0300) return 0x00F9; if (a == 0x0075 and b == 0x0301) return 0x00FA; if (a == 0x0075 and b == 0x0302) return 0x00FB; if (a == 0x0075 and b == 0x0308) return 0x00FC; if (a == 0x0079 and b == 0x0301) return 0x00FD; if (a == 0x0079 and b == 0x0308) return 0x00FF; if (a == 0x0041 and b == 0x0304) return 0x0100; if (a == 0x0061 and b == 0x0304) return 0x0101; if (a == 0x0041 and b == 0x0306) return 0x0102; if (a == 0x0061 and b == 0x0306) return 0x0103; if (a == 0x0041 and b == 0x0328) return 0x0104; if (a == 0x0061 and b == 0x0328) return 0x0105; if (a == 0x0043 and b == 0x0301) return 0x0106; if (a == 0x0063 and b == 0x0301) return 0x0107; if (a == 0x0043 and b == 0x0302) return 0x0108; if (a == 0x0063 and b == 0x0302) return 0x0109; if (a == 0x0043 and b == 0x0307) return 0x010A; if (a == 0x0063 and b == 0x0307) return 0x010B; if (a == 0x0043 and b == 0x030C) return 0x010C; if (a == 0x0063 and b == 0x030C) return 0x010D; if (a == 0x0044 and b == 0x030C) return 0x010E; if (a == 0x0064 and b == 0x030C) return 0x010F; if (a == 0x0045 and b == 0x0304) return 0x0112; if (a == 0x0065 and b == 0x0304) return 0x0113; if (a == 0x0045 and b == 0x0306) 
return 0x0114; if (a == 0x0065 and b == 0x0306) return 0x0115; if (a == 0x0045 and b == 0x0307) return 0x0116; if (a == 0x0065 and b == 0x0307) return 0x0117; if (a == 0x0045 and b == 0x0328) return 0x0118; if (a == 0x0065 and b == 0x0328) return 0x0119; if (a == 0x0045 and b == 0x030C) return 0x011A; if (a == 0x0065 and b == 0x030C) return 0x011B; if (a == 0x0047 and b == 0x0302) return 0x011C; if (a == 0x0067 and b == 0x0302) return 0x011D; if (a == 0x0047 and b == 0x0306) return 0x011E; if (a == 0x0067 and b == 0x0306) return 0x011F; if (a == 0x0047 and b == 0x0307) return 0x0120; if (a == 0x0067 and b == 0x0307) return 0x0121; if (a == 0x0047 and b == 0x0327) return 0x0122; if (a == 0x0067 and b == 0x0327) return 0x0123; if (a == 0x0048 and b == 0x0302) return 0x0124; if (a == 0x0068 and b == 0x0302) return 0x0125; if (a == 0x0049 and b == 0x0303) return 0x0128; if (a == 0x0069 and b == 0x0303) return 0x0129; if (a == 0x0049 and b == 0x0304) return 0x012A; if (a == 0x0069 and b == 0x0304) return 0x012B; if (a == 0x0049 and b == 0x0306) return 0x012C; if (a == 0x0069 and b == 0x0306) return 0x012D; if (a == 0x0049 and b == 0x0328) return 0x012E; if (a == 0x0069 and b == 0x0328) return 0x012F; if (a == 0x0049 and b == 0x0307) return 0x0130; if (a == 0x004A and b == 0x0302) return 0x0134; if (a == 0x006A and b == 0x0302) return 0x0135; if (a == 0x004B and b == 0x0327) return 0x0136; if (a == 0x006B and b == 0x0327) return 0x0137; if (a == 0x004C and b == 0x0301) return 0x0139; if (a == 0x006C and b == 0x0301) return 0x013A; if (a == 0x004C and b == 0x0327) return 0x013B; if (a == 0x006C and b == 0x0327) return 0x013C; if (a == 0x004C and b == 0x030C) return 0x013D; if (a == 0x006C and b == 0x030C) return 0x013E; if (a == 0x004E and b == 0x0301) return 0x0143; if (a == 0x006E and b == 0x0301) return 0x0144; if (a == 0x004E and b == 0x0327) return 0x0145; if (a == 0x006E and b == 0x0327) return 0x0146; if (a == 0x004E and b == 0x030C) return 0x0147; if (a == 0x006E and b == 0x030C) return 0x0148; if (a == 0x004F and b == 0x0304) return 0x014C; if (a == 0x006F and b == 0x0304) return 0x014D; if (a == 0x004F and b == 0x0306) return 0x014E; if (a == 0x006F and b == 0x0306) return 0x014F; if (a == 0x004F and b == 0x030B) return 0x0150; if (a == 0x006F and b == 0x030B) return 0x0151; if (a == 0x0052 and b == 0x0301) return 0x0154; if (a == 0x0072 and b == 0x0301) return 0x0155; if (a == 0x0052 and b == 0x0327) return 0x0156; if (a == 0x0072 and b == 0x0327) return 0x0157; if (a == 0x0052 and b == 0x030C) return 0x0158; if (a == 0x0072 and b == 0x030C) return 0x0159; if (a == 0x0053 and b == 0x0301) return 0x015A; if (a == 0x0073 and b == 0x0301) return 0x015B; if (a == 0x0053 and b == 0x0302) return 0x015C; if (a == 0x0073 and b == 0x0302) return 0x015D; if (a == 0x0053 and b == 0x0327) return 0x015E; if (a == 0x0073 and b == 0x0327) return 0x015F; if (a == 0x0053 and b == 0x030C) return 0x0160; if (a == 0x0073 and b == 0x030C) return 0x0161; if (a == 0x0054 and b == 0x0327) return 0x0162; if (a == 0x0074 and b == 0x0327) return 0x0163; if (a == 0x0054 and b == 0x030C) return 0x0164; if (a == 0x0074 and b == 0x030C) return 0x0165; if (a == 0x0055 and b == 0x0303) return 0x0168; if (a == 0x0075 and b == 0x0303) return 0x0169; if (a == 0x0055 and b == 0x0304) return 0x016A; if (a == 0x0075 and b == 0x0304) return 0x016B; if (a == 0x0055 and b == 0x0306) return 0x016C; if (a == 0x0075 and b == 0x0306) return 0x016D; if (a == 0x0055 and b == 0x030A) return 0x016E; if (a == 0x0075 and b == 0x030A) 
return 0x016F; if (a == 0x0055 and b == 0x030B) return 0x0170; if (a == 0x0075 and b == 0x030B) return 0x0171; if (a == 0x0055 and b == 0x0328) return 0x0172; if (a == 0x0075 and b == 0x0328) return 0x0173; if (a == 0x0057 and b == 0x0302) return 0x0174; if (a == 0x0077 and b == 0x0302) return 0x0175; if (a == 0x0059 and b == 0x0302) return 0x0176; if (a == 0x0079 and b == 0x0302) return 0x0177; if (a == 0x0059 and b == 0x0308) return 0x0178; if (a == 0x005A and b == 0x0301) return 0x0179; if (a == 0x007A and b == 0x0301) return 0x017A; if (a == 0x005A and b == 0x0307) return 0x017B; if (a == 0x007A and b == 0x0307) return 0x017C; if (a == 0x005A and b == 0x030C) return 0x017D; if (a == 0x007A and b == 0x030C) return 0x017E; if (a == 0x004F and b == 0x031B) return 0x01A0; if (a == 0x006F and b == 0x031B) return 0x01A1; if (a == 0x0055 and b == 0x031B) return 0x01AF; if (a == 0x0075 and b == 0x031B) return 0x01B0; if (a == 0x0041 and b == 0x030C) return 0x01CD; if (a == 0x0061 and b == 0x030C) return 0x01CE; if (a == 0x0049 and b == 0x030C) return 0x01CF; if (a == 0x0069 and b == 0x030C) return 0x01D0; if (a == 0x004F and b == 0x030C) return 0x01D1; if (a == 0x006F and b == 0x030C) return 0x01D2; if (a == 0x0055 and b == 0x030C) return 0x01D3; if (a == 0x0075 and b == 0x030C) return 0x01D4; if (a == 0x00DC and b == 0x0304) return 0x01D5; if (a == 0x00FC and b == 0x0304) return 0x01D6; if (a == 0x00DC and b == 0x0301) return 0x01D7; if (a == 0x00FC and b == 0x0301) return 0x01D8; if (a == 0x00DC and b == 0x030C) return 0x01D9; if (a == 0x00FC and b == 0x030C) return 0x01DA; if (a == 0x00DC and b == 0x0300) return 0x01DB; if (a == 0x00FC and b == 0x0300) return 0x01DC; if (a == 0x00C4 and b == 0x0304) return 0x01DE; if (a == 0x00E4 and b == 0x0304) return 0x01DF; if (a == 0x0226 and b == 0x0304) return 0x01E0; if (a == 0x0227 and b == 0x0304) return 0x01E1; if (a == 0x00C6 and b == 0x0304) return 0x01E2; if (a == 0x00E6 and b == 0x0304) return 0x01E3; if (a == 0x0047 and b == 0x030C) return 0x01E6; if (a == 0x0067 and b == 0x030C) return 0x01E7; if (a == 0x004B and b == 0x030C) return 0x01E8; if (a == 0x006B and b == 0x030C) return 0x01E9; if (a == 0x004F and b == 0x0328) return 0x01EA; if (a == 0x006F and b == 0x0328) return 0x01EB; if (a == 0x01EA and b == 0x0304) return 0x01EC; if (a == 0x01EB and b == 0x0304) return 0x01ED; if (a == 0x01B7 and b == 0x030C) return 0x01EE; if (a == 0x0292 and b == 0x030C) return 0x01EF; if (a == 0x006A and b == 0x030C) return 0x01F0; if (a == 0x0047 and b == 0x0301) return 0x01F4; if (a == 0x0067 and b == 0x0301) return 0x01F5; if (a == 0x004E and b == 0x0300) return 0x01F8; if (a == 0x006E and b == 0x0300) return 0x01F9; if (a == 0x00C5 and b == 0x0301) return 0x01FA; if (a == 0x00E5 and b == 0x0301) return 0x01FB; if (a == 0x00C6 and b == 0x0301) return 0x01FC; if (a == 0x00E6 and b == 0x0301) return 0x01FD; if (a == 0x00D8 and b == 0x0301) return 0x01FE; if (a == 0x00F8 and b == 0x0301) return 0x01FF; if (a == 0x0041 and b == 0x030F) return 0x0200; if (a == 0x0061 and b == 0x030F) return 0x0201; if (a == 0x0041 and b == 0x0311) return 0x0202; if (a == 0x0061 and b == 0x0311) return 0x0203; if (a == 0x0045 and b == 0x030F) return 0x0204; if (a == 0x0065 and b == 0x030F) return 0x0205; if (a == 0x0045 and b == 0x0311) return 0x0206; if (a == 0x0065 and b == 0x0311) return 0x0207; if (a == 0x0049 and b == 0x030F) return 0x0208; if (a == 0x0069 and b == 0x030F) return 0x0209; if (a == 0x0049 and b == 0x0311) return 0x020A; if (a == 0x0069 and b == 0x0311) 
return 0x020B; if (a == 0x004F and b == 0x030F) return 0x020C; if (a == 0x006F and b == 0x030F) return 0x020D; if (a == 0x004F and b == 0x0311) return 0x020E; if (a == 0x006F and b == 0x0311) return 0x020F; if (a == 0x0052 and b == 0x030F) return 0x0210; if (a == 0x0072 and b == 0x030F) return 0x0211; if (a == 0x0052 and b == 0x0311) return 0x0212; if (a == 0x0072 and b == 0x0311) return 0x0213; if (a == 0x0055 and b == 0x030F) return 0x0214; if (a == 0x0075 and b == 0x030F) return 0x0215; if (a == 0x0055 and b == 0x0311) return 0x0216; if (a == 0x0075 and b == 0x0311) return 0x0217; if (a == 0x0053 and b == 0x0326) return 0x0218; if (a == 0x0073 and b == 0x0326) return 0x0219; if (a == 0x0054 and b == 0x0326) return 0x021A; if (a == 0x0074 and b == 0x0326) return 0x021B; if (a == 0x0048 and b == 0x030C) return 0x021E; if (a == 0x0068 and b == 0x030C) return 0x021F; if (a == 0x0041 and b == 0x0307) return 0x0226; if (a == 0x0061 and b == 0x0307) return 0x0227; if (a == 0x0045 and b == 0x0327) return 0x0228; if (a == 0x0065 and b == 0x0327) return 0x0229; if (a == 0x00D6 and b == 0x0304) return 0x022A; if (a == 0x00F6 and b == 0x0304) return 0x022B; if (a == 0x00D5 and b == 0x0304) return 0x022C; if (a == 0x00F5 and b == 0x0304) return 0x022D; if (a == 0x004F and b == 0x0307) return 0x022E; if (a == 0x006F and b == 0x0307) return 0x022F; if (a == 0x022E and b == 0x0304) return 0x0230; if (a == 0x022F and b == 0x0304) return 0x0231; if (a == 0x0059 and b == 0x0304) return 0x0232; if (a == 0x0079 and b == 0x0304) return 0x0233; if (a == 0x0308 and b == 0x0301) return 0x0344; if (a == 0x00A8 and b == 0x0301) return 0x0385; if (a == 0x0391 and b == 0x0301) return 0x0386; if (a == 0x0395 and b == 0x0301) return 0x0388; if (a == 0x0397 and b == 0x0301) return 0x0389; if (a == 0x0399 and b == 0x0301) return 0x038A; if (a == 0x039F and b == 0x0301) return 0x038C; if (a == 0x03A5 and b == 0x0301) return 0x038E; if (a == 0x03A9 and b == 0x0301) return 0x038F; if (a == 0x03CA and b == 0x0301) return 0x0390; if (a == 0x0399 and b == 0x0308) return 0x03AA; if (a == 0x03A5 and b == 0x0308) return 0x03AB; if (a == 0x03B1 and b == 0x0301) return 0x03AC; if (a == 0x03B5 and b == 0x0301) return 0x03AD; if (a == 0x03B7 and b == 0x0301) return 0x03AE; if (a == 0x03B9 and b == 0x0301) return 0x03AF; if (a == 0x03CB and b == 0x0301) return 0x03B0; if (a == 0x03B9 and b == 0x0308) return 0x03CA; if (a == 0x03C5 and b == 0x0308) return 0x03CB; if (a == 0x03BF and b == 0x0301) return 0x03CC; if (a == 0x03C5 and b == 0x0301) return 0x03CD; if (a == 0x03C9 and b == 0x0301) return 0x03CE; if (a == 0x03D2 and b == 0x0301) return 0x03D3; if (a == 0x03D2 and b == 0x0308) return 0x03D4; if (a == 0x0415 and b == 0x0300) return 0x0400; if (a == 0x0415 and b == 0x0308) return 0x0401; if (a == 0x0413 and b == 0x0301) return 0x0403; if (a == 0x0406 and b == 0x0308) return 0x0407; if (a == 0x041A and b == 0x0301) return 0x040C; if (a == 0x0418 and b == 0x0300) return 0x040D; if (a == 0x0423 and b == 0x0306) return 0x040E; if (a == 0x0418 and b == 0x0306) return 0x0419; if (a == 0x0438 and b == 0x0306) return 0x0439; if (a == 0x0435 and b == 0x0300) return 0x0450; if (a == 0x0435 and b == 0x0308) return 0x0451; if (a == 0x0433 and b == 0x0301) return 0x0453; if (a == 0x0456 and b == 0x0308) return 0x0457; if (a == 0x043A and b == 0x0301) return 0x045C; if (a == 0x0438 and b == 0x0300) return 0x045D; if (a == 0x0443 and b == 0x0306) return 0x045E; if (a == 0x0474 and b == 0x030F) return 0x0476; if (a == 0x0475 and b == 0x030F) 
return 0x0477; if (a == 0x0416 and b == 0x0306) return 0x04C1; if (a == 0x0436 and b == 0x0306) return 0x04C2; if (a == 0x0410 and b == 0x0306) return 0x04D0; if (a == 0x0430 and b == 0x0306) return 0x04D1; if (a == 0x0410 and b == 0x0308) return 0x04D2; if (a == 0x0430 and b == 0x0308) return 0x04D3; if (a == 0x0415 and b == 0x0306) return 0x04D6; if (a == 0x0435 and b == 0x0306) return 0x04D7; if (a == 0x04D8 and b == 0x0308) return 0x04DA; if (a == 0x04D9 and b == 0x0308) return 0x04DB; if (a == 0x0416 and b == 0x0308) return 0x04DC; if (a == 0x0436 and b == 0x0308) return 0x04DD; if (a == 0x0417 and b == 0x0308) return 0x04DE; if (a == 0x0437 and b == 0x0308) return 0x04DF; if (a == 0x0418 and b == 0x0304) return 0x04E2; if (a == 0x0438 and b == 0x0304) return 0x04E3; if (a == 0x0418 and b == 0x0308) return 0x04E4; if (a == 0x0438 and b == 0x0308) return 0x04E5; if (a == 0x041E and b == 0x0308) return 0x04E6; if (a == 0x043E and b == 0x0308) return 0x04E7; if (a == 0x04E8 and b == 0x0308) return 0x04EA; if (a == 0x04E9 and b == 0x0308) return 0x04EB; if (a == 0x042D and b == 0x0308) return 0x04EC; if (a == 0x044D and b == 0x0308) return 0x04ED; if (a == 0x0423 and b == 0x0304) return 0x04EE; if (a == 0x0443 and b == 0x0304) return 0x04EF; if (a == 0x0423 and b == 0x0308) return 0x04F0; if (a == 0x0443 and b == 0x0308) return 0x04F1; if (a == 0x0423 and b == 0x030B) return 0x04F2; if (a == 0x0443 and b == 0x030B) return 0x04F3; if (a == 0x0427 and b == 0x0308) return 0x04F4; if (a == 0x0447 and b == 0x0308) return 0x04F5; if (a == 0x042B and b == 0x0308) return 0x04F8; if (a == 0x044B and b == 0x0308) return 0x04F9; if (a == 0x0627 and b == 0x0653) return 0x0622; if (a == 0x0627 and b == 0x0654) return 0x0623; if (a == 0x0648 and b == 0x0654) return 0x0624; if (a == 0x0627 and b == 0x0655) return 0x0625; if (a == 0x064A and b == 0x0654) return 0x0626; if (a == 0x06D5 and b == 0x0654) return 0x06C0; if (a == 0x06C1 and b == 0x0654) return 0x06C2; if (a == 0x06D2 and b == 0x0654) return 0x06D3; if (a == 0x0928 and b == 0x093C) return 0x0929; if (a == 0x0930 and b == 0x093C) return 0x0931; if (a == 0x0933 and b == 0x093C) return 0x0934; if (a == 0x0915 and b == 0x093C) return 0x0958; if (a == 0x0916 and b == 0x093C) return 0x0959; if (a == 0x0917 and b == 0x093C) return 0x095A; if (a == 0x091C and b == 0x093C) return 0x095B; if (a == 0x0921 and b == 0x093C) return 0x095C; if (a == 0x0922 and b == 0x093C) return 0x095D; if (a == 0x092B and b == 0x093C) return 0x095E; if (a == 0x092F and b == 0x093C) return 0x095F; if (a == 0x09C7 and b == 0x09BE) return 0x09CB; if (a == 0x09C7 and b == 0x09D7) return 0x09CC; if (a == 0x09A1 and b == 0x09BC) return 0x09DC; if (a == 0x09A2 and b == 0x09BC) return 0x09DD; if (a == 0x09AF and b == 0x09BC) return 0x09DF; if (a == 0x0A32 and b == 0x0A3C) return 0x0A33; if (a == 0x0A38 and b == 0x0A3C) return 0x0A36; if (a == 0x0A16 and b == 0x0A3C) return 0x0A59; if (a == 0x0A17 and b == 0x0A3C) return 0x0A5A; if (a == 0x0A1C and b == 0x0A3C) return 0x0A5B; if (a == 0x0A2B and b == 0x0A3C) return 0x0A5E; if (a == 0x0B47 and b == 0x0B56) return 0x0B48; if (a == 0x0B47 and b == 0x0B3E) return 0x0B4B; if (a == 0x0B47 and b == 0x0B57) return 0x0B4C; if (a == 0x0B21 and b == 0x0B3C) return 0x0B5C; if (a == 0x0B22 and b == 0x0B3C) return 0x0B5D; if (a == 0x0B92 and b == 0x0BD7) return 0x0B94; if (a == 0x0BC6 and b == 0x0BBE) return 0x0BCA; if (a == 0x0BC7 and b == 0x0BBE) return 0x0BCB; if (a == 0x0BC6 and b == 0x0BD7) return 0x0BCC; if (a == 0x0C46 and b == 0x0C56) 
return 0x0C48; if (a == 0x0CBF and b == 0x0CD5) return 0x0CC0; if (a == 0x0CC6 and b == 0x0CD5) return 0x0CC7; if (a == 0x0CC6 and b == 0x0CD6) return 0x0CC8; if (a == 0x0CC6 and b == 0x0CC2) return 0x0CCA; if (a == 0x0CCA and b == 0x0CD5) return 0x0CCB; if (a == 0x0D46 and b == 0x0D3E) return 0x0D4A; if (a == 0x0D47 and b == 0x0D3E) return 0x0D4B; if (a == 0x0D46 and b == 0x0D57) return 0x0D4C; if (a == 0x0DD9 and b == 0x0DCA) return 0x0DDA; if (a == 0x0DD9 and b == 0x0DCF) return 0x0DDC; if (a == 0x0DDC and b == 0x0DCA) return 0x0DDD; if (a == 0x0DD9 and b == 0x0DDF) return 0x0DDE; if (a == 0x0F42 and b == 0x0FB7) return 0x0F43; if (a == 0x0F4C and b == 0x0FB7) return 0x0F4D; if (a == 0x0F51 and b == 0x0FB7) return 0x0F52; if (a == 0x0F56 and b == 0x0FB7) return 0x0F57; if (a == 0x0F5B and b == 0x0FB7) return 0x0F5C; if (a == 0x0F40 and b == 0x0FB5) return 0x0F69; if (a == 0x0F71 and b == 0x0F72) return 0x0F73; if (a == 0x0F71 and b == 0x0F74) return 0x0F75; if (a == 0x0FB2 and b == 0x0F80) return 0x0F76; if (a == 0x0FB3 and b == 0x0F80) return 0x0F78; if (a == 0x0F71 and b == 0x0F80) return 0x0F81; if (a == 0x0F92 and b == 0x0FB7) return 0x0F93; if (a == 0x0F9C and b == 0x0FB7) return 0x0F9D; if (a == 0x0FA1 and b == 0x0FB7) return 0x0FA2; if (a == 0x0FA6 and b == 0x0FB7) return 0x0FA7; if (a == 0x0FAB and b == 0x0FB7) return 0x0FAC; if (a == 0x0F90 and b == 0x0FB5) return 0x0FB9; if (a == 0x1025 and b == 0x102E) return 0x1026; if (a == 0x1B05 and b == 0x1B35) return 0x1B06; if (a == 0x1B07 and b == 0x1B35) return 0x1B08; if (a == 0x1B09 and b == 0x1B35) return 0x1B0A; if (a == 0x1B0B and b == 0x1B35) return 0x1B0C; if (a == 0x1B0D and b == 0x1B35) return 0x1B0E; if (a == 0x1B11 and b == 0x1B35) return 0x1B12; if (a == 0x1B3A and b == 0x1B35) return 0x1B3B; if (a == 0x1B3C and b == 0x1B35) return 0x1B3D; if (a == 0x1B3E and b == 0x1B35) return 0x1B40; if (a == 0x1B3F and b == 0x1B35) return 0x1B41; if (a == 0x1B42 and b == 0x1B35) return 0x1B43; if (a == 0x0041 and b == 0x0325) return 0x1E00; if (a == 0x0061 and b == 0x0325) return 0x1E01; if (a == 0x0042 and b == 0x0307) return 0x1E02; if (a == 0x0062 and b == 0x0307) return 0x1E03; if (a == 0x0042 and b == 0x0323) return 0x1E04; if (a == 0x0062 and b == 0x0323) return 0x1E05; if (a == 0x0042 and b == 0x0331) return 0x1E06; if (a == 0x0062 and b == 0x0331) return 0x1E07; if (a == 0x00C7 and b == 0x0301) return 0x1E08; if (a == 0x00E7 and b == 0x0301) return 0x1E09; if (a == 0x0044 and b == 0x0307) return 0x1E0A; if (a == 0x0064 and b == 0x0307) return 0x1E0B; if (a == 0x0044 and b == 0x0323) return 0x1E0C; if (a == 0x0064 and b == 0x0323) return 0x1E0D; if (a == 0x0044 and b == 0x0331) return 0x1E0E; if (a == 0x0064 and b == 0x0331) return 0x1E0F; if (a == 0x0044 and b == 0x0327) return 0x1E10; if (a == 0x0064 and b == 0x0327) return 0x1E11; if (a == 0x0044 and b == 0x032D) return 0x1E12; if (a == 0x0064 and b == 0x032D) return 0x1E13; if (a == 0x0112 and b == 0x0300) return 0x1E14; if (a == 0x0113 and b == 0x0300) return 0x1E15; if (a == 0x0112 and b == 0x0301) return 0x1E16; if (a == 0x0113 and b == 0x0301) return 0x1E17; if (a == 0x0045 and b == 0x032D) return 0x1E18; if (a == 0x0065 and b == 0x032D) return 0x1E19; if (a == 0x0045 and b == 0x0330) return 0x1E1A; if (a == 0x0065 and b == 0x0330) return 0x1E1B; if (a == 0x0228 and b == 0x0306) return 0x1E1C; if (a == 0x0229 and b == 0x0306) return 0x1E1D; if (a == 0x0046 and b == 0x0307) return 0x1E1E; if (a == 0x0066 and b == 0x0307) return 0x1E1F; if (a == 0x0047 and b == 0x0304) 
return 0x1E20; if (a == 0x0067 and b == 0x0304) return 0x1E21; if (a == 0x0048 and b == 0x0307) return 0x1E22; if (a == 0x0068 and b == 0x0307) return 0x1E23; if (a == 0x0048 and b == 0x0323) return 0x1E24; if (a == 0x0068 and b == 0x0323) return 0x1E25; if (a == 0x0048 and b == 0x0308) return 0x1E26; if (a == 0x0068 and b == 0x0308) return 0x1E27; if (a == 0x0048 and b == 0x0327) return 0x1E28; if (a == 0x0068 and b == 0x0327) return 0x1E29; if (a == 0x0048 and b == 0x032E) return 0x1E2A; if (a == 0x0068 and b == 0x032E) return 0x1E2B; if (a == 0x0049 and b == 0x0330) return 0x1E2C; if (a == 0x0069 and b == 0x0330) return 0x1E2D; if (a == 0x00CF and b == 0x0301) return 0x1E2E; if (a == 0x00EF and b == 0x0301) return 0x1E2F; if (a == 0x004B and b == 0x0301) return 0x1E30; if (a == 0x006B and b == 0x0301) return 0x1E31; if (a == 0x004B and b == 0x0323) return 0x1E32; if (a == 0x006B and b == 0x0323) return 0x1E33; if (a == 0x004B and b == 0x0331) return 0x1E34; if (a == 0x006B and b == 0x0331) return 0x1E35; if (a == 0x004C and b == 0x0323) return 0x1E36; if (a == 0x006C and b == 0x0323) return 0x1E37; if (a == 0x1E36 and b == 0x0304) return 0x1E38; if (a == 0x1E37 and b == 0x0304) return 0x1E39; if (a == 0x004C and b == 0x0331) return 0x1E3A; if (a == 0x006C and b == 0x0331) return 0x1E3B; if (a == 0x004C and b == 0x032D) return 0x1E3C; if (a == 0x006C and b == 0x032D) return 0x1E3D; if (a == 0x004D and b == 0x0301) return 0x1E3E; if (a == 0x006D and b == 0x0301) return 0x1E3F; if (a == 0x004D and b == 0x0307) return 0x1E40; if (a == 0x006D and b == 0x0307) return 0x1E41; if (a == 0x004D and b == 0x0323) return 0x1E42; if (a == 0x006D and b == 0x0323) return 0x1E43; if (a == 0x004E and b == 0x0307) return 0x1E44; if (a == 0x006E and b == 0x0307) return 0x1E45; if (a == 0x004E and b == 0x0323) return 0x1E46; if (a == 0x006E and b == 0x0323) return 0x1E47; if (a == 0x004E and b == 0x0331) return 0x1E48; if (a == 0x006E and b == 0x0331) return 0x1E49; if (a == 0x004E and b == 0x032D) return 0x1E4A; if (a == 0x006E and b == 0x032D) return 0x1E4B; if (a == 0x00D5 and b == 0x0301) return 0x1E4C; if (a == 0x00F5 and b == 0x0301) return 0x1E4D; if (a == 0x00D5 and b == 0x0308) return 0x1E4E; if (a == 0x00F5 and b == 0x0308) return 0x1E4F; if (a == 0x014C and b == 0x0300) return 0x1E50; if (a == 0x014D and b == 0x0300) return 0x1E51; if (a == 0x014C and b == 0x0301) return 0x1E52; if (a == 0x014D and b == 0x0301) return 0x1E53; if (a == 0x0050 and b == 0x0301) return 0x1E54; if (a == 0x0070 and b == 0x0301) return 0x1E55; if (a == 0x0050 and b == 0x0307) return 0x1E56; if (a == 0x0070 and b == 0x0307) return 0x1E57; if (a == 0x0052 and b == 0x0307) return 0x1E58; if (a == 0x0072 and b == 0x0307) return 0x1E59; if (a == 0x0052 and b == 0x0323) return 0x1E5A; if (a == 0x0072 and b == 0x0323) return 0x1E5B; if (a == 0x1E5A and b == 0x0304) return 0x1E5C; if (a == 0x1E5B and b == 0x0304) return 0x1E5D; if (a == 0x0052 and b == 0x0331) return 0x1E5E; if (a == 0x0072 and b == 0x0331) return 0x1E5F; if (a == 0x0053 and b == 0x0307) return 0x1E60; if (a == 0x0073 and b == 0x0307) return 0x1E61; if (a == 0x0053 and b == 0x0323) return 0x1E62; if (a == 0x0073 and b == 0x0323) return 0x1E63; if (a == 0x015A and b == 0x0307) return 0x1E64; if (a == 0x015B and b == 0x0307) return 0x1E65; if (a == 0x0160 and b == 0x0307) return 0x1E66; if (a == 0x0161 and b == 0x0307) return 0x1E67; if (a == 0x1E62 and b == 0x0307) return 0x1E68; if (a == 0x1E63 and b == 0x0307) return 0x1E69; if (a == 0x0054 and b == 0x0307) 
return 0x1E6A; if (a == 0x0074 and b == 0x0307) return 0x1E6B; if (a == 0x0054 and b == 0x0323) return 0x1E6C; if (a == 0x0074 and b == 0x0323) return 0x1E6D; if (a == 0x0054 and b == 0x0331) return 0x1E6E; if (a == 0x0074 and b == 0x0331) return 0x1E6F; if (a == 0x0054 and b == 0x032D) return 0x1E70; if (a == 0x0074 and b == 0x032D) return 0x1E71; if (a == 0x0055 and b == 0x0324) return 0x1E72; if (a == 0x0075 and b == 0x0324) return 0x1E73; if (a == 0x0055 and b == 0x0330) return 0x1E74; if (a == 0x0075 and b == 0x0330) return 0x1E75; if (a == 0x0055 and b == 0x032D) return 0x1E76; if (a == 0x0075 and b == 0x032D) return 0x1E77; if (a == 0x0168 and b == 0x0301) return 0x1E78; if (a == 0x0169 and b == 0x0301) return 0x1E79; if (a == 0x016A and b == 0x0308) return 0x1E7A; if (a == 0x016B and b == 0x0308) return 0x1E7B; if (a == 0x0056 and b == 0x0303) return 0x1E7C; if (a == 0x0076 and b == 0x0303) return 0x1E7D; if (a == 0x0056 and b == 0x0323) return 0x1E7E; if (a == 0x0076 and b == 0x0323) return 0x1E7F; if (a == 0x0057 and b == 0x0300) return 0x1E80; if (a == 0x0077 and b == 0x0300) return 0x1E81; if (a == 0x0057 and b == 0x0301) return 0x1E82; if (a == 0x0077 and b == 0x0301) return 0x1E83; if (a == 0x0057 and b == 0x0308) return 0x1E84; if (a == 0x0077 and b == 0x0308) return 0x1E85; if (a == 0x0057 and b == 0x0307) return 0x1E86; if (a == 0x0077 and b == 0x0307) return 0x1E87; if (a == 0x0057 and b == 0x0323) return 0x1E88; if (a == 0x0077 and b == 0x0323) return 0x1E89; if (a == 0x0058 and b == 0x0307) return 0x1E8A; if (a == 0x0078 and b == 0x0307) return 0x1E8B; if (a == 0x0058 and b == 0x0308) return 0x1E8C; if (a == 0x0078 and b == 0x0308) return 0x1E8D; if (a == 0x0059 and b == 0x0307) return 0x1E8E; if (a == 0x0079 and b == 0x0307) return 0x1E8F; if (a == 0x005A and b == 0x0302) return 0x1E90; if (a == 0x007A and b == 0x0302) return 0x1E91; if (a == 0x005A and b == 0x0323) return 0x1E92; if (a == 0x007A and b == 0x0323) return 0x1E93; if (a == 0x005A and b == 0x0331) return 0x1E94; if (a == 0x007A and b == 0x0331) return 0x1E95; if (a == 0x0068 and b == 0x0331) return 0x1E96; if (a == 0x0074 and b == 0x0308) return 0x1E97; if (a == 0x0077 and b == 0x030A) return 0x1E98; if (a == 0x0079 and b == 0x030A) return 0x1E99; if (a == 0x017F and b == 0x0307) return 0x1E9B; if (a == 0x0041 and b == 0x0323) return 0x1EA0; if (a == 0x0061 and b == 0x0323) return 0x1EA1; if (a == 0x0041 and b == 0x0309) return 0x1EA2; if (a == 0x0061 and b == 0x0309) return 0x1EA3; if (a == 0x00C2 and b == 0x0301) return 0x1EA4; if (a == 0x00E2 and b == 0x0301) return 0x1EA5; if (a == 0x00C2 and b == 0x0300) return 0x1EA6; if (a == 0x00E2 and b == 0x0300) return 0x1EA7; if (a == 0x00C2 and b == 0x0309) return 0x1EA8; if (a == 0x00E2 and b == 0x0309) return 0x1EA9; if (a == 0x00C2 and b == 0x0303) return 0x1EAA; if (a == 0x00E2 and b == 0x0303) return 0x1EAB; if (a == 0x1EA0 and b == 0x0302) return 0x1EAC; if (a == 0x1EA1 and b == 0x0302) return 0x1EAD; if (a == 0x0102 and b == 0x0301) return 0x1EAE; if (a == 0x0103 and b == 0x0301) return 0x1EAF; if (a == 0x0102 and b == 0x0300) return 0x1EB0; if (a == 0x0103 and b == 0x0300) return 0x1EB1; if (a == 0x0102 and b == 0x0309) return 0x1EB2; if (a == 0x0103 and b == 0x0309) return 0x1EB3; if (a == 0x0102 and b == 0x0303) return 0x1EB4; if (a == 0x0103 and b == 0x0303) return 0x1EB5; if (a == 0x1EA0 and b == 0x0306) return 0x1EB6; if (a == 0x1EA1 and b == 0x0306) return 0x1EB7; if (a == 0x0045 and b == 0x0323) return 0x1EB8; if (a == 0x0065 and b == 0x0323) 
return 0x1EB9; if (a == 0x0045 and b == 0x0309) return 0x1EBA; if (a == 0x0065 and b == 0x0309) return 0x1EBB; if (a == 0x0045 and b == 0x0303) return 0x1EBC; if (a == 0x0065 and b == 0x0303) return 0x1EBD; if (a == 0x00CA and b == 0x0301) return 0x1EBE; if (a == 0x00EA and b == 0x0301) return 0x1EBF; if (a == 0x00CA and b == 0x0300) return 0x1EC0; if (a == 0x00EA and b == 0x0300) return 0x1EC1; if (a == 0x00CA and b == 0x0309) return 0x1EC2; if (a == 0x00EA and b == 0x0309) return 0x1EC3; if (a == 0x00CA and b == 0x0303) return 0x1EC4; if (a == 0x00EA and b == 0x0303) return 0x1EC5; if (a == 0x1EB8 and b == 0x0302) return 0x1EC6; if (a == 0x1EB9 and b == 0x0302) return 0x1EC7; if (a == 0x0049 and b == 0x0309) return 0x1EC8; if (a == 0x0069 and b == 0x0309) return 0x1EC9; if (a == 0x0049 and b == 0x0323) return 0x1ECA; if (a == 0x0069 and b == 0x0323) return 0x1ECB; if (a == 0x004F and b == 0x0323) return 0x1ECC; if (a == 0x006F and b == 0x0323) return 0x1ECD; if (a == 0x004F and b == 0x0309) return 0x1ECE; if (a == 0x006F and b == 0x0309) return 0x1ECF; if (a == 0x00D4 and b == 0x0301) return 0x1ED0; if (a == 0x00F4 and b == 0x0301) return 0x1ED1; if (a == 0x00D4 and b == 0x0300) return 0x1ED2; if (a == 0x00F4 and b == 0x0300) return 0x1ED3; if (a == 0x00D4 and b == 0x0309) return 0x1ED4; if (a == 0x00F4 and b == 0x0309) return 0x1ED5; if (a == 0x00D4 and b == 0x0303) return 0x1ED6; if (a == 0x00F4 and b == 0x0303) return 0x1ED7; if (a == 0x1ECC and b == 0x0302) return 0x1ED8; if (a == 0x1ECD and b == 0x0302) return 0x1ED9; if (a == 0x01A0 and b == 0x0301) return 0x1EDA; if (a == 0x01A1 and b == 0x0301) return 0x1EDB; if (a == 0x01A0 and b == 0x0300) return 0x1EDC; if (a == 0x01A1 and b == 0x0300) return 0x1EDD; if (a == 0x01A0 and b == 0x0309) return 0x1EDE; if (a == 0x01A1 and b == 0x0309) return 0x1EDF; if (a == 0x01A0 and b == 0x0303) return 0x1EE0; if (a == 0x01A1 and b == 0x0303) return 0x1EE1; if (a == 0x01A0 and b == 0x0323) return 0x1EE2; if (a == 0x01A1 and b == 0x0323) return 0x1EE3; if (a == 0x0055 and b == 0x0323) return 0x1EE4; if (a == 0x0075 and b == 0x0323) return 0x1EE5; if (a == 0x0055 and b == 0x0309) return 0x1EE6; if (a == 0x0075 and b == 0x0309) return 0x1EE7; if (a == 0x01AF and b == 0x0301) return 0x1EE8; if (a == 0x01B0 and b == 0x0301) return 0x1EE9; if (a == 0x01AF and b == 0x0300) return 0x1EEA; if (a == 0x01B0 and b == 0x0300) return 0x1EEB; if (a == 0x01AF and b == 0x0309) return 0x1EEC; if (a == 0x01B0 and b == 0x0309) return 0x1EED; if (a == 0x01AF and b == 0x0303) return 0x1EEE; if (a == 0x01B0 and b == 0x0303) return 0x1EEF; if (a == 0x01AF and b == 0x0323) return 0x1EF0; if (a == 0x01B0 and b == 0x0323) return 0x1EF1; if (a == 0x0059 and b == 0x0300) return 0x1EF2; if (a == 0x0079 and b == 0x0300) return 0x1EF3; if (a == 0x0059 and b == 0x0323) return 0x1EF4; if (a == 0x0079 and b == 0x0323) return 0x1EF5; if (a == 0x0059 and b == 0x0309) return 0x1EF6; if (a == 0x0079 and b == 0x0309) return 0x1EF7; if (a == 0x0059 and b == 0x0303) return 0x1EF8; if (a == 0x0079 and b == 0x0303) return 0x1EF9; if (a == 0x03B1 and b == 0x0313) return 0x1F00; if (a == 0x03B1 and b == 0x0314) return 0x1F01; if (a == 0x1F00 and b == 0x0300) return 0x1F02; if (a == 0x1F01 and b == 0x0300) return 0x1F03; if (a == 0x1F00 and b == 0x0301) return 0x1F04; if (a == 0x1F01 and b == 0x0301) return 0x1F05; if (a == 0x1F00 and b == 0x0342) return 0x1F06; if (a == 0x1F01 and b == 0x0342) return 0x1F07; if (a == 0x0391 and b == 0x0313) return 0x1F08; if (a == 0x0391 and b == 0x0314) 
return 0x1F09; if (a == 0x1F08 and b == 0x0300) return 0x1F0A; if (a == 0x1F09 and b == 0x0300) return 0x1F0B; if (a == 0x1F08 and b == 0x0301) return 0x1F0C; if (a == 0x1F09 and b == 0x0301) return 0x1F0D; if (a == 0x1F08 and b == 0x0342) return 0x1F0E; if (a == 0x1F09 and b == 0x0342) return 0x1F0F; if (a == 0x03B5 and b == 0x0313) return 0x1F10; if (a == 0x03B5 and b == 0x0314) return 0x1F11; if (a == 0x1F10 and b == 0x0300) return 0x1F12; if (a == 0x1F11 and b == 0x0300) return 0x1F13; if (a == 0x1F10 and b == 0x0301) return 0x1F14; if (a == 0x1F11 and b == 0x0301) return 0x1F15; if (a == 0x0395 and b == 0x0313) return 0x1F18; if (a == 0x0395 and b == 0x0314) return 0x1F19; if (a == 0x1F18 and b == 0x0300) return 0x1F1A; if (a == 0x1F19 and b == 0x0300) return 0x1F1B; if (a == 0x1F18 and b == 0x0301) return 0x1F1C; if (a == 0x1F19 and b == 0x0301) return 0x1F1D; if (a == 0x03B7 and b == 0x0313) return 0x1F20; if (a == 0x03B7 and b == 0x0314) return 0x1F21; if (a == 0x1F20 and b == 0x0300) return 0x1F22; if (a == 0x1F21 and b == 0x0300) return 0x1F23; if (a == 0x1F20 and b == 0x0301) return 0x1F24; if (a == 0x1F21 and b == 0x0301) return 0x1F25; if (a == 0x1F20 and b == 0x0342) return 0x1F26; if (a == 0x1F21 and b == 0x0342) return 0x1F27; if (a == 0x0397 and b == 0x0313) return 0x1F28; if (a == 0x0397 and b == 0x0314) return 0x1F29; if (a == 0x1F28 and b == 0x0300) return 0x1F2A; if (a == 0x1F29 and b == 0x0300) return 0x1F2B; if (a == 0x1F28 and b == 0x0301) return 0x1F2C; if (a == 0x1F29 and b == 0x0301) return 0x1F2D; if (a == 0x1F28 and b == 0x0342) return 0x1F2E; if (a == 0x1F29 and b == 0x0342) return 0x1F2F; if (a == 0x03B9 and b == 0x0313) return 0x1F30; if (a == 0x03B9 and b == 0x0314) return 0x1F31; if (a == 0x1F30 and b == 0x0300) return 0x1F32; if (a == 0x1F31 and b == 0x0300) return 0x1F33; if (a == 0x1F30 and b == 0x0301) return 0x1F34; if (a == 0x1F31 and b == 0x0301) return 0x1F35; if (a == 0x1F30 and b == 0x0342) return 0x1F36; if (a == 0x1F31 and b == 0x0342) return 0x1F37; if (a == 0x0399 and b == 0x0313) return 0x1F38; if (a == 0x0399 and b == 0x0314) return 0x1F39; if (a == 0x1F38 and b == 0x0300) return 0x1F3A; if (a == 0x1F39 and b == 0x0300) return 0x1F3B; if (a == 0x1F38 and b == 0x0301) return 0x1F3C; if (a == 0x1F39 and b == 0x0301) return 0x1F3D; if (a == 0x1F38 and b == 0x0342) return 0x1F3E; if (a == 0x1F39 and b == 0x0342) return 0x1F3F; if (a == 0x03BF and b == 0x0313) return 0x1F40; if (a == 0x03BF and b == 0x0314) return 0x1F41; if (a == 0x1F40 and b == 0x0300) return 0x1F42; if (a == 0x1F41 and b == 0x0300) return 0x1F43; if (a == 0x1F40 and b == 0x0301) return 0x1F44; if (a == 0x1F41 and b == 0x0301) return 0x1F45; if (a == 0x039F and b == 0x0313) return 0x1F48; if (a == 0x039F and b == 0x0314) return 0x1F49; if (a == 0x1F48 and b == 0x0300) return 0x1F4A; if (a == 0x1F49 and b == 0x0300) return 0x1F4B; if (a == 0x1F48 and b == 0x0301) return 0x1F4C; if (a == 0x1F49 and b == 0x0301) return 0x1F4D; if (a == 0x03C5 and b == 0x0313) return 0x1F50; if (a == 0x03C5 and b == 0x0314) return 0x1F51; if (a == 0x1F50 and b == 0x0300) return 0x1F52; if (a == 0x1F51 and b == 0x0300) return 0x1F53; if (a == 0x1F50 and b == 0x0301) return 0x1F54; if (a == 0x1F51 and b == 0x0301) return 0x1F55; if (a == 0x1F50 and b == 0x0342) return 0x1F56; if (a == 0x1F51 and b == 0x0342) return 0x1F57; if (a == 0x03A5 and b == 0x0314) return 0x1F59; if (a == 0x1F59 and b == 0x0300) return 0x1F5B; if (a == 0x1F59 and b == 0x0301) return 0x1F5D; if (a == 0x1F59 and b == 0x0342) 
return 0x1F5F; if (a == 0x03C9 and b == 0x0313) return 0x1F60; if (a == 0x03C9 and b == 0x0314) return 0x1F61; if (a == 0x1F60 and b == 0x0300) return 0x1F62; if (a == 0x1F61 and b == 0x0300) return 0x1F63; if (a == 0x1F60 and b == 0x0301) return 0x1F64; if (a == 0x1F61 and b == 0x0301) return 0x1F65; if (a == 0x1F60 and b == 0x0342) return 0x1F66; if (a == 0x1F61 and b == 0x0342) return 0x1F67; if (a == 0x03A9 and b == 0x0313) return 0x1F68; if (a == 0x03A9 and b == 0x0314) return 0x1F69; if (a == 0x1F68 and b == 0x0300) return 0x1F6A; if (a == 0x1F69 and b == 0x0300) return 0x1F6B; if (a == 0x1F68 and b == 0x0301) return 0x1F6C; if (a == 0x1F69 and b == 0x0301) return 0x1F6D; if (a == 0x1F68 and b == 0x0342) return 0x1F6E; if (a == 0x1F69 and b == 0x0342) return 0x1F6F; if (a == 0x03B1 and b == 0x0300) return 0x1F70; if (a == 0x03B5 and b == 0x0300) return 0x1F72; if (a == 0x03B7 and b == 0x0300) return 0x1F74; if (a == 0x03B9 and b == 0x0300) return 0x1F76; if (a == 0x03BF and b == 0x0300) return 0x1F78; if (a == 0x03C5 and b == 0x0300) return 0x1F7A; if (a == 0x03C9 and b == 0x0300) return 0x1F7C; if (a == 0x1F00 and b == 0x0345) return 0x1F80; if (a == 0x1F01 and b == 0x0345) return 0x1F81; if (a == 0x1F02 and b == 0x0345) return 0x1F82; if (a == 0x1F03 and b == 0x0345) return 0x1F83; if (a == 0x1F04 and b == 0x0345) return 0x1F84; if (a == 0x1F05 and b == 0x0345) return 0x1F85; if (a == 0x1F06 and b == 0x0345) return 0x1F86; if (a == 0x1F07 and b == 0x0345) return 0x1F87; if (a == 0x1F08 and b == 0x0345) return 0x1F88; if (a == 0x1F09 and b == 0x0345) return 0x1F89; if (a == 0x1F0A and b == 0x0345) return 0x1F8A; if (a == 0x1F0B and b == 0x0345) return 0x1F8B; if (a == 0x1F0C and b == 0x0345) return 0x1F8C; if (a == 0x1F0D and b == 0x0345) return 0x1F8D; if (a == 0x1F0E and b == 0x0345) return 0x1F8E; if (a == 0x1F0F and b == 0x0345) return 0x1F8F; if (a == 0x1F20 and b == 0x0345) return 0x1F90; if (a == 0x1F21 and b == 0x0345) return 0x1F91; if (a == 0x1F22 and b == 0x0345) return 0x1F92; if (a == 0x1F23 and b == 0x0345) return 0x1F93; if (a == 0x1F24 and b == 0x0345) return 0x1F94; if (a == 0x1F25 and b == 0x0345) return 0x1F95; if (a == 0x1F26 and b == 0x0345) return 0x1F96; if (a == 0x1F27 and b == 0x0345) return 0x1F97; if (a == 0x1F28 and b == 0x0345) return 0x1F98; if (a == 0x1F29 and b == 0x0345) return 0x1F99; if (a == 0x1F2A and b == 0x0345) return 0x1F9A; if (a == 0x1F2B and b == 0x0345) return 0x1F9B; if (a == 0x1F2C and b == 0x0345) return 0x1F9C; if (a == 0x1F2D and b == 0x0345) return 0x1F9D; if (a == 0x1F2E and b == 0x0345) return 0x1F9E; if (a == 0x1F2F and b == 0x0345) return 0x1F9F; if (a == 0x1F60 and b == 0x0345) return 0x1FA0; if (a == 0x1F61 and b == 0x0345) return 0x1FA1; if (a == 0x1F62 and b == 0x0345) return 0x1FA2; if (a == 0x1F63 and b == 0x0345) return 0x1FA3; if (a == 0x1F64 and b == 0x0345) return 0x1FA4; if (a == 0x1F65 and b == 0x0345) return 0x1FA5; if (a == 0x1F66 and b == 0x0345) return 0x1FA6; if (a == 0x1F67 and b == 0x0345) return 0x1FA7; if (a == 0x1F68 and b == 0x0345) return 0x1FA8; if (a == 0x1F69 and b == 0x0345) return 0x1FA9; if (a == 0x1F6A and b == 0x0345) return 0x1FAA; if (a == 0x1F6B and b == 0x0345) return 0x1FAB; if (a == 0x1F6C and b == 0x0345) return 0x1FAC; if (a == 0x1F6D and b == 0x0345) return 0x1FAD; if (a == 0x1F6E and b == 0x0345) return 0x1FAE; if (a == 0x1F6F and b == 0x0345) return 0x1FAF; if (a == 0x03B1 and b == 0x0306) return 0x1FB0; if (a == 0x03B1 and b == 0x0304) return 0x1FB1; if (a == 0x1F70 and b == 0x0345) 
return 0x1FB2; if (a == 0x03B1 and b == 0x0345) return 0x1FB3; if (a == 0x03AC and b == 0x0345) return 0x1FB4; if (a == 0x03B1 and b == 0x0342) return 0x1FB6; if (a == 0x1FB6 and b == 0x0345) return 0x1FB7; if (a == 0x0391 and b == 0x0306) return 0x1FB8; if (a == 0x0391 and b == 0x0304) return 0x1FB9; if (a == 0x0391 and b == 0x0300) return 0x1FBA; if (a == 0x0391 and b == 0x0345) return 0x1FBC; if (a == 0x00A8 and b == 0x0342) return 0x1FC1; if (a == 0x1F74 and b == 0x0345) return 0x1FC2; if (a == 0x03B7 and b == 0x0345) return 0x1FC3; if (a == 0x03AE and b == 0x0345) return 0x1FC4; if (a == 0x03B7 and b == 0x0342) return 0x1FC6; if (a == 0x1FC6 and b == 0x0345) return 0x1FC7; if (a == 0x0395 and b == 0x0300) return 0x1FC8; if (a == 0x0397 and b == 0x0300) return 0x1FCA; if (a == 0x0397 and b == 0x0345) return 0x1FCC; if (a == 0x1FBF and b == 0x0300) return 0x1FCD; if (a == 0x1FBF and b == 0x0301) return 0x1FCE; if (a == 0x1FBF and b == 0x0342) return 0x1FCF; if (a == 0x03B9 and b == 0x0306) return 0x1FD0; if (a == 0x03B9 and b == 0x0304) return 0x1FD1; if (a == 0x03CA and b == 0x0300) return 0x1FD2; if (a == 0x03B9 and b == 0x0342) return 0x1FD6; if (a == 0x03CA and b == 0x0342) return 0x1FD7; if (a == 0x0399 and b == 0x0306) return 0x1FD8; if (a == 0x0399 and b == 0x0304) return 0x1FD9; if (a == 0x0399 and b == 0x0300) return 0x1FDA; if (a == 0x1FFE and b == 0x0300) return 0x1FDD; if (a == 0x1FFE and b == 0x0301) return 0x1FDE; if (a == 0x1FFE and b == 0x0342) return 0x1FDF; if (a == 0x03C5 and b == 0x0306) return 0x1FE0; if (a == 0x03C5 and b == 0x0304) return 0x1FE1; if (a == 0x03CB and b == 0x0300) return 0x1FE2; if (a == 0x03C1 and b == 0x0313) return 0x1FE4; if (a == 0x03C1 and b == 0x0314) return 0x1FE5; if (a == 0x03C5 and b == 0x0342) return 0x1FE6; if (a == 0x03CB and b == 0x0342) return 0x1FE7; if (a == 0x03A5 and b == 0x0306) return 0x1FE8; if (a == 0x03A5 and b == 0x0304) return 0x1FE9; if (a == 0x03A5 and b == 0x0300) return 0x1FEA; if (a == 0x03A1 and b == 0x0314) return 0x1FEC; if (a == 0x00A8 and b == 0x0300) return 0x1FED; if (a == 0x1F7C and b == 0x0345) return 0x1FF2; if (a == 0x03C9 and b == 0x0345) return 0x1FF3; if (a == 0x03CE and b == 0x0345) return 0x1FF4; if (a == 0x03C9 and b == 0x0342) return 0x1FF6; if (a == 0x1FF6 and b == 0x0345) return 0x1FF7; if (a == 0x039F and b == 0x0300) return 0x1FF8; if (a == 0x03A9 and b == 0x0300) return 0x1FFA; if (a == 0x03A9 and b == 0x0345) return 0x1FFC; if (a == 0x2190 and b == 0x0338) return 0x219A; if (a == 0x2192 and b == 0x0338) return 0x219B; if (a == 0x2194 and b == 0x0338) return 0x21AE; if (a == 0x21D0 and b == 0x0338) return 0x21CD; if (a == 0x21D4 and b == 0x0338) return 0x21CE; if (a == 0x21D2 and b == 0x0338) return 0x21CF; if (a == 0x2203 and b == 0x0338) return 0x2204; if (a == 0x2208 and b == 0x0338) return 0x2209; if (a == 0x220B and b == 0x0338) return 0x220C; if (a == 0x2223 and b == 0x0338) return 0x2224; if (a == 0x2225 and b == 0x0338) return 0x2226; if (a == 0x223C and b == 0x0338) return 0x2241; if (a == 0x2243 and b == 0x0338) return 0x2244; if (a == 0x2245 and b == 0x0338) return 0x2247; if (a == 0x2248 and b == 0x0338) return 0x2249; if (a == 0x003D and b == 0x0338) return 0x2260; if (a == 0x2261 and b == 0x0338) return 0x2262; if (a == 0x224D and b == 0x0338) return 0x226D; if (a == 0x003C and b == 0x0338) return 0x226E; if (a == 0x003E and b == 0x0338) return 0x226F; if (a == 0x2264 and b == 0x0338) return 0x2270; if (a == 0x2265 and b == 0x0338) return 0x2271; if (a == 0x2272 and b == 0x0338) 
return 0x2274; if (a == 0x2273 and b == 0x0338) return 0x2275; if (a == 0x2276 and b == 0x0338) return 0x2278; if (a == 0x2277 and b == 0x0338) return 0x2279; if (a == 0x227A and b == 0x0338) return 0x2280; if (a == 0x227B and b == 0x0338) return 0x2281; if (a == 0x2282 and b == 0x0338) return 0x2284; if (a == 0x2283 and b == 0x0338) return 0x2285; if (a == 0x2286 and b == 0x0338) return 0x2288; if (a == 0x2287 and b == 0x0338) return 0x2289; if (a == 0x22A2 and b == 0x0338) return 0x22AC; if (a == 0x22A8 and b == 0x0338) return 0x22AD; if (a == 0x22A9 and b == 0x0338) return 0x22AE; if (a == 0x22AB and b == 0x0338) return 0x22AF; if (a == 0x227C and b == 0x0338) return 0x22E0; if (a == 0x227D and b == 0x0338) return 0x22E1; if (a == 0x2291 and b == 0x0338) return 0x22E2; if (a == 0x2292 and b == 0x0338) return 0x22E3; if (a == 0x22B2 and b == 0x0338) return 0x22EA; if (a == 0x22B3 and b == 0x0338) return 0x22EB; if (a == 0x22B4 and b == 0x0338) return 0x22EC; if (a == 0x22B5 and b == 0x0338) return 0x22ED; if (a == 0x2ADD and b == 0x0338) return 0x2ADC; if (a == 0x304B and b == 0x3099) return 0x304C; if (a == 0x304D and b == 0x3099) return 0x304E; if (a == 0x304F and b == 0x3099) return 0x3050; if (a == 0x3051 and b == 0x3099) return 0x3052; if (a == 0x3053 and b == 0x3099) return 0x3054; if (a == 0x3055 and b == 0x3099) return 0x3056; if (a == 0x3057 and b == 0x3099) return 0x3058; if (a == 0x3059 and b == 0x3099) return 0x305A; if (a == 0x305B and b == 0x3099) return 0x305C; if (a == 0x305D and b == 0x3099) return 0x305E; if (a == 0x305F and b == 0x3099) return 0x3060; if (a == 0x3061 and b == 0x3099) return 0x3062; if (a == 0x3064 and b == 0x3099) return 0x3065; if (a == 0x3066 and b == 0x3099) return 0x3067; if (a == 0x3068 and b == 0x3099) return 0x3069; if (a == 0x306F and b == 0x3099) return 0x3070; if (a == 0x306F and b == 0x309A) return 0x3071; if (a == 0x3072 and b == 0x3099) return 0x3073; if (a == 0x3072 and b == 0x309A) return 0x3074; if (a == 0x3075 and b == 0x3099) return 0x3076; if (a == 0x3075 and b == 0x309A) return 0x3077; if (a == 0x3078 and b == 0x3099) return 0x3079; if (a == 0x3078 and b == 0x309A) return 0x307A; if (a == 0x307B and b == 0x3099) return 0x307C; if (a == 0x307B and b == 0x309A) return 0x307D; if (a == 0x3046 and b == 0x3099) return 0x3094; if (a == 0x309D and b == 0x3099) return 0x309E; if (a == 0x30AB and b == 0x3099) return 0x30AC; if (a == 0x30AD and b == 0x3099) return 0x30AE; if (a == 0x30AF and b == 0x3099) return 0x30B0; if (a == 0x30B1 and b == 0x3099) return 0x30B2; if (a == 0x30B3 and b == 0x3099) return 0x30B4; if (a == 0x30B5 and b == 0x3099) return 0x30B6; if (a == 0x30B7 and b == 0x3099) return 0x30B8; if (a == 0x30B9 and b == 0x3099) return 0x30BA; if (a == 0x30BB and b == 0x3099) return 0x30BC; if (a == 0x30BD and b == 0x3099) return 0x30BE; if (a == 0x30BF and b == 0x3099) return 0x30C0; if (a == 0x30C1 and b == 0x3099) return 0x30C2; if (a == 0x30C4 and b == 0x3099) return 0x30C5; if (a == 0x30C6 and b == 0x3099) return 0x30C7; if (a == 0x30C8 and b == 0x3099) return 0x30C9; if (a == 0x30CF and b == 0x3099) return 0x30D0; if (a == 0x30CF and b == 0x309A) return 0x30D1; if (a == 0x30D2 and b == 0x3099) return 0x30D3; if (a == 0x30D2 and b == 0x309A) return 0x30D4; if (a == 0x30D5 and b == 0x3099) return 0x30D6; if (a == 0x30D5 and b == 0x309A) return 0x30D7; if (a == 0x30D8 and b == 0x3099) return 0x30D9; if (a == 0x30D8 and b == 0x309A) return 0x30DA; if (a == 0x30DB and b == 0x3099) return 0x30DC; if (a == 0x30DB and b == 0x309A) 
return 0x30DD; if (a == 0x30A6 and b == 0x3099) return 0x30F4; if (a == 0x30EF and b == 0x3099) return 0x30F7; if (a == 0x30F0 and b == 0x3099) return 0x30F8; if (a == 0x30F1 and b == 0x3099) return 0x30F9; if (a == 0x30F2 and b == 0x3099) return 0x30FA; if (a == 0x30FD and b == 0x3099) return 0x30FE; if (a == 0x05D9 and b == 0x05B4) return 0xFB1D; if (a == 0x05F2 and b == 0x05B7) return 0xFB1F; if (a == 0x05E9 and b == 0x05C1) return 0xFB2A; if (a == 0x05E9 and b == 0x05C2) return 0xFB2B; if (a == 0xFB49 and b == 0x05C1) return 0xFB2C; if (a == 0xFB49 and b == 0x05C2) return 0xFB2D; if (a == 0x05D0 and b == 0x05B7) return 0xFB2E; if (a == 0x05D0 and b == 0x05B8) return 0xFB2F; if (a == 0x05D0 and b == 0x05BC) return 0xFB30; if (a == 0x05D1 and b == 0x05BC) return 0xFB31; if (a == 0x05D2 and b == 0x05BC) return 0xFB32; if (a == 0x05D3 and b == 0x05BC) return 0xFB33; if (a == 0x05D4 and b == 0x05BC) return 0xFB34; if (a == 0x05D5 and b == 0x05BC) return 0xFB35; if (a == 0x05D6 and b == 0x05BC) return 0xFB36; if (a == 0x05D8 and b == 0x05BC) return 0xFB38; if (a == 0x05D9 and b == 0x05BC) return 0xFB39; if (a == 0x05DA and b == 0x05BC) return 0xFB3A; if (a == 0x05DB and b == 0x05BC) return 0xFB3B; if (a == 0x05DC and b == 0x05BC) return 0xFB3C; if (a == 0x05DE and b == 0x05BC) return 0xFB3E; if (a == 0x05E0 and b == 0x05BC) return 0xFB40; if (a == 0x05E1 and b == 0x05BC) return 0xFB41; if (a == 0x05E3 and b == 0x05BC) return 0xFB43; if (a == 0x05E4 and b == 0x05BC) return 0xFB44; if (a == 0x05E6 and b == 0x05BC) return 0xFB46; if (a == 0x05E7 and b == 0x05BC) return 0xFB47; if (a == 0x05E8 and b == 0x05BC) return 0xFB48; if (a == 0x05E9 and b == 0x05BC) return 0xFB49; if (a == 0x05EA and b == 0x05BC) return 0xFB4A; if (a == 0x05D5 and b == 0x05B9) return 0xFB4B; if (a == 0x05D1 and b == 0x05BF) return 0xFB4C; if (a == 0x05DB and b == 0x05BF) return 0xFB4D; if (a == 0x05E4 and b == 0x05BF) return 0xFB4E; if (a == 0x11099 and b == 0x110BA) return 0x1109A; if (a == 0x1109B and b == 0x110BA) return 0x1109C; if (a == 0x110A5 and b == 0x110BA) return 0x110AB; if (a == 0x11131 and b == 0x11127) return 0x1112E; if (a == 0x11132 and b == 0x11127) return 0x1112F; if (a == 0x11347 and b == 0x1133E) return 0x1134B; if (a == 0x11347 and b == 0x11357) return 0x1134C; if (a == 0x114B9 and b == 0x114BA) return 0x114BB; if (a == 0x114B9 and b == 0x114B0) return 0x114BC; if (a == 0x114B9 and b == 0x114BD) return 0x114BE; if (a == 0x115B8 and b == 0x115AF) return 0x115BA; if (a == 0x115B9 and b == 0x115AF) return 0x115BB; if (a == 0x11935 and b == 0x11930) return 0x11938; if (a == 0x1D157 and b == 0x1D165) return 0x1D15E; if (a == 0x1D158 and b == 0x1D165) return 0x1D15F; if (a == 0x1D15F and b == 0x1D16E) return 0x1D160; if (a == 0x1D15F and b == 0x1D16F) return 0x1D161; if (a == 0x1D15F and b == 0x1D170) return 0x1D162; if (a == 0x1D15F and b == 0x1D171) return 0x1D163; if (a == 0x1D15F and b == 0x1D172) return 0x1D164; if (a == 0x1D1B9 and b == 0x1D165) return 0x1D1BB; if (a == 0x1D1BA and b == 0x1D165) return 0x1D1BC; if (a == 0x1D1BB and b == 0x1D16E) return 0x1D1BD; if (a == 0x1D1BC and b == 0x1D16E) return 0x1D1BE; if (a == 0x1D1BB and b == 0x1D16F) return 0x1D1BF; if (a == 0x1D1BC and b == 0x1D16F) return 0x1D1C0; return null; }
src/components/autogen/Canonicals.zig
const print = @import("std").debug.print; pub fn main() void { // Here's a zero-terminated array of u32 values: var nums = [_:0]u32{ 1, 2, 3, 4, 5, 6 }; // And here's a zero-terminated many-item pointer: var ptr: [*:0]u32 = &nums; // For fun, let's replace the value at position 3 with the // sentinel value 0. This seems kind of naughty. nums[3] = 0; // So now we have a zero-terminated array and a many-item // pointer that reference the same data: a sequence of // numbers that both ends in and CONTAINS the sentinal value. // // Attempting to loop through and print both of these should // demonstrate how they are similar and different. // // (It turns out that the array prints completely, including // the sentinel 0 in the middle. The many-item pointer must // stop at the first sentinel value. The difference is simply // that arrays have a known length and many-item pointers // don't.) printSequence(nums); printSequence(ptr); print("\n", .{}); } // Here's our generic sequence printing function. It's nearly // complete, but there are a couple missing bits. Please fix // them! fn printSequence(my_seq: anytype) void { const my_type = @typeInfo(@TypeOf(my_seq)); // The TypeInfo contained in my_type is a union. We use a // switch to handle printing the Array or Pointer fields, // depending on which type of my_seq was passed in: switch (my_type) { .Array => { print("Array:", .{}); // Loop through the items in my_seq. for (my_seq) |s| { print("{}", .{s}); } }, .Pointer => { // Check this out - it's pretty cool: const my_sentinel = my_type.Pointer.sentinel; print("Many-item pointer:", .{}); // Loop through the items in my_seq until we hit the // sentinel value. var i: usize = 0; while (my_seq[i] != my_sentinel.?) { print("{}", .{my_seq[i]}); i += 1; } }, else => unreachable, } print(". ", .{}); }
exercises/076_sentinels.zig
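The exercise above leans on two properties of sentinel-terminated arrays that are easy to miss: the sentinel is stored one slot past the last counted element, and it is exactly that stored terminator that lets &nums coerce to a [*:0]u32. A minimal standalone sketch (not part of the exercise file, assuming roughly the same Zig version):

const assert = @import("std").debug.assert;

// Standalone sketch, not part of the exercise above; it only restates two facts the
// exercise relies on, and the names here are made up for illustration.
test "the sentinel lives at index len" {
    var nums = [_:0]u32{ 1, 2, 3 };
    assert(nums.len == 3); // the sentinel is not counted in len...
    assert(nums[3] == 0); // ...but it is addressable one slot past the last element
    const many: [*:0]u32 = &nums; // coercion works because the terminator really is there
    assert(many[0] == 1);
}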
const std = @import("std"); const panic = std.debug.panic; const builtin = @import("builtin"); const warn = std.debug.warn; const join = std.fs.path.join; const pi = std.math.pi; const sin = std.math.sin; const cos = std.math.cos; usingnamespace @import("c.zig"); const Camera = @import("camera.zig").Camera; const Shader = @import("shader.zig").Shader; const glm = @import("glm.zig"); const Mat4 = glm.Mat4; const Vec3 = glm.Vec3; const vec3 = glm.vec3; const translation = glm.translation; const rotation = glm.rotation; const scale = glm.scale; const perspective = glm.perspective; // settings const SCR_WIDTH: u32 = 1920; const SCR_HEIGHT: u32 = 1080; // camera var camera = Camera.default(); var lastX: f32 = 1920.0 / 2.0; var lastY: f32 = 1080.0 / 2.0; var firstMouse = true; // timing var deltaTime: f32 = 0.0; // time between current frame and last frame var lastFrame: f32 = 0.0; // lighting const lightPos = vec3(1.2, 1.0, 2.0); pub fn main() !void { const allocator = std.heap.page_allocator; const cubeVertPath = try join(allocator, &[_][]const u8{ "shaders", "2_6_multiple_lights.vert" }); const cubeFragPath = try join(allocator, &[_][]const u8{ "shaders", "2_6_multiple_lights.frag" }); const ok = glfwInit(); if (ok == 0) { panic("Failed to initialise GLFW\n", .{}); } defer glfwTerminate(); glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3); glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // glfw: initialize and configure var window = glfwCreateWindow(SCR_WIDTH, SCR_HEIGHT, "Learn OpenGL", null, null); if (window == null) { panic("Failed to create GLFW window\n", .{}); } glfwMakeContextCurrent(window); const resizeCallback = glfwSetFramebufferSizeCallback(window, framebuffer_size_callback); const posCallback = glfwSetCursorPosCallback(window, mouse_callback); const scrollCallback = glfwSetScrollCallback(window, scroll_callback); // tell GLFW to capture our mouse glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED); // glad: load all OpenGL function pointers if (gladLoadGLLoader(@ptrCast(GLADloadproc, glfwGetProcAddress)) == 0) { panic("Failed to initialise GLAD\n", .{}); } glEnable(GL_DEPTH_TEST); // build and compile our shader zprogram const cubeShader = try Shader.init(allocator, cubeVertPath, cubeFragPath); // set up vertex data (and buffer(s)) and configure vertex attributes const vertices = [_]f32{ // positions // normals // texture coords -0.5, -0.5, -0.5, 0.0, 0.0, -1.0, 0.0, 0.0, 0.5, -0.5, -0.5, 0.0, 0.0, -1.0, 1.0, 0.0, 0.5, 0.5, -0.5, 0.0, 0.0, -1.0, 1.0, 1.0, 0.5, 0.5, -0.5, 0.0, 0.0, -1.0, 1.0, 1.0, -0.5, 0.5, -0.5, 0.0, 0.0, -1.0, 0.0, 1.0, -0.5, -0.5, -0.5, 0.0, 0.0, -1.0, 0.0, 0.0, -0.5, -0.5, 0.5, 0.0, 0.0, 1.0, 0.0, 0.0, 0.5, -0.5, 0.5, 0.0, 0.0, 1.0, 1.0, 0.0, 0.5, 0.5, 0.5, 0.0, 0.0, 1.0, 1.0, 1.0, 0.5, 0.5, 0.5, 0.0, 0.0, 1.0, 1.0, 1.0, -0.5, 0.5, 0.5, 0.0, 0.0, 1.0, 0.0, 1.0, -0.5, -0.5, 0.5, 0.0, 0.0, 1.0, 0.0, 0.0, -0.5, 0.5, 0.5, -1.0, 0.0, 0.0, 1.0, 0.0, -0.5, 0.5, -0.5, -1.0, 0.0, 0.0, 1.0, 1.0, -0.5, -0.5, -0.5, -1.0, 0.0, 0.0, 0.0, 1.0, -0.5, -0.5, -0.5, -1.0, 0.0, 0.0, 0.0, 1.0, -0.5, -0.5, 0.5, -1.0, 0.0, 0.0, 0.0, 0.0, -0.5, 0.5, 0.5, -1.0, 0.0, 0.0, 1.0, 0.0, 0.5, 0.5, 0.5, 1.0, 0.0, 0.0, 1.0, 0.0, 0.5, 0.5, -0.5, 1.0, 0.0, 0.0, 1.0, 1.0, 0.5, -0.5, -0.5, 1.0, 0.0, 0.0, 0.0, 1.0, 0.5, -0.5, -0.5, 1.0, 0.0, 0.0, 0.0, 1.0, 0.5, -0.5, 0.5, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 1.0, 0.0, 0.0, 1.0, 0.0, -0.5, -0.5, -0.5, 0.0, -1.0, 0.0, 0.0, 1.0, 
0.5, -0.5, -0.5, 0.0, -1.0, 0.0, 1.0, 1.0, 0.5, -0.5, 0.5, 0.0, -1.0, 0.0, 1.0, 0.0, 0.5, -0.5, 0.5, 0.0, -1.0, 0.0, 1.0, 0.0, -0.5, -0.5, 0.5, 0.0, -1.0, 0.0, 0.0, 0.0, -0.5, -0.5, -0.5, 0.0, -1.0, 0.0, 0.0, 1.0, -0.5, 0.5, -0.5, 0.0, 1.0, 0.0, 0.0, 1.0, 0.5, 0.5, -0.5, 0.0, 1.0, 0.0, 1.0, 1.0, 0.5, 0.5, 0.5, 0.0, 1.0, 0.0, 1.0, 0.0, 0.5, 0.5, 0.5, 0.0, 1.0, 0.0, 1.0, 0.0, -0.5, 0.5, 0.5, 0.0, 1.0, 0.0, 0.0, 0.0, -0.5, 0.5, -0.5, 0.0, 1.0, 0.0, 0.0, 1.0, }; // world space positions of our cubes const cubePositions = [_]Vec3{ vec3(0.0, 0.0, 0.0), vec3(2.0, 5.0, -15.0), vec3(-1.5, -2.2, -2.5), vec3(-3.8, -2.0, -12.3), vec3(2.4, -0.4, -3.5), vec3(-1.7, 3.0, -7.5), vec3(1.3, -2.0, -2.5), vec3(1.5, 2.0, -2.5), vec3(1.5, 0.2, -1.5), vec3(-1.3, 1.0, -1.5), }; // // positions of the point lights const pointLightPositions = [_]Vec3{ vec3(0.7, 0.2, 2.0), vec3(2.3, -3.3, -4.0), vec3(-4.0, 2.0, -12.0), vec3(0.0, 0.0, -3.0), }; // first, configure the cube's VAO (and VBO) var VBO: c_uint = undefined; var cubeVAO: c_uint = undefined; glGenVertexArrays(1, &cubeVAO); glGenBuffers(1, &VBO); glBindBuffer(GL_ARRAY_BUFFER, VBO); glBufferData(GL_ARRAY_BUFFER, vertices.len * @sizeOf(f32), &vertices, GL_STATIC_DRAW); glBindVertexArray(cubeVAO); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * @sizeOf(f32), null); glEnableVertexAttribArray(0); glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * @sizeOf(f32), @intToPtr(*c_void, 3 * @sizeOf(f32))); glEnableVertexAttribArray(1); glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * @sizeOf(f32), @intToPtr(*c_void, 6 * @sizeOf(f32))); glEnableVertexAttribArray(2); // load textures (we now use a utility function to keep the code more organized) const diffuseMap = loadTexture("textures/container2.png"); const specularMap = loadTexture("textures/container2_specular.png"); // shader configuration cubeShader.use(); cubeShader.setInt("material.diffuse", 0); cubeShader.setInt("material.specular", 1); // render loop while (glfwWindowShouldClose(window) == 0) { // per-frame time logic const currentFrame = @floatCast(f32, glfwGetTime()); deltaTime = currentFrame - lastFrame; lastFrame = currentFrame; // input processInput(window); // render glClearColor(0.1, 0.1, 0.1, 0.1); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // be sure to activate shader when setting uniforms/drawing objects cubeShader.use(); cubeShader.setVec3("viewPos", camera.position); cubeShader.setFloat("material.shininess", 32.0); // Here we set all the uniforms for the 5/6 types of lights we have. We have to set them manually and index // the proper PointLight struct in the array to set each uniform variable. This can be done more code-friendly // by defining light types as classes and set their values in there, or by using a more efficient uniform approach // by using 'Uniform buffer objects', but that is something we'll discuss in the 'Advanced GLSL' tutorial. 
// directional light cubeShader.setVec3("dirLight.direction", vec3(-0.2, -1.0, -0.3)); cubeShader.setVec3("dirLight.ambient", vec3(0.05, 0.05, 0.05)); cubeShader.setVec3("dirLight.diffuse", vec3(0.4, 0.4, 0.4)); cubeShader.setVec3("dirLight.specular", vec3(0.5, 0.5, 0.5)); // point light 1 cubeShader.setVec3("pointLights[0].position", pointLightPositions[0]); cubeShader.setVec3("pointLights[0].ambient", vec3(0.05, 0.05, 0.05)); cubeShader.setVec3("pointLights[0].diffuse", vec3(0.8, 0.8, 0.8)); cubeShader.setVec3("pointLights[0].specular", vec3(1.0, 1.0, 1.0)); cubeShader.setFloat("pointLights[0].constant", 1.0); cubeShader.setFloat("pointLights[0].linear", 0.09); cubeShader.setFloat("pointLights[0].quadratic", 0.032); // point light 2 cubeShader.setVec3("pointLights[1].position", pointLightPositions[1]); cubeShader.setVec3("pointLights[1].ambient", vec3(0.05, 0.05, 0.05)); cubeShader.setVec3("pointLights[1].diffuse", vec3(0.8, 0.8, 0.8)); cubeShader.setVec3("pointLights[1].specular", vec3(1.0, 1.0, 1.0)); cubeShader.setFloat("pointLights[1].constant", 1.0); cubeShader.setFloat("pointLights[1].linear", 0.09); cubeShader.setFloat("pointLights[1].quadratic", 0.032); // point light 3 cubeShader.setVec3("pointLights[2].position", pointLightPositions[2]); cubeShader.setVec3("pointLights[2].ambient", vec3(0.05, 0.05, 0.05)); cubeShader.setVec3("pointLights[2].diffuse", vec3(0.8, 0.8, 0.8)); cubeShader.setVec3("pointLights[2].specular", vec3(1.0, 1.0, 1.0)); cubeShader.setFloat("pointLights[2].constant", 1.0); cubeShader.setFloat("pointLights[2].linear", 0.09); cubeShader.setFloat("pointLights[2].quadratic", 0.032); // point light 4 cubeShader.setVec3("pointLights[3].position", pointLightPositions[3]); cubeShader.setVec3("pointLights[3].ambient", vec3(0.05, 0.05, 0.05)); cubeShader.setVec3("pointLights[3].diffuse", vec3(0.8, 0.8, 0.8)); cubeShader.setVec3("pointLights[3].specular", vec3(1.0, 1.0, 1.0)); cubeShader.setFloat("pointLights[3].constant", 1.0); cubeShader.setFloat("pointLights[3].linear", 0.09); cubeShader.setFloat("pointLights[3].quadratic", 0.032); // spotLight cubeShader.setVec3("spotLight.position", camera.position); cubeShader.setVec3("spotLight.direction", camera.front); cubeShader.setVec3("spotLight.ambient", vec3(0.0, 0.0, 0.0)); cubeShader.setVec3("spotLight.diffuse", vec3(1.0, 1.0, 1.0)); cubeShader.setVec3("spotLight.specular", vec3(1.0, 1.0, 1.0)); cubeShader.setFloat("spotLight.constant", 1.0); cubeShader.setFloat("spotLight.linear", 0.09); cubeShader.setFloat("spotLight.quadratic", 0.032); cubeShader.setFloat("spotLight.cutOff", cos(@floatCast(f32, 12.5 / 180.0 * pi))); cubeShader.setFloat("spotLight.outerCutOff", cos(@floatCast(f32, 15.0 / 180.0 * pi))); // view/projection transformations const projection = perspective(camera.zoom / 180.0 * pi, @intToFloat(f32, SCR_WIDTH) / @intToFloat(f32, SCR_HEIGHT), 0.1, 100.0); const view = camera.getViewMatrix(); cubeShader.setMat4("projection", projection); cubeShader.setMat4("view", view); // world transformation const cubeModel = Mat4.identity(); cubeShader.setMat4("model", cubeModel); // bind diffuse map glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, diffuseMap); // bind specular map glActiveTexture(GL_TEXTURE1); glBindTexture(GL_TEXTURE_2D, specularMap); // render boxes glBindVertexArray(cubeVAO); var i: usize = 0; while (i < 10) : (i += 1) { // calculate the model matrix for each object and pass it to shader before drawing var model = translation(cubePositions[i]); const angle = 20.0 * @intToFloat(f32, i); model = 
model.matmul(rotation(angle / 180.0 * pi, vec3(1.0, 0.3, 0.5))); cubeShader.setMat4("model", model); glDrawArrays(GL_TRIANGLES, 0, 36); } // glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.) glfwSwapBuffers(window); glfwPollEvents(); } } // process all input: query GLFW whether relevant keys are pressed/released this frame and react accordingly pub fn processInput(window: ?*GLFWwindow) callconv(.C) void { if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS) glfwSetWindowShouldClose(window, 1); if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS) camera.processKeyboard(.Forward, deltaTime); if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS) camera.processKeyboard(.Backward, deltaTime); if (glfwGetKey(window, GLFW_KEY_A) == GLFW_PRESS) camera.processKeyboard(.Left, deltaTime); if (glfwGetKey(window, GLFW_KEY_D) == GLFW_PRESS) camera.processKeyboard(.Right, deltaTime); } // glfw: whenever the window size changed (by OS or user resize) this callback function executes pub fn framebuffer_size_callback(window: ?*GLFWwindow, width: c_int, height: c_int) callconv(.C) void { // make sure the viewport matches the new window dimensions; note that width and // height will be significantly larger than specified on retina displays. glViewport(0, 0, width, height); } // glfw: whenever the mouse moves, this callback is called pub fn mouse_callback(window: ?*GLFWwindow, xpos: f64, ypos: f64) callconv(.C) void { if (firstMouse) { lastX = @floatCast(f32, xpos); lastY = @floatCast(f32, ypos); firstMouse = false; } const xoffset = @floatCast(f32, xpos) - lastX; const yoffset = lastY - @floatCast(f32, ypos); // reversed since y-coordinates go from bottom to top lastX = @floatCast(f32, xpos); lastY = @floatCast(f32, ypos); camera.processMouseMovement(xoffset, yoffset); } // glfw: whenever the mouse scroll wheel scrolls, this callback is called pub fn scroll_callback(window: ?*GLFWwindow, xoffset: f64, yoffset: f64) callconv(.C) void { camera.processMouseScroll(@floatCast(f32, yoffset)); } // utility function for loading a 2D texture from file pub fn loadTexture(path: [:0]const u8) c_uint { var textureID: c_uint = undefined; glGenTextures(1, &textureID); var width: c_int = undefined; var height: c_int = undefined; var nrChannels: c_int = undefined; const data = stbi_load(path, &width, &height, &nrChannels, 0); if (data != null) { var format: GLenum = undefined; if (nrChannels == 1) { format = GL_RED; } else if (nrChannels == 3) { format = GL_RGB; } else if (nrChannels == 4) { format = GL_RGBA; } glBindTexture(GL_TEXTURE_2D, textureID); glTexImage2D(GL_TEXTURE_2D, 0, @intCast(c_int, format), width, height, 0, format, GL_UNSIGNED_BYTE, data); glGenerateMipmap(GL_TEXTURE_2D); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); stbi_image_free(data); } else { warn("Failed to load texture at path: {s}\n", .{path}); } return textureID; }
src/2_6_multiple_lights.zig
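The constant/linear/quadratic uniforms set for each point light above feed the usual attenuation term 1 / (kc + kl*d + kq*d*d), which the fragment shader referenced by the file (not shown here) is assumed to evaluate. A small standalone sketch of the same arithmetic, just to show what the 1.0 / 0.09 / 0.032 coefficients imply for range:

const assert = @import("std").debug.assert;

// Standalone sketch, not part of the program above: CPU-side evaluation of the point-light
// attenuation formula those coefficients are written for. The actual shader lives in the
// .vert/.frag files referenced above and is assumed, not shown.
fn attenuation(kc: f32, kl: f32, kq: f32, d: f32) f32 {
    return 1.0 / (kc + kl * d + kq * d * d);
}

test "falloff implied by the 1.0 / 0.09 / 0.032 coefficients" {
    assert(attenuation(1.0, 0.09, 0.032, 1.0) > 0.8); // nearly full brightness next to the light
    assert(attenuation(1.0, 0.09, 0.032, 50.0) < 0.05); // largely faded out at 50 units
}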
const c = @import("../../c_global.zig").c_imp; const std = @import("std"); const zigimg = import("zigimg"); // dross-zig const InternalTexture = @import("../texture.zig").InternalTexture; const TextureErrors = @import("../texture.zig").TextureErrors; const apis = @import("../renderer.zig").BackendApi; const fs = @import("../../utils/file_loader.zig"); const Vector2 = @import("../../core/vector2.zig").Vector2; // ----------------------------------------- // - TextureGl - // ----------------------------------------- /// OpenGL implmentation for image data pub const TextureGl = struct { /// OpenGl generated ID for the texture // NOTE(devon): 0 is not a valid ID! internal_id: c_uint = 0, /// The stored image data // data: [:0]const u8 = undefined, /// The width of the texture internal_width: c_int = 0, /// The height of the texture internal_height: c_int = 0, /// The number of channels used in the texture internal_channels: c_int = 0, const Self = @This(); /// Builds the TextureGl object and allocates any required memory /// Comments: The caller (Texture) will own the allocated memory. pub fn new(allocator: *std.mem.Allocator, path: []const u8) !*Self { var self = try allocator.create(TextureGl); const number_of_textures: c_int = 1; const mipmap_level: c_int = 0; const border: c_int = 0; c.stbi_set_flip_vertically_on_load(1); // Generate texture ID c.glGenTextures(number_of_textures, @ptrCast(*c_uint, &self.internal_id)); // Bind the texture c.glBindTexture(c.GL_TEXTURE_2D, self.internal_id); // Set texture parameters // NOTE(devon): Test image is pixel art, so we're defaulting to // nearest texture filtering c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_WRAP_S, c.GL_CLAMP_TO_EDGE); c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_WRAP_T, c.GL_CLAMP_TO_EDGE); c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_MIN_FILTER, c.GL_NEAREST); c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_MAG_FILTER, c.GL_NEAREST); //var compressed_bytes: []const u8 = @embedFile("../../../assets/sprites/s_guy_idle.png"); var compressed_bytes: ?[]const u8 = fs.loadFile(path, 4 * 1024 * 1024) catch |err| { std.debug.print("[Texture]: Failed to load Texture at {s}! {}\n", .{ path, err }); return err; }; const bytes_length: c_int = @intCast(c_int, compressed_bytes.?.len); // Determine if the file is a png file if (c.stbi_info_from_memory(compressed_bytes.?.ptr, bytes_length, &self.internal_width, &self.internal_height, &self.internal_channels) == 0) { return error.NotPngFile; } // Ensure that the image has pixel data if (self.internal_width <= 0 or self.internal_height <= 0) return error.NoPixels; if (c.stbi_is_16_bit_from_memory(compressed_bytes.?.ptr, bytes_length) != 0) { return error.InvalidFormat; } const bits_per_channel = 8; const channel_count = 4; const width_u32 = @intCast(u32, self.internal_width); const height_u32 = @intCast(u32, self.internal_height); const image_data = c.stbi_load_from_memory(compressed_bytes.?.ptr, bytes_length, &self.internal_width, &self.internal_height, &self.internal_channels, channel_count); if (image_data == null) return error.NoMem; const pitch = width_u32 * bits_per_channel * channel_count / 8; var data = image_data[0 .. 
height_u32 * pitch]; // Generate gl texture c.glTexImage2D( c.GL_TEXTURE_2D, // Texture Target mipmap_level, // mipmap detail level c.GL_RGBA, // Specifies the number of color components in texture self.internal_width, // Width of image self.internal_height, // Height of image border, // Boarde NOTE(devon): must be 0 c.GL_RGBA, // Specifies the format of the pixel data c.GL_UNSIGNED_BYTE, // Specifies the data type of the pixel data @ptrCast(*c_void, &data.ptr[0]), // void pointer to image data ); // Generate mipmap // c.glGenerateMipmap(c.GL_TEXTURE_2D); c.stbi_image_free(data.ptr); return self; } /// Builds a dataless TextureGl object and allocates any required memory /// Comments: The caller (Texture) will own the allocated memory. pub fn newDataless(allocator: *std.mem.Allocator, size: Vector2) !*Self { var self = try allocator.create(TextureGl); // Generate texture ID c.glGenTextures(1, @ptrCast(*c_uint, &self.internal_id)); // Bind the texture c.glBindTexture(c.GL_TEXTURE_2D, self.internal_id); // Set texture parameters c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_MIN_FILTER, c.GL_NEAREST); c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_MAG_FILTER, c.GL_NEAREST); self.internal_width = @floatToInt(c_int, size.x()); self.internal_height = @floatToInt(c_int, size.y()); // Generate gl texture c.glTexImage2D( c.GL_TEXTURE_2D, // Texture Target 0, // mipmap detail level c.GL_RGB, // Specifies the number of color components in texture self.internal_width, // Width of image self.internal_height, // Height of image 0, // Boarde NOTE(devon): must be 0 c.GL_RGB, // Specifies the format of the pixel data c.GL_UNSIGNED_BYTE, // Specifies the data type of the pixel data null, // void pointer to image data ); return self; } /// Builds the TextureGl object for font rendering and allocates any required memory /// Comments: The caller (Texture) will own the allocated memory. pub fn newFont(allocator: *std.mem.Allocator, data: [*c]u8, desired_width: u32, desired_rows: u32) !*Self { var self = try allocator.create(TextureGl); // Generate texture ID c.glGenTextures(1, @ptrCast(*c_uint, &self.internal_id)); // Bind the texture c.glBindTexture(c.GL_TEXTURE_2D, self.internal_id); // Set texture parameters c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_WRAP_S, c.GL_CLAMP_TO_BORDER); c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_WRAP_T, c.GL_CLAMP_TO_BORDER); c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_MIN_FILTER, c.GL_LINEAR); c.glTexParameteri(c.GL_TEXTURE_2D, c.GL_TEXTURE_MAG_FILTER, c.GL_LINEAR); self.internal_width = @intCast(c_int, desired_width); self.internal_height = @intCast(c_int, desired_rows); // Generate gl texture c.glTexImage2D( c.GL_TEXTURE_2D, // Texture Target 0, // mipmap detail level c.GL_RED, // Specifies the number of color components in texture self.internal_width, // Width of image self.internal_height, // Height of image 0, // Boarde NOTE(devon): must be 0 c.GL_RED, // Specifies the format of the pixel data c.GL_UNSIGNED_BYTE, // Specifies the data type of the pixel data @ptrCast(?*const c_void, data), // void pointer to image data ); return self; } /// Frees the allocated memory that OpenGlTexture required to function. 
pub fn free(allocator: *std.mem.Allocator, self: *Self) void { c.glDeleteTextures(1, @ptrCast(*c_uint, &self.internal_id)); allocator.destroy(self); } /// Binds the texture pub fn bind(self: *Self) void { c.glBindTexture(c.GL_TEXTURE_2D, self.internal_id); } /// Binds an externally provided texture id to the given texture unit slot pub fn bindUnit(slot_index: c_uint, external_id: c_uint) void { c.glBindTextureUnit(slot_index, external_id); } /// Returns the OpenGL generated texture id pub fn id(self: *Self) c_uint { if (self.internal_id == 0) @panic("[Renderer][OpenGL]: Texture ID of 0 is NOT valid!"); return self.internal_id; } /// Returns the stored height of the texture pub fn height(self: *Self) c_int { return self.internal_height; } /// Returns the stored width of the texture pub fn width(self: *Self) c_int { return self.internal_width; } };
src/renderer/backend/texture_opengl.zig
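The slice arithmetic inside TextureGl.new is worth spelling out: stbi is asked to expand to four channels, so the pitch works out to width * 4 bytes and the slice handed to glTexImage2D covers height * pitch bytes. A standalone sketch of just that arithmetic, with made-up sizes:

const assert = @import("std").debug.assert;

// Standalone sketch, not part of the module above; the width/height values are made up.
test "pitch arithmetic behind the stbi slice in TextureGl.new" {
    const bits_per_channel: u32 = 8;
    const channel_count: u32 = 4; // stbi_load_from_memory is asked to expand to RGBA
    const width: u32 = 64;
    const height: u32 = 32;
    const pitch = width * bits_per_channel * channel_count / 8;
    assert(pitch == width * 4); // one RGBA8 pixel is four bytes
    assert(height * pitch == 64 * 32 * 4); // total bytes covered by the image_data slice
}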
const __floatuntitf = @import("floatuntitf.zig").__floatuntitf; const testing = @import("std").testing; fn test__floatuntitf(a: u128, expected: f128) void { const x = __floatuntitf(a); testing.expect(x == expected); } test "floatuntitf" { test__floatuntitf(0, 0.0); test__floatuntitf(1, 1.0); test__floatuntitf(2, 2.0); test__floatuntitf(20, 20.0); test__floatuntitf(0x7FFFFF8000000000, 0x1.FFFFFEp+62); test__floatuntitf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62); test__floatuntitf(0x7FFFFF0000000000, 0x1.FFFFFCp+62); test__floatuntitf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62); test__floatuntitf(0x7FFFFFFFFFFFFFFF, 0xF.FFFFFFFFFFFFFFEp+59); test__floatuntitf(0xFFFFFFFFFFFFFFFE, 0xF.FFFFFFFFFFFFFFEp+60); test__floatuntitf(0xFFFFFFFFFFFFFFFF, 0xF.FFFFFFFFFFFFFFFp+60); test__floatuntitf(0x8000008000000000, 0x8.000008p+60); test__floatuntitf(0x8000000000000800, 0x8.0000000000008p+60); test__floatuntitf(0x8000010000000000, 0x8.00001p+60); test__floatuntitf(0x8000000000001000, 0x8.000000000001p+60); test__floatuntitf(0x8000000000000000, 0x8p+60); test__floatuntitf(0x8000000000000001, 0x8.000000000000001p+60); test__floatuntitf(0x0007FB72E8000000, 0x1.FEDCBAp+50); test__floatuntitf(0x0007FB72EA000000, 0x1.FEDCBA8p+50); test__floatuntitf(0x0007FB72EB000000, 0x1.FEDCBACp+50); test__floatuntitf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50); test__floatuntitf(0x0007FB72EC000000, 0x1.FEDCBBp+50); test__floatuntitf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50); test__floatuntitf(0x0007FB72E6000000, 0x1.FEDCB98p+50); test__floatuntitf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50); test__floatuntitf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50); test__floatuntitf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50); test__floatuntitf(0x0007FB72E4000000, 0x1.FEDCB9p+50); test__floatuntitf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57); test__floatuntitf(0x023479FD0E092DA1, 0x1.1A3CFE870496D08p+57); test__floatuntitf(0x023479FD0E092DB0, 0x1.1A3CFE870496D8p+57); test__floatuntitf(0x023479FD0E092DB8, 0x1.1A3CFE870496DCp+57); test__floatuntitf(0x023479FD0E092DB6, 0x1.1A3CFE870496DBp+57); test__floatuntitf(0x023479FD0E092DBF, 0x1.1A3CFE870496DF8p+57); test__floatuntitf(0x023479FD0E092DC1, 0x1.1A3CFE870496E08p+57); test__floatuntitf(0x023479FD0E092DC7, 0x1.1A3CFE870496E38p+57); test__floatuntitf(0x023479FD0E092DC8, 0x1.1A3CFE870496E4p+57); test__floatuntitf(0x023479FD0E092DCF, 0x1.1A3CFE870496E78p+57); test__floatuntitf(0x023479FD0E092DD0, 0x1.1A3CFE870496E8p+57); test__floatuntitf(0x023479FD0E092DD1, 0x1.1A3CFE870496E88p+57); test__floatuntitf(0x023479FD0E092DD8, 0x1.1A3CFE870496ECp+57); test__floatuntitf(0x023479FD0E092DDF, 0x1.1A3CFE870496EF8p+57); test__floatuntitf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57); test__floatuntitf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121); test__floatuntitf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496D08p+121); test__floatuntitf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496D8p+121); test__floatuntitf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496DCp+121); test__floatuntitf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496DBp+121); test__floatuntitf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496DF8p+121); test__floatuntitf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496E08p+121); test__floatuntitf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496E38p+121); test__floatuntitf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496E4p+121); test__floatuntitf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496E78p+121); test__floatuntitf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496E8p+121); 
test__floatuntitf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496E88p+121); test__floatuntitf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496ECp+121); test__floatuntitf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496EF8p+121); test__floatuntitf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121); test__floatuntitf(make_ti(0, 0xFFFFFFFFFFFFFFFF), 0x1.FFFFFFFFFFFFFFFEp+63); test__floatuntitf(make_ti(0xFFFFFFFFFFFFFFFF, 0x0000000000000000), 0x1.FFFFFFFFFFFFFFFEp+127); test__floatuntitf(make_ti(0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF), 0x1.0000000000000000p+128); test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC2801), 0x1.23456789ABCDEF0123456789ABC3p+124); test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3000), 0x1.23456789ABCDEF0123456789ABC3p+124); test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC37FF), 0x1.23456789ABCDEF0123456789ABC3p+124); test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3800), 0x1.23456789ABCDEF0123456789ABC4p+124); test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4000), 0x1.23456789ABCDEF0123456789ABC4p+124); test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC47FF), 0x1.23456789ABCDEF0123456789ABC4p+124); test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4800), 0x1.23456789ABCDEF0123456789ABC4p+124); test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4801), 0x1.23456789ABCDEF0123456789ABC5p+124); test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC57FF), 0x1.23456789ABCDEF0123456789ABC5p+124); } fn make_ti(high: u64, low: u64) u128 { var result: u128 = high; result <<= 64; result |= low; return result; }
lib/std/special/compiler_rt/floatuntitf_test.zig
const std = @import("../std.zig"); const CpuFeature = std.Target.Cpu.Feature; const CpuModel = std.Target.Cpu.Model; pub const Feature = enum { deflate_conversion, dfp_packed_conversion, dfp_zoned_conversion, distinct_ops, enhanced_dat_2, enhanced_sort, execution_hint, fast_serialization, fp_extension, guarded_storage, high_word, insert_reference_bits_multiple, interlocked_access1, load_and_trap, load_and_zero_rightmost_byte, load_store_on_cond, load_store_on_cond_2, message_security_assist_extension3, message_security_assist_extension4, message_security_assist_extension5, message_security_assist_extension7, message_security_assist_extension8, message_security_assist_extension9, miscellaneous_extensions, miscellaneous_extensions_2, miscellaneous_extensions_3, population_count, processor_assist, reset_reference_bits_multiple, transactional_execution, vector, vector_enhancements_1, vector_enhancements_2, vector_packed_decimal, vector_packed_decimal_enhancement, }; pub usingnamespace CpuFeature.feature_set_fns(Feature); pub const all_features = blk: { const len = @typeInfo(Feature).Enum.fields.len; std.debug.assert(len <= CpuFeature.Set.needed_bit_count); var result: [len]CpuFeature = undefined; result[@enumToInt(Feature.deflate_conversion)] = .{ .llvm_name = "deflate-conversion", .description = "Assume that the deflate-conversion facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.dfp_packed_conversion)] = .{ .llvm_name = "dfp-packed-conversion", .description = "Assume that the DFP packed-conversion facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.dfp_zoned_conversion)] = .{ .llvm_name = "dfp-zoned-conversion", .description = "Assume that the DFP zoned-conversion facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.distinct_ops)] = .{ .llvm_name = "distinct-ops", .description = "Assume that the distinct-operands facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.enhanced_dat_2)] = .{ .llvm_name = "enhanced-dat-2", .description = "Assume that the enhanced-DAT facility 2 is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.enhanced_sort)] = .{ .llvm_name = "enhanced-sort", .description = "Assume that the enhanced-sort facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.execution_hint)] = .{ .llvm_name = "execution-hint", .description = "Assume that the execution-hint facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.fast_serialization)] = .{ .llvm_name = "fast-serialization", .description = "Assume that the fast-serialization facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.fp_extension)] = .{ .llvm_name = "fp-extension", .description = "Assume that the floating-point extension facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.guarded_storage)] = .{ .llvm_name = "guarded-storage", .description = "Assume that the guarded-storage facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.high_word)] = .{ .llvm_name = "high-word", .description = "Assume that the high-word facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.insert_reference_bits_multiple)] = .{ .llvm_name = "insert-reference-bits-multiple", .description = "Assume 
that the insert-reference-bits-multiple facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.interlocked_access1)] = .{ .llvm_name = "interlocked-access1", .description = "Assume that interlocked-access facility 1 is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.load_and_trap)] = .{ .llvm_name = "load-and-trap", .description = "Assume that the load-and-trap facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.load_and_zero_rightmost_byte)] = .{ .llvm_name = "load-and-zero-rightmost-byte", .description = "Assume that the load-and-zero-rightmost-byte facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.load_store_on_cond)] = .{ .llvm_name = "load-store-on-cond", .description = "Assume that the load/store-on-condition facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.load_store_on_cond_2)] = .{ .llvm_name = "load-store-on-cond-2", .description = "Assume that the load/store-on-condition facility 2 is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.message_security_assist_extension3)] = .{ .llvm_name = "message-security-assist-extension3", .description = "Assume that the message-security-assist extension facility 3 is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.message_security_assist_extension4)] = .{ .llvm_name = "message-security-assist-extension4", .description = "Assume that the message-security-assist extension facility 4 is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.message_security_assist_extension5)] = .{ .llvm_name = "message-security-assist-extension5", .description = "Assume that the message-security-assist extension facility 5 is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.message_security_assist_extension7)] = .{ .llvm_name = "message-security-assist-extension7", .description = "Assume that the message-security-assist extension facility 7 is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.message_security_assist_extension8)] = .{ .llvm_name = "message-security-assist-extension8", .description = "Assume that the message-security-assist extension facility 8 is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.message_security_assist_extension9)] = .{ .llvm_name = "message-security-assist-extension9", .description = "Assume that the message-security-assist extension facility 9 is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.miscellaneous_extensions)] = .{ .llvm_name = "miscellaneous-extensions", .description = "Assume that the miscellaneous-extensions facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.miscellaneous_extensions_2)] = .{ .llvm_name = "miscellaneous-extensions-2", .description = "Assume that the miscellaneous-extensions facility 2 is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.miscellaneous_extensions_3)] = .{ .llvm_name = "miscellaneous-extensions-3", .description = "Assume that the miscellaneous-extensions facility 3 is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.population_count)] = .{ .llvm_name = "population-count", .description = "Assume that the population-count facility 
is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.processor_assist)] = .{ .llvm_name = "processor-assist", .description = "Assume that the processor-assist facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.reset_reference_bits_multiple)] = .{ .llvm_name = "reset-reference-bits-multiple", .description = "Assume that the reset-reference-bits-multiple facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.transactional_execution)] = .{ .llvm_name = "transactional-execution", .description = "Assume that the transactional-execution facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.vector)] = .{ .llvm_name = "vector", .description = "Assume that the vectory facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.vector_enhancements_1)] = .{ .llvm_name = "vector-enhancements-1", .description = "Assume that the vector enhancements facility 1 is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.vector_enhancements_2)] = .{ .llvm_name = "vector-enhancements-2", .description = "Assume that the vector enhancements facility 2 is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.vector_packed_decimal)] = .{ .llvm_name = "vector-packed-decimal", .description = "Assume that the vector packed decimal facility is installed", .dependencies = featureSet(&[_]Feature{}), }; result[@enumToInt(Feature.vector_packed_decimal_enhancement)] = .{ .llvm_name = "vector-packed-decimal-enhancement", .description = "Assume that the vector packed decimal enhancement facility is installed", .dependencies = featureSet(&[_]Feature{}), }; const ti = @typeInfo(Feature); for (result) |*elem, i| { elem.index = i; elem.name = ti.Enum.fields[i].name; } break :blk result; }; pub const cpu = struct { pub const arch10 = CpuModel{ .name = "arch10", .llvm_name = "arch10", .features = featureSet(&[_]Feature{ .dfp_zoned_conversion, .distinct_ops, .enhanced_dat_2, .execution_hint, .fast_serialization, .fp_extension, .high_word, .interlocked_access1, .load_and_trap, .load_store_on_cond, .message_security_assist_extension3, .message_security_assist_extension4, .miscellaneous_extensions, .population_count, .processor_assist, .reset_reference_bits_multiple, .transactional_execution, }), }; pub const arch11 = CpuModel{ .name = "arch11", .llvm_name = "arch11", .features = featureSet(&[_]Feature{ .dfp_packed_conversion, .dfp_zoned_conversion, .distinct_ops, .enhanced_dat_2, .execution_hint, .fast_serialization, .fp_extension, .high_word, .interlocked_access1, .load_and_trap, .load_and_zero_rightmost_byte, .load_store_on_cond, .load_store_on_cond_2, .message_security_assist_extension3, .message_security_assist_extension4, .message_security_assist_extension5, .miscellaneous_extensions, .population_count, .processor_assist, .reset_reference_bits_multiple, .transactional_execution, .vector, }), }; pub const arch12 = CpuModel{ .name = "arch12", .llvm_name = "arch12", .features = featureSet(&[_]Feature{ .dfp_packed_conversion, .dfp_zoned_conversion, .distinct_ops, .enhanced_dat_2, .execution_hint, .fast_serialization, .fp_extension, .guarded_storage, .high_word, .insert_reference_bits_multiple, .interlocked_access1, .load_and_trap, .load_and_zero_rightmost_byte, .load_store_on_cond, .load_store_on_cond_2, .message_security_assist_extension3, .message_security_assist_extension4, 
.message_security_assist_extension5, .message_security_assist_extension7, .message_security_assist_extension8, .miscellaneous_extensions, .miscellaneous_extensions_2, .population_count, .processor_assist, .reset_reference_bits_multiple, .transactional_execution, .vector, .vector_enhancements_1, .vector_packed_decimal, }), }; pub const arch13 = CpuModel{ .name = "arch13", .llvm_name = "arch13", .features = featureSet(&[_]Feature{ .deflate_conversion, .dfp_packed_conversion, .dfp_zoned_conversion, .distinct_ops, .enhanced_dat_2, .enhanced_sort, .execution_hint, .fast_serialization, .fp_extension, .guarded_storage, .high_word, .insert_reference_bits_multiple, .interlocked_access1, .load_and_trap, .load_and_zero_rightmost_byte, .load_store_on_cond, .load_store_on_cond_2, .message_security_assist_extension3, .message_security_assist_extension4, .message_security_assist_extension5, .message_security_assist_extension7, .message_security_assist_extension8, .message_security_assist_extension9, .miscellaneous_extensions, .miscellaneous_extensions_2, .miscellaneous_extensions_3, .population_count, .processor_assist, .reset_reference_bits_multiple, .transactional_execution, .vector, .vector_enhancements_1, .vector_enhancements_2, .vector_packed_decimal, .vector_packed_decimal_enhancement, }), }; pub const arch8 = CpuModel{ .name = "arch8", .llvm_name = "arch8", .features = featureSet(&[_]Feature{}), }; pub const arch9 = CpuModel{ .name = "arch9", .llvm_name = "arch9", .features = featureSet(&[_]Feature{ .distinct_ops, .fast_serialization, .fp_extension, .high_word, .interlocked_access1, .load_store_on_cond, .message_security_assist_extension3, .message_security_assist_extension4, .population_count, .reset_reference_bits_multiple, }), }; pub const generic = CpuModel{ .name = "generic", .llvm_name = "generic", .features = featureSet(&[_]Feature{}), }; pub const z10 = CpuModel{ .name = "z10", .llvm_name = "z10", .features = featureSet(&[_]Feature{}), }; pub const z13 = CpuModel{ .name = "z13", .llvm_name = "z13", .features = featureSet(&[_]Feature{ .dfp_packed_conversion, .dfp_zoned_conversion, .distinct_ops, .enhanced_dat_2, .execution_hint, .fast_serialization, .fp_extension, .high_word, .interlocked_access1, .load_and_trap, .load_and_zero_rightmost_byte, .load_store_on_cond, .load_store_on_cond_2, .message_security_assist_extension3, .message_security_assist_extension4, .message_security_assist_extension5, .miscellaneous_extensions, .population_count, .processor_assist, .reset_reference_bits_multiple, .transactional_execution, .vector, }), }; pub const z14 = CpuModel{ .name = "z14", .llvm_name = "z14", .features = featureSet(&[_]Feature{ .dfp_packed_conversion, .dfp_zoned_conversion, .distinct_ops, .enhanced_dat_2, .execution_hint, .fast_serialization, .fp_extension, .guarded_storage, .high_word, .insert_reference_bits_multiple, .interlocked_access1, .load_and_trap, .load_and_zero_rightmost_byte, .load_store_on_cond, .load_store_on_cond_2, .message_security_assist_extension3, .message_security_assist_extension4, .message_security_assist_extension5, .message_security_assist_extension7, .message_security_assist_extension8, .miscellaneous_extensions, .miscellaneous_extensions_2, .population_count, .processor_assist, .reset_reference_bits_multiple, .transactional_execution, .vector, .vector_enhancements_1, .vector_packed_decimal, }), }; pub const z196 = CpuModel{ .name = "z196", .llvm_name = "z196", .features = featureSet(&[_]Feature{ .distinct_ops, .fast_serialization, .fp_extension, .high_word, 
.interlocked_access1, .load_store_on_cond, .message_security_assist_extension3, .message_security_assist_extension4, .population_count, .reset_reference_bits_multiple, }), }; pub const zEC12 = CpuModel{ .name = "zEC12", .llvm_name = "zEC12", .features = featureSet(&[_]Feature{ .dfp_zoned_conversion, .distinct_ops, .enhanced_dat_2, .execution_hint, .fast_serialization, .fp_extension, .high_word, .interlocked_access1, .load_and_trap, .load_store_on_cond, .message_security_assist_extension3, .message_security_assist_extension4, .miscellaneous_extensions, .population_count, .processor_assist, .reset_reference_bits_multiple, .transactional_execution, }), }; }; /// All systemz CPUs, sorted alphabetically by name. /// TODO: Replace this with usage of `std.meta.declList`. It does work, but stage1 /// compiler has inefficient memory and CPU usage, affecting build times. pub const all_cpus = &[_]*const CpuModel{ &cpu.arch10, &cpu.arch11, &cpu.arch12, &cpu.arch13, &cpu.arch8, &cpu.arch9, &cpu.generic, &cpu.z10, &cpu.z13, &cpu.z14, &cpu.z196, &cpu.zEC12, };
lib/std/target/systemz.zig
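The feature sets above are meant to be queried through the helpers that CpuFeature.feature_set_fns mixes in (featureSet, featureSetHas, and friends). A standalone sketch, assuming this file is reachable as std.Target.systemz in the same standard library version:

const std = @import("std");
const assert = std.debug.assert;
const systemz = std.Target.systemz; // assumed import path for the file above

// Standalone sketch, not part of the file above: querying the generated feature sets.
test "z13 advertises the vector facility, arch8 does not" {
    assert(systemz.featureSetHas(systemz.cpu.z13.features, .vector));
    assert(!systemz.featureSetHas(systemz.cpu.arch8.features, .vector));
}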
const std = @import("std"); const fun = @import("fun"); const debug = std.debug; const heap = std.heap; const io = std.io; const math = std.math; const mem = std.mem; const os = std.os; const scan = fun.scan.scan; pub fn main() !void { const stdin = &(try io.getStdIn()).inStream().stream; const stdout = &(try io.getStdOut()).outStream().stream; var ps = io.PeekStream(1, os.File.InStream.Error).init(stdin); var direct_allocator = heap.DirectAllocator.init(); const allocator = &direct_allocator.allocator; defer direct_allocator.deinit(); const init = try readInitialState(allocator, &ps); defer allocator.free(init); _ = try scan(&ps, "\n", struct {}); const notes = try readNotes(allocator, &ps); defer allocator.free(notes); const note_mask = compileNotes(notes); var arena_state = heap.ArenaAllocator.init(allocator); const arena = &arena_state.allocator; defer arena_state.deinit(); // Sadly I had to look at reddit for this one. Did not think about the fact // that the input would stabilize (not all inputs do though // https://www.reddit.com/r/adventofcode/comments/a5eztl/comment/ebm4oec/) // This solution is a little messy. Ooh well. var sum_diff = std.ArrayList(isize).init(arena); var curr = init; var gen: usize = 0; var prev_sum: isize = sumState(init, 0); var sum = prev_sum; const repeating_diff = loop: while (gen < 50000000000) : (gen += 1) { curr = try simulate(arena, curr, note_mask); var off = @divExact(@intCast(isize, curr.len - init.len), 2) * 8; const curr_sum = sumState(curr, off); try sum_diff.append(curr_sum - prev_sum); prev_sum = curr_sum; // We just assume the sum has stabilize when the diff repeats 10 times const diffs = sum_diff.toSlice(); const last = diffs[diffs.len - 1]; sum += last; for (diffs) |_, i| { const index = diffs.len - (i + 1); if (diffs[index] != last) break; if (i == 10) break :loop last; } } else blk: { break :blk 0; }; gen += 1; sum += @intCast(isize, 50000000000 - gen) * repeating_diff; try stdout.print("{}\n", sum); } fn readInitialState(allocator: *mem.Allocator, ps: var) ![]u8 { var state = std.ArrayList(u8).init(allocator); defer state.deinit(); _ = try scan(ps, "initial state: ", struct {}); var i: usize = 0; while (true) : (i += 1) { if (state.len <= (i / 8)) try state.append(0); const curr = &state.toSlice()[i / 8]; const c = try ps.stream.readByte(); switch (c) { '#' => curr.* |= u8(1) << @intCast(u3, 8 - ((i % 8) + 1)), '.' => {}, '\n' => return state.toOwnedSlice(), else => return error.InvalidInput, } } } fn readNotes(allocator: *mem.Allocator, ps: var) ![]Note { var notes = std.ArrayList(Note).init(allocator); defer notes.deinit(); while (readNote(ps)) |note| { try notes.append(note); _ = try scan(ps, "\n", struct {}); } else |err| switch (err) { error.EndOfStream => {}, else => return err, } return notes.toOwnedSlice(); } fn readNote(ps: var) !Note { var mask: u8 = 0; while (true) switch (try ps.stream.readByte()) { '#' => { mask <<= 1; mask |= 1; }, '.' => mask <<= 1, ' ' => break, else => return error.InvalidInput, }; _ = try scan(ps, "=> ", struct {}); return Note{ .mask = mask, .result = switch (try ps.stream.readByte()) { '#' => true, '.' 
=> false, else => return error.InvalidInput, }, }; } fn simulate(allocator: *mem.Allocator, state: []const u8, note_mask: u32) ![]u8 { const res = try allocator.alloc(u8, state.len + 2); mem.set(u8, res, 0); for (res) |*b, i| { const left = if (i < 2) u8(0) else state[i - 2]; const center = if (i < 1 or state.len <= i - 1) u8(0) else state[i - 1]; const right = if (state.len <= i) u8(0) else state[i]; const com: u16 = (u16(left) & 0b11) << 10 | u16(center) << 2 | ((u16(right) >> 6) & 0b11); comptime var j: usize = 0; inline while (j < @sizeOf(u8) * 8) : (j += 1) { const mask = (com >> @intCast(u5, j)) & u32(0b11111); b.* |= u8(@boolToInt(note_mask & (u32(1) << @intCast(u5, mask)) != 0)) << @intCast(u3, j); } } return res; } fn sumState(state: []const u8, off: isize) isize { var sum: isize = 0; for (state) |c, i| { var j: usize = 0; const s = @sizeOf(u8) * 8; while (j < s) : (j += 1) { const is_set = (c & (u8(1) << @intCast(u3, s - (j + 1)))) != 0; if (is_set) { sum += @intCast(isize, i * s + j) - off; } } } return sum; } fn compileNotes(notes: []const Note) u32 { var res: u32 = 0; for (notes) |n| res |= u32(@boolToInt(n.result)) << @intCast(u5, n.mask); return res; } fn sumF(comptime T: type, buf: []const T) isize { var res: isize = 0; for (buf) |item| res += item; return res; } const Note = struct { mask: u8, result: bool, };
src/day12.zig
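The closing arithmetic of the solution above deserves a plain statement: once the per-generation sum delta becomes constant, the remaining generations contribute linearly, so the answer is the current sum plus (target generation minus current generation) times that delta. A standalone sketch with made-up numbers, written against the same older builtin forms the file uses:

const assert = @import("std").debug.assert;

// Standalone sketch, not part of the solution above; every number here is made up purely
// to illustrate the extrapolation step.
test "extrapolating once the per-generation delta repeats" {
    const target_gen: usize = 50000000000;
    const gen: usize = 200; // hypothetical generation where the delta stabilized
    const sum: isize = 20000; // hypothetical pot-number sum at that generation
    const repeating_diff: isize = 86; // hypothetical constant delta per generation
    const final = sum + @intCast(isize, target_gen - gen) * repeating_diff;
    assert(final == 20000 + 86 * (50000000000 - 200));
}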
const std = @import("std"); const zelda = @import("zelda"); // NOTE: this test will fail if ziglang.org is down! test "fetch status code of ziglang.org" { defer zelda.cleanup(); var response = try zelda.get(std.testing.allocator, "https://ziglang.org"); defer response.deinit(); try std.testing.expectEqual(@as(u10, 200), @enumToInt(response.statusCode)); } const HTTPBinResponse = struct { data: []const u8, }; test "post some data and get it back" { defer zelda.cleanup(); const data = "bruh moment"; var response = try zelda.postAndParseResponse(HTTPBinResponse, .{ .allocator = std.testing.allocator, .ignore_unknown_fields = true, }, std.testing.allocator, "https://httpbin.org/post", .{ .kind = .Raw, .bytes = data }); defer std.json.parseFree(HTTPBinResponse, response, .{ .allocator = std.testing.allocator, .ignore_unknown_fields = true, }); try std.testing.expectEqualStrings(data, response.data); } const TestDataStruct = struct { number_of_bruhs: usize, bruh_status: []const u8, maximum_bruh_enabled: bool, }; test "post some json data and get it back" { defer zelda.cleanup(); var source = TestDataStruct{ .number_of_bruhs = 69, .bruh_status = "engaged", .maximum_bruh_enabled = true, }; var httpBinResponse = try zelda.postJsonAndParseResponse(HTTPBinResponse, "https://httpbin.org/post", source, .{ .allocator = std.testing.allocator, .parseOptions = .{ .ignore_unknown_fields = true }, }); defer std.json.parseFree(HTTPBinResponse, httpBinResponse, .{ .allocator = std.testing.allocator, .ignore_unknown_fields = true, }); var obj = try std.json.parse(TestDataStruct, &std.json.TokenStream.init(httpBinResponse.data), .{ .allocator = std.testing.allocator, .ignore_unknown_fields = true, }); defer std.json.parseFree(TestDataStruct, obj, .{ .allocator = std.testing.allocator, .ignore_unknown_fields = true, }); try std.testing.expectEqual(source.number_of_bruhs, obj.number_of_bruhs); try std.testing.expectEqual(source.maximum_bruh_enabled, obj.maximum_bruh_enabled); try std.testing.expectEqualStrings(source.bruh_status, obj.bruh_status); }
src/tests.zig
const std = @import("std"); const curses = @import("curses.zig"); const core = @import("core.zig"); pub const Characters = struct { horiz_line: []const u8, // - vert_line: []const u8, // | crossing_up: []const u8, // _|_ crossing_down: []const u8, // T crossing_left: []const u8, // --| crossing_right: []const u8, // |-- crossing_plus: []const u8, // --|-- corner_topleft: []const u8, // ,-- corner_topright: []const u8, // --. corner_bottomleft: []const u8, // |__ corner_bottomright: []const u8, // __| flag: []const u8, // F mine: []const u8, // * }; pub const ascii_characters = Characters{ .horiz_line = "-", .vert_line = "|", .crossing_up = "-", .crossing_down = "-", .crossing_left = "|", .crossing_right = "|", .crossing_plus = "+", .corner_topleft = ",", .corner_topright = ".", .corner_bottomleft = "`", .corner_bottomright = "'", .flag = "F", .mine = "*", }; pub const unicode_characters = Characters{ .horiz_line = "\xe2\x94\x80", // BOX DRAWINGS LIGHT HORIZONTAL .vert_line = "\xe2\x94\x82", // BOX DRAWINGS LIGHT VERTICAL .crossing_up = "\xe2\x94\xb4", // BOX DRAWINGS LIGHT UP AND HORIZONTAL .crossing_down = "\xe2\x94\xac", // BOX DRAWINGS LIGHT DOWN AND HORIZONTAL .crossing_left = "\xe2\x94\xa4", // BOX DRAWINGS LIGHT VERTICAL AND LEFT .crossing_right = "\xe2\x94\x9c", // BOX DRAWINGS LIGHT VERTICAL AND RIGHT .crossing_plus = "\xe2\x94\xbc", // BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL .corner_topleft = "\xe2\x95\xad", // BOX DRAWINGS LIGHT ARC DOWN AND RIGHT .corner_topright = "\xe2\x95\xae", // BOX DRAWINGS LIGHT ARC DOWN AND LEFT .corner_bottomleft = "\xe2\x95\xb0", // BOX DRAWINGS LIGHT ARC UP AND RIGHT .corner_bottomright = "\xe2\x95\xaf", // BOX DRAWINGS LIGHT ARC UP AND LEFT .flag = "\xe2\x9a\x91", // BLACK FLAG .mine = "\xe2\x88\x97", // ASTERISK OPERATOR }; pub const Ui = struct { selected_x: u8, selected_y: u8, game: *core.Game, window: curses.Window, chars: Characters, colors: bool, number_attrs: [9]c_int, // for coloring the numbers that show how many mines are around status_message: ?[]const u8, pub fn init(game: *core.Game, window: curses.Window, characters: Characters, want_color: bool) !Ui { var self = Ui{ .selected_x = 0, .selected_y = 0, .game = game, .window = window, .chars = characters, .colors = false, .number_attrs = undefined, .status_message = null, }; try self.setupColors(want_color and curses.has_colors()); return self; } fn setupColors(self: *Ui, use_colors: bool) !void { if (!use_colors) { for (self.number_attrs) |*ptr| { ptr.* = 0; } return; } try curses.start_color(); const colors = comptime[_]c_short{ curses.COLOR_BLUE, curses.COLOR_GREEN, curses.COLOR_YELLOW, curses.COLOR_RED, curses.COLOR_CYAN, curses.COLOR_MAGENTA, curses.COLOR_MAGENTA, curses.COLOR_MAGENTA, curses.COLOR_MAGENTA, }; std.debug.assert(colors.len == self.number_attrs.len); for (colors) |color, i| { const pair = try curses.ColorPair.init(@intCast(c_short, i+1), color, curses.COLOR_BLACK); self.number_attrs[i] = pair.attr(); } } fn getWidth(self: *const Ui) u16 { return (self.game.width * @intCast(u16, "|---".len)) + @intCast(u16, "|".len); } fn getHeight(self: *const Ui) u16 { return (self.game.height * 2) + 1; } fn drawLine(self: *const Ui, y: u16, xleft: u16, left: []const u8, mid: []const u8, right: []const u8, horiz: []const u8) !void { var x: u16 = xleft; var i: u8 = 0; while (i < self.game.width) : (i += 1) { try self.window.mvaddstr(y, x, (if (i == 0) left else mid)); x += 1; var j: u8 = 0; while (j < 3) : (j += 1) { try self.window.mvaddstr(y, x, horiz); x += 1; } } try 
self.window.mvaddstr(y, x, right); } fn drawGrid(self: *const Ui, allocator: *std.mem.Allocator) !void { var top: u16 = (self.window.getmaxy() - self.getHeight()) / 2; var left: u16 = (self.window.getmaxx() - self.getWidth()) / 2; var gamey: u8 = 0; var y: u16 = top; while (gamey < self.game.height) : (gamey += 1) { if (gamey == 0) { try self.drawLine(y, left, self.chars.corner_topleft, self.chars.crossing_down, self.chars.corner_topright, self.chars.horiz_line); } else { try self.drawLine(y, left, self.chars.crossing_right, self.chars.crossing_plus, self.chars.crossing_left, self.chars.horiz_line); } y += 1; var x: u16 = left; var gamex: u8 = 0; while (gamex < self.game.width) : (gamex += 1) { var attrs: c_int = 0; if (gamex == self.selected_x and gamey == self.selected_y) { attrs |= curses.A_STANDOUT; } const info = self.game.getSquareInfo(gamex, gamey); var msg1: []const u8 = ""; var msg2: []const u8 = ""; const numbers = "012345678"; if ((self.game.status == core.GameStatus.PLAY and info.opened) or (self.game.status != core.GameStatus.PLAY and !info.mine)) { msg1 = numbers[info.n_mines_around..info.n_mines_around+1]; attrs |= self.number_attrs[info.n_mines_around]; } else if (self.game.status == core.GameStatus.PLAY) { if (info.flagged) { msg1 = self.chars.flag; } } else { msg1 = self.chars.mine; if (info.flagged) { msg2 = self.chars.flag; } } try self.window.mvaddstr(y, x, self.chars.vert_line); x += 1; try self.window.attron(attrs); { try self.window.mvaddstr(y, x, " "); // make sure that all 3 character places get attrs x += 1; try self.window.mvaddstr(y, x, msg1); x += 1; try self.window.mvaddstr(y, x, msg2); x += 1; } try self.window.attroff(attrs); } try self.window.mvaddstr(y, x, self.chars.vert_line); y += 1; } try self.drawLine(y, left, self.chars.corner_bottomleft, self.chars.crossing_up, self.chars.corner_bottomright, self.chars.horiz_line); } pub fn setStatusMessage(self: *Ui, msg: []const u8) void { self.status_message = msg; } // this may overlap the grid on a small terminal, it doesn't matter fn drawStatusText(self: *const Ui, msg: []const u8) !void { try self.window.attron(curses.A_STANDOUT); try self.window.mvaddstr(self.window.getmaxy()-1, 0, msg); try self.window.attroff(curses.A_STANDOUT); } pub fn draw(self: *Ui, allocator: *std.mem.Allocator) !void { try self.drawGrid(allocator); if (self.status_message == null) { switch(self.game.status) { core.GameStatus.PLAY => {}, core.GameStatus.WIN => self.setStatusMessage("You won! 
:D Press n to play again."), core.GameStatus.LOSE => self.setStatusMessage("Game Over :( Press n to play again."), } } if (self.status_message) |msg| { try self.drawStatusText(msg); self.status_message = null; } } // returns whether to keep running the game pub fn onResize(self: *const Ui) !bool { if (self.window.getmaxy() < self.getHeight() or self.window.getmaxx() < self.getWidth()) { try curses.endwin(); var stderr = std.io.getStdErr().writer(); try stderr.print("Terminal is too small :( Need {}x{}.\n", .{ self.getWidth(), self.getHeight() }); return false; } return true; } pub fn moveSelection(self: *Ui, xdiff: i8, ydiff: i8) void { switch (xdiff) { 1 => if (self.selected_x != self.game.width-1) { self.selected_x += 1; }, -1 => if (self.selected_x != 0) { self.selected_x -= 1; }, 0 => {}, else => unreachable, } switch(ydiff) { 1 => if (self.selected_y != self.game.height-1) { self.selected_y += 1; }, -1 => if (self.selected_y != 0) { self.selected_y -= 1; }, 0 => {}, else => unreachable, } } pub fn openSelected(self: *const Ui) void { self.game.open(self.selected_x, self.selected_y); } pub fn toggleFlagSelected(self: *const Ui) void { self.game.toggleFlag(self.selected_x, self.selected_y); } pub fn openAroundIfSafe(self: *const Ui) void { self.game.openAroundIfSafe(self.selected_x, self.selected_y); } pub fn openAroundEverythingSafe(self: *const Ui) void { self.game.openAroundEverythingSafe(); } };
src/cursesui.zig
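A quick hedged sanity check, not part of src/cursesui.zig: the UTF-8 byte strings in unicode_characters above should decode to the box-drawing codepoints named in their comments. This standalone test uses only std.unicode and std.testing:

const std = @import("std");

test "unicode_characters byte strings decode to the intended codepoints" {
    const utf8Decode = std.unicode.utf8Decode;
    try std.testing.expectEqual(@as(u21, 0x2500), try utf8Decode("\xe2\x94\x80")); // BOX DRAWINGS LIGHT HORIZONTAL
    try std.testing.expectEqual(@as(u21, 0x2502), try utf8Decode("\xe2\x94\x82")); // BOX DRAWINGS LIGHT VERTICAL
    try std.testing.expectEqual(@as(u21, 0x256d), try utf8Decode("\xe2\x95\xad")); // BOX DRAWINGS LIGHT ARC DOWN AND RIGHT
    try std.testing.expectEqual(@as(u21, 0x2691), try utf8Decode("\xe2\x9a\x91")); // BLACK FLAG
    try std.testing.expectEqual(@as(u21, 0x2217), try utf8Decode("\xe2\x88\x97")); // ASTERISK OPERATOR
}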
const std = @import("std"); const builtin = @import("builtin"); pub fn getCookie(path: ?[]const u8) ![16]u8 { const xauth_file = blk: { if (path) |p| { break :blk try std.fs.openFileAbsolute(p, .{ .read = true, .write = false }); } else if (builtin.os.tag == .windows) { const xauthority = std.os.getenvW(std.unicode.utf8ToUtf16LeStringLiteral("XAUTHORITY")) orelse return error.XAuthorityNotSpecified; break :blk try std.fs.openFileAbsoluteW(xauthority, .{ .read = true, .write = false }); } else { if (std.os.getenv("XAUTHORITY")) |xafn| { break :blk try std.fs.openFileAbsolute(xafn, .{ .read = true, .write = false }); } const home = std.os.getenv("HOME") orelse return error.HomeDirectoryNotFound; var membuf: [256]u8 = undefined; var allocator = std.heap.FixedBufferAllocator.init(&membuf); const fpath = try std.mem.joinZ(&allocator.allocator, "/", &[_][]const u8{ home, ".Xauthority" }); break :blk try std.fs.openFileAbsoluteZ(fpath, .{ .read = true, .write = false }); } }; defer xauth_file.close(); var rbuf = std.io.bufferedReader(xauth_file.reader()); var reader = rbuf.reader(); while (true) { const family = reader.readIntBig(u16) catch break; const addr_len = try reader.readIntBig(u16); try reader.skipBytes(addr_len, .{ .buf_size = 64 }); const num_len = try reader.readIntBig(u16); try reader.skipBytes(num_len, .{ .buf_size = 64 }); const name_len = try reader.readIntBig(u16); if (name_len != 18) { try reader.skipBytes(name_len, .{ .buf_size = 64 }); const data_len = try reader.readIntBig(u16); try reader.skipBytes(data_len, .{ .buf_size = 64 }); continue; } var nbuf: [18]u8 = undefined; _ = try reader.readAll(nbuf[0..]); if (!std.mem.eql(u8, nbuf[0..], "MIT-MAGIC-COOKIE-1")) { const data_len = try reader.readIntBig(u16); try reader.skipBytes(data_len, .{ .buf_size = 64 }); continue; } const data_len = try reader.readIntBig(u16); if (data_len != 16) break; var xauth_data: [16]u8 = undefined; _ = try reader.readAll(xauth_data[0..]); return xauth_data; } return error.XauthorityCookieNotFound; }
didot-zwl/zwl/src/x11/auth.zig
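A hedged usage sketch for the cookie loader above, not from the repository: the import path "auth.zig" and the hex-dump loop are assumptions; passing null makes getCookie fall back to $XAUTHORITY and then ~/.Xauthority, as implemented above.

const std = @import("std");
const auth = @import("auth.zig"); // assumed import path for the file above

pub fn main() !void {
    // null path: use $XAUTHORITY if set, otherwise $HOME/.Xauthority
    const cookie = try auth.getCookie(null);
    for (cookie) |byte| std.debug.print("{x:0>2}", .{byte});
    std.debug.print("\n", .{});
}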
const std = @import("std"); const ascii = std.ascii; const event = std.event; const fs = std.fs; const log = std.log; const math = std.math; const mem = std.mem; const os = std.os; const process = std.process; const Message = @import("../message.zig").Message; pub fn bspwm(channel: *event.Channel(Message)) void { const loop = event.Loop.instance.?; // TODO: Don't hardcode path to bspwm socket const sock_addr = os.sockaddr_un{ .path = ("/tmp/bspwm_0_0-socket" ++ "\x00" ** 87).* }; const socket = os.socket(os.AF_UNIX, os.SOCK_STREAM, 0) catch |err| { return log.err("Failed to get bspwm socket: {}", .{err}); }; defer os.close(socket); loop.connect(socket, @ptrCast(*const os.sockaddr, &sock_addr), @sizeOf(os.sockaddr_un)) catch |err| { return log.err("Failed to connect to bspwm socket: {}", .{err}); }; _ = loop.sendto(socket, "subscribe\x00report\x00", 0, null, 0) catch |err| { return log.err("Failed to subscribe to bspwm reports: {}", .{err}); }; var buf: [1024]u8 = undefined; while (true) { const len = loop.recvfrom(socket, &buf, 0, null, null) catch |err| { log.warn("Failed to read data from bspwm socket: {}", .{err}); continue; }; const lines = buf[1..len]; // Remove leading 'W' var it = mem.tokenize(lines, "\n"); while (it.next()) |line| { log.debug("bspwm report: {s}", .{line}); procesLine(line, channel); } } } fn procesLine(line: []const u8, channel: *event.Channel(Message)) void { var curr_monitor: usize = 0; var next_monitor: usize = 0; var curr_workspace: usize = 0; var it = mem.tokenize(line, ":"); while (it.next()) |item| { var name: [7:0]u8 = [1:0]u8{0} ** 7; mem.copy(u8, &name, item[0..math.min(item.len, 7)]); switch (item[0]) { 'm', 'M' => { curr_workspace = 0; curr_monitor = next_monitor; next_monitor += 1; }, 'O', 'o', 'F', 'f', 'U', 'u' => { channel.put(.{ .workspace = .{ .id = curr_workspace, .monitor_id = curr_monitor, .flags = .{ .focused = ascii.isUpper(item[0]), .occupied = ascii.toUpper(item[0]) != 'F', }, }, }); curr_workspace += 1; }, else => {}, } } }
src/producer/bspwm.zig
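A hedged sketch, not part of src/producer/bspwm.zig: the per-token flag classification done inside procesLine, pulled out into a pure function so the report semantics ('O'/'o' occupied, 'F'/'f' free, 'U'/'u' urgent, upper case meaning focused) are easy to test. The function name and sample tokens are made up.

const std = @import("std");
const ascii = std.ascii;

const WorkspaceFlags = struct { focused: bool, occupied: bool };

fn classifyToken(item: []const u8) ?WorkspaceFlags {
    return switch (item[0]) {
        'O', 'o', 'F', 'f', 'U', 'u' => WorkspaceFlags{
            .focused = ascii.isUpper(item[0]),
            .occupied = ascii.toUpper(item[0]) != 'F',
        },
        else => null, // monitor markers ('m'/'M') and layout fields carry no workspace flags
    };
}

test "bspwm report tokens map to workspace flags" {
    try std.testing.expect(classifyToken("Oweb").?.focused);
    try std.testing.expect(classifyToken("Oweb").?.occupied);
    try std.testing.expect(!classifyToken("fterm").?.focused);
    try std.testing.expect(!classifyToken("fterm").?.occupied);
    try std.testing.expect(classifyToken("m0") == null);
}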
const builtin = @import("builtin"); const endian = builtin.cpu.arch.endian(); const testing = @import("std").testing; const ptr_size = @sizeOf(usize); test "type pun signed and unsigned as single pointer" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO comptime { var x: u32 = 0; const y = @ptrCast(*i32, &x); y.* = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); } } test "type pun signed and unsigned as many pointer" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO comptime { var x: u32 = 0; const y = @ptrCast([*]i32, &x); y[0] = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); } } test "type pun signed and unsigned as array pointer" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO comptime { var x: u32 = 0; const y = @ptrCast(*[1]i32, &x); y[0] = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); } } test "type pun signed and unsigned as offset many pointer" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend != .stage1) { // TODO https://github.com/ziglang/zig/issues/9646 return error.SkipZigTest; } comptime { var x: u32 = 0; var y = @ptrCast([*]i32, &x); y -= 10; y[10] = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); } } test "type pun signed and unsigned as array pointer" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend != .stage1) { // TODO https://github.com/ziglang/zig/issues/9646 return error.SkipZigTest; } comptime { var x: u32 = 0; const y = @ptrCast([*]i32, &x) - 10; const z: *[15]i32 = y[0..15]; z[10] = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), x); } } test "type pun value and struct" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO comptime { const StructOfU32 = extern struct { x: u32 }; var inst: StructOfU32 = .{ .x = 0 }; @ptrCast(*i32, &inst.x).* = -1; try testing.expectEqual(@as(u32, 0xFFFFFFFF), inst.x); @ptrCast(*i32, &inst).* = -2; try testing.expectEqual(@as(u32, 0xFFFFFFFE), inst.x); } } fn bigToNativeEndian(comptime T: type, v: T) T { return if (endian == .Big) v else @byteSwap(T, v); } test "type pun endianness" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO comptime { const StructOfBytes = extern struct { x: [4]u8 }; var inst: StructOfBytes = .{ .x = [4]u8{ 0, 0, 0, 0 } }; const structPtr = @ptrCast(*align(1) u32, &inst); const arrayPtr = @ptrCast(*align(1) u32, &inst.x); inst.x[0] = 0xFE; inst.x[2] = 0xBE; try testing.expectEqual(bigToNativeEndian(u32, 0xFE00BE00), structPtr.*); try testing.expectEqual(bigToNativeEndian(u32, 0xFE00BE00), arrayPtr.*); structPtr.* = bigToNativeEndian(u32, 0xDEADF00D); try testing.expectEqual(bigToNativeEndian(u32, 0xDEADF00D), structPtr.*); try testing.expectEqual(bigToNativeEndian(u32, 0xDEADF00D), arrayPtr.*); try testing.expectEqual(@as(u8, 0xDE), inst.x[0]); try testing.expectEqual(@as(u8, 0xAD), inst.x[1]); try testing.expectEqual(@as(u8, 0xF0), inst.x[2]); try testing.expectEqual(@as(u8, 0x0D), inst.x[3]); } } const Bits 
= packed struct { // Note: This struct has only single byte words so it // doesn't need to be byte swapped. p0: u1, p1: u4, p2: u3, p3: u2, p4: u6, p5: u8, p6: u7, p7: u1, }; const ShuffledBits = packed struct { p1: u4, p3: u2, p7: u1, p0: u1, p5: u8, p2: u3, p6: u7, p4: u6, }; fn shuffle(ptr: usize, comptime From: type, comptime To: type) usize { if (@sizeOf(From) != @sizeOf(To)) @compileError("Mismatched sizes! " ++ @typeName(From) ++ " and " ++ @typeName(To) ++ " must have the same size!"); const array_len = @divExact(ptr_size, @sizeOf(From)); var result: usize = 0; const pSource = @ptrCast(*align(1) const [array_len]From, &ptr); const pResult = @ptrCast(*align(1) [array_len]To, &result); var i: usize = 0; while (i < array_len) : (i += 1) { inline for (@typeInfo(To).Struct.fields) |f| { @field(pResult[i], f.name) = @field(pSource[i], f.name); } } return result; } fn doTypePunBitsTest(as_bits: *Bits) !void { const as_u32 = @ptrCast(*align(1) u32, as_bits); const as_bytes = @ptrCast(*[4]u8, as_bits); as_u32.* = bigToNativeEndian(u32, 0xB0A7DEED); try testing.expectEqual(@as(u1, 0x00), as_bits.p0); try testing.expectEqual(@as(u4, 0x08), as_bits.p1); try testing.expectEqual(@as(u3, 0x05), as_bits.p2); try testing.expectEqual(@as(u2, 0x03), as_bits.p3); try testing.expectEqual(@as(u6, 0x29), as_bits.p4); try testing.expectEqual(@as(u8, 0xDE), as_bits.p5); try testing.expectEqual(@as(u7, 0x6D), as_bits.p6); try testing.expectEqual(@as(u1, 0x01), as_bits.p7); as_bits.p6 = 0x2D; as_bits.p1 = 0x0F; try testing.expectEqual(bigToNativeEndian(u32, 0xBEA7DEAD), as_u32.*); // clobbering one bit doesn't clobber the word as_bits.p7 = undefined; try testing.expectEqual(@as(u7, 0x2D), as_bits.p6); // even when read as a whole const u = as_u32.*; _ = u; // u is undefined try testing.expectEqual(@as(u7, 0x2D), as_bits.p6); // or if a field which shares the byte is modified as_bits.p6 = 0x6D; try testing.expectEqual(@as(u7, 0x6D), as_bits.p6); // but overwriting the undefined will clear it as_bytes[3] = 0xAF; try testing.expectEqual(bigToNativeEndian(u32, 0xBEA7DEAF), as_u32.*); } test "type pun bits" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend != .stage1) { // TODO https://github.com/ziglang/zig/issues/9646 return error.SkipZigTest; } comptime { var v: u32 = undefined; try doTypePunBitsTest(@ptrCast(*Bits, &v)); } } const imports = struct { var global_u32: u32 = 0; }; // Make sure lazy values work on their own, before getting into more complex tests test "basic pointer preservation" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend != .stage1) { // TODO https://github.com/ziglang/zig/issues/9646 return error.SkipZigTest; } comptime { const lazy_address = @ptrToInt(&imports.global_u32); try testing.expectEqual(@ptrToInt(&imports.global_u32), lazy_address); try testing.expectEqual(&imports.global_u32, @intToPtr(*u32, lazy_address)); } } test "byte copy preserves linker value" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend != .stage1) { // TODO https://github.com/ziglang/zig/issues/9646 return error.SkipZigTest; } const ct_value = comptime blk: { const lazy = &imports.global_u32; var result: *u32 = undefined; const pSource = @ptrCast(*const [ptr_size]u8, &lazy); const pResult = @ptrCast(*[ptr_size]u8, &result); var i: usize = 0; while (i < ptr_size) : (i += 1) { pResult[i] = pSource[i]; try testing.expectEqual(pSource[i], pResult[i]); } try testing.expectEqual(&imports.global_u32, 
result); break :blk result; }; try testing.expectEqual(&imports.global_u32, ct_value); } test "unordered byte copy preserves linker value" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend != .stage1) { // TODO https://github.com/ziglang/zig/issues/9646 return error.SkipZigTest; } const ct_value = comptime blk: { const lazy = &imports.global_u32; var result: *u32 = undefined; const pSource = @ptrCast(*const [ptr_size]u8, &lazy); const pResult = @ptrCast(*[ptr_size]u8, &result); if (ptr_size > 8) @compileError("This array needs to be expanded for platform with very big pointers"); const shuffled_indices = [_]usize{ 4, 5, 2, 6, 1, 3, 0, 7 }; for (shuffled_indices) |i| { pResult[i] = pSource[i]; try testing.expectEqual(pSource[i], pResult[i]); } try testing.expectEqual(&imports.global_u32, result); break :blk result; }; try testing.expectEqual(&imports.global_u32, ct_value); } test "shuffle chunks of linker value" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend != .stage1) { // TODO https://github.com/ziglang/zig/issues/9646 return error.SkipZigTest; } const lazy_address = @ptrToInt(&imports.global_u32); const shuffled1_rt = shuffle(lazy_address, Bits, ShuffledBits); const unshuffled1_rt = shuffle(shuffled1_rt, ShuffledBits, Bits); try testing.expectEqual(lazy_address, unshuffled1_rt); const shuffled1_ct = comptime shuffle(lazy_address, Bits, ShuffledBits); const shuffled1_ct_2 = comptime shuffle(lazy_address, Bits, ShuffledBits); comptime try testing.expectEqual(shuffled1_ct, shuffled1_ct_2); const unshuffled1_ct = comptime shuffle(shuffled1_ct, ShuffledBits, Bits); comptime try testing.expectEqual(lazy_address, unshuffled1_ct); try testing.expectEqual(shuffled1_ct, shuffled1_rt); } test "dance on linker values" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend != .stage1) { // TODO https://github.com/ziglang/zig/issues/9646 return error.SkipZigTest; } comptime { var arr: [2]usize = undefined; arr[0] = @ptrToInt(&imports.global_u32); arr[1] = @ptrToInt(&imports.global_u32); const weird_ptr = @ptrCast([*]Bits, @ptrCast([*]u8, &arr) + @sizeOf(usize) - 3); try doTypePunBitsTest(&weird_ptr[0]); if (ptr_size > @sizeOf(Bits)) try doTypePunBitsTest(&weird_ptr[1]); var arr_bytes = @ptrCast(*[2][ptr_size]u8, &arr); var rebuilt_bytes: [ptr_size]u8 = undefined; var i: usize = 0; while (i < ptr_size - 3) : (i += 1) { rebuilt_bytes[i] = arr_bytes[0][i]; } while (i < ptr_size) : (i += 1) { rebuilt_bytes[i] = arr_bytes[1][i]; } try testing.expectEqual(&imports.global_u32, @intToPtr(*u32, @bitCast(usize, rebuilt_bytes))); } } test "offset array ptr by element size" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend != .stage1) { // TODO https://github.com/ziglang/zig/issues/9646 return error.SkipZigTest; } comptime { const VirtualStruct = struct { x: u32 }; var arr: [4]VirtualStruct = .{ .{ .x = bigToNativeEndian(u32, 0x0004080c) }, .{ .x = bigToNativeEndian(u32, 0x0105090d) }, .{ .x = bigToNativeEndian(u32, 0x02060a0e) }, .{ .x = bigToNativeEndian(u32, 0x03070b0f) }, }; const address = @ptrToInt(&arr); try testing.expectEqual(@ptrToInt(&arr[0]), address); try testing.expectEqual(@ptrToInt(&arr[0]) + 10, address + 10); try testing.expectEqual(@ptrToInt(&arr[1]), address + @sizeOf(VirtualStruct)); try testing.expectEqual(@ptrToInt(&arr[2]), address + 2 * @sizeOf(VirtualStruct)); try testing.expectEqual(@ptrToInt(&arr[3]), address + @sizeOf(VirtualStruct) * 
3); const secondElement = @intToPtr(*VirtualStruct, @ptrToInt(&arr[0]) + 2 * @sizeOf(VirtualStruct)); try testing.expectEqual(bigToNativeEndian(u32, 0x02060a0e), secondElement.x); } } test "offset instance by field size" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend != .stage1) { // TODO https://github.com/ziglang/zig/issues/9646 return error.SkipZigTest; } comptime { const VirtualStruct = struct { x: u32, y: u32, z: u32, w: u32 }; var inst = VirtualStruct{ .x = 0, .y = 1, .z = 2, .w = 3 }; var ptr = @ptrToInt(&inst); ptr -= 4; ptr += @offsetOf(VirtualStruct, "x"); try testing.expectEqual(@as(u32, 0), @intToPtr([*]u32, ptr)[1]); ptr -= @offsetOf(VirtualStruct, "x"); ptr += @offsetOf(VirtualStruct, "y"); try testing.expectEqual(@as(u32, 1), @intToPtr([*]u32, ptr)[1]); ptr = ptr - @offsetOf(VirtualStruct, "y") + @offsetOf(VirtualStruct, "z"); try testing.expectEqual(@as(u32, 2), @intToPtr([*]u32, ptr)[1]); ptr = @ptrToInt(&inst.z) - 4 - @offsetOf(VirtualStruct, "z"); ptr += @offsetOf(VirtualStruct, "w"); try testing.expectEqual(@as(u32, 3), @intToPtr(*u32, ptr + 4).*); } } test "offset field ptr by enclosing array element size" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend != .stage1) { // TODO https://github.com/ziglang/zig/issues/9646 return error.SkipZigTest; } comptime { const VirtualStruct = struct { x: u32 }; var arr: [4]VirtualStruct = .{ .{ .x = bigToNativeEndian(u32, 0x0004080c) }, .{ .x = bigToNativeEndian(u32, 0x0105090d) }, .{ .x = bigToNativeEndian(u32, 0x02060a0e) }, .{ .x = bigToNativeEndian(u32, 0x03070b0f) }, }; var i: usize = 0; while (i < 4) : (i += 1) { var ptr: [*]u8 = @ptrCast([*]u8, &arr[0]); ptr += i; ptr += @offsetOf(VirtualStruct, "x"); var j: usize = 0; while (j < 4) : (j += 1) { const base = ptr + j * @sizeOf(VirtualStruct); try testing.expectEqual(@intCast(u8, i * 4 + j), base[0]); } } } } test "accessing reinterpreted memory of parent object" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = extern struct { a: f32, b: [4]u8, c: f32, }; const expected = if (endian == .Little) 102 else 38; comptime { const x = S{ .a = 1.5, .b = [_]u8{ 1, 2, 3, 4 }, .c = 2.6, }; const ptr = &x.b[0]; const b = @ptrCast([*c]const u8, ptr)[5]; try testing.expect(b == expected); } }
test/behavior/comptime_memory.zig
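A hedged standalone sketch mirroring the bigToNativeEndian helper used throughout the tests above (same pre-0.11 builtin forms as the file, e.g. two-argument @byteSwap and @ptrCast): it shows why expectations are written as big-endian constants and then swapped to native byte order.

const std = @import("std");
const native_endian = @import("builtin").cpu.arch.endian();

fn bigToNative(comptime T: type, v: T) T {
    return if (native_endian == .Big) v else @byteSwap(T, v);
}

test "big-endian constants match the in-memory byte order" {
    const bytes = [4]u8{ 0xDE, 0xAD, 0xF0, 0x0D };
    // Read the four bytes as one unaligned u32 in whatever order the host uses.
    const word = @ptrCast(*align(1) const u32, &bytes).*;
    try std.testing.expectEqual(bigToNative(u32, 0xDEADF00D), word);
}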
//-------------------------------------------------------------------------------- // Section: Types (1) //-------------------------------------------------------------------------------- pub const REGISTER_APPLICATION_RESTART_FLAGS = enum(u32) { CRASH = 1, HANG = 2, PATCH = 4, REBOOT = 8, _, pub fn initFlags(o: struct { CRASH: u1 = 0, HANG: u1 = 0, PATCH: u1 = 0, REBOOT: u1 = 0, }) REGISTER_APPLICATION_RESTART_FLAGS { return @intToEnum(REGISTER_APPLICATION_RESTART_FLAGS, (if (o.CRASH == 1) @enumToInt(REGISTER_APPLICATION_RESTART_FLAGS.CRASH) else 0) | (if (o.HANG == 1) @enumToInt(REGISTER_APPLICATION_RESTART_FLAGS.HANG) else 0) | (if (o.PATCH == 1) @enumToInt(REGISTER_APPLICATION_RESTART_FLAGS.PATCH) else 0) | (if (o.REBOOT == 1) @enumToInt(REGISTER_APPLICATION_RESTART_FLAGS.REBOOT) else 0) ); } }; pub const RESTART_NO_CRASH = REGISTER_APPLICATION_RESTART_FLAGS.CRASH; pub const RESTART_NO_HANG = REGISTER_APPLICATION_RESTART_FLAGS.HANG; pub const RESTART_NO_PATCH = REGISTER_APPLICATION_RESTART_FLAGS.PATCH; pub const RESTART_NO_REBOOT = REGISTER_APPLICATION_RESTART_FLAGS.REBOOT; //-------------------------------------------------------------------------------- // Section: Functions (8) //-------------------------------------------------------------------------------- // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn RegisterApplicationRecoveryCallback( pRecoveyCallback: ?APPLICATION_RECOVERY_CALLBACK, pvParameter: ?*c_void, dwPingInterval: u32, dwFlags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn UnregisterApplicationRecoveryCallback( ) callconv(@import("std").os.windows.WINAPI) HRESULT; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn RegisterApplicationRestart( pwzCommandline: ?[*:0]const u16, dwFlags: REGISTER_APPLICATION_RESTART_FLAGS, ) callconv(@import("std").os.windows.WINAPI) HRESULT; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn UnregisterApplicationRestart( ) callconv(@import("std").os.windows.WINAPI) HRESULT; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn GetApplicationRecoveryCallback( hProcess: ?HANDLE, pRecoveryCallback: ?*?APPLICATION_RECOVERY_CALLBACK, ppvParameter: ?*?*c_void, pdwPingInterval: ?*u32, pdwFlags: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn GetApplicationRestartSettings( hProcess: ?HANDLE, pwzCommandline: ?[*:0]u16, pcchSize: ?*u32, pdwFlags: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn ApplicationRecoveryInProgress( pbCancelled: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT; // TODO: this type is limited to platform 'windows6.0.6000' pub extern "KERNEL32" fn ApplicationRecoveryFinished( bSuccess: BOOL, ) callconv(@import("std").os.windows.WINAPI) void; //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; 
//-------------------------------------------------------------------------------- // Section: Imports (5) //-------------------------------------------------------------------------------- const APPLICATION_RECOVERY_CALLBACK = @import("../system/windows_programming.zig").APPLICATION_RECOVERY_CALLBACK; const BOOL = @import("../foundation.zig").BOOL; const HANDLE = @import("../foundation.zig").HANDLE; const HRESULT = @import("../foundation.zig").HRESULT; const PWSTR = @import("../foundation.zig").PWSTR; test { @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
deps/zigwin32/win32/system/recovery.zig
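A hedged sketch, not part of the generated bindings and only meaningful inside the zigwin32 tree: how the generated initFlags helper ORs restart flags together before a RegisterApplicationRestart call. The import path is an assumption.

const std = @import("std");
const recovery = @import("recovery.zig"); // assumed import path for the file above

test "initFlags ORs the selected restart flags together" {
    const flags = recovery.REGISTER_APPLICATION_RESTART_FLAGS.initFlags(.{ .CRASH = 1, .HANG = 1 });
    // CRASH (1) | HANG (2) == 3, i.e. RESTART_NO_CRASH | RESTART_NO_HANG
    try std.testing.expectEqual(@as(u32, 3), @enumToInt(flags));
}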
const std = @import("std"); const u = @import("util.zig"); const File = @import("File.zig"); const IR = @import("IR.zig"); const Wat = @import("Wat.zig"); const Wasm = @import("Wasm.zig"); allocator: std.mem.Allocator, trace: std.ArrayListUnmanaged(u.Txt) = .{}, errAt: fn (arg: ErrArg) void, const Self = @This(); pub const ErrArg = union(File.Type) { text: struct { point: File.ErrPoint, data: Wat.ErrData = null, }, wasm: struct { kind: anyerror, at: usize, file: *const File.Wasm, }, }; pub fn load(self: *Self, entry: u.Txt) !IR.Module { //TODO: cwd from trace const file = try File.read(entry, self.allocator); defer file.deinit(); return switch (file) { .text => |text| switch (text.tryRead()) { .ok => |exprs| switch (Wat.tryLoad(exprs.list(), self.allocator, self)) { .ok => |m| m, .err => |err| { if (err.at != null and err.at.?.at != null) { self.errAt(.{ .text = .{ .point = .{ .kind = err.kind, .file = &text, .at = err.at.?.at.?.offset }, .data = err.data } }); } return err.kind; }, }, .err => |err| { self.errAt(.{ .text = .{ .point = err } }); return err.kind; }, }, .wasm => |wasm| switch (Wasm.tryLoad(wasm.bytes, self.allocator)) { .ok => |m| m, .err => |err| { self.errAt(.{ .wasm = .{ .kind = err.kind, .file = &wasm, .at = err.at } }); return err.kind; }, }, }; } pub fn writeWasm(m: IR.Module, writer: anytype) !void { return Wasm.emit(m, writer, .{}); } pub fn writeText(m: IR.Module, writer: anytype, alloc: std.mem.Allocator, fmt: @import("Expr.zig").Format) !void { const wat = try Wat.emit(m, alloc, .{}); defer wat.deinit(); try wat.val.print(fmt, writer); try writer.writeByte('\n'); }
src/Loader.zig
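A hedged caller sketch for src/Loader.zig, not from the repository: reportErr is a hypothetical callback matching the errAt field, the path parameter assumes u.Txt is a plain byte slice, and cleanup of the returned IR.Module is omitted.

const std = @import("std");
const Loader = @import("Loader.zig");

fn reportErr(arg: Loader.ErrArg) void {
    _ = arg; // a real caller would pretty-print the error location here
}

// Load a module from `path` (text or wasm, detected by File.read) and emit it as wasm.
fn loadAndEmit(allocator: std.mem.Allocator, path: []const u8, writer: anytype) !void {
    var loader = Loader{ .allocator = allocator, .errAt = reportErr };
    const module = try loader.load(path);
    try Loader.writeWasm(module, writer);
}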
const combn = @import("../combn/combn.zig"); const Result = combn.Result; const Parser = combn.Parser; const Error = combn.Error; const Context = combn.Context; const ParserPosKey = combn.ParserPosKey; const ParserPath = combn.ParserPath; const Sequence = combn.Sequence; const SequenceValue = combn.SequenceValue; const Repeated = combn.Repeated; const RepeatedValue = combn.RepeatedValue; const Literal = combn.Literal; const LiteralValue = combn.LiteralValue; const OneOf = combn.OneOf; const MapTo = combn.MapTo; const Optional = combn.Optional; const Always = combn.Always; const String = @import("String.zig"); const Node = @import("Node.zig"); const Compilation = @import("Compilation.zig"); const Identifier = @import("identifier.zig").Identifier; const CompilerContext = @import("CompilerContext.zig"); const std = @import("std"); const testing = std.testing; const mem = std.mem; const assert = std.debug.assert; fn mapLiteralToNone(in: Result(LiteralValue), compiler_context: *CompilerContext, _allocator: *mem.Allocator, key: ParserPosKey, path: ParserPath) Error!?Result(?Compilation) { return switch (in.result) { .err => Result(?Compilation).initError(in.offset, in.result.err), else => Result(?Compilation).init(in.offset, null), }; } /// Maps a SequenceValue(*Node) -> singular *Node with no name and children (each of the nodes in /// the sequence.) fn mapNodeSequence(in: Result(SequenceValue(*Node)), program_context: void, _allocator: *mem.Allocator, key: ParserPosKey, path: ParserPath) Error!?Result(*Node) { switch (in.result) { .err => return Result(*Node).initError(in.offset, in.result.err), else => { var sequence = in.result.value; // Collect all the children nodes. var children = std.ArrayList(*Node).init(_allocator); errdefer children.deinit(); var sub = sequence.results.subscribe(key, path, Result(*Node).initError(in.offset, "matches only the empty language")); var offset = in.offset; while (sub.next()) |next| { offset = next.offset; try children.append(next.result.value.ref()); } const node = try Node.init(_allocator, String.init("unknown"), null); node.children = children.toOwnedSlice(); return Result(*Node).init(in.offset, node); }, } } /// Maps a SequenceValue(?Compilation) -> singular ?Compilation which parses all compilations in sequence, /// emitting a single unnamed Node with children. fn mapCompilationSequence(in: Result(SequenceValue(?Compilation)), compiler_context: *CompilerContext, _allocator: *mem.Allocator, key: ParserPosKey, path: ParserPath) Error!?Result(?Compilation) { switch (in.result) { .err => return Result(?Compilation).initError(in.offset, in.result.err), else => { var sequence = in.result.value; // Collect all the parser compilations. var parsers = std.ArrayList(*Parser(void, *Node)).init(_allocator); var sub = sequence.results.subscribe(key, path, Result(?Compilation).initError(in.offset, "matches only the empty language")); var offset = in.offset; while (sub.next()) |next| { offset = next.offset; const compilation = next.result.value; if (compilation) |c| { try parsers.append(c.value.parser.ptr.ref()); } } var slice = parsers.toOwnedSlice(); // Build a parser which maps the many Parser(void, *Node) compilations into a // single Parser(void, *Node) which has each node as a child. 
var sequence_compilation = Sequence(void, *Node).init(slice); var mapped = MapTo(void, SequenceValue(*Node), *Node).init(.{ .parser = try sequence_compilation.parser.heapAlloc(_allocator, sequence_compilation), .mapTo = mapNodeSequence, }); var result_compilation = Compilation.initParser(Compilation.CompiledParser{ .ptr = try mapped.parser.heapAlloc(_allocator, mapped), .slice = slice, }); return Result(?Compilation).init(offset, result_compilation); }, } } pub const CompilerResult = struct { compilation: Result(Compilation), ctx: Context(*CompilerContext, Compilation), compilerContext: *CompilerContext, pub fn deinit(self: *const @This(), allocator: *mem.Allocator) void { self.ctx.deinit(); self.compilerContext.deinit(allocator); } }; pub fn compile(allocator: *mem.Allocator, syntax: []const u8) !CompilerResult { // DSL grammar // // ```ebnf // Newline = "\r\n" | "\r" | "\n" ; // Space = " " | "\t" ; // Whitespace = Newline | Space ; // Assignment = "=" ; // Semicolon = ";" ; // Identifier = /[A-Z][[:alnum:]_]*/ // RegExpr = "/", RegexpGrammar, "/" ; // Expr = RegExpr | Identifier ; // ExprList = (ExprList, ",")? , Expr ; // Definition = Identifier , Whitespace+, Assignment, Whitespace+, ExprList, Semicolon ; // Grammar = (Definition | Expr | Whitespace+)+, EOF ; // ``` // var newline = MapTo(*CompilerContext, LiteralValue, ?Compilation).init(.{ .parser = (&OneOf(*CompilerContext, LiteralValue).init(&.{ (&Literal(*CompilerContext).init("\r\n").parser).ref(), (&Literal(*CompilerContext).init("\r").parser).ref(), (&Literal(*CompilerContext).init("\n").parser).ref(), }).parser).ref(), .mapTo = mapLiteralToNone, }); var space = MapTo(*CompilerContext, LiteralValue, ?Compilation).init(.{ .parser = (&OneOf(*CompilerContext, LiteralValue).init(&.{ (&Literal(*CompilerContext).init(" ").parser).ref(), (&Literal(*CompilerContext).init("\t").parser).ref(), }).parser).ref(), .mapTo = mapLiteralToNone, }); var whitespace = OneOf(*CompilerContext, ?Compilation).init(&.{ (&newline.parser).ref(), (&space.parser).ref(), }); var whitespace_one_or_more = MapTo(*CompilerContext, RepeatedValue(?Compilation), ?Compilation).init(.{ .parser = (&Repeated(*CompilerContext, ?Compilation).init(.{ .parser = (&whitespace.parser).ref(), .min = 1, .max = -1, }).parser).ref(), .mapTo = struct { fn mapTo(in: Result(RepeatedValue(?Compilation)), compiler_context: *CompilerContext, _allocator: *mem.Allocator, key: ParserPosKey, path: ParserPath) callconv(.Async) Error!?Result(?Compilation) { switch (in.result) { .err => return Result(?Compilation).initError(in.offset, in.result.err), else => { // optimization: newline and space parsers produce no compilations, so no // need for us to pay any attention to repeated results. 
return Result(?Compilation).init(in.offset, null); }, } } }.mapTo, }); var assignment = MapTo(*CompilerContext, LiteralValue, ?Compilation).init(.{ .parser = (&Literal(*CompilerContext).init("=").parser).ref(), .mapTo = mapLiteralToNone, }); var semicolon = MapTo(*CompilerContext, LiteralValue, ?Compilation).init(.{ .parser = (&Literal(*CompilerContext).init(";").parser).ref(), .mapTo = mapLiteralToNone, }); var forward_slash = MapTo(*CompilerContext, LiteralValue, ?Compilation).init(.{ .parser = (&Literal(*CompilerContext).init("/").parser).ref(), .mapTo = mapLiteralToNone, }); var reg_expr = MapTo(*CompilerContext, SequenceValue(?Compilation), ?Compilation).init(.{ .parser = (&Sequence(*CompilerContext, ?Compilation).init(&.{ (&forward_slash.parser).ref(), // TODO(slimsag): define the regular expression grammar! //(&reg_expr_grammar.parser).ref(), (&forward_slash.parser).ref(), }).parser).ref(), .mapTo = struct { fn mapTo(in: Result(SequenceValue(?Compilation)), compiler_context: *CompilerContext, _allocator: *mem.Allocator, key: ParserPosKey, path: ParserPath) callconv(.Async) Error!?Result(?Compilation) { switch (in.result) { .err => return Result(?Compilation).initError(in.offset, in.result.err), else => { var sequence = in.result.value; // TODO(slimsag): actually compose the compilation to parse this regexp! const node = try Node.init(_allocator, String.init("TODO(slimsag): value from parsing regexp!"), null); const success = Result(*Node).init(in.offset, node); var always_success = Always(void, *Node).init(success); var result_compilation = Compilation.initParser(Compilation.CompiledParser{ .ptr = try always_success.parser.heapAlloc(_allocator, always_success), .slice = null, }); return Result(?Compilation).init(in.offset, result_compilation); }, } } }.mapTo, }); var identifier_expr = MapTo(*CompilerContext, ?Compilation, ?Compilation).init(.{ .parser = (&Identifier.init().parser).ref(), .mapTo = struct { fn mapTo(in: Result(?Compilation), compiler_context: *CompilerContext, _allocator: *mem.Allocator, key: ParserPosKey, path: ParserPath) callconv(.Async) Error!?Result(?Compilation) { switch (in.result) { .err => return Result(?Compilation).initError(in.offset, in.result.err), else => { // Lookup this identifier, which was previously defined. // TODO(slimsag): make it possible to reference future-definitions? var compilation = compiler_context.identifiers.get(in.result.value.?); if (compilation == null) { // TODO(slimsag): include name of definition that was not found in error. return Result(?Compilation).initError(in.offset, "definition not found"); } return Result(?Compilation).init(in.offset, compilation.?).toUnowned(); }, } } }.mapTo, }); var expr = OneOf(*CompilerContext, ?Compilation).init(&.{ (&reg_expr.parser).ref(), (&identifier_expr.parser).ref(), }); // ExprList = (ExprList, ",")? 
, Expr ; var expr_list_parsers = [_]*Parser(*CompilerContext, ?Compilation){ undefined, // placeholder for left-recursive `(ExprList, ",")?` (&expr.parser).ref(), }; var expr_list = MapTo(*CompilerContext, SequenceValue(?Compilation), ?Compilation).init(.{ .parser = (&Sequence(*CompilerContext, ?Compilation).init(&expr_list_parsers).parser).ref(), .mapTo = mapCompilationSequence, }); // (ExprList, ",") var comma = MapTo(*CompilerContext, LiteralValue, ?Compilation).init(.{ .parser = (&Literal(*CompilerContext).init(",").parser).ref(), .mapTo = mapLiteralToNone, }); var expr_list_inner_left = MapTo(*CompilerContext, SequenceValue(?Compilation), ?Compilation).init(.{ .parser = (&Sequence(*CompilerContext, ?Compilation).init(&.{ (&expr_list.parser).ref(), (&comma.parser).ref(), }).parser).ref(), .mapTo = struct { fn mapTo(in: Result(SequenceValue(?Compilation)), compiler_context: *CompilerContext, _allocator: *mem.Allocator, key: ParserPosKey, path: ParserPath) callconv(.Async) Error!?Result(?Compilation) { switch (in.result) { .err => return Result(?Compilation).initError(in.offset, in.result.err), else => { var sequence = in.result.value; var sub = sequence.results.subscribe(key, path, Result(?Compilation).initError(in.offset, "matches only the empty language")); var _expr_list = sub.next().?; _ = sub.next().?; // non-capturing compilation for comma assert(sub.next() == null); return _expr_list.toUnowned(); }, } } }.mapTo, }); var optional_expr_list_inner_left = MapTo(*CompilerContext, ??Compilation, ?Compilation).init(.{ .parser = (&Optional(*CompilerContext, ?Compilation).init((&expr_list_inner_left.parser).ref()).parser).ref(), .mapTo = struct { fn mapTo(in: Result(??Compilation), compiler_context: *CompilerContext, _allocator: *mem.Allocator, key: ParserPosKey, path: ParserPath) callconv(.Async) Error!?Result(?Compilation) { switch (in.result) { .err => return Result(?Compilation).initError(in.offset, in.result.err), else => { if (in.result.value == null) { return Result(?Compilation).init(in.offset, null); } return Result(?Compilation).init(in.offset, in.result.value.?).toUnowned(); }, } } }.mapTo, }); expr_list_parsers[0] = (&optional_expr_list_inner_left.parser).ref(); var definition = MapTo(*CompilerContext, SequenceValue(?Compilation), ?Compilation).init(.{ .parser = (&Sequence(*CompilerContext, ?Compilation).init(&.{ (&Identifier.init().parser).ref(), (&whitespace_one_or_more.parser).ref(), (&assignment.parser).ref(), (&whitespace_one_or_more.parser).ref(), (&expr_list.parser).ref(), (&semicolon.parser).ref(), }).parser).ref(), .mapTo = struct { fn mapTo(in: Result(SequenceValue(?Compilation)), compiler_context: *CompilerContext, _allocator: *mem.Allocator, key: ParserPosKey, path: ParserPath) callconv(.Async) Error!?Result(?Compilation) { switch (in.result) { .err => return Result(?Compilation).initError(in.offset, in.result.err), else => { var sequence = in.result.value; var sub = sequence.results.subscribe(key, path, Result(?Compilation).initError(in.offset, "matches only the empty language")); var identifier = sub.next().?; _ = sub.next().?; // non-capturing compilation for whitespace _ = sub.next().?; // non-capturing compilation for assignment `=` operator _ = sub.next().?; // non-capturing compilation for whitespace var _expr_list = sub.next().?; var last = sub.next().?; // non-capturing compilation for semicolon assert(sub.next() == null); // Set identifier = _expr_list, so that future identifier expressions can // lookup the resulting expression compilation for the 
identifier. const v = try compiler_context.identifiers.getOrPut(identifier.result.value.?); if (v.found_existing) { // TODO(slimsag): include name of definition in error message return Result(?Compilation).initError(last.offset, "definition redefined"); } v.value_ptr.* = _expr_list.result.value.?; // A definition assignment yields no nodes. return Result(?Compilation).init(in.offset, null); }, } } }.mapTo, }); var definition_or_expr_or_whitespace = OneOf(*CompilerContext, ?Compilation).init(&.{ (&definition.parser).ref(), (&expr.parser).ref(), (&whitespace_one_or_more.parser).ref(), }); // TODO(slimsag): match EOF var grammar = MapTo(*CompilerContext, RepeatedValue(?Compilation), Compilation).init(.{ .parser = (&Repeated(*CompilerContext, ?Compilation).init(.{ .parser = (&definition_or_expr_or_whitespace.parser).ref(), .min = 1, .max = -1, }).parser).ref(), .mapTo = struct { fn mapTo(in: Result(RepeatedValue(?Compilation)), compiler_context: *CompilerContext, _allocator: *mem.Allocator, key: ParserPosKey, path: ParserPath) callconv(.Async) Error!?Result(Compilation) { switch (in.result) { .err => return Result(Compilation).initError(in.offset, in.result.err), else => { var repeated = in.result.value; var sub = repeated.results.subscribe(key, path, Result(?Compilation).initError(in.offset, "matches only the empty language")); var offset = in.offset; var compilation: ?Result(Compilation) = null; while (sub.next()) |next| { offset = next.offset; switch (next.result) { .value => |v| { if (v != null) { if (compilation == null) { compilation = Result(Compilation).init(next.offset, v.?); } else { // another parse path yielded a compilation, i.e. our grammar was ambiguous - // and it definitely shouldn't be! unreachable; } } }, .err => |e| return Result(Compilation).initError(offset, e), } } if (compilation == null) { // Grammar does not have a root expression return Result(Compilation).initError(offset, "root expression missing"); } return compilation.?.toUnowned(); }, } } }.mapTo, }); var compilerContext = try CompilerContext.init(allocator); var ctx = try Context(*CompilerContext, Compilation).init(allocator, syntax, compilerContext); try grammar.parser.parse(&ctx); var sub = ctx.subscribe(); var compilation = sub.next(); assert(sub.next() == null); // our grammar is never ambiguous if (compilation == null) { return CompilerResult{ .compilation = Result(Compilation).initError(ctx.offset, "failed to compile"), .compilerContext = compilerContext, .ctx = ctx, }; } return CompilerResult{ .compilation = compilation.?, .compilerContext = compilerContext, .ctx = ctx, }; } test "DSL" { nosuspend { const allocator = testing.allocator; // Compile the regexp. var compilerResult = try compile(allocator, "//"); defer compilerResult.deinit(allocator); switch (compilerResult.compilation.result) { .err => |e| @panic(e), .value => {}, } var program = compilerResult.compilation.result.value; // Run the regexp. var input = "//"; var ctx = try Context(void, *Node).init(allocator, input, {}); defer ctx.deinit(); try program.value.parser.ptr.parse(&ctx); var sub = ctx.subscribe(); var first = sub.next().?; try testing.expectEqualStrings("TODO(slimsag): value from parsing regexp!", first.result.value.name.value); try testing.expectEqual(@as(usize, 0), first.offset); try testing.expect(sub.next() == null); } }
src/dsl/compiler.zig
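A hedged usage sketch for the compile entry point above, not from the repository: the import path is an assumption, and it only distinguishes "compiled" from "failed" by inspecting the result union, the same way the "DSL" test in the file does.

const std = @import("std");
const compiler = @import("compiler.zig"); // assumed import path for the file above

// Returns true if the DSL source compiled into a parser, false if compilation errored.
fn compiles(allocator: *std.mem.Allocator, syntax: []const u8) !bool {
    var result = try compiler.compile(allocator, syntax);
    defer result.deinit(allocator);
    return switch (result.compilation.result) {
        .err => false,
        else => true,
    };
}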
const std = @import("std"); const zloppy = @import("zloppy.zig"); const usage = \\Usage: zloppy [options] [command] [file]... \\ \\ Modifies the input files in-places to silence errors about \\ unused variable/parameters and unreachable code. \\ Arguments can be files or directories, which are searched \\ recursively. \\ \\Options: \\ -h, --help Print this help and exit \\ --stdin Format code from stdin; output to stdout \\ \\Commands: \\ on Enable sloppy mode \\ off Disable sloppy mode \\ ; fn fatal(comptime format: []const u8, args: anytype) noreturn { std.log.err(format, args); std.process.exit(1); } fn fatalOom(err: anyerror) noreturn { switch (err) { error.OutOfMemory => { std.log.err("out of memory, aborting", .{}); std.process.exit(2); }, else => unreachable, } } fn logErr(err: anyerror, comptime format: []const u8, args: anytype) void { std.log.err(format ++ ": {s}", args ++ .{@errorName(err)}); } const Params = struct { cmd: Cmd, stdin: bool, input_paths: std.ArrayList([]const u8), const Cmd = enum { on, off, }; }; fn parseParams(gpa: std.mem.Allocator, args: [][:0]const u8) Params { var params = Params{ .cmd = undefined, .stdin = false, .input_paths = std.ArrayList([]const u8).init(gpa), }; { var i: usize = 0; // options while (i < args.len and std.mem.startsWith(u8, args[i], "-")) : (i += 1) { const arg = args[i]; if (std.mem.eql(u8, arg, "-h") or std.mem.eql(u8, arg, "--help")) { std.debug.print("{s}", .{usage}); std.process.exit(0); } else if (std.mem.eql(u8, arg, "--stdin")) { params.stdin = true; } else { fatal("unrecognized parameter: '{s}'", .{arg}); } } if (args.len == 0) { fatal("expected command argument", .{}); } else if (i >= args.len) { fatal("expected command argument after '{s}'", .{args[i - 1]}); } // cmd const cmd_type = args[i]; if (std.mem.eql(u8, cmd_type, "on")) { params.cmd = .on; } else if (std.mem.eql(u8, cmd_type, "off")) { params.cmd = .off; } else { fatal("unrecognized command: '{s}'", .{cmd_type}); } i += 1; // files while (i < args.len) : (i += 1) { params.input_paths.append(args[i]) catch |err| fatalOom(err); } } if (params.stdin) { if (params.input_paths.items.len != 0) { fatal("cannot specify both --stdin and files", .{}); } } else if (params.input_paths.items.len == 0) { fatal("expected at least one source file argument", .{}); } return params; } const FmtResult = struct { content: []u8, noop: bool, comments_removed: u32, comments_added: u32, }; fn fmtFile( gpa: std.mem.Allocator, cmd: Params.Cmd, input_file: *const std.fs.File, filename: []const u8, size_hint: ?usize, ) !FmtResult { const source = try input_file.readToEndAllocOptions( gpa, std.math.maxInt(u32), size_hint, @alignOf(u16), 0, ); defer gpa.free(source); const removed = try zloppy.cleanSource(filename, source); var tree = try std.zig.parse(gpa, source); defer tree.deinit(gpa); if (tree.errors.len != 0) { return error.ParsingError; } var out_buffer = std.ArrayList(u8).init(gpa); defer out_buffer.deinit(); var added: u32 = 0; switch (cmd) { .on => { var patches = try zloppy.genPatches(gpa, tree); defer patches.deinit(); try @import("render.zig").renderTreeWithPatches(&out_buffer, tree, &patches); added = patches.rendered_comments; }, .off => { try tree.renderToArrayList(&out_buffer); }, } return FmtResult{ .noop = std.mem.eql(u8, out_buffer.items, source), .content = out_buffer.toOwnedSlice(), .comments_removed = removed, .comments_added = added, }; } const TopLevelDir = struct { file_paths: [][]const u8, cur_idx: usize = 0, fn appendPathName( self: *TopLevelDir, gpa: std.mem.Allocator, 
path: []const u8, ) []const u8 { _ = self; return gpa.dupe(u8, path) catch |err| fatalOom(err); } fn getNextFileName(self: *TopLevelDir) ?[]const u8 { const next_idx = self.cur_idx; if (next_idx < self.file_paths.len) { self.cur_idx += 1; return self.file_paths[next_idx]; } else { return null; } } fn getDir(self: *TopLevelDir) std.fs.Dir { _ = self; return std.fs.cwd(); } }; const Dir = struct { dir: std.fs.Dir, path: []const u8, fullpath: []const u8, iterator: std.fs.Dir.Iterator, fn init( parent: std.fs.Dir, path: []const u8, fullpath: []const u8, ) !Dir { var self = Dir{ .dir = try parent.openDir(path, .{ .iterate = true }), .path = path, .fullpath = fullpath, .iterator = undefined, }; self.iterator = self.dir.iterate(); return self; } fn appendPathName(self: *Dir, gpa: std.mem.Allocator, path: []const u8) []const u8 { const left_len = self.fullpath.len; const sep_len = std.fs.path.sep_str.len; var new_path = gpa.alloc(u8, left_len + sep_len + path.len) catch |err| fatalOom(err); std.mem.copy(u8, new_path, self.fullpath); std.mem.copy(u8, new_path[left_len..], std.fs.path.sep_str); std.mem.copy(u8, new_path[left_len + sep_len ..], path); return new_path; } fn getNextFileName(self: *Dir) ?[]const u8 { while (self.iterator.next() catch |err| { logErr(err, "failed to get files in directory {s}", .{self.fullpath}); return null; }) |entry| { switch (entry.kind) { .Directory => { if (std.mem.eql(u8, entry.name, "zig-cache")) { continue; } else { return entry.name; } }, else => { if (std.mem.endsWith(u8, entry.name, ".zig")) { return entry.name; } else { continue; } }, } } return null; } fn getDir(self: *Dir) std.fs.Dir { return self.dir; } }; fn fmtDir( gpa: std.mem.Allocator, cmd: Params.Cmd, dir: anytype, ) error{FmtDirError}!void { var has_error = false; while (dir.getNextFileName()) |path| { const fullpath = dir.appendPathName(gpa, path); defer gpa.free(fullpath); var file = dir.getDir().openFile(path, .{}) catch |err| { logErr(err, "unable to open file '{s}'", .{fullpath}); has_error = true; continue; }; defer file.close(); const stat = file.stat() catch |err| { logErr(err, "unable to stat file '{s}'", .{fullpath}); has_error = true; continue; }; if (stat.kind == .Directory) { var subdir = Dir.init(dir.getDir(), path, fullpath) catch |err| { logErr(err, "unable to open directory '{s}'", .{fullpath}); has_error = true; continue; }; fmtDir(gpa, cmd, &subdir) catch { has_error = true; continue; }; } else { const result = fmtFile(gpa, cmd, &file, fullpath, stat.size) catch |err| { logErr(err, "failed to format file '{s}'", .{fullpath}); has_error = true; continue; }; defer gpa.free(result.content); if (result.noop) continue; var af = dir.getDir().atomicFile(path, .{ .mode = stat.mode }) catch |err| { logErr(err, "failed to initialize atomic write on '{s}'", .{fullpath}); has_error = true; continue; }; defer af.deinit(); af.file.writeAll(result.content) catch |err| { logErr(err, "failed to write content of {s} to temporary file", .{fullpath}); has_error = true; continue; }; af.finish() catch |err| { logErr(err, "failed to write to {s}", .{fullpath}); has_error = true; continue; }; if (result.comments_removed == 0 and result.comments_added == 0) { std.debug.print("{s} updated (format only)\n", .{fullpath}); } else { std.debug.print( "{s} updated ({} removed, {} added)\n", .{ fullpath, result.comments_removed, result.comments_added }, ); } } } if (has_error) return error.FmtDirError; } pub fn main() !void { var gpa_instance = std.heap.GeneralPurposeAllocator(.{}){}; const gpa = 
gpa_instance.allocator(); defer { _ = gpa_instance.deinit(); } const args = try std.process.argsAlloc(gpa); defer std.process.argsFree(gpa, args); std.debug.assert(args.len > 0); const params = parseParams(gpa, args[1..]); defer params.input_paths.deinit(); if (params.stdin) { var stdin = std.io.getStdIn(); const result = fmtFile(gpa, params.cmd, &stdin, "<stdin>", null) catch |err| { logErr(err, "failed to format stdin", .{}); std.process.exit(1); }; defer gpa.free(result.content); std.io.getStdOut().writeAll(result.content) catch |err| { logErr(err, "failed to write to stdout", .{}); std.process.exit(1); }; } else { var cwd = TopLevelDir{ .file_paths = params.input_paths.items }; fmtDir(gpa, params.cmd, &cwd) catch { std.process.exit(1); }; } }
src/main.zig
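A hedged sketch, written as if added alongside the declarations in src/main.zig (which already imports std), exercising parseParams with an argv-like slice; the sample arguments "on" and "src" are made up.

test "parseParams accepts a command followed by paths" {
    var args = [_][:0]const u8{ "on", "src" };
    var params = parseParams(std.testing.allocator, &args);
    defer params.input_paths.deinit();
    try std.testing.expectEqual(Params.Cmd.on, params.cmd);
    try std.testing.expect(!params.stdin);
    try std.testing.expectEqual(@as(usize, 1), params.input_paths.items.len);
    try std.testing.expectEqualStrings("src", params.input_paths.items[0]);
}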
const std = @import("std"); const pike = @import("pike.zig"); const posix = @import("os/posix.zig"); const PackedWaker = @import("waker.zig").PackedWaker; const os = std.os; const system = os.system; const mem = std.mem; const meta = std.meta; const builtin = @import("builtin"); pub const SignalType = packed struct { terminate: bool = false, interrupt: bool = false, quit: bool = false, hup: bool = false, fn toSet(self: SignalType) os.sigset_t { const sigaddset = if (comptime builtin.target.isDarwin()) system.sigaddset else os.linux.sigaddset; var set = mem.zeroes(os.sigset_t); if (self.terminate) sigaddset(&set, os.SIG.TERM); if (self.interrupt) sigaddset(&set, os.SIG.INT); if (self.quit) sigaddset(&set, os.SIG.QUIT); if (self.hup) sigaddset(&set, os.SIG.HUP); return set; } }; pub const Signal = struct { const EMPTY_SIGACTION = os.Sigaction{ .handler = .{ .handler = null }, .mask = mem.zeroes(os.sigset_t), .flags = 0, }; const MaskInt = meta.Int(.unsigned, @bitSizeOf(SignalType)); const Self = @This(); var lock: std.Thread.Mutex = .{}; var mask: SignalType = .{}; var waker: PackedWaker(pike.Task, SignalType) = .{}; current: SignalType, previous: [@bitSizeOf(SignalType)]os.Sigaction, fn handler(signal: c_int) callconv(.C) void { const current_held = lock.lock(); _=current_held; const current_mask = mask; lock.unlock(); //current_held.release(); switch (signal) { os.SIG.TERM => { if (!current_mask.terminate) return; const held = lock.lock(); _=held; const next_node = waker.wake(.{ .terminate = true }); lock.unlock(); //held.release(); if (next_node) |node| pike.dispatch(&node.data, .{}); }, os.SIG.INT => { if (!current_mask.interrupt) return; const held = lock.lock(); _=held; const next_node = waker.wake(.{ .interrupt = true }); lock.unlock(); //held.release(); if (next_node) |node| pike.dispatch(&node.data, .{}); }, os.SIG.QUIT => { if (!current_mask.quit) return; const held = lock.lock(); _=held; const next_node = waker.wake(.{ .quit = true }); lock.unlock(); //held.release(); if (next_node) |node| pike.dispatch(&node.data, .{}); }, os.SIG.HUP => { if (!current_mask.hup) return; const held = lock.lock(); _=held; const next_node = waker.wake(.{ .hup = true }); lock.unlock(); //held.release(); if (next_node) |node| pike.dispatch(&node.data, .{}); }, else => {}, } } pub fn init(current: SignalType) !Self { const held = lock.lock(); _=held; defer lock.unlock(); const new_mask = @bitCast(SignalType, @bitCast(MaskInt, current) | @bitCast(MaskInt, mask)); const sigaction = os.Sigaction{ .handler = .{ .handler = handler }, .mask = new_mask.toSet(), .flags = 0, }; var previous = [_]os.Sigaction{EMPTY_SIGACTION} ** @bitSizeOf(SignalType); os.sigaction(os.SIG.TERM, &sigaction, &previous[std.meta.fieldIndex(SignalType, "terminate").?]); os.sigaction(os.SIG.INT, &sigaction, &previous[std.meta.fieldIndex(SignalType, "interrupt").?]); os.sigaction(os.SIG.QUIT, &sigaction, &previous[std.meta.fieldIndex(SignalType, "quit").?]); os.sigaction(os.SIG.HUP, &sigaction, &previous[std.meta.fieldIndex(SignalType, "hup").?]); mask = new_mask; return Self{ .current = current, .previous = previous, }; } pub fn deinit(self: *Self) void { for (self.previous) |sigaction, i| { os.sigaction( switch (i) { 0 => os.SIG.TERM, 1 => os.SIG.INT, 2 => os.SIG.QUIT, 3 => os.SIG.HUP, else => unreachable, }, &sigaction, null, ); } } pub fn wait(self: *Self) callconv(.Async) !void { const held = lock.lock(); _=held; if (waker.wait(self.current)) { lock.unlock(); //held.release(); } else { suspend { var node = @TypeOf(waker).FrameNode{ 
.data = pike.Task.init(@frame()) }; @TypeOf(waker).FrameList.append(&waker.heads, self.current, &node); lock.unlock(); //held.release(); } const next_held = lock.lock(); _=next_held; const next_node = waker.next(self.current); lock.unlock(); //next_held.release(); if (next_node) |node| { pike.dispatch(&node.data, .{}); } } } };
signal_posix.zig
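A hedged standalone sketch of the packed-struct-to-integer trick Signal.init uses to merge signal masks (same pre-0.11 two-argument @bitCast as the file); the struct and field names here are placeholders.

const std = @import("std");

const Flags = packed struct {
    terminate: bool = false,
    interrupt: bool = false,
    quit: bool = false,
    hup: bool = false,
};

test "packed signal masks merge via integer bitcast" {
    const Int = std.meta.Int(.unsigned, @bitSizeOf(Flags)); // u4
    const a = Flags{ .terminate = true };
    const b = Flags{ .interrupt = true };
    const merged = @bitCast(Flags, @bitCast(Int, a) | @bitCast(Int, b));
    try std.testing.expect(merged.terminate and merged.interrupt and !merged.quit and !merged.hup);
}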
const std = @import("std"); const print = std.debug.print; const SIZE = 100; const Grid = std.ArrayList([SIZE]u8); pub fn main() anyerror!void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); var allocator = &gpa.allocator; var file = try std.fs.cwd().openFile( "./inputs/day18.txt", .{ .read = true, }, ); var reader = std.io.bufferedReader(file.reader()).reader(); var grid = try Grid.initCapacity(allocator, SIZE); defer grid.deinit(); var grid2 = try Grid.initCapacity(allocator, SIZE); defer grid2.deinit(); var other = try Grid.initCapacity(allocator, SIZE); defer other.deinit(); var r: usize = 0; while (r < SIZE) : (r += 1) { var row: [SIZE]u8 = undefined; _ = try reader.readAll(&row); _ = try reader.readByte(); try grid.append(row); try grid2.append(row); try other.append(undefined); } { var i: usize = 0; while (i < SIZE) : (i += 1) { step(grid, &other); std.mem.swap(Grid, &grid, &other); } print("Part 1: {d}\n", .{alive(grid)}); } { var i: usize = 0; while (i < SIZE) : (i += 1) { grid2.items[0][0] = '#'; grid2.items[0][SIZE - 1] = '#'; grid2.items[SIZE - 1][0] = '#'; grid2.items[SIZE - 1][SIZE - 1] = '#'; step(grid2, &other); std.mem.swap(Grid, &grid2, &other); } grid2.items[0][0] = '#'; grid2.items[0][SIZE - 1] = '#'; grid2.items[SIZE - 1][0] = '#'; grid2.items[SIZE - 1][SIZE - 1] = '#'; print("Part 2: {d}\n", .{alive(grid2)}); } } fn step(grid: Grid, next: *Grid) void { var i: usize = 0; while (i < SIZE) : (i += 1) { var j: usize = 0; while (j < SIZE) : (j += 1) { var n: usize = 0; const li = if (i == 0) 0 else i - 1; const hi = if (i == SIZE - 1) SIZE - 1 else i + 1; const lj = if (j == 0) 0 else j - 1; const hj = if (j == SIZE - 1) SIZE - 1 else j + 1; var x = li; while (x <= hi) : (x += 1) { var y = lj; while (y <= hj) : (y += 1) { if (x != i or y != j) { n += if (grid.items[y][x] == '#') @as(usize, 1) else 0; } } } if (grid.items[j][i] == '#') { next.items[j][i] = if (n == 2 or n == 3) '#' else '.'; } else { next.items[j][i] = if (n == 3) '#' else '.'; } } } } fn alive(grid: Grid) usize { var count: usize = 0; for (grid.items) |row| { for (row) |cell| { if (cell == '#') { count += 1; } } } return count; }
src/day18.zig
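A hedged sketch, not in src/day18.zig: the Game of Life rule applied inside step(), isolated so the neighbour thresholds (2 or 3 to stay alive, exactly 3 to be born) are explicit and testable.

const std = @import("std");

fn nextState(cell: u8, live_neighbours: usize) u8 {
    if (cell == '#') return if (live_neighbours == 2 or live_neighbours == 3) '#' else '.';
    return if (live_neighbours == 3) '#' else '.';
}

test "game of life rule" {
    try std.testing.expectEqual(@as(u8, '#'), nextState('#', 2));
    try std.testing.expectEqual(@as(u8, '.'), nextState('#', 4));
    try std.testing.expectEqual(@as(u8, '#'), nextState('.', 3));
    try std.testing.expectEqual(@as(u8, '.'), nextState('.', 2));
}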
const OpcodeEnum = @import("enum.zig").OpcodeEnum; const AddressingModeEnum = @import("enum.zig").AddressingModeEnum; pub const Opcode = struct { name: OpcodeEnum, addressing_mode: AddressingModeEnum, code: u8, size: u16, cycles: u8, pub fn init(name: OpcodeEnum, code: u8, addressing_mode: AddressingModeEnum, size: u16, cycles: u8) Opcode { return Opcode{ .name = name, .code = code, .addressing_mode = addressing_mode, .size = size, .cycles = cycles, }; } }; pub fn generate_opcodes() [0x100]Opcode { var opcodes: [0x100]Opcode = undefined; opcodes[0x00] = Opcode.init(OpcodeEnum.BRK, 0x00, AddressingModeEnum.Implicit, 1, 7); opcodes[0x01] = Opcode.init(OpcodeEnum.ORA, 0x01, AddressingModeEnum.IndirectX, 2, 6); opcodes[0x02] = Opcode.init(OpcodeEnum.KIL, 0x02, AddressingModeEnum.Implicit, 1, 0); opcodes[0x03] = Opcode.init(OpcodeEnum.SLO, 0x03, AddressingModeEnum.IndirectX, 2, 8); opcodes[0x04] = Opcode.init(OpcodeEnum.NOP, 0x04, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0x05] = Opcode.init(OpcodeEnum.ORA, 0x05, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0x06] = Opcode.init(OpcodeEnum.ASL, 0x06, AddressingModeEnum.ZeroPage, 2, 5); opcodes[0x07] = Opcode.init(OpcodeEnum.SLO, 0x07, AddressingModeEnum.ZeroPage, 2, 5); opcodes[0x08] = Opcode.init(OpcodeEnum.PHP, 0x08, AddressingModeEnum.Implicit, 1, 3); opcodes[0x09] = Opcode.init(OpcodeEnum.ORA, 0x09, AddressingModeEnum.Immediate, 2, 2); opcodes[0x0A] = Opcode.init(OpcodeEnum.ASL, 0x0A, AddressingModeEnum.Accumulator, 1, 2); opcodes[0x0B] = Opcode.init(OpcodeEnum.AAC, 0x0B, AddressingModeEnum.Immediate, 2, 2); opcodes[0x0C] = Opcode.init(OpcodeEnum.NOP, 0x0C, AddressingModeEnum.Absolute, 3, 4); opcodes[0x0D] = Opcode.init(OpcodeEnum.ORA, 0x0D, AddressingModeEnum.Absolute, 3, 4); opcodes[0x0E] = Opcode.init(OpcodeEnum.ASL, 0x0E, AddressingModeEnum.Absolute, 3, 6); opcodes[0x0F] = Opcode.init(OpcodeEnum.SLO, 0x0F, AddressingModeEnum.Absolute, 3, 6); opcodes[0x10] = Opcode.init(OpcodeEnum.BPL, 0x10, AddressingModeEnum.Relative, 2, 2); opcodes[0x11] = Opcode.init(OpcodeEnum.ORA, 0x11, AddressingModeEnum.IndirectY, 2, 5); opcodes[0x12] = Opcode.init(OpcodeEnum.KIL, 0x12, AddressingModeEnum.Implicit, 1, 0); opcodes[0x13] = Opcode.init(OpcodeEnum.SLO, 0x13, AddressingModeEnum.IndirectY, 2, 8); opcodes[0x14] = Opcode.init(OpcodeEnum.NOP, 0x14, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0x15] = Opcode.init(OpcodeEnum.ORA, 0x15, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0x16] = Opcode.init(OpcodeEnum.ASL, 0x16, AddressingModeEnum.ZeroPageX, 2, 6); opcodes[0x17] = Opcode.init(OpcodeEnum.SLO, 0x17, AddressingModeEnum.ZeroPageX, 2, 6); opcodes[0x18] = Opcode.init(OpcodeEnum.CLC, 0x18, AddressingModeEnum.Implicit, 1, 2); opcodes[0x19] = Opcode.init(OpcodeEnum.ORA, 0x19, AddressingModeEnum.AbsoluteY, 3, 4); opcodes[0x1A] = Opcode.init(OpcodeEnum.NOP, 0x1A, AddressingModeEnum.Implicit, 1, 2); opcodes[0x1B] = Opcode.init(OpcodeEnum.SLO, 0x1B, AddressingModeEnum.AbsoluteY, 3, 7); opcodes[0x1C] = Opcode.init(OpcodeEnum.NOP, 0x1C, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0x1D] = Opcode.init(OpcodeEnum.ORA, 0x1D, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0x1E] = Opcode.init(OpcodeEnum.ASL, 0x1E, AddressingModeEnum.AbsoluteX, 3, 7); opcodes[0x1F] = Opcode.init(OpcodeEnum.SLO, 0x1F, AddressingModeEnum.AbsoluteX, 3, 7); opcodes[0x20] = Opcode.init(OpcodeEnum.JSR, 0x20, AddressingModeEnum.Absolute, 3, 6); opcodes[0x21] = Opcode.init(OpcodeEnum.AND, 0x21, AddressingModeEnum.IndirectX, 2, 6); opcodes[0x22] = Opcode.init(OpcodeEnum.KIL, 0x22, 
AddressingModeEnum.Implicit, 1, 0); opcodes[0x23] = Opcode.init(OpcodeEnum.RLA, 0x23, AddressingModeEnum.IndirectX, 2, 8); opcodes[0x24] = Opcode.init(OpcodeEnum.BIT, 0x24, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0x25] = Opcode.init(OpcodeEnum.AND, 0x25, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0x26] = Opcode.init(OpcodeEnum.ROL, 0x26, AddressingModeEnum.ZeroPage, 2, 5); opcodes[0x27] = Opcode.init(OpcodeEnum.RLA, 0x27, AddressingModeEnum.ZeroPage, 2, 5); opcodes[0x28] = Opcode.init(OpcodeEnum.PLP, 0x28, AddressingModeEnum.Implicit, 1, 4); opcodes[0x29] = Opcode.init(OpcodeEnum.AND, 0x29, AddressingModeEnum.Immediate, 2, 2); opcodes[0x2A] = Opcode.init(OpcodeEnum.ROL, 0x2A, AddressingModeEnum.Accumulator, 1, 2); opcodes[0x2B] = Opcode.init(OpcodeEnum.AAC, 0x2B, AddressingModeEnum.Immediate, 2, 2); opcodes[0x2C] = Opcode.init(OpcodeEnum.BIT, 0x2C, AddressingModeEnum.Absolute, 3, 4); opcodes[0x2D] = Opcode.init(OpcodeEnum.AND, 0x2D, AddressingModeEnum.Absolute, 3, 4); opcodes[0x2E] = Opcode.init(OpcodeEnum.ROL, 0x2E, AddressingModeEnum.Absolute, 3, 6); opcodes[0x2F] = Opcode.init(OpcodeEnum.RLA, 0x2F, AddressingModeEnum.Absolute, 3, 6); opcodes[0x30] = Opcode.init(OpcodeEnum.BMI, 0x30, AddressingModeEnum.Relative, 2, 2); opcodes[0x31] = Opcode.init(OpcodeEnum.AND, 0x31, AddressingModeEnum.IndirectY, 2, 5); opcodes[0x32] = Opcode.init(OpcodeEnum.KIL, 0x32, AddressingModeEnum.Implicit, 1, 0); opcodes[0x33] = Opcode.init(OpcodeEnum.RLA, 0x33, AddressingModeEnum.IndirectY, 2, 8); opcodes[0x34] = Opcode.init(OpcodeEnum.NOP, 0x34, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0x35] = Opcode.init(OpcodeEnum.AND, 0x35, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0x36] = Opcode.init(OpcodeEnum.ROL, 0x36, AddressingModeEnum.ZeroPageX, 2, 6); opcodes[0x37] = Opcode.init(OpcodeEnum.RLA, 0x37, AddressingModeEnum.ZeroPageX, 2, 6); opcodes[0x38] = Opcode.init(OpcodeEnum.SEC, 0x38, AddressingModeEnum.Implicit, 1, 2); opcodes[0x39] = Opcode.init(OpcodeEnum.AND, 0x39, AddressingModeEnum.AbsoluteY, 3, 4); opcodes[0x3A] = Opcode.init(OpcodeEnum.NOP, 0x3A, AddressingModeEnum.Implicit, 1, 2); opcodes[0x3B] = Opcode.init(OpcodeEnum.RLA, 0x3B, AddressingModeEnum.AbsoluteY, 3, 7); opcodes[0x3C] = Opcode.init(OpcodeEnum.NOP, 0x3C, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0x3D] = Opcode.init(OpcodeEnum.AND, 0x3D, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0x3E] = Opcode.init(OpcodeEnum.ROL, 0x3E, AddressingModeEnum.AbsoluteX, 3, 7); opcodes[0x3F] = Opcode.init(OpcodeEnum.RLA, 0x3F, AddressingModeEnum.AbsoluteX, 3, 7); opcodes[0x40] = Opcode.init(OpcodeEnum.RTI, 0x40, AddressingModeEnum.Implicit, 1, 6); opcodes[0x41] = Opcode.init(OpcodeEnum.EOR, 0x41, AddressingModeEnum.IndirectX, 2, 6); opcodes[0x42] = Opcode.init(OpcodeEnum.KIL, 0x42, AddressingModeEnum.Implicit, 1, 0); opcodes[0x43] = Opcode.init(OpcodeEnum.SRE, 0x43, AddressingModeEnum.IndirectX, 2, 8); opcodes[0x44] = Opcode.init(OpcodeEnum.NOP, 0x44, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0x45] = Opcode.init(OpcodeEnum.EOR, 0x45, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0x46] = Opcode.init(OpcodeEnum.LSR, 0x46, AddressingModeEnum.ZeroPage, 2, 5); opcodes[0x47] = Opcode.init(OpcodeEnum.SRE, 0x47, AddressingModeEnum.ZeroPage, 2, 5); opcodes[0x48] = Opcode.init(OpcodeEnum.PHA, 0x48, AddressingModeEnum.Implicit, 1, 3); opcodes[0x49] = Opcode.init(OpcodeEnum.EOR, 0x49, AddressingModeEnum.Immediate, 2, 2); opcodes[0x4A] = Opcode.init(OpcodeEnum.LSR, 0x4A, AddressingModeEnum.Accumulator, 1, 2); opcodes[0x4B] = Opcode.init(OpcodeEnum.ASR, 0x4B, 
AddressingModeEnum.Immediate, 2, 2); opcodes[0x4C] = Opcode.init(OpcodeEnum.JMP, 0x4C, AddressingModeEnum.Absolute, 3, 3); opcodes[0x4D] = Opcode.init(OpcodeEnum.EOR, 0x4D, AddressingModeEnum.Absolute, 3, 4); opcodes[0x4E] = Opcode.init(OpcodeEnum.LSR, 0x4E, AddressingModeEnum.Absolute, 3, 6); opcodes[0x4F] = Opcode.init(OpcodeEnum.SRE, 0x4F, AddressingModeEnum.Absolute, 3, 6); opcodes[0x50] = Opcode.init(OpcodeEnum.BVC, 0x50, AddressingModeEnum.Relative, 2, 2); opcodes[0x51] = Opcode.init(OpcodeEnum.EOR, 0x51, AddressingModeEnum.IndirectY, 2, 5); opcodes[0x52] = Opcode.init(OpcodeEnum.KIL, 0x52, AddressingModeEnum.Implicit, 1, 0); opcodes[0x53] = Opcode.init(OpcodeEnum.SRE, 0x53, AddressingModeEnum.IndirectY, 2, 8); opcodes[0x54] = Opcode.init(OpcodeEnum.NOP, 0x54, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0x55] = Opcode.init(OpcodeEnum.EOR, 0x55, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0x56] = Opcode.init(OpcodeEnum.LSR, 0x56, AddressingModeEnum.ZeroPageX, 2, 6); opcodes[0x57] = Opcode.init(OpcodeEnum.SRE, 0x57, AddressingModeEnum.ZeroPageX, 2, 6); opcodes[0x58] = Opcode.init(OpcodeEnum.CLI, 0x58, AddressingModeEnum.Implicit, 1, 2); opcodes[0x59] = Opcode.init(OpcodeEnum.EOR, 0x59, AddressingModeEnum.AbsoluteY, 3, 4); opcodes[0x5A] = Opcode.init(OpcodeEnum.NOP, 0x5A, AddressingModeEnum.Implicit, 1, 2); opcodes[0x5B] = Opcode.init(OpcodeEnum.SRE, 0x5B, AddressingModeEnum.AbsoluteY, 3, 7); opcodes[0x5C] = Opcode.init(OpcodeEnum.NOP, 0x5C, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0x5D] = Opcode.init(OpcodeEnum.EOR, 0x5D, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0x5E] = Opcode.init(OpcodeEnum.LSR, 0x5E, AddressingModeEnum.AbsoluteX, 3, 7); opcodes[0x5F] = Opcode.init(OpcodeEnum.SRE, 0x5F, AddressingModeEnum.AbsoluteX, 3, 7); opcodes[0x60] = Opcode.init(OpcodeEnum.RTS, 0x60, AddressingModeEnum.Implicit, 1, 6); opcodes[0x61] = Opcode.init(OpcodeEnum.ADC, 0x61, AddressingModeEnum.IndirectX, 2, 6); opcodes[0x62] = Opcode.init(OpcodeEnum.KIL, 0x62, AddressingModeEnum.Implicit, 1, 0); opcodes[0x63] = Opcode.init(OpcodeEnum.RRA, 0x63, AddressingModeEnum.IndirectX, 2, 8); opcodes[0x64] = Opcode.init(OpcodeEnum.NOP, 0x64, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0x65] = Opcode.init(OpcodeEnum.ADC, 0x65, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0x66] = Opcode.init(OpcodeEnum.ROR, 0x66, AddressingModeEnum.ZeroPage, 2, 5); opcodes[0x67] = Opcode.init(OpcodeEnum.RRA, 0x67, AddressingModeEnum.ZeroPage, 2, 5); opcodes[0x68] = Opcode.init(OpcodeEnum.PLA, 0x68, AddressingModeEnum.Implicit, 1, 4); opcodes[0x69] = Opcode.init(OpcodeEnum.ADC, 0x69, AddressingModeEnum.Immediate, 2, 2); opcodes[0x6A] = Opcode.init(OpcodeEnum.ROR, 0x6A, AddressingModeEnum.Accumulator, 1, 2); opcodes[0x6B] = Opcode.init(OpcodeEnum.ARR, 0x6B, AddressingModeEnum.Immediate, 2, 2); opcodes[0x6C] = Opcode.init(OpcodeEnum.JMP, 0x6C, AddressingModeEnum.Indirect, 3, 5); opcodes[0x6D] = Opcode.init(OpcodeEnum.ADC, 0x6D, AddressingModeEnum.Absolute, 3, 4); opcodes[0x6E] = Opcode.init(OpcodeEnum.ROR, 0x6E, AddressingModeEnum.Absolute, 3, 6); opcodes[0x6F] = Opcode.init(OpcodeEnum.RRA, 0x6F, AddressingModeEnum.Absolute, 3, 6); opcodes[0x70] = Opcode.init(OpcodeEnum.BVS, 0x70, AddressingModeEnum.Relative, 2, 2); opcodes[0x71] = Opcode.init(OpcodeEnum.ADC, 0x71, AddressingModeEnum.IndirectY, 2, 5); opcodes[0x72] = Opcode.init(OpcodeEnum.KIL, 0x72, AddressingModeEnum.Implicit, 1, 0); opcodes[0x73] = Opcode.init(OpcodeEnum.RRA, 0x73, AddressingModeEnum.IndirectY, 2, 8); opcodes[0x74] = Opcode.init(OpcodeEnum.NOP, 0x74, 
AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0x75] = Opcode.init(OpcodeEnum.ADC, 0x75, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0x76] = Opcode.init(OpcodeEnum.ROR, 0x76, AddressingModeEnum.ZeroPageX, 2, 6); opcodes[0x77] = Opcode.init(OpcodeEnum.RRA, 0x77, AddressingModeEnum.ZeroPageX, 2, 6); opcodes[0x78] = Opcode.init(OpcodeEnum.SEI, 0x78, AddressingModeEnum.Implicit, 1, 2); opcodes[0x79] = Opcode.init(OpcodeEnum.ADC, 0x79, AddressingModeEnum.AbsoluteY, 3, 4); opcodes[0x7A] = Opcode.init(OpcodeEnum.NOP, 0x7A, AddressingModeEnum.Implicit, 1, 2); opcodes[0x7B] = Opcode.init(OpcodeEnum.RRA, 0x7B, AddressingModeEnum.AbsoluteY, 3, 7); opcodes[0x7C] = Opcode.init(OpcodeEnum.NOP, 0x7C, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0x7D] = Opcode.init(OpcodeEnum.ADC, 0x7D, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0x7E] = Opcode.init(OpcodeEnum.ROR, 0x7E, AddressingModeEnum.AbsoluteX, 3, 7); opcodes[0x7F] = Opcode.init(OpcodeEnum.RRA, 0x7F, AddressingModeEnum.AbsoluteX, 3, 7); opcodes[0x80] = Opcode.init(OpcodeEnum.NOP, 0x80, AddressingModeEnum.Immediate, 2, 2); opcodes[0x81] = Opcode.init(OpcodeEnum.STA, 0x81, AddressingModeEnum.IndirectX, 2, 6); opcodes[0x82] = Opcode.init(OpcodeEnum.NOP, 0x82, AddressingModeEnum.Immediate, 2, 2); opcodes[0x83] = Opcode.init(OpcodeEnum.AAX, 0x83, AddressingModeEnum.IndirectX, 2, 6); opcodes[0x84] = Opcode.init(OpcodeEnum.STY, 0x84, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0x85] = Opcode.init(OpcodeEnum.STA, 0x85, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0x86] = Opcode.init(OpcodeEnum.STX, 0x86, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0x87] = Opcode.init(OpcodeEnum.AAX, 0x87, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0x88] = Opcode.init(OpcodeEnum.DEY, 0x88, AddressingModeEnum.Implicit, 1, 2); opcodes[0x89] = Opcode.init(OpcodeEnum.NOP, 0x89, AddressingModeEnum.Immediate, 2, 2); opcodes[0x8A] = Opcode.init(OpcodeEnum.TXA, 0x8A, AddressingModeEnum.Implicit, 1, 2); opcodes[0x8B] = Opcode.init(OpcodeEnum.XAA, 0x8B, AddressingModeEnum.Immediate, 2, 2); opcodes[0x8C] = Opcode.init(OpcodeEnum.STY, 0x8C, AddressingModeEnum.Absolute, 3, 4); opcodes[0x8D] = Opcode.init(OpcodeEnum.STA, 0x8D, AddressingModeEnum.Absolute, 3, 4); opcodes[0x8E] = Opcode.init(OpcodeEnum.STX, 0x8E, AddressingModeEnum.Absolute, 3, 4); opcodes[0x8F] = Opcode.init(OpcodeEnum.AAX, 0x8F, AddressingModeEnum.Absolute, 3, 4); opcodes[0x90] = Opcode.init(OpcodeEnum.BCC, 0x90, AddressingModeEnum.Relative, 2, 2); opcodes[0x91] = Opcode.init(OpcodeEnum.STA, 0x91, AddressingModeEnum.IndirectY, 2, 6); opcodes[0x92] = Opcode.init(OpcodeEnum.KIL, 0x92, AddressingModeEnum.Implicit, 1, 0); opcodes[0x93] = Opcode.init(OpcodeEnum.AXA, 0x93, AddressingModeEnum.IndirectY, 2, 6); opcodes[0x94] = Opcode.init(OpcodeEnum.STY, 0x94, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0x95] = Opcode.init(OpcodeEnum.STA, 0x95, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0x96] = Opcode.init(OpcodeEnum.STX, 0x96, AddressingModeEnum.ZeroPageY, 2, 4); opcodes[0x97] = Opcode.init(OpcodeEnum.AAX, 0x97, AddressingModeEnum.ZeroPageY, 2, 4); opcodes[0x98] = Opcode.init(OpcodeEnum.TYA, 0x98, AddressingModeEnum.Implicit, 1, 2); opcodes[0x99] = Opcode.init(OpcodeEnum.STA, 0x99, AddressingModeEnum.AbsoluteY, 3, 5); opcodes[0x9A] = Opcode.init(OpcodeEnum.TXS, 0x9A, AddressingModeEnum.Implicit, 1, 2); opcodes[0x9B] = Opcode.init(OpcodeEnum.XAS, 0x9B, AddressingModeEnum.Immediate, 3, 5); opcodes[0x9C] = Opcode.init(OpcodeEnum.SYA, 0x9C, AddressingModeEnum.AbsoluteY, 3, 5); opcodes[0x9D] = Opcode.init(OpcodeEnum.STA, 0x9D, 
AddressingModeEnum.AbsoluteX, 3, 5); opcodes[0x9E] = Opcode.init(OpcodeEnum.SXA, 0x9E, AddressingModeEnum.AbsoluteY, 3, 5); opcodes[0x9F] = Opcode.init(OpcodeEnum.AXA, 0x9F, AddressingModeEnum.AbsoluteY, 3, 5); opcodes[0xA0] = Opcode.init(OpcodeEnum.LDY, 0xA0, AddressingModeEnum.Immediate, 2, 2); opcodes[0xA1] = Opcode.init(OpcodeEnum.LDA, 0xA1, AddressingModeEnum.IndirectX, 2, 6); opcodes[0xA2] = Opcode.init(OpcodeEnum.LDX, 0xA2, AddressingModeEnum.Immediate, 2, 2); opcodes[0xA3] = Opcode.init(OpcodeEnum.LAX, 0xA3, AddressingModeEnum.IndirectX, 2, 6); opcodes[0xA4] = Opcode.init(OpcodeEnum.LDY, 0xA4, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0xA5] = Opcode.init(OpcodeEnum.LDA, 0xA5, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0xA6] = Opcode.init(OpcodeEnum.LDX, 0xA6, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0xA7] = Opcode.init(OpcodeEnum.LAX, 0xA7, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0xA8] = Opcode.init(OpcodeEnum.TAY, 0xA8, AddressingModeEnum.Implicit, 1, 2); opcodes[0xA9] = Opcode.init(OpcodeEnum.LDA, 0xA9, AddressingModeEnum.Immediate, 2, 2); opcodes[0xAA] = Opcode.init(OpcodeEnum.TAX, 0xAA, AddressingModeEnum.Implicit, 1, 2); opcodes[0xAB] = Opcode.init(OpcodeEnum.ATX, 0xAB, AddressingModeEnum.Immediate, 2, 2); opcodes[0xAC] = Opcode.init(OpcodeEnum.LDY, 0xAC, AddressingModeEnum.Absolute, 3, 4); opcodes[0xAD] = Opcode.init(OpcodeEnum.LDA, 0xAD, AddressingModeEnum.Absolute, 3, 4); opcodes[0xAE] = Opcode.init(OpcodeEnum.LDX, 0xAE, AddressingModeEnum.Absolute, 3, 4); opcodes[0xAF] = Opcode.init(OpcodeEnum.LAX, 0xAF, AddressingModeEnum.Absolute, 3, 4); opcodes[0xB0] = Opcode.init(OpcodeEnum.BCS, 0xB0, AddressingModeEnum.Relative, 2, 2); opcodes[0xB1] = Opcode.init(OpcodeEnum.LDA, 0xB1, AddressingModeEnum.IndirectY, 2, 5); opcodes[0xB2] = Opcode.init(OpcodeEnum.KIL, 0xB2, AddressingModeEnum.Implicit, 1, 0); opcodes[0xB3] = Opcode.init(OpcodeEnum.LAX, 0xB3, AddressingModeEnum.IndirectY, 2, 5); opcodes[0xB4] = Opcode.init(OpcodeEnum.LDY, 0xB4, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0xB5] = Opcode.init(OpcodeEnum.LDA, 0xB5, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0xB6] = Opcode.init(OpcodeEnum.LDX, 0xB6, AddressingModeEnum.ZeroPageY, 2, 4); opcodes[0xB7] = Opcode.init(OpcodeEnum.LAX, 0xB7, AddressingModeEnum.ZeroPageY, 2, 4); opcodes[0xB8] = Opcode.init(OpcodeEnum.CLV, 0xB8, AddressingModeEnum.Implicit, 1, 2); opcodes[0xB9] = Opcode.init(OpcodeEnum.LDA, 0xB9, AddressingModeEnum.AbsoluteY, 3, 4); opcodes[0xBA] = Opcode.init(OpcodeEnum.TSX, 0xBA, AddressingModeEnum.Implicit, 1, 2); opcodes[0xBB] = Opcode.init(OpcodeEnum.LAR, 0xBB, AddressingModeEnum.AbsoluteY, 3, 4); opcodes[0xBC] = Opcode.init(OpcodeEnum.LDY, 0xBC, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0xBD] = Opcode.init(OpcodeEnum.LDA, 0xBD, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0xBE] = Opcode.init(OpcodeEnum.LDX, 0xBE, AddressingModeEnum.AbsoluteY, 3, 4); opcodes[0xBF] = Opcode.init(OpcodeEnum.LAX, 0xBF, AddressingModeEnum.AbsoluteY, 3, 4); opcodes[0xC0] = Opcode.init(OpcodeEnum.CPY, 0xC0, AddressingModeEnum.Immediate, 2, 2); opcodes[0xC1] = Opcode.init(OpcodeEnum.CMP, 0xC1, AddressingModeEnum.IndirectX, 2, 6); opcodes[0xC2] = Opcode.init(OpcodeEnum.NOP, 0xC2, AddressingModeEnum.Immediate, 2, 2); opcodes[0xC3] = Opcode.init(OpcodeEnum.DCP, 0xC3, AddressingModeEnum.IndirectX, 2, 8); opcodes[0xC4] = Opcode.init(OpcodeEnum.CPY, 0xC4, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0xC5] = Opcode.init(OpcodeEnum.CMP, 0xC5, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0xC6] = Opcode.init(OpcodeEnum.DEC, 0xC6, 
AddressingModeEnum.ZeroPage, 2, 5); opcodes[0xC7] = Opcode.init(OpcodeEnum.DCP, 0xC7, AddressingModeEnum.ZeroPage, 2, 5); opcodes[0xC8] = Opcode.init(OpcodeEnum.INY, 0xC8, AddressingModeEnum.Implicit, 1, 2); opcodes[0xC9] = Opcode.init(OpcodeEnum.CMP, 0xC9, AddressingModeEnum.Immediate, 2, 2); opcodes[0xCA] = Opcode.init(OpcodeEnum.DEX, 0xCA, AddressingModeEnum.Implicit, 1, 2); opcodes[0xCB] = Opcode.init(OpcodeEnum.AXS, 0xCB, AddressingModeEnum.Immediate, 2, 2); opcodes[0xCC] = Opcode.init(OpcodeEnum.CPY, 0xCC, AddressingModeEnum.Absolute, 3, 4); opcodes[0xCD] = Opcode.init(OpcodeEnum.CMP, 0xCD, AddressingModeEnum.Absolute, 3, 4); opcodes[0xCE] = Opcode.init(OpcodeEnum.DEC, 0xCE, AddressingModeEnum.Absolute, 3, 6); opcodes[0xCF] = Opcode.init(OpcodeEnum.DCP, 0xCF, AddressingModeEnum.Absolute, 3, 6); opcodes[0xD0] = Opcode.init(OpcodeEnum.BNE, 0xD0, AddressingModeEnum.Relative, 2, 2); opcodes[0xD1] = Opcode.init(OpcodeEnum.CMP, 0xD1, AddressingModeEnum.IndirectY, 2, 5); opcodes[0xD2] = Opcode.init(OpcodeEnum.KIL, 0xD2, AddressingModeEnum.Implicit, 1, 0); opcodes[0xD3] = Opcode.init(OpcodeEnum.DCP, 0xD3, AddressingModeEnum.IndirectY, 2, 8); opcodes[0xD4] = Opcode.init(OpcodeEnum.NOP, 0xD4, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0xD5] = Opcode.init(OpcodeEnum.CMP, 0xD5, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0xD6] = Opcode.init(OpcodeEnum.DEC, 0xD6, AddressingModeEnum.ZeroPageX, 2, 6); opcodes[0xD7] = Opcode.init(OpcodeEnum.DCP, 0xD7, AddressingModeEnum.ZeroPageX, 2, 6); opcodes[0xD8] = Opcode.init(OpcodeEnum.CLD, 0xD8, AddressingModeEnum.Implicit, 1, 2); opcodes[0xD9] = Opcode.init(OpcodeEnum.CMP, 0xD9, AddressingModeEnum.AbsoluteY, 3, 4); opcodes[0xDA] = Opcode.init(OpcodeEnum.NOP, 0xDA, AddressingModeEnum.Implicit, 1, 2); opcodes[0xDB] = Opcode.init(OpcodeEnum.DCP, 0xDB, AddressingModeEnum.AbsoluteY, 3, 7); opcodes[0xDC] = Opcode.init(OpcodeEnum.NOP, 0xDC, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0xDD] = Opcode.init(OpcodeEnum.CMP, 0xDD, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0xDE] = Opcode.init(OpcodeEnum.DEC, 0xDE, AddressingModeEnum.AbsoluteX, 3, 7); opcodes[0xDF] = Opcode.init(OpcodeEnum.DCP, 0xDF, AddressingModeEnum.AbsoluteX, 3, 7); opcodes[0xE0] = Opcode.init(OpcodeEnum.CPX, 0xE0, AddressingModeEnum.Immediate, 2, 2); opcodes[0xE1] = Opcode.init(OpcodeEnum.SBC, 0xE1, AddressingModeEnum.IndirectX, 2, 6); opcodes[0xE2] = Opcode.init(OpcodeEnum.NOP, 0xE2, AddressingModeEnum.Immediate, 2, 2); opcodes[0xE3] = Opcode.init(OpcodeEnum.ISC, 0xE3, AddressingModeEnum.IndirectX, 2, 8); opcodes[0xE4] = Opcode.init(OpcodeEnum.CPX, 0xE4, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0xE5] = Opcode.init(OpcodeEnum.SBC, 0xE5, AddressingModeEnum.ZeroPage, 2, 3); opcodes[0xE6] = Opcode.init(OpcodeEnum.INC, 0xE6, AddressingModeEnum.ZeroPage, 2, 5); opcodes[0xE7] = Opcode.init(OpcodeEnum.ISC, 0xE7, AddressingModeEnum.ZeroPage, 2, 5); opcodes[0xE8] = Opcode.init(OpcodeEnum.INX, 0xE8, AddressingModeEnum.Implicit, 1, 2); opcodes[0xE9] = Opcode.init(OpcodeEnum.SBC, 0xE9, AddressingModeEnum.Immediate, 2, 2); opcodes[0xEA] = Opcode.init(OpcodeEnum.NOP, 0xEA, AddressingModeEnum.Implicit, 1, 2); opcodes[0xEB] = Opcode.init(OpcodeEnum.SBC, 0xEB, AddressingModeEnum.Immediate, 2, 2); opcodes[0xEC] = Opcode.init(OpcodeEnum.CPX, 0xEC, AddressingModeEnum.Absolute, 3, 4); opcodes[0xED] = Opcode.init(OpcodeEnum.SBC, 0xED, AddressingModeEnum.Absolute, 3, 4); opcodes[0xEE] = Opcode.init(OpcodeEnum.INC, 0xEE, AddressingModeEnum.Absolute, 3, 6); opcodes[0xEF] = Opcode.init(OpcodeEnum.ISC, 0xEF, 
AddressingModeEnum.Absolute, 3, 6); opcodes[0xF0] = Opcode.init(OpcodeEnum.BEQ, 0xF0, AddressingModeEnum.Relative, 2, 2); opcodes[0xF1] = Opcode.init(OpcodeEnum.SBC, 0xF1, AddressingModeEnum.IndirectY, 2, 5); opcodes[0xF2] = Opcode.init(OpcodeEnum.KIL, 0xF2, AddressingModeEnum.Implicit, 1, 0); opcodes[0xF3] = Opcode.init(OpcodeEnum.ISC, 0xF3, AddressingModeEnum.IndirectY, 2, 8); opcodes[0xF4] = Opcode.init(OpcodeEnum.NOP, 0xF4, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0xF5] = Opcode.init(OpcodeEnum.SBC, 0xF5, AddressingModeEnum.ZeroPageX, 2, 4); opcodes[0xF6] = Opcode.init(OpcodeEnum.INC, 0xF6, AddressingModeEnum.ZeroPageX, 2, 6); opcodes[0xF7] = Opcode.init(OpcodeEnum.ISC, 0xF7, AddressingModeEnum.ZeroPageX, 2, 6); opcodes[0xF8] = Opcode.init(OpcodeEnum.SED, 0xF8, AddressingModeEnum.Implicit, 1, 2); opcodes[0xF9] = Opcode.init(OpcodeEnum.SBC, 0xF9, AddressingModeEnum.AbsoluteY, 3, 4); opcodes[0xFA] = Opcode.init(OpcodeEnum.NOP, 0xFA, AddressingModeEnum.Implicit, 1, 2); opcodes[0xFB] = Opcode.init(OpcodeEnum.ISC, 0xFB, AddressingModeEnum.AbsoluteY, 3, 7); opcodes[0xFC] = Opcode.init(OpcodeEnum.NOP, 0xFC, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0xFD] = Opcode.init(OpcodeEnum.SBC, 0xFD, AddressingModeEnum.AbsoluteX, 3, 4); opcodes[0xFE] = Opcode.init(OpcodeEnum.INC, 0xFE, AddressingModeEnum.AbsoluteX, 3, 7); opcodes[0xFF] = Opcode.init(OpcodeEnum.ISC, 0xFF, AddressingModeEnum.AbsoluteX, 3, 7); return opcodes; }
src/opcode.zig
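generate_opcodes builds a 256-entry lookup table keyed by the opcode byte, carrying the mnemonic, addressing mode, instruction size and base cycle count. Below is a hypothetical sketch of how such a table is typically consumed by a fetch/decode loop; MiniOp and the two sample entries are made up for illustration and are not this repo's types (assumes a 0.9/0.10-era Zig toolchain):

const std = @import("std");

// Simplified stand-in for the real Opcode table: just the fields a
// fetch/decode loop needs (the real table also carries the mnemonic
// and addressing mode).
const MiniOp = struct { size: u16, cycles: u8 };

fn miniTable() [0x100]MiniOp {
    var t = [_]MiniOp{.{ .size = 1, .cycles = 2 }} ** 0x100;
    t[0xA9] = .{ .size = 2, .cycles = 2 }; // LDA #imm
    t[0x4C] = .{ .size = 3, .cycles = 3 }; // JMP abs
    return t;
}

test "walking a byte stream with an opcode table" {
    const table = miniTable();
    const prog = [_]u8{ 0xA9, 0x10, 0x4C, 0x00, 0x80 }; // LDA #$10; JMP $8000
    var pc: usize = 0;
    var cycles: usize = 0;
    while (pc < prog.len) {
        const op = table[prog[pc]];
        cycles += op.cycles; // charge the base cycle count
        pc += op.size; // skip over the operand bytes
    }
    try std.testing.expectEqual(@as(usize, 5), cycles);
}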
const std = @import("std"); const ecs = @import("ecs"); const math = @import("math.zig"); const sokol = @import("sokol"); const main = @import("main.zig"); const Texture = main.Texture; pub const _GRAVITY = struct { x: f32, y: f32 }; pub const Position = struct { x: f32, y: f32 }; pub const Velocity = struct { x: f32, y: f32 }; pub fn processVelocity(reg: *ecs.Registry, delta: f32) void { var view = reg.view(.{ Position, Velocity }, .{}); var iter = view.iterator(); while (iter.next()) |entity| { var pos = view.get(Position, entity); var vel = view.get(Velocity, entity); pos.*.x += vel.x * delta; pos.*.y += vel.y * delta; } } pub const Mass = struct { enable: bool = true, amount: f32 = 0 }; pub fn processGravity(reg: *ecs.Registry, gravity: _GRAVITY, delta: f32) void { var view = reg.view(.{ Mass, Velocity }, .{}); var iter = view.iterator(); while (iter.next()) |entity| { var mas = view.get(Mass, entity); var vel = view.get(Velocity, entity); vel.*.x += mas.amount * gravity.x; vel.*.y += mas.amount * gravity.y; } } pub fn drawPositions(reg: *ecs.Registry, delta: f32) void { var view = reg.view(.{ Position }, .{}); var iter = view.iterator(); while (iter.next()) |entity| { var pos = view.get(Position, entity); main.rectangle(pos.x, pos.y, 2, 2); } } // Name Static Collides with // -------- ---------- ------------------- // World True Nothing // Enemy False Player, World // Player False World pub const AABBKinds = enum { world, enemy, player }; pub const AABB = struct { enabled: bool = true, kind: AABBKinds = AABBKinds.world, w: f32, h: f32, ox: f32 = 0, oy: f32 = 0, pub fn collidingWith(a: *AABB, apos: Position, b: AABB, bpos: Position) bool { return (apos.x < bpos.x+b.w) & (bpos.x < apos.x+a.w) & (apos.y < bpos.y+b.h) & (bpos.y < apos.y+a.h); } }; ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// pub const PlayerStates = enum(u4) { normal, falling, dizzy }; pub const PlayerCharacter = enum(u2) { koli, sis }; // koli is his name. 
pub const Player = struct { enableMovement: bool = true, velocity: f32 = 100, currentState: PlayerStates = PlayerStates.normal, character: PlayerCharacter = PlayerCharacter.koli, texture_idle: Texture, }; pub fn initPlayer(char: PlayerCharacter) Player { var texture_idle: Texture = undefined; switch(char) { .koli => { texture_idle = Texture.fromPNGPath("sprites/koli.png") catch @panic("Error while loading koli's textures!"); }, else => @panic("Only koli has been implemented yet, sorry :(") } var player: Player = .{ .character = char, .texture_idle = texture_idle, }; return player; } pub fn processPlayer(reg: *ecs.Registry, delta: f32) void { var view = reg.view(.{ Velocity, Player }, .{}); var iter = view.iterator(); while (iter.next()) |entity| { var vel = view.get(Velocity, entity); var player = view.get(Player, entity); var keypress = main.getKeys(); switch (player.character) { .koli => switch (player.currentState) { .normal => { vel.*.x = math.lerp(vel.*.x, 0, 12 * delta); vel.*.y = math.lerp(vel.*.y, 0, 12 * delta); if (keypress.up) { vel.*.y -= player.velocity; } if (keypress.down) { vel.*.y += player.velocity; } if (keypress.left) { vel.*.x -= player.velocity; } if (keypress.right) { vel.*.x += player.velocity; } }, else => unreachable }, .sis => switch (player.currentState) { .normal => { vel.*.x = math.lerp(vel.*.x, 0, 12 * delta); vel.*.y = math.lerp(vel.*.y, 0, 12 * delta); if (keypress.up2) { vel.*.y -= player.velocity; } if (keypress.down2) { vel.*.y += player.velocity; } if (keypress.left2) { vel.*.x -= player.velocity; } if (keypress.right2) { vel.*.x += player.velocity; } }, else => unreachable } } } } pub fn drawPlayer(reg: *ecs.Registry, delta: f32) void { var view = reg.view(.{ Position, Player }, .{}); var iter = view.iterator(); while (iter.next()) |entity| { var pos = view.get(Position, entity); var pla = view.get(Player, entity); pla.*.texture_idle.draw(pos.x, pos.y, 2, 2); } } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// pub const Tile = struct { static: bool = true };
src/definition.zig
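AABB.collidingWith above is the standard overlap test for axis-aligned boxes: the boxes overlap exactly when each one's left/top edge lies before the other's right/bottom edge on both axes. A standalone sketch with made-up box values (the Box type here is illustrative, not the repo's AABB; assumes a 0.9/0.10-era Zig toolchain):

const std = @import("std");

const Box = struct { x: f32, y: f32, w: f32, h: f32 };

// Two axis-aligned boxes overlap exactly when they overlap on both
// axes: a.x < b.x + b.w and b.x < a.x + a.w, and likewise for y.
fn overlaps(a: Box, b: Box) bool {
    return (a.x < b.x + b.w) and (b.x < a.x + a.w) and
        (a.y < b.y + b.h) and (b.y < a.y + a.h);
}

test "aabb overlap" {
    const player = Box{ .x = 0, .y = 0, .w = 10, .h = 10 };
    const wall = Box{ .x = 8, .y = 8, .w = 10, .h = 10 };
    const far = Box{ .x = 30, .y = 0, .w = 10, .h = 10 };
    try std.testing.expect(overlaps(player, wall));
    try std.testing.expect(!overlaps(player, far));
}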
const std = @import("std"); const mem = std.mem; const log = std.log; const testing = std.testing; const Allocator = mem.Allocator; const AutoHashMap = std.AutoHashMap; const readInput = @import("parser.zig").readInput; const Instruction = @import("instruction.zig").Instruction; // for testing const Deck = @import("deck.zig").Deck; pub fn part1(allocator: Allocator) !i64 { var instructions = try readInput(allocator); defer instructions.deinit(); const m = 10007; const poly = Instruction.manyToPolynomial(instructions.items, m); return poly.eval(2019, m); } fn part2_helper(allocator: Allocator, instructions: []const Instruction, p: i64, count: usize) !i64 { var inv_instructions = try Instruction.invertMany(allocator, instructions, p); defer allocator.free(inv_instructions); const poly = Instruction.manyToPolynomial(inv_instructions, p).power(count, p); return poly.eval(2020, p); } pub fn part2(allocator: Allocator) !i64 { // Key insight: all positions are `mod m` and correspond to a linear polynomial mod m. // So, shuffle(x) = (f_n * ... f_1)(x) = p(x) for some linear polynomial p. // Hence, (f_n * ... f_1)^n = p^n and we need to use "exponentiation by squaring" // (here, the term exponentiation means function composition). // Since we have to calculate the starting position, we need to invert the instructions first, // i.e. shuffle^{-1} = f_1^{-1} * ... f_n^{-1} = g(x) and then compute g^k(x) for large k, // where k is part of the puzzle input. var instructions = try readInput(allocator); defer instructions.deinit(); const p = 119315717514047; const count = 101741582076661; return part2_helper(allocator, instructions.items, p, count); } test "2019 Day 22, Part 1" { const answer = try part1(testing.allocator); try testing.expectEqual(@as(i64, 1867), answer); } test "2019 Day 22, Part 2" { const answer = try part2(testing.allocator); try testing.expectEqual(@as(i64, 71047285772808), answer); } test "2019 Day 22, Part 2 Small" { var instructions = try readInput(testing.allocator); defer instructions.deinit(); const p = 10007; const count = 1; const answer = try part2_helper(testing.allocator, instructions.items, p, count); // verify answer var deck = try Deck.init(testing.allocator, p); defer deck.deinit(); var i: usize = 0; while (i < count) : (i += 1) { try deck.apply_instructions(instructions.items); } try testing.expectEqual(@as(usize, 9596), deck.get_card(2020).?); try testing.expectEqual(@as(usize, 2020), deck.find_card(@intCast(usize, 9596)).?); try testing.expectEqual(@as(usize, 2020), deck.find_card(@intCast(usize, answer)).?); try testing.expectEqual(@as(i64, 9596), answer); }
day22/src/solve.zig
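The comment in part2 above leans on the fact that every shuffle step is a linear map x -> a*x + b (mod m), so composing steps and repeating the whole shuffle reduces to arithmetic on (a, b) pairs plus exponentiation by squaring. A self-contained sketch of that idea with a small modulus and a made-up map (the Lin/compose/power names are illustrative, not the repo's Instruction API; assumes a 0.9/0.10-era Zig toolchain):

const std = @import("std");

// A shuffle step is a linear map x -> (a*x + b) mod m. Composing two
// such maps stays linear: g(f(x)) = (g.a*f.a)*x + (g.a*f.b + g.b).
// Repeating one map `count` times is then exponentiation by squaring
// on (a, b) pairs, which is what part 2 relies on.
const Lin = struct { a: i64, b: i64 };

fn mulMod(x: i64, y: i64, m: i64) i64 {
    // widen to i128 so the product cannot overflow before reducing
    return @intCast(i64, @mod(@as(i128, x) * @as(i128, y), m));
}

fn compose(g: Lin, f: Lin, m: i64) Lin {
    return .{ .a = mulMod(g.a, f.a, m), .b = @mod(mulMod(g.a, f.b, m) + g.b, m) };
}

fn power(f: Lin, count: u64, m: i64) Lin {
    var result = Lin{ .a = 1, .b = 0 }; // identity map
    var base = f;
    var n = count;
    while (n > 0) : (n >>= 1) {
        if (n & 1 == 1) result = compose(result, base, m);
        base = compose(base, base, m);
    }
    return result;
}

test "linear map composition and powering" {
    const m: i64 = 10007;
    const f = Lin{ .a = 3, .b = 5 }; // x -> 3x + 5 (mod m)
    const f3 = power(f, 3, m);
    // applying f three times to x = 2 by hand: 11 -> 38 -> 119
    try std.testing.expectEqual(@as(i64, 119), @mod(mulMod(f3.a, 2, m) + f3.b, m));
}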
const std = @import("std"); const mem = std.mem; const math = std.math; const warn = std.debug.warn; const futexNs = @import("futex.zig"); const futex_wait = futexNs.futex_wait; const futex_wake = futexNs.futex_wake; const builtin = @import("builtin"); const AtomicOrder = builtin.AtomicOrder; const AtomicRmwOp = builtin.AtomicRmwOp; const messageQueueNs = @import("message_queue.zig"); const SignalContext = messageQueueNs.SignalContext; const ActorDispatcher = @import("actor_dispatcher.zig").ActorDispatcher; pub const ActorThreadContext = struct { const Self = @This(); idn: u8, name_len: usize, name: [32]u8, done: u8, dispatcher: ActorDispatcher(1), pub fn init(pSelf: *Self, idn: u8, name: [] const u8) void { // Set name_len and then copy with truncation pSelf.idn = idn; pSelf.name_len = math.min(name.len, pSelf.name.len); mem.copy(u8, pSelf.name[0..pSelf.name_len], name[0..pSelf.name_len]); warn("ActorThreadContext.init:+ name={}\n", pSelf.name); defer warn("ActorThreadContext.init:- name={}\n", pSelf.name); pSelf.dispatcher.init(); } // TODO: How to support multiple ActorDispatchers? fn threadDispatcherFn(pSelf: *ActorThreadContext) void { warn("threadDispatcherFn:+ {}\n", pSelf.name); defer warn("threadDispatcherFn:- {}\n", pSelf.name); while (@atomicLoad(u8, &pSelf.done, AtomicOrder.SeqCst) == 0) { if (pSelf.dispatcher.loop()) { //warn("TD{}WAIT\n", pSelf.idn); futex_wait(&pSelf.dispatcher.signal_context, 0); } } } // TODO: How to support multiple ActorDispatchers? fn threadDoneFn(doneFn_handle: usize) void { var pContext = @intToPtr(*ActorThreadContext, doneFn_handle); _ = @atomicRmw(u8, &pContext.done, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); _ = @atomicRmw(SignalContext, &pContext.dispatcher.signal_context, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); futex_wake(&pContext.dispatcher.signal_context, 1); } }; //pub fn ActorModel(comptime threads: usize) type { // return struct { // // }; //}
actor_model.zig
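threadDispatcherFn above polls a done byte with @atomicLoad and threadDoneFn flips it with @atomicRmw before waking the futex. A minimal single-threaded sketch of just that flag handshake, with the futex wait/wake left out (helper names are made up; assumes 0.9/0.10-era Zig builtins, where the ordering is spelled AtomicOrder.SeqCst):

const std = @import("std");
const AtomicOrder = std.builtin.AtomicOrder;
const AtomicRmwOp = std.builtin.AtomicRmwOp;

// The dispatcher loop keeps running while `done` reads 0; a shutdown
// path swaps it to 1 (and would then wake the futex the loop sleeps on).
fn requestStop(done: *u8) void {
    _ = @atomicRmw(u8, done, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
}

fn shouldRun(done: *const u8) bool {
    return @atomicLoad(u8, done, AtomicOrder.SeqCst) == 0;
}

test "done flag handshake" {
    var done: u8 = 0;
    try std.testing.expect(shouldRun(&done));
    requestStop(&done);
    try std.testing.expect(!shouldRun(&done));
}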
// for more info see: 'http://soundfile.sapp.org/doc/WaveFormat/' const builtin = @import("builtin"); const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; pub const Format = enum { U8, I16le, I24le, I32le, }; pub const Header = struct { format: Format, channel_ct: u16, sample_rate: u32, frame_ct: u32, byte_ct: u32, }; // TODO change this api, big rewrite // have loader be an obj that has a reader and verbose boolean // TODO handle f32 wavs // parameterized namespace thing // not a generic struct pub fn Loader(comptime Reader: type, comptime verbose: bool) type { return struct { fn readIdentifier(reader: *Reader) ![4]u8 { var ret: [4]u8 = undefined; try reader.readNoEof(&ret); return ret; } fn loaderError(comptime msg: []const u8) !Header { if (verbose) { std.debug.warn("{}\n", .{msg}); } return error.WavLoadFailed; } pub fn readHeader(reader: *Reader) !Header { const chunk_id = try readIdentifier(reader); if (!std.mem.eql(u8, &chunk_id, "RIFF")) { return loaderError("missing \"RIFF\" header"); } try reader.skipBytes(4, .{}); const format_id = try readIdentifier(reader); if (!std.mem.eql(u8, &format_id, "WAVE")) { return loaderError("missing \"WAVE\" identifier"); } const subchunk1_id = try readIdentifier(reader); if (!std.mem.eql(u8, &subchunk1_id, "fmt ")) { return loaderError("missing \"fmt \" header"); } const subchunk1_size = try reader.readIntLittle(u32); if (subchunk1_size != 16) { return loaderError("not PCM (subchunk1_size != 16)"); } const audio_format = try reader.readIntLittle(u16); if (audio_format != 1) { return loaderError("not integer PCM (audio_format != 1)"); } const channel_ct = try reader.readIntLittle(u16); const sample_rate = try reader.readIntLittle(u32); const byte_rate = try reader.readIntLittle(u32); const block_align = try reader.readIntLittle(u16); const bits_per_sample = try reader.readIntLittle(u16); const format: Format = switch (bits_per_sample) { 8 => .U8, 16 => .I16le, 24 => .I24le, 32 => .I32le, else => return loaderError("invalid bits per sample"), }; const bytes_per_sample = bits_per_sample / 8; if (byte_rate != sample_rate * channel_ct * bytes_per_sample) { return loaderError("invalid byte_rate"); } if (block_align != channel_ct * bytes_per_sample) { return loaderError("invalid block_align"); } const subchunk2_id = try readIdentifier(reader); if (!std.mem.eql(u8, &subchunk2_id, "data")) { return loaderError("missing \"data\" header"); } const subchunk2_size = try reader.readIntLittle(u32); if ((subchunk2_size % (channel_ct * bytes_per_sample)) != 0) { return loaderError("invalid subchunk2_size"); } const frame_ct = subchunk2_size / (channel_ct * bytes_per_sample); return Header{ .format = format, .sample_rate = sample_rate, .channel_ct = channel_ct, .frame_ct = frame_ct, .byte_ct = subchunk2_size, }; } fn loadRaw(reader: *Reader, header: Header, out: []u8) !void { // TODO maybe these lens should be exactly equal? 
assert(out.len >= header.byte_ct); try reader.readNoEof(out[0..header.byte_ct]); } pub fn load_U8(reader: *Reader, header: Header, out: []u8) !void { assert(header.format == .U8); try loadRaw(reader, header, out); } pub fn load_I16le(reader: *Reader, header: Header, out: []i16) !void { assert(header.format == .I16le); var buf: []u8 = undefined; buf.ptr = @ptrCast([*]u8, out.ptr); buf.len = out.len * 2; try loadRaw(reader, header, buf); // TODO verify this works if (builtin.endian == .Big) { var i: usize = 0; while (i < buf.len) : (i += 2) { std.mem.swap(u8, &buf[i], &buf[i + 1]); } } } // TODO // note: for now i24s are i32s // pub fn load_I24le(reader: *Reader, header: Header, out: []i32) !void { // assert(header.format == .I24le); // var buf: []u8 = undefined; // buf.ptr = @ptrCast([*]u8, out.ptr); // buf.len = out.len * 4; // try loadRaw(reader, header, buf); // // TODO verify this works // if (builtin.endian == .Big) { // var i: usize = 0; // while (i < buf.len) : (i += 4) { // std.mem.swap(u8, &buf[i], &buf[i + 3]); // std.mem.swap(u8, &buf[i + 1], &buf[i + 2]); // } // } // } pub fn load_I32le(reader: *Reader, header: Header, out: []i32) !void { assert(header.format == .I32le); var buf: []u8 = undefined; buf.ptr = @ptrCast([*]u8, out.ptr); buf.len = out.len * 4; try loadRaw(reader, header, buf); // TODO verify this works if (builtin.endian == .Big) { var i: usize = 0; while (i < buf.len) : (i += 4) { std.mem.swap(u8, &buf[i], &buf[i + 3]); std.mem.swap(u8, &buf[i + 1], &buf[i + 2]); } } } // TODO make this a more generic thing that acts on a buf of [u8] or whatever // TODO use the output as scratch space ?? // note: very sublty clips values of i16 and i32 wavs // TODO move workspace alloc to the begining pub fn loadConvert_F32( reader: *Reader, header: Header, out: []f32, workspace_allocator: *Allocator, ) !void { assert(out.len >= header.frame_ct * header.channel_ct); assert(header.format != .I24le); switch (header.format) { .U8 => { var buf = try workspace_allocator.alloc(u8, header.byte_ct); defer workspace_allocator.free(buf); try load_U8(reader, header, buf); for (buf) |val, i| { out[i] = @intToFloat(f32, val) / @intToFloat(f32, std.math.maxInt(u8)); } }, .I16le => { var buf = try workspace_allocator.alloc( i16, header.frame_ct * header.channel_ct, ); defer workspace_allocator.free(buf); try load_I16le(reader, header, buf); for (buf) |val, i| { if (val == std.math.minInt(i16)) { out[i] = -1.; continue; } out[i] = @intToFloat(f32, val) / @intToFloat(f32, std.math.maxInt(i16)); } }, .I32le => { var buf = try workspace_allocator.alloc( i32, header.frame_ct * header.channel_ct, ); defer workspace_allocator.free(buf); try load_I32le(reader, header, buf); for (buf) |val, i| { if (val == std.math.minInt(i32)) { out[i] = 1.; continue; } out[i] = @intToFloat(f32, val) / @intToFloat(f32, std.math.maxInt(i32)); } }, else => unreachable, } } }; } // TODO Saver // tests === test "wav Loader basic coverage" { const null_wav = [_]u8{ 0x52, 0x49, 0x46, 0x46, 0x7C, 0x00, 0x00, 0x00, 0x57, 0x41, 0x56, 0x45, 0x66, 0x6D, 0x74, 0x20, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x44, 0xAC, 0x00, 0x00, 0x88, 0x58, 0x01, 0x00, 0x02, 0x00, 0x10, 0x00, 0x64, 0x61, 0x74, 0x61, 0x58, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x02, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x02, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0xFE, 0xFF, 0x01, 0x00, 0x01, 0x00, 0xFE, 0xFF, 0x03, 0x00, 0xFD, 0xFF, 0x02, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0xFF, 
0xFF, 0x01, 0x00, 0xFE, 0xFF, 0x02, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x01, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x01, 0x00, 0xFE, 0xFF, 0x02, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x03, 0x00, 0xFC, 0xFF, 0x03, 0x00, }; var reader = std.io.fixedBufferStream(&null_wav).reader(); const MyLoader = Loader(@TypeOf(reader), true); const header = try MyLoader.readHeader(&reader); std.testing.expectEqual(@as(usize, 1), header.channel_ct); std.testing.expectEqual(@as(usize, 44100), header.sample_rate); std.testing.expectEqual(@as(Format, .I16le), header.format); std.testing.expectEqual(@as(usize, 44), header.frame_ct); var buffer: [44]i16 = undefined; try MyLoader.load_I16le(&reader, header, &buffer); } test "wav Loader real file" { const file = @embedFile("../content/square.wav"); var reader = std.io.fixedBufferStream(file).reader(); const MyLoader = Loader(@TypeOf(reader), true); const header = try MyLoader.readHeader(&reader); std.testing.expectEqual(@as(usize, 2), header.channel_ct); std.testing.expectEqual(@as(usize, 44100), header.sample_rate); std.testing.expectEqual(@as(Format, .I16le), header.format); std.testing.expectEqual(@as(usize, 168), header.frame_ct); // var buffer: [168 * 2]i16 = undefined; // try MyLoader.load_I16le(&reader, header, &buffer); var fbuf: [168 * 2]f32 = undefined; try MyLoader.loadConvert_F32(&reader, header, &fbuf, std.testing.allocator); var i: usize = 0; while (i < header.frame_ct * header.channel_ct) : (i += 2) { // const l = buffer[i]; // const r = buffer[i + 1]; const fl = fbuf[i]; const fr = fbuf[i + 1]; if (i < 168) { // std.testing.expectEqual(@as(i16, std.math.maxInt(i16)), l); // std.testing.expectEqual(@as(i16, std.math.minInt(i16)), r); std.testing.expectEqual(@as(f32, 1.), fl); std.testing.expectEqual(@as(f32, -1.), fr); } else { // std.testing.expectEqual(@as(i16, std.math.minInt(i16)), l); // std.testing.expectEqual(@as(i16, std.math.maxInt(i16)), r); std.testing.expectEqual(@as(f32, -1.), fl); std.testing.expectEqual(@as(f32, 1.), fr); } } }
src/wav.zig
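loadConvert_F32 above normalises integer PCM into [-1, 1] by dividing by the type's maxInt, and the i16 path pins minInt to -1 so the result cannot undershoot. A standalone sketch of that arithmetic for i16 samples (illustrative only; assumes a 0.9/0.10-era Zig toolchain where @intToFloat is the cast builtin):

const std = @import("std");

// Map a signed 16-bit PCM sample to [-1.0, 1.0]. Dividing by
// maxInt(i16) alone would send minInt(i16) slightly below -1, so the
// minimum is pinned explicitly (the i16 path of loadConvert_F32 does
// the same).
fn sampleToF32(val: i16) f32 {
    if (val == std.math.minInt(i16)) return -1.0;
    return @intToFloat(f32, val) / @intToFloat(f32, std.math.maxInt(i16));
}

test "i16 samples normalise into [-1, 1]" {
    try std.testing.expectEqual(@as(f32, 1.0), sampleToF32(std.math.maxInt(i16)));
    try std.testing.expectEqual(@as(f32, -1.0), sampleToF32(std.math.minInt(i16)));
    try std.testing.expectEqual(@as(f32, 0.0), sampleToF32(0));
}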
const std = @import("std"); const Step = std.build.Step; const Builder = std.build.Builder; builder: *Builder, step: Step, exe: *std.build.LibExeObjStep, files: std.ArrayList([]const u8), deps: std.ArrayList([]const u8), path: []const u8, const ZigValaStep = @This(); pub fn init(b: *Builder, name: []const u8) *ZigValaStep { var res = b.allocator.create(ZigValaStep) catch @panic("out of memory"); res.* = .{ .files = std.ArrayList([]const u8).init(b.allocator), .step = Step.init(.custom, "compile a vala project", b.allocator, make), .exe = b.addExecutable(name, null), .builder = b, .deps = std.ArrayList([]const u8).init(b.allocator), .path = std.fs.path.join(b.allocator, &.{ b.build_root, "zig-cache", "vala" }) catch @panic("out of memory"), }; res.exe.step.dependOn(&res.step); res.exe.linkLibC(); return res; } pub fn addSourceFile(self: *ZigValaStep, file: []const u8) void { const allocator = self.builder.allocator; const c_file = std.fs.path.join(allocator, &.{ self.path, std.mem.concat( allocator, u8, &.{ removeExtension(file), ".c" }, ) catch @panic("out of memory"), }) catch @panic("out of memory"); defer allocator.free(c_file); self.exe.addCSourceFile(c_file, &.{}); self.files.append(file) catch @panic("out of memory"); } pub fn addPackage(self: *ZigValaStep, pkg: []const u8) void { self.deps.append(pkg) catch @panic("out of memory"); self.exe.linkSystemLibrary(pkg); } fn removeExtension(filename: []const u8) []const u8 { const index = std.mem.lastIndexOfScalar(u8, filename, '.') orelse return filename; if (index == 0) return filename; return filename[0..index]; } fn make(step: *Step) !void { const self = @fieldParentPtr(ZigValaStep, "step", step); const builder = self.builder; const allocator = builder.allocator; var args = std.ArrayList([]const u8).init(allocator); defer args.deinit(); try args.append("valac"); try args.append("-C"); try args.append("-d"); try args.append(self.path); for (self.files.items) |file| { try args.append(file); } for (self.deps.items) |dep| { try args.append("--pkg"); try args.append(dep); } const proc = try std.ChildProcess.init(args.items, allocator); defer proc.deinit(); proc.stdin_behavior = .Ignore; proc.stdout_behavior = .Inherit; proc.stderr_behavior = .Inherit; proc.cwd = builder.build_root; proc.env_map = builder.env_map; try proc.spawn(); const result = try proc.wait(); switch (result) { .Exited => |code| if (code != 0) { std.os.exit(0xff); }, else => { std.log.err("valac failed with: {}", .{result}); std.os.exit(0xff); }, } }
ZigValaStep.zig
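A hypothetical build.zig showing how this step might be wired up: valac generates C into zig-cache/vala, and the wrapped LibExeObjStep compiles and links it. The source path and package name below are made up, and valac must be on PATH (assumes the same 0.9-era std.build API the step itself targets):

const std = @import("std");
const ZigValaStep = @import("ZigValaStep.zig");

// Hypothetical build.zig: compile main.vala to C via valac, then let
// zig build compile and link the generated C.
pub fn build(b: *std.build.Builder) void {
    const vala = ZigValaStep.init(b, "hello-vala");
    vala.addSourceFile("src/main.vala"); // made-up path
    vala.addPackage("gtk+-3.0"); // made-up dependency
    vala.exe.install();
}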
const std = @import("std"); usingnamespace (@import("../machine.zig")); usingnamespace (@import("../util.zig")); test "80486" { const m32 = Machine.init(.x86_32); const m64 = Machine.init(.x64); const rm8 = Operand.memoryRm(.DefaultSeg, .BYTE, .EAX, 0); const rm16 = Operand.memoryRm(.DefaultSeg, .WORD, .EAX, 0); const rm32 = Operand.memoryRm(.DefaultSeg, .DWORD, .EAX, 0); const rm64 = Operand.memoryRm(.DefaultSeg, .QWORD, .EAX, 0); const rm_mem = Operand.memoryRm(.DefaultSeg, .Void, .EAX, 0); const reg8 = Operand.register(.AL); const reg16 = Operand.register(.AX); const reg32 = Operand.register(.EAX); const reg64 = Operand.register(.RAX); const cx = Operand.register(.CX); const ecx = Operand.register(.ECX); const rcx = Operand.register(.RCX); const imm8 = Operand.immediate(0xff); debugPrint(false); { testOp1(m32, .BSWAP, reg16, "66 0F C8"); testOp1(m32, .BSWAP, reg32, "0F C8"); testOp1(m32, .BSWAP, reg64, AsmError.InvalidOperand); testOp1(m32, .BSWAP, cx, "66 0F C9"); testOp1(m32, .BSWAP, ecx, "0F C9"); testOp1(m32, .BSWAP, rcx, AsmError.InvalidOperand); // testOp1(m64, .BSWAP, reg16, "66 0F C8"); testOp1(m64, .BSWAP, reg32, "0F C8"); testOp1(m64, .BSWAP, reg64, "48 0F C8"); testOp1(m64, .BSWAP, cx, "66 0F C9"); testOp1(m64, .BSWAP, ecx, "0F C9"); testOp1(m64, .BSWAP, rcx, "48 0F C9"); } { testOp2(m32, .CMPXCHG, rm8, reg8, "0F B0 00"); testOp2(m32, .CMPXCHG, rm16, reg16, "66 0F B1 00"); testOp2(m32, .CMPXCHG, rm32, reg32, "0F B1 00"); testOp2(m32, .CMPXCHG, rm64, reg64, AsmError.InvalidOperand); // testOp2(m64, .CMPXCHG, rm8, reg8, "67 0F B0 00"); testOp2(m64, .CMPXCHG, rm16, reg16, "66 67 0F B1 00"); testOp2(m64, .CMPXCHG, rm32, reg32, "67 0F B1 00"); testOp2(m64, .CMPXCHG, rm64, reg64, "67 48 0F B1 00"); } { testOp0(m32, .INVD, "0F 08"); testOp0(m64, .INVD, "0F 08"); // testOp0(m32, .WBINVD, "0F 09"); testOp0(m64, .WBINVD, "0F 09"); // testOp1(m32, .INVLPG, rm_mem, "0F 01 38"); testOp1(m64, .INVLPG, rm_mem, "67 0F 01 38"); } { testOp2(m32, .XADD, rm8, reg8, "0F C0 00"); testOp2(m32, .XADD, rm16, reg16, "66 0F C1 00"); testOp2(m32, .XADD, rm32, reg32, "0F C1 00"); testOp2(m32, .XADD, rm64, reg64, AsmError.InvalidOperand); // testOp2(m64, .XADD, rm8, reg8, "67 0F C0 00"); testOp2(m64, .XADD, rm16, reg16, "66 67 0F C1 00"); testOp2(m64, .XADD, rm32, reg32, "67 0F C1 00"); testOp2(m64, .XADD, rm64, reg64, "67 48 0F C1 00"); } }
src/x86/tests/80486.zig
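The expected 64-bit encodings above differ from the 32-bit ones mainly by the 0x48 REX.W prefix (plus the 0x67 address-size prefix for the EAX-based memory operands). A standalone sketch of how a REX byte is assembled from its W/R/X/B bits, independent of this repo's Machine/Operand API (assumes a 0.9/0.10-era Zig toolchain):

const std = @import("std");

// REX prefix layout: 0b0100WRXB. W selects 64-bit operand size; R, X
// and B extend the reg, index and r/m fields of the following bytes.
fn rex(w: u1, r: u1, x: u1, b: u1) u8 {
    return 0x40 | (@as(u8, w) << 3) | (@as(u8, r) << 2) | (@as(u8, x) << 1) | @as(u8, b);
}

test "REX.W is the 0x48 seen before 64-bit BSWAP/CMPXCHG/XADD" {
    try std.testing.expectEqual(@as(u8, 0x48), rex(1, 0, 0, 0));
    try std.testing.expectEqual(@as(u8, 0x41), rex(0, 0, 0, 1));
}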
const std = @import("std"); const utils = @import("utils.zig"); const AttributeInfo = @import("attributes.zig").AttributeInfo; const ConstantPool = @import("ConstantPool.zig"); const MethodInfo = @This(); pub const AccessFlags = struct { public: bool = false, private: bool = false, protected: bool = false, static: bool = false, final: bool = false, synchronized: bool = false, bridge: bool = false, varargs: bool = false, native: bool = false, abstract: bool = false, strict: bool = false, synthetic: bool = false, }; constant_pool: *ConstantPool, access_flags: AccessFlags, name_index: u16, descriptor_index: u16, attributes: std.ArrayList(AttributeInfo), pub fn getName(self: MethodInfo) ConstantPool.Utf8Info { return self.constant_pool.get(self.name_index).utf8; } pub fn getDescriptor(self: MethodInfo) ConstantPool.Utf8Info { return self.constant_pool.get(self.descriptor_index).utf8; } pub fn format(self: MethodInfo, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { _ = fmt; _ = options; try writer.print("MethodInfo({s} {s})", .{ self.getName().bytes, self.getDescriptor().bytes }); } pub fn decode(constant_pool: *ConstantPool, allocator: std.mem.Allocator, reader: anytype) !MethodInfo { var access_flags_u = try reader.readIntBig(u16); var name_index = try reader.readIntBig(u16); var descriptor_index = try reader.readIntBig(u16); var attributes_length = try reader.readIntBig(u16); var attributes_index: usize = 0; var attributess = std.ArrayList(AttributeInfo).init(allocator); while (attributes_index < attributes_length) : (attributes_index += 1) { var decoded = try AttributeInfo.decode(constant_pool, allocator, reader); if (decoded == .unknown) { attributes_length -= 1; continue; } try attributess.append(decoded); } return MethodInfo{ .constant_pool = constant_pool, .access_flags = .{ .public = utils.isPresent(u16, access_flags_u, 0x0001), .private = utils.isPresent(u16, access_flags_u, 0x0002), .protected = utils.isPresent(u16, access_flags_u, 0x0004), .static = utils.isPresent(u16, access_flags_u, 0x0008), .final = utils.isPresent(u16, access_flags_u, 0x0010), .synchronized = utils.isPresent(u16, access_flags_u, 0x0020), .bridge = utils.isPresent(u16, access_flags_u, 0x0040), .varargs = utils.isPresent(u16, access_flags_u, 0x0080), .native = utils.isPresent(u16, access_flags_u, 0x0100), .abstract = utils.isPresent(u16, access_flags_u, 0x0400), .strict = utils.isPresent(u16, access_flags_u, 0x0800), .synthetic = utils.isPresent(u16, access_flags_u, 0x1000), }, .name_index = name_index, .descriptor_index = descriptor_index, .attributes = attributess, }; } pub fn encode(self: MethodInfo, writer: anytype) !void { var access_flags_u: u16 = 0; if (self.access_flags.public) utils.setPresent(u16, &access_flags_u, 0x0001); if (self.access_flags.private) utils.setPresent(u16, &access_flags_u, 0x0002); if (self.access_flags.protected) utils.setPresent(u16, &access_flags_u, 0x0004); if (self.access_flags.static) utils.setPresent(u16, &access_flags_u, 0x0008); if (self.access_flags.final) utils.setPresent(u16, &access_flags_u, 0x0010); if (self.access_flags.synchronized) utils.setPresent(u16, &access_flags_u, 0x0020); if (self.access_flags.bridge) utils.setPresent(u16, &access_flags_u, 0x0040); if (self.access_flags.varargs) utils.setPresent(u16, &access_flags_u, 0x0080); if (self.access_flags.native) utils.setPresent(u16, &access_flags_u, 0x0100); if (self.access_flags.abstract) utils.setPresent(u16, &access_flags_u, 0x0400); if (self.access_flags.strict) 
utils.setPresent(u16, &access_flags_u, 0x0800); if (self.access_flags.synthetic) utils.setPresent(u16, &access_flags_u, 0x1000); try writer.writeIntBig(u16, access_flags_u); try writer.writeIntBig(u16, self.name_index); try writer.writeIntBig(u16, self.descriptor_index); try writer.writeIntBig(u16, @intCast(u16, self.attributes.items.len)); for (self.attributes.items) |*att| try att.encode(writer); } pub fn deinit(self: MethodInfo) void { for (self.attributes.items) |*att| att.deinit(); self.attributes.deinit(); }
src/MethodInfo.zig
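access_flags travels as a u16 bitmask whose bit positions come from the class-file spec (0x0001 ACC_PUBLIC, 0x0008 ACC_STATIC, and so on). utils.zig is not shown here, so the sketch below re-implements plausible stand-ins for isPresent/setPresent just to show the mask arithmetic; the real helpers may differ (assumes a 0.9/0.10-era Zig toolchain):

const std = @import("std");

// Stand-ins for the utils.isPresent / utils.setPresent helpers used above.
fn isPresent(comptime T: type, value: T, mask: T) bool {
    return value & mask == mask;
}

fn setPresent(comptime T: type, value: *T, mask: T) void {
    value.* |= mask;
}

test "method access flags round-trip through the u16 mask" {
    // e.g. a `public static` method: ACC_PUBLIC (0x0001) | ACC_STATIC (0x0008)
    var flags: u16 = 0;
    setPresent(u16, &flags, 0x0001);
    setPresent(u16, &flags, 0x0008);
    try std.testing.expectEqual(@as(u16, 0x0009), flags);
    try std.testing.expect(isPresent(u16, flags, 0x0008));
    try std.testing.expect(!isPresent(u16, flags, 0x0002)); // not ACC_PRIVATE
}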
pub const cgltf_size = usize; pub const cgltf_float = f32; pub const cgltf_int = c_int; pub const cgltf_uint = c_uint; pub const cgltf_bool = c_int; pub const cgltf_file_type_invalid: c_int = 0; pub const cgltf_file_type_gltf: c_int = 1; pub const cgltf_file_type_glb: c_int = 2; pub const enum_cgltf_file_type = c_uint; pub const cgltf_file_type = enum_cgltf_file_type; pub const cgltf_result_success: c_int = 0; pub const cgltf_result_data_too_short: c_int = 1; pub const cgltf_result_unknown_format: c_int = 2; pub const cgltf_result_invalid_json: c_int = 3; pub const cgltf_result_invalid_gltf: c_int = 4; pub const cgltf_result_invalid_options: c_int = 5; pub const cgltf_result_file_not_found: c_int = 6; pub const cgltf_result_io_error: c_int = 7; pub const cgltf_result_out_of_memory: c_int = 8; pub const cgltf_result_legacy_gltf: c_int = 9; pub const enum_cgltf_result = c_uint; pub const cgltf_result = enum_cgltf_result; pub const struct_cgltf_memory_options = extern struct { alloc: ?fn (?*anyopaque, cgltf_size) callconv(.C) ?*anyopaque, free: ?fn (?*anyopaque, ?*anyopaque) callconv(.C) void, user_data: ?*anyopaque, }; pub const cgltf_memory_options = struct_cgltf_memory_options; pub const struct_cgltf_file_options = extern struct { read: ?fn ([*c]const struct_cgltf_memory_options, [*c]const struct_cgltf_file_options, [*c]const u8, [*c]cgltf_size, [*c]?*anyopaque) callconv(.C) cgltf_result, release: ?fn ([*c]const struct_cgltf_memory_options, [*c]const struct_cgltf_file_options, ?*anyopaque) callconv(.C) void, user_data: ?*anyopaque, }; pub const cgltf_file_options = struct_cgltf_file_options; pub const struct_cgltf_options = extern struct { type: cgltf_file_type, json_token_count: cgltf_size, memory: cgltf_memory_options, file: cgltf_file_options, }; pub const cgltf_options = struct_cgltf_options; pub const cgltf_buffer_view_type_invalid: c_int = 0; pub const cgltf_buffer_view_type_indices: c_int = 1; pub const cgltf_buffer_view_type_vertices: c_int = 2; pub const enum_cgltf_buffer_view_type = c_uint; pub const cgltf_buffer_view_type = enum_cgltf_buffer_view_type; pub const cgltf_attribute_type_invalid: c_int = 0; pub const cgltf_attribute_type_position: c_int = 1; pub const cgltf_attribute_type_normal: c_int = 2; pub const cgltf_attribute_type_tangent: c_int = 3; pub const cgltf_attribute_type_texcoord: c_int = 4; pub const cgltf_attribute_type_color: c_int = 5; pub const cgltf_attribute_type_joints: c_int = 6; pub const cgltf_attribute_type_weights: c_int = 7; pub const enum_cgltf_attribute_type = c_uint; pub const cgltf_attribute_type = enum_cgltf_attribute_type; pub const cgltf_component_type_invalid: c_int = 0; pub const cgltf_component_type_r_8: c_int = 1; pub const cgltf_component_type_r_8u: c_int = 2; pub const cgltf_component_type_r_16: c_int = 3; pub const cgltf_component_type_r_16u: c_int = 4; pub const cgltf_component_type_r_32u: c_int = 5; pub const cgltf_component_type_r_32f: c_int = 6; pub const enum_cgltf_component_type = c_uint; pub const cgltf_component_type = enum_cgltf_component_type; pub const cgltf_type_invalid: c_int = 0; pub const cgltf_type_scalar: c_int = 1; pub const cgltf_type_vec2: c_int = 2; pub const cgltf_type_vec3: c_int = 3; pub const cgltf_type_vec4: c_int = 4; pub const cgltf_type_mat2: c_int = 5; pub const cgltf_type_mat3: c_int = 6; pub const cgltf_type_mat4: c_int = 7; pub const enum_cgltf_type = c_uint; pub const cgltf_type = enum_cgltf_type; pub const cgltf_primitive_type_points: c_int = 0; pub const cgltf_primitive_type_lines: c_int = 1; pub const 
cgltf_primitive_type_line_loop: c_int = 2; pub const cgltf_primitive_type_line_strip: c_int = 3; pub const cgltf_primitive_type_triangles: c_int = 4; pub const cgltf_primitive_type_triangle_strip: c_int = 5; pub const cgltf_primitive_type_triangle_fan: c_int = 6; pub const enum_cgltf_primitive_type = c_uint; pub const cgltf_primitive_type = enum_cgltf_primitive_type; pub const cgltf_alpha_mode_opaque: c_int = 0; pub const cgltf_alpha_mode_mask: c_int = 1; pub const cgltf_alpha_mode_blend: c_int = 2; pub const enum_cgltf_alpha_mode = c_uint; pub const cgltf_alpha_mode = enum_cgltf_alpha_mode; pub const cgltf_animation_path_type_invalid: c_int = 0; pub const cgltf_animation_path_type_translation: c_int = 1; pub const cgltf_animation_path_type_rotation: c_int = 2; pub const cgltf_animation_path_type_scale: c_int = 3; pub const cgltf_animation_path_type_weights: c_int = 4; pub const enum_cgltf_animation_path_type = c_uint; pub const cgltf_animation_path_type = enum_cgltf_animation_path_type; pub const cgltf_interpolation_type_linear: c_int = 0; pub const cgltf_interpolation_type_step: c_int = 1; pub const cgltf_interpolation_type_cubic_spline: c_int = 2; pub const enum_cgltf_interpolation_type = c_uint; pub const cgltf_interpolation_type = enum_cgltf_interpolation_type; pub const cgltf_camera_type_invalid: c_int = 0; pub const cgltf_camera_type_perspective: c_int = 1; pub const cgltf_camera_type_orthographic: c_int = 2; pub const enum_cgltf_camera_type = c_uint; pub const cgltf_camera_type = enum_cgltf_camera_type; pub const cgltf_light_type_invalid: c_int = 0; pub const cgltf_light_type_directional: c_int = 1; pub const cgltf_light_type_point: c_int = 2; pub const cgltf_light_type_spot: c_int = 3; pub const enum_cgltf_light_type = c_uint; pub const cgltf_light_type = enum_cgltf_light_type; pub const cgltf_data_free_method_none: c_int = 0; pub const cgltf_data_free_method_file_release: c_int = 1; pub const cgltf_data_free_method_memory_free: c_int = 2; pub const enum_cgltf_data_free_method = c_uint; pub const cgltf_data_free_method = enum_cgltf_data_free_method; pub const struct_cgltf_extras = extern struct { start_offset: cgltf_size, end_offset: cgltf_size, }; pub const cgltf_extras = struct_cgltf_extras; pub const struct_cgltf_extension = extern struct { name: [*c]u8, data: [*c]u8, }; pub const cgltf_extension = struct_cgltf_extension; pub const struct_cgltf_buffer = extern struct { name: [*c]u8, size: cgltf_size, uri: [*c]u8, data: ?*anyopaque, data_free_method: cgltf_data_free_method, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_buffer = struct_cgltf_buffer; pub const cgltf_meshopt_compression_mode_invalid: c_int = 0; pub const cgltf_meshopt_compression_mode_attributes: c_int = 1; pub const cgltf_meshopt_compression_mode_triangles: c_int = 2; pub const cgltf_meshopt_compression_mode_indices: c_int = 3; pub const enum_cgltf_meshopt_compression_mode = c_uint; pub const cgltf_meshopt_compression_mode = enum_cgltf_meshopt_compression_mode; pub const cgltf_meshopt_compression_filter_none: c_int = 0; pub const cgltf_meshopt_compression_filter_octahedral: c_int = 1; pub const cgltf_meshopt_compression_filter_quaternion: c_int = 2; pub const cgltf_meshopt_compression_filter_exponential: c_int = 3; pub const enum_cgltf_meshopt_compression_filter = c_uint; pub const cgltf_meshopt_compression_filter = enum_cgltf_meshopt_compression_filter; pub const struct_cgltf_meshopt_compression = extern struct { buffer: [*c]cgltf_buffer, offset: 
cgltf_size, size: cgltf_size, stride: cgltf_size, count: cgltf_size, mode: cgltf_meshopt_compression_mode, filter: cgltf_meshopt_compression_filter, }; pub const cgltf_meshopt_compression = struct_cgltf_meshopt_compression; pub const struct_cgltf_buffer_view = extern struct { name: [*c]u8, buffer: [*c]cgltf_buffer, offset: cgltf_size, size: cgltf_size, stride: cgltf_size, type: cgltf_buffer_view_type, data: ?*anyopaque, has_meshopt_compression: cgltf_bool, meshopt_compression: cgltf_meshopt_compression, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_buffer_view = struct_cgltf_buffer_view; pub const struct_cgltf_accessor_sparse = extern struct { count: cgltf_size, indices_buffer_view: [*c]cgltf_buffer_view, indices_byte_offset: cgltf_size, indices_component_type: cgltf_component_type, values_buffer_view: [*c]cgltf_buffer_view, values_byte_offset: cgltf_size, extras: cgltf_extras, indices_extras: cgltf_extras, values_extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, indices_extensions_count: cgltf_size, indices_extensions: [*c]cgltf_extension, values_extensions_count: cgltf_size, values_extensions: [*c]cgltf_extension, }; pub const cgltf_accessor_sparse = struct_cgltf_accessor_sparse; pub const struct_cgltf_accessor = extern struct { name: [*c]u8, component_type: cgltf_component_type, normalized: cgltf_bool, type: cgltf_type, offset: cgltf_size, count: cgltf_size, stride: cgltf_size, buffer_view: [*c]cgltf_buffer_view, has_min: cgltf_bool, min: [16]cgltf_float, has_max: cgltf_bool, max: [16]cgltf_float, is_sparse: cgltf_bool, sparse: cgltf_accessor_sparse, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_accessor = struct_cgltf_accessor; pub const struct_cgltf_attribute = extern struct { name: [*c]u8, type: cgltf_attribute_type, index: cgltf_int, data: [*c]cgltf_accessor, }; pub const cgltf_attribute = struct_cgltf_attribute; pub const struct_cgltf_image = extern struct { name: [*c]u8, uri: [*c]u8, buffer_view: [*c]cgltf_buffer_view, mime_type: [*c]u8, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_image = struct_cgltf_image; pub const struct_cgltf_sampler = extern struct { name: [*c]u8, mag_filter: cgltf_int, min_filter: cgltf_int, wrap_s: cgltf_int, wrap_t: cgltf_int, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_sampler = struct_cgltf_sampler; pub const struct_cgltf_texture = extern struct { name: [*c]u8, image: [*c]cgltf_image, sampler: [*c]cgltf_sampler, has_basisu: cgltf_bool, basisu_image: [*c]cgltf_image, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_texture = struct_cgltf_texture; pub const struct_cgltf_texture_transform = extern struct { offset: [2]cgltf_float, rotation: cgltf_float, scale: [2]cgltf_float, has_texcoord: cgltf_bool, texcoord: cgltf_int, }; pub const cgltf_texture_transform = struct_cgltf_texture_transform; pub const struct_cgltf_texture_view = extern struct { texture: [*c]cgltf_texture, texcoord: cgltf_int, scale: cgltf_float, has_transform: cgltf_bool, transform: cgltf_texture_transform, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_texture_view = struct_cgltf_texture_view; pub const struct_cgltf_pbr_metallic_roughness = extern struct { base_color_texture: cgltf_texture_view, 
metallic_roughness_texture: cgltf_texture_view, base_color_factor: [4]cgltf_float, metallic_factor: cgltf_float, roughness_factor: cgltf_float, extras: cgltf_extras, }; pub const cgltf_pbr_metallic_roughness = struct_cgltf_pbr_metallic_roughness; pub const struct_cgltf_pbr_specular_glossiness = extern struct { diffuse_texture: cgltf_texture_view, specular_glossiness_texture: cgltf_texture_view, diffuse_factor: [4]cgltf_float, specular_factor: [3]cgltf_float, glossiness_factor: cgltf_float, }; pub const cgltf_pbr_specular_glossiness = struct_cgltf_pbr_specular_glossiness; pub const struct_cgltf_clearcoat = extern struct { clearcoat_texture: cgltf_texture_view, clearcoat_roughness_texture: cgltf_texture_view, clearcoat_normal_texture: cgltf_texture_view, clearcoat_factor: cgltf_float, clearcoat_roughness_factor: cgltf_float, }; pub const cgltf_clearcoat = struct_cgltf_clearcoat; pub const struct_cgltf_transmission = extern struct { transmission_texture: cgltf_texture_view, transmission_factor: cgltf_float, }; pub const cgltf_transmission = struct_cgltf_transmission; pub const struct_cgltf_ior = extern struct { ior: cgltf_float, }; pub const cgltf_ior = struct_cgltf_ior; pub const struct_cgltf_specular = extern struct { specular_texture: cgltf_texture_view, specular_color_texture: cgltf_texture_view, specular_color_factor: [3]cgltf_float, specular_factor: cgltf_float, }; pub const cgltf_specular = struct_cgltf_specular; pub const struct_cgltf_volume = extern struct { thickness_texture: cgltf_texture_view, thickness_factor: cgltf_float, attenuation_color: [3]cgltf_float, attenuation_distance: cgltf_float, }; pub const cgltf_volume = struct_cgltf_volume; pub const struct_cgltf_sheen = extern struct { sheen_color_texture: cgltf_texture_view, sheen_color_factor: [3]cgltf_float, sheen_roughness_texture: cgltf_texture_view, sheen_roughness_factor: cgltf_float, }; pub const cgltf_sheen = struct_cgltf_sheen; pub const struct_cgltf_material = extern struct { name: [*c]u8, has_pbr_metallic_roughness: cgltf_bool, has_pbr_specular_glossiness: cgltf_bool, has_clearcoat: cgltf_bool, has_transmission: cgltf_bool, has_volume: cgltf_bool, has_ior: cgltf_bool, has_specular: cgltf_bool, has_sheen: cgltf_bool, pbr_metallic_roughness: cgltf_pbr_metallic_roughness, pbr_specular_glossiness: cgltf_pbr_specular_glossiness, clearcoat: cgltf_clearcoat, ior: cgltf_ior, specular: cgltf_specular, sheen: cgltf_sheen, transmission: cgltf_transmission, volume: cgltf_volume, normal_texture: cgltf_texture_view, occlusion_texture: cgltf_texture_view, emissive_texture: cgltf_texture_view, emissive_factor: [3]cgltf_float, alpha_mode: cgltf_alpha_mode, alpha_cutoff: cgltf_float, double_sided: cgltf_bool, unlit: cgltf_bool, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_material = struct_cgltf_material; pub const struct_cgltf_material_mapping = extern struct { variant: cgltf_size, material: [*c]cgltf_material, extras: cgltf_extras, }; pub const cgltf_material_mapping = struct_cgltf_material_mapping; pub const struct_cgltf_morph_target = extern struct { attributes: [*c]cgltf_attribute, attributes_count: cgltf_size, }; pub const cgltf_morph_target = struct_cgltf_morph_target; pub const struct_cgltf_draco_mesh_compression = extern struct { buffer_view: [*c]cgltf_buffer_view, attributes: [*c]cgltf_attribute, attributes_count: cgltf_size, }; pub const cgltf_draco_mesh_compression = struct_cgltf_draco_mesh_compression; pub const struct_cgltf_primitive = extern struct { type: 
cgltf_primitive_type, indices: [*c]cgltf_accessor, material: [*c]cgltf_material, attributes: [*c]cgltf_attribute, attributes_count: cgltf_size, targets: [*c]cgltf_morph_target, targets_count: cgltf_size, extras: cgltf_extras, has_draco_mesh_compression: cgltf_bool, draco_mesh_compression: cgltf_draco_mesh_compression, mappings: [*c]cgltf_material_mapping, mappings_count: cgltf_size, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_primitive = struct_cgltf_primitive; pub const struct_cgltf_mesh = extern struct { name: [*c]u8, primitives: [*c]cgltf_primitive, primitives_count: cgltf_size, weights: [*c]cgltf_float, weights_count: cgltf_size, target_names: [*c][*c]u8, target_names_count: cgltf_size, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_mesh = struct_cgltf_mesh; pub const cgltf_node = struct_cgltf_node; pub const struct_cgltf_skin = extern struct { name: [*c]u8, joints: [*c][*c]cgltf_node, joints_count: cgltf_size, skeleton: [*c]cgltf_node, inverse_bind_matrices: [*c]cgltf_accessor, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_skin = struct_cgltf_skin; pub const struct_cgltf_camera_perspective = extern struct { has_aspect_ratio: cgltf_bool, aspect_ratio: cgltf_float, yfov: cgltf_float, has_zfar: cgltf_bool, zfar: cgltf_float, znear: cgltf_float, extras: cgltf_extras, }; pub const cgltf_camera_perspective = struct_cgltf_camera_perspective; pub const struct_cgltf_camera_orthographic = extern struct { xmag: cgltf_float, ymag: cgltf_float, zfar: cgltf_float, znear: cgltf_float, extras: cgltf_extras, }; pub const cgltf_camera_orthographic = struct_cgltf_camera_orthographic; const union_unnamed_1 = extern union { perspective: cgltf_camera_perspective, orthographic: cgltf_camera_orthographic, }; pub const struct_cgltf_camera = extern struct { name: [*c]u8, type: cgltf_camera_type, data: union_unnamed_1, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_camera = struct_cgltf_camera; pub const struct_cgltf_light = extern struct { name: [*c]u8, color: [3]cgltf_float, intensity: cgltf_float, type: cgltf_light_type, range: cgltf_float, spot_inner_cone_angle: cgltf_float, spot_outer_cone_angle: cgltf_float, extras: cgltf_extras, }; pub const cgltf_light = struct_cgltf_light; pub const struct_cgltf_node = extern struct { name: [*c]u8, parent: [*c]cgltf_node, children: [*c][*c]cgltf_node, children_count: cgltf_size, skin: [*c]cgltf_skin, mesh: [*c]cgltf_mesh, camera: [*c]cgltf_camera, light: [*c]cgltf_light, weights: [*c]cgltf_float, weights_count: cgltf_size, has_translation: cgltf_bool, has_rotation: cgltf_bool, has_scale: cgltf_bool, has_matrix: cgltf_bool, translation: [3]cgltf_float, rotation: [4]cgltf_float, scale: [3]cgltf_float, matrix: [16]cgltf_float, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const struct_cgltf_scene = extern struct { name: [*c]u8, nodes: [*c][*c]cgltf_node, nodes_count: cgltf_size, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_scene = struct_cgltf_scene; pub const struct_cgltf_animation_sampler = extern struct { input: [*c]cgltf_accessor, output: [*c]cgltf_accessor, interpolation: cgltf_interpolation_type, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_animation_sampler = struct_cgltf_animation_sampler; pub 
const struct_cgltf_animation_channel = extern struct { sampler: [*c]cgltf_animation_sampler, target_node: [*c]cgltf_node, target_path: cgltf_animation_path_type, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_animation_channel = struct_cgltf_animation_channel; pub const struct_cgltf_animation = extern struct { name: [*c]u8, samplers: [*c]cgltf_animation_sampler, samplers_count: cgltf_size, channels: [*c]cgltf_animation_channel, channels_count: cgltf_size, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_animation = struct_cgltf_animation; pub const struct_cgltf_material_variant = extern struct { name: [*c]u8, extras: cgltf_extras, }; pub const cgltf_material_variant = struct_cgltf_material_variant; pub const struct_cgltf_asset = extern struct { copyright: [*c]u8, generator: [*c]u8, version: [*c]u8, min_version: [*c]u8, extras: cgltf_extras, extensions_count: cgltf_size, extensions: [*c]cgltf_extension, }; pub const cgltf_asset = struct_cgltf_asset; pub const struct_cgltf_data = extern struct { file_type: cgltf_file_type, file_data: ?*anyopaque, asset: cgltf_asset, meshes: [*c]cgltf_mesh, meshes_count: cgltf_size, materials: [*c]cgltf_material, materials_count: cgltf_size, accessors: [*c]cgltf_accessor, accessors_count: cgltf_size, buffer_views: [*c]cgltf_buffer_view, buffer_views_count: cgltf_size, buffers: [*c]cgltf_buffer, buffers_count: cgltf_size, images: [*c]cgltf_image, images_count: cgltf_size, textures: [*c]cgltf_texture, textures_count: cgltf_size, samplers: [*c]cgltf_sampler, samplers_count: cgltf_size, skins: [*c]cgltf_skin, skins_count: cgltf_size, cameras: [*c]cgltf_camera, cameras_count: cgltf_size, lights: [*c]cgltf_light, lights_count: cgltf_size, nodes: [*c]cgltf_node, nodes_count: cgltf_size, scenes: [*c]cgltf_scene, scenes_count: cgltf_size, scene: [*c]cgltf_scene, animations: [*c]cgltf_animation, animations_count: cgltf_size, variants: [*c]cgltf_material_variant, variants_count: cgltf_size, extras: cgltf_extras, data_extensions_count: cgltf_size, data_extensions: [*c]cgltf_extension, extensions_used: [*c][*c]u8, extensions_used_count: cgltf_size, extensions_required: [*c][*c]u8, extensions_required_count: cgltf_size, json: [*c]const u8, json_size: cgltf_size, bin: ?*const anyopaque, bin_size: cgltf_size, memory: cgltf_memory_options, file: cgltf_file_options, }; pub const cgltf_data = struct_cgltf_data; pub extern fn cgltf_parse(options: [*c]const cgltf_options, data: ?*const anyopaque, size: cgltf_size, out_data: [*c][*c]cgltf_data) cgltf_result; pub extern fn cgltf_parse_file(options: [*c]const cgltf_options, path: [*c]const u8, out_data: [*c][*c]cgltf_data) cgltf_result; pub extern fn cgltf_free(data: [*c]cgltf_data) void; pub extern fn cgltf_load_buffers(options: [*c]const cgltf_options, data: [*c]cgltf_data, gltf_path: [*c]const u8) cgltf_result; pub extern fn cgltf_load_buffer_base64(options: [*c]const cgltf_options, size: cgltf_size, base64: [*c]const u8, out_data: [*c]?*anyopaque) cgltf_result; pub extern fn cgltf_decode_string(string: [*c]u8) cgltf_size; pub extern fn cgltf_decode_uri(uri: [*c]u8) cgltf_size; pub extern fn cgltf_validate(data: [*c]cgltf_data) cgltf_result; pub extern fn cgltf_node_transform_local(node: [*c]const cgltf_node, out_matrix: [*c]cgltf_float) void; pub extern fn cgltf_node_transform_world(node: [*c]const cgltf_node, out_matrix: [*c]cgltf_float) void; pub extern fn cgltf_accessor_read_float(accessor: [*c]const cgltf_accessor, 
index: cgltf_size, out: [*c]cgltf_float, element_size: cgltf_size) cgltf_bool; pub extern fn cgltf_accessor_read_uint(accessor: [*c]const cgltf_accessor, index: cgltf_size, out: [*c]cgltf_uint, element_size: cgltf_size) cgltf_bool; pub extern fn cgltf_accessor_read_index(accessor: [*c]const cgltf_accessor, index: cgltf_size) cgltf_size; pub extern fn cgltf_num_components(@"type": cgltf_type) cgltf_size; pub extern fn cgltf_accessor_unpack_floats(accessor: [*c]const cgltf_accessor, out: [*c]cgltf_float, float_count: cgltf_size) cgltf_size; pub extern fn cgltf_copy_extras_json(data: [*c]const cgltf_data, extras: [*c]const cgltf_extras, dest: [*c]u8, dest_size: [*c]cgltf_size) cgltf_result;
src/deps/gltf/c.zig
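For context, a minimal sketch of how the cgltf entry points declared in src/deps/gltf/c.zig above are typically driven from Zig. It is not part of the original sources: the import path "c.zig", the error names, and the `cgltf_result_success` constant (expected to be defined, together with `cgltf_options`, in the earlier part of these translate-c bindings) are assumptions here.

const std = @import("std");
const c = @import("c.zig"); // assumed import path for the bindings above

/// Parse a glTF/GLB file, load its external buffers and validate the result.
/// The caller owns the returned data and must release it with c.cgltf_free().
pub fn loadGltf(path: [*:0]const u8) ![*c]c.cgltf_data {
    // cgltf expects zero-initialized options when no custom allocator or
    // file callbacks are supplied.
    var options = std.mem.zeroes(c.cgltf_options);
    var data: [*c]c.cgltf_data = null;

    // cgltf_result_success is assumed to be exported by the earlier part of
    // these bindings (it is the first member of cgltf's result enum).
    if (c.cgltf_parse_file(&options, path, &data) != c.cgltf_result_success)
        return error.GltfParseFailed;
    errdefer c.cgltf_free(data);

    // Resolve buffer URIs / the GLB BIN chunk relative to the source path.
    if (c.cgltf_load_buffers(&options, data, path) != c.cgltf_result_success)
        return error.GltfLoadBuffersFailed;
    if (c.cgltf_validate(data) != c.cgltf_result_success)
        return error.GltfValidationFailed;

    return data;
}

A typical caller would then walk data.*.meshes[0..data.*.meshes_count] and read vertex data through the cgltf_accessor_read_float / cgltf_accessor_read_index helpers declared above.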
const std = @import("std"); const io = std.io; const FFT = @import("fft.zig").FFT; const DCT = @import("dct.zig").DCT; pub const MelOpts = struct { const Self = @This(); frame_length: u16 = 256, sample_rate: u32 = 16000, remove_dc_offset: bool = true, preemph_coeff: f32 = 0.97, liftering_coeff: f32 = 22.0, blackman_coeff: f32 = 0.42, window: WindowType = .Hamming, filterbank_floor: f32 = 1.0, filterbank_num_bins: u8 = 20, mfcc_order: u8 = 12, output_type: OutputType = .MFCC, output_energy: bool = true, output_c0: bool = false, pub const OutputType = enum { MelEnergy, MFCC, }; pub const WindowType = enum { Hanning, Hamming, Rectangular, Blackman, Povey, }; pub fn fftFrameLength(self: Self) !u32 { if (std.math.isPowerOfTwo(self.frame_length)) return self.frame_length * 2; return try std.math.ceilPowerOfTwo(u16, self.frame_length); } pub fn featLength(self: Self) usize { var l: usize = switch (self.output_type) { .MelEnergy => self.filterbank_num_bins, .MFCC => self.mfcc_order, }; if (self.output_energy) l += 1; if (self.output_c0) l += 1; return l; } }; const FilterBank = struct { const Self = @This(); const mel = 1127.01048; bin: []usize, weight: []f32, floor: f32, num_bins: u8, allocator: *std.mem.Allocator, fn freq_mel(freq: f32) f32 { return mel * std.math.ln(freq / 700 + 1.0); } fn sample_mel(sample: usize, num: u32, sample_rate: u32) f32 { const sample_f32 = @intToFloat(f32, sample); const num_f32 = @intToFloat(f32, num); const rate_f32 = @intToFloat(f32, sample_rate); const freq = (sample_f32 + 1) / num_f32 * (rate_f32 / 2); return freq_mel(freq); } pub fn init(allocator: *std.mem.Allocator, floor: f32, sample_rate: u32, frame_length: u32, num_bins: u8) !Self { var weight = try allocator.alloc(f32, frame_length / 2); var bin = try allocator.alloc(usize, frame_length / 2); var count = try allocator.alloc(f32, num_bins + 1); defer allocator.free(count); const rate = @intToFloat(f32, sample_rate); const max_mel = freq_mel(rate / 2); var k: usize = 0; while (k <= num_bins) : (k += 1) { count[k] = @intToFloat(f32, k + 1) / @intToFloat(f32, num_bins + 1) * max_mel; } var chan_num: usize = 0; k = 1; while (k < frame_length / 2) : (k += 1) { const k_mel = sample_mel(k - 1, frame_length / 2, sample_rate); while (count[chan_num] < k_mel and chan_num < num_bins) { chan_num += 1; } bin[k] = chan_num; } k = 1; while (k < frame_length / 2) : (k += 1) { chan_num = bin[k]; const k_mel = sample_mel(k - 1, frame_length / 2, sample_rate); weight[k] = (count[chan_num] - k_mel) / count[0]; } return Self{ .allocator = allocator, .bin = bin, .weight = weight, .floor = floor, .num_bins = num_bins, }; } pub fn deinit(self: *Self) void { self.allocator.free(self.weight); self.allocator.free(self.bin); } pub fn apply(self: Self, frame: []f32, dst: []f32) !void { if (frame.len != self.bin.len or dst.len != 1 + 2 * self.num_bins) return error.InvalidSize; std.mem.set(f32, dst, 0); var k: usize = 1; while (k < frame.len) : (k += 1) { const fnum = self.bin[k]; if (fnum > 0) dst[fnum] += frame[k] * self.weight[k]; if (fnum <= self.weight.len) { dst[fnum + 1] += (1 - self.weight[k]) * frame[k]; } } k = 1; while (k <= self.num_bins) : (k += 1) { if (dst[k] < self.floor) { dst[k] = self.floor; } dst[k] = std.math.ln(dst[k]); } } }; pub fn MFCCMaker(comptime ReaderType: type) type { return struct { const Self = @This(); const Error = ReaderType.Error || error{ IncorrectFrameSize, BufferTooShort, UnexpectedEOF }; pub const Reader = io.Reader(*Self, Error, readFn); allocator: *std.mem.Allocator, source: ReaderType, fft: 
FFT, dct: DCT, opts: MelOpts, buf: []f32, buf2: []f32, window: []f32, readfn_scratch: []f32, fbank: FilterBank, pub fn init(allocator: *std.mem.Allocator, source: ReaderType, opts: MelOpts) !Self { var padded_frame_length = try opts.fftFrameLength(); var buf = try allocator.alloc(f32, padded_frame_length); var buf2 = try allocator.alloc(f32, padded_frame_length); var readfn_scratch = try allocator.alloc(f32, opts.featLength()); var window = try allocator.alloc(f32, opts.frame_length); const a = std.math.pi * 2.0 / @intToFloat(f32, opts.frame_length - 1); for (window) |*v, idx| { const i = @intToFloat(f32, idx); switch (opts.window) { MelOpts.WindowType.Hanning => { v.* = 0.5 - 0.5 * std.math.cos(a * i); }, MelOpts.WindowType.Hamming => { v.* = 0.54 - 0.46 * std.math.cos(a * i); }, MelOpts.WindowType.Povey => { v.* = std.math.pow(f32, (0.5 - 0.5 * std.math.cos(a * i)), 0.85); }, MelOpts.WindowType.Rectangular => { v.* = 1.0; }, MelOpts.WindowType.Blackman => { v.* = opts.blackman_coeff - 0.5 * std.math.cos(a * i) + (0.5 - opts.blackman_coeff) * std.math.cos(2 * a * i); }, } } const fft = try FFT.init(allocator, padded_frame_length); const fbank = try FilterBank.init(allocator, opts.filterbank_floor, opts.sample_rate, padded_frame_length, opts.filterbank_num_bins); const dct = try DCT.init(allocator, opts.filterbank_num_bins); return Self{ .allocator = allocator, .source = source, .opts = opts, .buf = buf, .buf2 = buf2, .window = window, .readfn_scratch = readfn_scratch, .fft = fft, .fbank = fbank, .dct = dct, }; } pub fn deinit(self: *Self) void { self.fft.deinit(); self.fbank.deinit(); self.dct.deinit(); self.allocator.free(self.buf); self.allocator.free(self.buf2); self.allocator.free(self.window); self.allocator.free(self.readfn_scratch); } pub fn readFrame(self: *Self, dst: []f32) Error!bool { if (dst.len != self.opts.featLength()) return error.IncorrectFrameSize; // read input frame into buf for modification in-place if (!try self.readSourceFrameIntoBuf()) return false; // end of input std.mem.set(f32, self.buf[self.opts.frame_length..], 0.0); self.removeDCOffset(); const energy: f32 = if (self.opts.output_energy) self.calculateEnergy() else 0; var frame = self.buf[0..self.opts.frame_length]; self.preEmphasize(frame); self.applyWindow(frame); // buf has real values, set buf2 to zero for imaginary values for computing power spectrum. output is in the first half of buf. std.mem.set(f32, self.buf2, 0); self.spec(self.buf, self.buf2); var spectrum = self.buf[0 .. self.buf.len / 2]; // reserve space for filter bank an zero it out. var filter_bank = self.buf2[0 .. 1 + 2 * self.opts.filterbank_num_bins]; std.mem.set(f32, filter_bank, 0); self.fbank.apply(spectrum, filter_bank) catch unreachable; filter_bank = filter_bank[1 .. 1 + self.opts.filterbank_num_bins]; var c0: f32 = 0; if (self.opts.output_c0) { for (filter_bank) |v| { c0 += v; } c0 *= std.math.sqrt(2.0 / @intToFloat(f32, self.opts.filterbank_num_bins)); } if (self.opts.output_type == .MelEnergy) { // ignore first mfcc and save others to dst. var k: usize = 0; while (k < self.opts.filterbank_num_bins) : (k += 1) { dst[k] = filter_bank[k]; } if (self.opts.output_c0) { dst[k] = c0; k += 1; } if (self.opts.output_energy) { dst[k] = energy; k += 1; } return true; } // compute DCT of filter-bank in place. output mfcc are in the first part of the buffer. var dct_data = self.buf2[1 .. 1 + 2 * self.opts.filterbank_num_bins]; self.dct.apply(dct_data) catch unreachable; var mfcc = dct_data[0 .. 
self.opts.mfcc_order + 1]; // liftering for (mfcc) |*x, idx| { const theta = std.math.pi * @intToFloat(f32, idx) / self.opts.liftering_coeff; x.* *= (1.0 + self.opts.liftering_coeff / 2.0 * std.math.sin(theta)); } // ignore first mfcc and save others to dst. var k: usize = 1; while (k <= self.opts.mfcc_order) : (k += 1) { dst[k - 1] = mfcc[k]; } if (self.opts.output_c0) { dst[k - 1] = c0; k += 1; } if (self.opts.output_energy) { dst[k - 1] = energy; k += 1; } return true; } fn readSourceFrameIntoBuf(self: *Self) Error!bool { if (comptime std.meta.trait.hasFn("readFrame")(ReaderType)) { // read samples directly from the source, since it's a frame reader. return try self.source.readFrame(self.buf[0..self.opts.frame_length]); } // we are dealing with a raw reader, read samples by reinterpreting bytes. var dst_u8 = std.mem.sliceAsBytes(self.buf[0..self.opts.frame_length]); const n = try self.source.readAll(dst_u8); if (n == 0) return false; if (n != self.opts.frame_length * @sizeOf(f32)) { return Error.UnexpectedEOF; } return true; } fn removeDCOffset(self: *Self) void { if (!self.opts.remove_dc_offset) return; var sum: f32 = 0; for (self.buf[0..self.opts.frame_length]) |v| { sum += v; } const offset = sum / @intToFloat(f32, self.opts.frame_length); for (self.buf[0..self.opts.frame_length]) |*v| { v.* -= offset; } } fn calculateEnergy(self: Self) f32 { const energy_floor = -1.0E+10; var energy: f32 = 0; for (self.buf[0..self.opts.frame_length]) |v| { energy += v * v; } return if (energy <= 0) energy_floor else std.math.ln(energy); } fn preEmphasize(self: Self, buf: []f32) void { if (self.opts.preemph_coeff == 0) { return; } var i: usize = self.opts.frame_length - 1; while (i > 0) : (i -= 1) { buf[i] -= self.opts.preemph_coeff * buf[i - 1]; } buf[0] -= self.opts.preemph_coeff * buf[0]; } fn applyWindow(self: Self, buf: []f32) void { for (buf) |*v, idx| { v.* *= self.window[idx]; } } fn spec(self: Self, real: []f32, imag: []f32) void { self.fft.fftr(real, imag) catch unreachable; var k: usize = 1; while (k < real.len / 2) : (k += 1) { real[k] = std.math.sqrt(real[k] * real[k] + imag[k] * imag[k]); } } /// implements the io.Reader interface. Provided buffer must be long /// enough to hold an entire frame. fn readFn(self: *Self, buf: []u8) Error!usize { if (buf.len < self.opts.featLength() * @sizeOf(f32)) { return Error.BufferTooShort; } if (!try self.readFrame(self.readfn_scratch)) { return 0; } const scratch_u8 = std.mem.sliceAsBytes(self.readfn_scratch); std.mem.copy(u8, buf, scratch_u8); return scratch_u8.len; } pub fn reader(self: *Self) Reader { return .{ .context = self }; } }; } pub fn mfccMaker(allocator: *std.mem.Allocator, reader: anytype, opts: MelOpts) !MFCCMaker(@TypeOf(reader)) { return MFCCMaker(@TypeOf(reader)).init(allocator, reader, opts); } test "mfcc" { var frames = std.io.fixedBufferStream(@embedFile("testdata/test_pcm16.f32.frames")); var mm = try mfccMaker(std.testing.allocator, frames.reader(), .{ .output_c0 = true, .remove_dc_offset = false }); defer mm.deinit(); const truth = std.io.fixedBufferStream(@embedFile("testdata/test_pcm16.f32.mfcc")).reader(); var got: [14]f32 = undefined; var want: [14]f32 = undefined; while (true) { if (!try mm.readFrame(got[0..])) { try std.testing.expectError(error.EndOfStream, truth.readByte()); break; } const want_u8 = std.mem.sliceAsBytes(want[0..]); try truth.readNoEof(want_u8); for (got) |g, idx| { const w = want[idx]; try std.testing.expectApproxEqRel(w, g, 0.01); } } }
src/dsp/melfreq.zig
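For reference, a minimal sketch of driving the MFCC front end in src/dsp/melfreq.zig from a raw f32 PCM reader. It is not part of the original sources; the helper name countMfccFrames and the import path "melfreq.zig" are illustrative assumptions, and only the MelOpts, mfccMaker and readFrame APIs defined above are relied on.

const std = @import("std");
const melfreq = @import("melfreq.zig"); // assumed import path for the module above

/// Hypothetical helper: pull MFCC frames from a raw little-endian f32 PCM
/// reader until it runs dry, returning how many frames were produced.
pub fn countMfccFrames(allocator: *std.mem.Allocator, pcm: anytype) !usize {
    // Defaults: 256-sample frames, Hamming window, 12 MFCCs + log energy.
    const opts = melfreq.MelOpts{};
    var maker = try melfreq.mfccMaker(allocator, pcm, opts);
    defer maker.deinit();

    // Each output frame holds opts.featLength() floats (13 with the defaults).
    var frame = try allocator.alloc(f32, opts.featLength());
    defer allocator.free(frame);

    var n: usize = 0;
    while (try maker.readFrame(frame)) : (n += 1) {}
    return n;
}

The same maker also exposes a standard io.Reader via maker.reader(); each read then yields one feature frame as featLength() * @sizeOf(f32) bytes, provided the supplied buffer is at least that long.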
/// `composite` maps the code points `a` and `b` to their canonical composition, if any. pub fn composite(a: u21, b: u21) ?u21 { if (a == 0x41 and b == 0x300) return 0xC0; if (a == 0x41 and b == 0x301) return 0xC1; if (a == 0x41 and b == 0x302) return 0xC2; if (a == 0x41 and b == 0x303) return 0xC3; if (a == 0x41 and b == 0x308) return 0xC4; if (a == 0x41 and b == 0x30A) return 0xC5; if (a == 0x43 and b == 0x327) return 0xC7; if (a == 0x45 and b == 0x300) return 0xC8; if (a == 0x45 and b == 0x301) return 0xC9; if (a == 0x45 and b == 0x302) return 0xCA; if (a == 0x45 and b == 0x308) return 0xCB; if (a == 0x49 and b == 0x300) return 0xCC; if (a == 0x49 and b == 0x301) return 0xCD; if (a == 0x49 and b == 0x302) return 0xCE; if (a == 0x49 and b == 0x308) return 0xCF; if (a == 0x4E and b == 0x303) return 0xD1; if (a == 0x4F and b == 0x300) return 0xD2; if (a == 0x4F and b == 0x301) return 0xD3; if (a == 0x4F and b == 0x302) return 0xD4; if (a == 0x4F and b == 0x303) return 0xD5; if (a == 0x4F and b == 0x308) return 0xD6; if (a == 0x55 and b == 0x300) return 0xD9; if (a == 0x55 and b == 0x301) return 0xDA; if (a == 0x55 and b == 0x302) return 0xDB; if (a == 0x55 and b == 0x308) return 0xDC; if (a == 0x59 and b == 0x301) return 0xDD; if (a == 0x61 and b == 0x300) return 0xE0; if (a == 0x61 and b == 0x301) return 0xE1; if (a == 0x61 and b == 0x302) return 0xE2; if (a == 0x61 and b == 0x303) return 0xE3; if (a == 0x61 and b == 0x308) return 0xE4; if (a == 0x61 and b == 0x30A) return 0xE5; if (a == 0x63 and b == 0x327) return 0xE7; if (a == 0x65 and b == 0x300) return 0xE8; if (a == 0x65 and b == 0x301) return 0xE9; if (a == 0x65 and b == 0x302) return 0xEA; if (a == 0x65 and b == 0x308) return 0xEB; if (a == 0x69 and b == 0x300) return 0xEC; if (a == 0x69 and b == 0x301) return 0xED; if (a == 0x69 and b == 0x302) return 0xEE; if (a == 0x69 and b == 0x308) return 0xEF; if (a == 0x6E and b == 0x303) return 0xF1; if (a == 0x6F and b == 0x300) return 0xF2; if (a == 0x6F and b == 0x301) return 0xF3; if (a == 0x6F and b == 0x302) return 0xF4; if (a == 0x6F and b == 0x303) return 0xF5; if (a == 0x6F and b == 0x308) return 0xF6; if (a == 0x75 and b == 0x300) return 0xF9; if (a == 0x75 and b == 0x301) return 0xFA; if (a == 0x75 and b == 0x302) return 0xFB; if (a == 0x75 and b == 0x308) return 0xFC; if (a == 0x79 and b == 0x301) return 0xFD; if (a == 0x79 and b == 0x308) return 0xFF; if (a == 0x41 and b == 0x304) return 0x100; if (a == 0x61 and b == 0x304) return 0x101; if (a == 0x41 and b == 0x306) return 0x102; if (a == 0x61 and b == 0x306) return 0x103; if (a == 0x41 and b == 0x328) return 0x104; if (a == 0x61 and b == 0x328) return 0x105; if (a == 0x43 and b == 0x301) return 0x106; if (a == 0x63 and b == 0x301) return 0x107; if (a == 0x43 and b == 0x302) return 0x108; if (a == 0x63 and b == 0x302) return 0x109; if (a == 0x43 and b == 0x307) return 0x10A; if (a == 0x63 and b == 0x307) return 0x10B; if (a == 0x43 and b == 0x30C) return 0x10C; if (a == 0x63 and b == 0x30C) return 0x10D; if (a == 0x44 and b == 0x30C) return 0x10E; if (a == 0x64 and b == 0x30C) return 0x10F; if (a == 0x45 and b == 0x304) return 0x112; if (a == 0x65 and b == 0x304) return 0x113; if (a == 0x45 and b == 0x306) return 0x114; if (a == 0x65 and b == 0x306) return 0x115; if (a == 0x45 and b == 0x307) return 0x116; if (a == 0x65 and b == 0x307) return 0x117; if (a == 0x45 and b == 0x328) return 0x118; if (a == 0x65 and b == 0x328) return 0x119; if (a == 0x45 and b == 0x30C) return 0x11A; if (a == 0x65 and b == 0x30C) return 0x11B; if 
(a == 0x47 and b == 0x302) return 0x11C; if (a == 0x67 and b == 0x302) return 0x11D; if (a == 0x47 and b == 0x306) return 0x11E; if (a == 0x67 and b == 0x306) return 0x11F; if (a == 0x47 and b == 0x307) return 0x120; if (a == 0x67 and b == 0x307) return 0x121; if (a == 0x47 and b == 0x327) return 0x122; if (a == 0x67 and b == 0x327) return 0x123; if (a == 0x48 and b == 0x302) return 0x124; if (a == 0x68 and b == 0x302) return 0x125; if (a == 0x49 and b == 0x303) return 0x128; if (a == 0x69 and b == 0x303) return 0x129; if (a == 0x49 and b == 0x304) return 0x12A; if (a == 0x69 and b == 0x304) return 0x12B; if (a == 0x49 and b == 0x306) return 0x12C; if (a == 0x69 and b == 0x306) return 0x12D; if (a == 0x49 and b == 0x328) return 0x12E; if (a == 0x69 and b == 0x328) return 0x12F; if (a == 0x49 and b == 0x307) return 0x130; if (a == 0x4A and b == 0x302) return 0x134; if (a == 0x6A and b == 0x302) return 0x135; if (a == 0x4B and b == 0x327) return 0x136; if (a == 0x6B and b == 0x327) return 0x137; if (a == 0x4C and b == 0x301) return 0x139; if (a == 0x6C and b == 0x301) return 0x13A; if (a == 0x4C and b == 0x327) return 0x13B; if (a == 0x6C and b == 0x327) return 0x13C; if (a == 0x4C and b == 0x30C) return 0x13D; if (a == 0x6C and b == 0x30C) return 0x13E; if (a == 0x4E and b == 0x301) return 0x143; if (a == 0x6E and b == 0x301) return 0x144; if (a == 0x4E and b == 0x327) return 0x145; if (a == 0x6E and b == 0x327) return 0x146; if (a == 0x4E and b == 0x30C) return 0x147; if (a == 0x6E and b == 0x30C) return 0x148; if (a == 0x4F and b == 0x304) return 0x14C; if (a == 0x6F and b == 0x304) return 0x14D; if (a == 0x4F and b == 0x306) return 0x14E; if (a == 0x6F and b == 0x306) return 0x14F; if (a == 0x4F and b == 0x30B) return 0x150; if (a == 0x6F and b == 0x30B) return 0x151; if (a == 0x52 and b == 0x301) return 0x154; if (a == 0x72 and b == 0x301) return 0x155; if (a == 0x52 and b == 0x327) return 0x156; if (a == 0x72 and b == 0x327) return 0x157; if (a == 0x52 and b == 0x30C) return 0x158; if (a == 0x72 and b == 0x30C) return 0x159; if (a == 0x53 and b == 0x301) return 0x15A; if (a == 0x73 and b == 0x301) return 0x15B; if (a == 0x53 and b == 0x302) return 0x15C; if (a == 0x73 and b == 0x302) return 0x15D; if (a == 0x53 and b == 0x327) return 0x15E; if (a == 0x73 and b == 0x327) return 0x15F; if (a == 0x53 and b == 0x30C) return 0x160; if (a == 0x73 and b == 0x30C) return 0x161; if (a == 0x54 and b == 0x327) return 0x162; if (a == 0x74 and b == 0x327) return 0x163; if (a == 0x54 and b == 0x30C) return 0x164; if (a == 0x74 and b == 0x30C) return 0x165; if (a == 0x55 and b == 0x303) return 0x168; if (a == 0x75 and b == 0x303) return 0x169; if (a == 0x55 and b == 0x304) return 0x16A; if (a == 0x75 and b == 0x304) return 0x16B; if (a == 0x55 and b == 0x306) return 0x16C; if (a == 0x75 and b == 0x306) return 0x16D; if (a == 0x55 and b == 0x30A) return 0x16E; if (a == 0x75 and b == 0x30A) return 0x16F; if (a == 0x55 and b == 0x30B) return 0x170; if (a == 0x75 and b == 0x30B) return 0x171; if (a == 0x55 and b == 0x328) return 0x172; if (a == 0x75 and b == 0x328) return 0x173; if (a == 0x57 and b == 0x302) return 0x174; if (a == 0x77 and b == 0x302) return 0x175; if (a == 0x59 and b == 0x302) return 0x176; if (a == 0x79 and b == 0x302) return 0x177; if (a == 0x59 and b == 0x308) return 0x178; if (a == 0x5A and b == 0x301) return 0x179; if (a == 0x7A and b == 0x301) return 0x17A; if (a == 0x5A and b == 0x307) return 0x17B; if (a == 0x7A and b == 0x307) return 0x17C; if (a == 0x5A and b == 0x30C) return 
0x17D; if (a == 0x7A and b == 0x30C) return 0x17E; if (a == 0x4F and b == 0x31B) return 0x1A0; if (a == 0x6F and b == 0x31B) return 0x1A1; if (a == 0x55 and b == 0x31B) return 0x1AF; if (a == 0x75 and b == 0x31B) return 0x1B0; if (a == 0x41 and b == 0x30C) return 0x1CD; if (a == 0x61 and b == 0x30C) return 0x1CE; if (a == 0x49 and b == 0x30C) return 0x1CF; if (a == 0x69 and b == 0x30C) return 0x1D0; if (a == 0x4F and b == 0x30C) return 0x1D1; if (a == 0x6F and b == 0x30C) return 0x1D2; if (a == 0x55 and b == 0x30C) return 0x1D3; if (a == 0x75 and b == 0x30C) return 0x1D4; if (a == 0xDC and b == 0x304) return 0x1D5; if (a == 0xFC and b == 0x304) return 0x1D6; if (a == 0xDC and b == 0x301) return 0x1D7; if (a == 0xFC and b == 0x301) return 0x1D8; if (a == 0xDC and b == 0x30C) return 0x1D9; if (a == 0xFC and b == 0x30C) return 0x1DA; if (a == 0xDC and b == 0x300) return 0x1DB; if (a == 0xFC and b == 0x300) return 0x1DC; if (a == 0xC4 and b == 0x304) return 0x1DE; if (a == 0xE4 and b == 0x304) return 0x1DF; if (a == 0x226 and b == 0x304) return 0x1E0; if (a == 0x227 and b == 0x304) return 0x1E1; if (a == 0xC6 and b == 0x304) return 0x1E2; if (a == 0xE6 and b == 0x304) return 0x1E3; if (a == 0x47 and b == 0x30C) return 0x1E6; if (a == 0x67 and b == 0x30C) return 0x1E7; if (a == 0x4B and b == 0x30C) return 0x1E8; if (a == 0x6B and b == 0x30C) return 0x1E9; if (a == 0x4F and b == 0x328) return 0x1EA; if (a == 0x6F and b == 0x328) return 0x1EB; if (a == 0x1EA and b == 0x304) return 0x1EC; if (a == 0x1EB and b == 0x304) return 0x1ED; if (a == 0x1B7 and b == 0x30C) return 0x1EE; if (a == 0x292 and b == 0x30C) return 0x1EF; if (a == 0x6A and b == 0x30C) return 0x1F0; if (a == 0x47 and b == 0x301) return 0x1F4; if (a == 0x67 and b == 0x301) return 0x1F5; if (a == 0x4E and b == 0x300) return 0x1F8; if (a == 0x6E and b == 0x300) return 0x1F9; if (a == 0xC5 and b == 0x301) return 0x1FA; if (a == 0xE5 and b == 0x301) return 0x1FB; if (a == 0xC6 and b == 0x301) return 0x1FC; if (a == 0xE6 and b == 0x301) return 0x1FD; if (a == 0xD8 and b == 0x301) return 0x1FE; if (a == 0xF8 and b == 0x301) return 0x1FF; if (a == 0x41 and b == 0x30F) return 0x200; if (a == 0x61 and b == 0x30F) return 0x201; if (a == 0x41 and b == 0x311) return 0x202; if (a == 0x61 and b == 0x311) return 0x203; if (a == 0x45 and b == 0x30F) return 0x204; if (a == 0x65 and b == 0x30F) return 0x205; if (a == 0x45 and b == 0x311) return 0x206; if (a == 0x65 and b == 0x311) return 0x207; if (a == 0x49 and b == 0x30F) return 0x208; if (a == 0x69 and b == 0x30F) return 0x209; if (a == 0x49 and b == 0x311) return 0x20A; if (a == 0x69 and b == 0x311) return 0x20B; if (a == 0x4F and b == 0x30F) return 0x20C; if (a == 0x6F and b == 0x30F) return 0x20D; if (a == 0x4F and b == 0x311) return 0x20E; if (a == 0x6F and b == 0x311) return 0x20F; if (a == 0x52 and b == 0x30F) return 0x210; if (a == 0x72 and b == 0x30F) return 0x211; if (a == 0x52 and b == 0x311) return 0x212; if (a == 0x72 and b == 0x311) return 0x213; if (a == 0x55 and b == 0x30F) return 0x214; if (a == 0x75 and b == 0x30F) return 0x215; if (a == 0x55 and b == 0x311) return 0x216; if (a == 0x75 and b == 0x311) return 0x217; if (a == 0x53 and b == 0x326) return 0x218; if (a == 0x73 and b == 0x326) return 0x219; if (a == 0x54 and b == 0x326) return 0x21A; if (a == 0x74 and b == 0x326) return 0x21B; if (a == 0x48 and b == 0x30C) return 0x21E; if (a == 0x68 and b == 0x30C) return 0x21F; if (a == 0x41 and b == 0x307) return 0x226; if (a == 0x61 and b == 0x307) return 0x227; if (a == 0x45 and b 
== 0x327) return 0x228; if (a == 0x65 and b == 0x327) return 0x229; if (a == 0xD6 and b == 0x304) return 0x22A; if (a == 0xF6 and b == 0x304) return 0x22B; if (a == 0xD5 and b == 0x304) return 0x22C; if (a == 0xF5 and b == 0x304) return 0x22D; if (a == 0x4F and b == 0x307) return 0x22E; if (a == 0x6F and b == 0x307) return 0x22F; if (a == 0x22E and b == 0x304) return 0x230; if (a == 0x22F and b == 0x304) return 0x231; if (a == 0x59 and b == 0x304) return 0x232; if (a == 0x79 and b == 0x304) return 0x233; if (a == 0x308 and b == 0x301) return 0x344; if (a == 0xA8 and b == 0x301) return 0x385; if (a == 0x391 and b == 0x301) return 0x386; if (a == 0x395 and b == 0x301) return 0x388; if (a == 0x397 and b == 0x301) return 0x389; if (a == 0x399 and b == 0x301) return 0x38A; if (a == 0x39F and b == 0x301) return 0x38C; if (a == 0x3A5 and b == 0x301) return 0x38E; if (a == 0x3A9 and b == 0x301) return 0x38F; if (a == 0x3CA and b == 0x301) return 0x390; if (a == 0x399 and b == 0x308) return 0x3AA; if (a == 0x3A5 and b == 0x308) return 0x3AB; if (a == 0x3B1 and b == 0x301) return 0x3AC; if (a == 0x3B5 and b == 0x301) return 0x3AD; if (a == 0x3B7 and b == 0x301) return 0x3AE; if (a == 0x3B9 and b == 0x301) return 0x3AF; if (a == 0x3CB and b == 0x301) return 0x3B0; if (a == 0x3B9 and b == 0x308) return 0x3CA; if (a == 0x3C5 and b == 0x308) return 0x3CB; if (a == 0x3BF and b == 0x301) return 0x3CC; if (a == 0x3C5 and b == 0x301) return 0x3CD; if (a == 0x3C9 and b == 0x301) return 0x3CE; if (a == 0x3D2 and b == 0x301) return 0x3D3; if (a == 0x3D2 and b == 0x308) return 0x3D4; if (a == 0x415 and b == 0x300) return 0x400; if (a == 0x415 and b == 0x308) return 0x401; if (a == 0x413 and b == 0x301) return 0x403; if (a == 0x406 and b == 0x308) return 0x407; if (a == 0x41A and b == 0x301) return 0x40C; if (a == 0x418 and b == 0x300) return 0x40D; if (a == 0x423 and b == 0x306) return 0x40E; if (a == 0x418 and b == 0x306) return 0x419; if (a == 0x438 and b == 0x306) return 0x439; if (a == 0x435 and b == 0x300) return 0x450; if (a == 0x435 and b == 0x308) return 0x451; if (a == 0x433 and b == 0x301) return 0x453; if (a == 0x456 and b == 0x308) return 0x457; if (a == 0x43A and b == 0x301) return 0x45C; if (a == 0x438 and b == 0x300) return 0x45D; if (a == 0x443 and b == 0x306) return 0x45E; if (a == 0x474 and b == 0x30F) return 0x476; if (a == 0x475 and b == 0x30F) return 0x477; if (a == 0x416 and b == 0x306) return 0x4C1; if (a == 0x436 and b == 0x306) return 0x4C2; if (a == 0x410 and b == 0x306) return 0x4D0; if (a == 0x430 and b == 0x306) return 0x4D1; if (a == 0x410 and b == 0x308) return 0x4D2; if (a == 0x430 and b == 0x308) return 0x4D3; if (a == 0x415 and b == 0x306) return 0x4D6; if (a == 0x435 and b == 0x306) return 0x4D7; if (a == 0x4D8 and b == 0x308) return 0x4DA; if (a == 0x4D9 and b == 0x308) return 0x4DB; if (a == 0x416 and b == 0x308) return 0x4DC; if (a == 0x436 and b == 0x308) return 0x4DD; if (a == 0x417 and b == 0x308) return 0x4DE; if (a == 0x437 and b == 0x308) return 0x4DF; if (a == 0x418 and b == 0x304) return 0x4E2; if (a == 0x438 and b == 0x304) return 0x4E3; if (a == 0x418 and b == 0x308) return 0x4E4; if (a == 0x438 and b == 0x308) return 0x4E5; if (a == 0x41E and b == 0x308) return 0x4E6; if (a == 0x43E and b == 0x308) return 0x4E7; if (a == 0x4E8 and b == 0x308) return 0x4EA; if (a == 0x4E9 and b == 0x308) return 0x4EB; if (a == 0x42D and b == 0x308) return 0x4EC; if (a == 0x44D and b == 0x308) return 0x4ED; if (a == 0x423 and b == 0x304) return 0x4EE; if (a == 0x443 and b == 0x304) 
return 0x4EF; if (a == 0x423 and b == 0x308) return 0x4F0; if (a == 0x443 and b == 0x308) return 0x4F1; if (a == 0x423 and b == 0x30B) return 0x4F2; if (a == 0x443 and b == 0x30B) return 0x4F3; if (a == 0x427 and b == 0x308) return 0x4F4; if (a == 0x447 and b == 0x308) return 0x4F5; if (a == 0x42B and b == 0x308) return 0x4F8; if (a == 0x44B and b == 0x308) return 0x4F9; if (a == 0x627 and b == 0x653) return 0x622; if (a == 0x627 and b == 0x654) return 0x623; if (a == 0x648 and b == 0x654) return 0x624; if (a == 0x627 and b == 0x655) return 0x625; if (a == 0x64A and b == 0x654) return 0x626; if (a == 0x6D5 and b == 0x654) return 0x6C0; if (a == 0x6C1 and b == 0x654) return 0x6C2; if (a == 0x6D2 and b == 0x654) return 0x6D3; if (a == 0x928 and b == 0x93C) return 0x929; if (a == 0x930 and b == 0x93C) return 0x931; if (a == 0x933 and b == 0x93C) return 0x934; if (a == 0x915 and b == 0x93C) return 0x958; if (a == 0x916 and b == 0x93C) return 0x959; if (a == 0x917 and b == 0x93C) return 0x95A; if (a == 0x91C and b == 0x93C) return 0x95B; if (a == 0x921 and b == 0x93C) return 0x95C; if (a == 0x922 and b == 0x93C) return 0x95D; if (a == 0x92B and b == 0x93C) return 0x95E; if (a == 0x92F and b == 0x93C) return 0x95F; if (a == 0x9C7 and b == 0x9BE) return 0x9CB; if (a == 0x9C7 and b == 0x9D7) return 0x9CC; if (a == 0x9A1 and b == 0x9BC) return 0x9DC; if (a == 0x9A2 and b == 0x9BC) return 0x9DD; if (a == 0x9AF and b == 0x9BC) return 0x9DF; if (a == 0xA32 and b == 0xA3C) return 0xA33; if (a == 0xA38 and b == 0xA3C) return 0xA36; if (a == 0xA16 and b == 0xA3C) return 0xA59; if (a == 0xA17 and b == 0xA3C) return 0xA5A; if (a == 0xA1C and b == 0xA3C) return 0xA5B; if (a == 0xA2B and b == 0xA3C) return 0xA5E; if (a == 0xB47 and b == 0xB56) return 0xB48; if (a == 0xB47 and b == 0xB3E) return 0xB4B; if (a == 0xB47 and b == 0xB57) return 0xB4C; if (a == 0xB21 and b == 0xB3C) return 0xB5C; if (a == 0xB22 and b == 0xB3C) return 0xB5D; if (a == 0xB92 and b == 0xBD7) return 0xB94; if (a == 0xBC6 and b == 0xBBE) return 0xBCA; if (a == 0xBC7 and b == 0xBBE) return 0xBCB; if (a == 0xBC6 and b == 0xBD7) return 0xBCC; if (a == 0xC46 and b == 0xC56) return 0xC48; if (a == 0xCBF and b == 0xCD5) return 0xCC0; if (a == 0xCC6 and b == 0xCD5) return 0xCC7; if (a == 0xCC6 and b == 0xCD6) return 0xCC8; if (a == 0xCC6 and b == 0xCC2) return 0xCCA; if (a == 0xCCA and b == 0xCD5) return 0xCCB; if (a == 0xD46 and b == 0xD3E) return 0xD4A; if (a == 0xD47 and b == 0xD3E) return 0xD4B; if (a == 0xD46 and b == 0xD57) return 0xD4C; if (a == 0xDD9 and b == 0xDCA) return 0xDDA; if (a == 0xDD9 and b == 0xDCF) return 0xDDC; if (a == 0xDDC and b == 0xDCA) return 0xDDD; if (a == 0xDD9 and b == 0xDDF) return 0xDDE; if (a == 0xF42 and b == 0xFB7) return 0xF43; if (a == 0xF4C and b == 0xFB7) return 0xF4D; if (a == 0xF51 and b == 0xFB7) return 0xF52; if (a == 0xF56 and b == 0xFB7) return 0xF57; if (a == 0xF5B and b == 0xFB7) return 0xF5C; if (a == 0xF40 and b == 0xFB5) return 0xF69; if (a == 0xF71 and b == 0xF72) return 0xF73; if (a == 0xF71 and b == 0xF74) return 0xF75; if (a == 0xFB2 and b == 0xF80) return 0xF76; if (a == 0xFB3 and b == 0xF80) return 0xF78; if (a == 0xF71 and b == 0xF80) return 0xF81; if (a == 0xF92 and b == 0xFB7) return 0xF93; if (a == 0xF9C and b == 0xFB7) return 0xF9D; if (a == 0xFA1 and b == 0xFB7) return 0xFA2; if (a == 0xFA6 and b == 0xFB7) return 0xFA7; if (a == 0xFAB and b == 0xFB7) return 0xFAC; if (a == 0xF90 and b == 0xFB5) return 0xFB9; if (a == 0x1025 and b == 0x102E) return 0x1026; if (a == 0x1B05 and b == 
0x1B35) return 0x1B06; if (a == 0x1B07 and b == 0x1B35) return 0x1B08; if (a == 0x1B09 and b == 0x1B35) return 0x1B0A; if (a == 0x1B0B and b == 0x1B35) return 0x1B0C; if (a == 0x1B0D and b == 0x1B35) return 0x1B0E; if (a == 0x1B11 and b == 0x1B35) return 0x1B12; if (a == 0x1B3A and b == 0x1B35) return 0x1B3B; if (a == 0x1B3C and b == 0x1B35) return 0x1B3D; if (a == 0x1B3E and b == 0x1B35) return 0x1B40; if (a == 0x1B3F and b == 0x1B35) return 0x1B41; if (a == 0x1B42 and b == 0x1B35) return 0x1B43; if (a == 0x41 and b == 0x325) return 0x1E00; if (a == 0x61 and b == 0x325) return 0x1E01; if (a == 0x42 and b == 0x307) return 0x1E02; if (a == 0x62 and b == 0x307) return 0x1E03; if (a == 0x42 and b == 0x323) return 0x1E04; if (a == 0x62 and b == 0x323) return 0x1E05; if (a == 0x42 and b == 0x331) return 0x1E06; if (a == 0x62 and b == 0x331) return 0x1E07; if (a == 0xC7 and b == 0x301) return 0x1E08; if (a == 0xE7 and b == 0x301) return 0x1E09; if (a == 0x44 and b == 0x307) return 0x1E0A; if (a == 0x64 and b == 0x307) return 0x1E0B; if (a == 0x44 and b == 0x323) return 0x1E0C; if (a == 0x64 and b == 0x323) return 0x1E0D; if (a == 0x44 and b == 0x331) return 0x1E0E; if (a == 0x64 and b == 0x331) return 0x1E0F; if (a == 0x44 and b == 0x327) return 0x1E10; if (a == 0x64 and b == 0x327) return 0x1E11; if (a == 0x44 and b == 0x32D) return 0x1E12; if (a == 0x64 and b == 0x32D) return 0x1E13; if (a == 0x112 and b == 0x300) return 0x1E14; if (a == 0x113 and b == 0x300) return 0x1E15; if (a == 0x112 and b == 0x301) return 0x1E16; if (a == 0x113 and b == 0x301) return 0x1E17; if (a == 0x45 and b == 0x32D) return 0x1E18; if (a == 0x65 and b == 0x32D) return 0x1E19; if (a == 0x45 and b == 0x330) return 0x1E1A; if (a == 0x65 and b == 0x330) return 0x1E1B; if (a == 0x228 and b == 0x306) return 0x1E1C; if (a == 0x229 and b == 0x306) return 0x1E1D; if (a == 0x46 and b == 0x307) return 0x1E1E; if (a == 0x66 and b == 0x307) return 0x1E1F; if (a == 0x47 and b == 0x304) return 0x1E20; if (a == 0x67 and b == 0x304) return 0x1E21; if (a == 0x48 and b == 0x307) return 0x1E22; if (a == 0x68 and b == 0x307) return 0x1E23; if (a == 0x48 and b == 0x323) return 0x1E24; if (a == 0x68 and b == 0x323) return 0x1E25; if (a == 0x48 and b == 0x308) return 0x1E26; if (a == 0x68 and b == 0x308) return 0x1E27; if (a == 0x48 and b == 0x327) return 0x1E28; if (a == 0x68 and b == 0x327) return 0x1E29; if (a == 0x48 and b == 0x32E) return 0x1E2A; if (a == 0x68 and b == 0x32E) return 0x1E2B; if (a == 0x49 and b == 0x330) return 0x1E2C; if (a == 0x69 and b == 0x330) return 0x1E2D; if (a == 0xCF and b == 0x301) return 0x1E2E; if (a == 0xEF and b == 0x301) return 0x1E2F; if (a == 0x4B and b == 0x301) return 0x1E30; if (a == 0x6B and b == 0x301) return 0x1E31; if (a == 0x4B and b == 0x323) return 0x1E32; if (a == 0x6B and b == 0x323) return 0x1E33; if (a == 0x4B and b == 0x331) return 0x1E34; if (a == 0x6B and b == 0x331) return 0x1E35; if (a == 0x4C and b == 0x323) return 0x1E36; if (a == 0x6C and b == 0x323) return 0x1E37; if (a == 0x1E36 and b == 0x304) return 0x1E38; if (a == 0x1E37 and b == 0x304) return 0x1E39; if (a == 0x4C and b == 0x331) return 0x1E3A; if (a == 0x6C and b == 0x331) return 0x1E3B; if (a == 0x4C and b == 0x32D) return 0x1E3C; if (a == 0x6C and b == 0x32D) return 0x1E3D; if (a == 0x4D and b == 0x301) return 0x1E3E; if (a == 0x6D and b == 0x301) return 0x1E3F; if (a == 0x4D and b == 0x307) return 0x1E40; if (a == 0x6D and b == 0x307) return 0x1E41; if (a == 0x4D and b == 0x323) return 0x1E42; if (a == 0x6D and b == 
0x323) return 0x1E43; if (a == 0x4E and b == 0x307) return 0x1E44; if (a == 0x6E and b == 0x307) return 0x1E45; if (a == 0x4E and b == 0x323) return 0x1E46; if (a == 0x6E and b == 0x323) return 0x1E47; if (a == 0x4E and b == 0x331) return 0x1E48; if (a == 0x6E and b == 0x331) return 0x1E49; if (a == 0x4E and b == 0x32D) return 0x1E4A; if (a == 0x6E and b == 0x32D) return 0x1E4B; if (a == 0xD5 and b == 0x301) return 0x1E4C; if (a == 0xF5 and b == 0x301) return 0x1E4D; if (a == 0xD5 and b == 0x308) return 0x1E4E; if (a == 0xF5 and b == 0x308) return 0x1E4F; if (a == 0x14C and b == 0x300) return 0x1E50; if (a == 0x14D and b == 0x300) return 0x1E51; if (a == 0x14C and b == 0x301) return 0x1E52; if (a == 0x14D and b == 0x301) return 0x1E53; if (a == 0x50 and b == 0x301) return 0x1E54; if (a == 0x70 and b == 0x301) return 0x1E55; if (a == 0x50 and b == 0x307) return 0x1E56; if (a == 0x70 and b == 0x307) return 0x1E57; if (a == 0x52 and b == 0x307) return 0x1E58; if (a == 0x72 and b == 0x307) return 0x1E59; if (a == 0x52 and b == 0x323) return 0x1E5A; if (a == 0x72 and b == 0x323) return 0x1E5B; if (a == 0x1E5A and b == 0x304) return 0x1E5C; if (a == 0x1E5B and b == 0x304) return 0x1E5D; if (a == 0x52 and b == 0x331) return 0x1E5E; if (a == 0x72 and b == 0x331) return 0x1E5F; if (a == 0x53 and b == 0x307) return 0x1E60; if (a == 0x73 and b == 0x307) return 0x1E61; if (a == 0x53 and b == 0x323) return 0x1E62; if (a == 0x73 and b == 0x323) return 0x1E63; if (a == 0x15A and b == 0x307) return 0x1E64; if (a == 0x15B and b == 0x307) return 0x1E65; if (a == 0x160 and b == 0x307) return 0x1E66; if (a == 0x161 and b == 0x307) return 0x1E67; if (a == 0x1E62 and b == 0x307) return 0x1E68; if (a == 0x1E63 and b == 0x307) return 0x1E69; if (a == 0x54 and b == 0x307) return 0x1E6A; if (a == 0x74 and b == 0x307) return 0x1E6B; if (a == 0x54 and b == 0x323) return 0x1E6C; if (a == 0x74 and b == 0x323) return 0x1E6D; if (a == 0x54 and b == 0x331) return 0x1E6E; if (a == 0x74 and b == 0x331) return 0x1E6F; if (a == 0x54 and b == 0x32D) return 0x1E70; if (a == 0x74 and b == 0x32D) return 0x1E71; if (a == 0x55 and b == 0x324) return 0x1E72; if (a == 0x75 and b == 0x324) return 0x1E73; if (a == 0x55 and b == 0x330) return 0x1E74; if (a == 0x75 and b == 0x330) return 0x1E75; if (a == 0x55 and b == 0x32D) return 0x1E76; if (a == 0x75 and b == 0x32D) return 0x1E77; if (a == 0x168 and b == 0x301) return 0x1E78; if (a == 0x169 and b == 0x301) return 0x1E79; if (a == 0x16A and b == 0x308) return 0x1E7A; if (a == 0x16B and b == 0x308) return 0x1E7B; if (a == 0x56 and b == 0x303) return 0x1E7C; if (a == 0x76 and b == 0x303) return 0x1E7D; if (a == 0x56 and b == 0x323) return 0x1E7E; if (a == 0x76 and b == 0x323) return 0x1E7F; if (a == 0x57 and b == 0x300) return 0x1E80; if (a == 0x77 and b == 0x300) return 0x1E81; if (a == 0x57 and b == 0x301) return 0x1E82; if (a == 0x77 and b == 0x301) return 0x1E83; if (a == 0x57 and b == 0x308) return 0x1E84; if (a == 0x77 and b == 0x308) return 0x1E85; if (a == 0x57 and b == 0x307) return 0x1E86; if (a == 0x77 and b == 0x307) return 0x1E87; if (a == 0x57 and b == 0x323) return 0x1E88; if (a == 0x77 and b == 0x323) return 0x1E89; if (a == 0x58 and b == 0x307) return 0x1E8A; if (a == 0x78 and b == 0x307) return 0x1E8B; if (a == 0x58 and b == 0x308) return 0x1E8C; if (a == 0x78 and b == 0x308) return 0x1E8D; if (a == 0x59 and b == 0x307) return 0x1E8E; if (a == 0x79 and b == 0x307) return 0x1E8F; if (a == 0x5A and b == 0x302) return 0x1E90; if (a == 0x7A and b == 0x302) return 0x1E91; if 
(a == 0x5A and b == 0x323) return 0x1E92; if (a == 0x7A and b == 0x323) return 0x1E93; if (a == 0x5A and b == 0x331) return 0x1E94; if (a == 0x7A and b == 0x331) return 0x1E95; if (a == 0x68 and b == 0x331) return 0x1E96; if (a == 0x74 and b == 0x308) return 0x1E97; if (a == 0x77 and b == 0x30A) return 0x1E98; if (a == 0x79 and b == 0x30A) return 0x1E99; if (a == 0x17F and b == 0x307) return 0x1E9B; if (a == 0x41 and b == 0x323) return 0x1EA0; if (a == 0x61 and b == 0x323) return 0x1EA1; if (a == 0x41 and b == 0x309) return 0x1EA2; if (a == 0x61 and b == 0x309) return 0x1EA3; if (a == 0xC2 and b == 0x301) return 0x1EA4; if (a == 0xE2 and b == 0x301) return 0x1EA5; if (a == 0xC2 and b == 0x300) return 0x1EA6; if (a == 0xE2 and b == 0x300) return 0x1EA7; if (a == 0xC2 and b == 0x309) return 0x1EA8; if (a == 0xE2 and b == 0x309) return 0x1EA9; if (a == 0xC2 and b == 0x303) return 0x1EAA; if (a == 0xE2 and b == 0x303) return 0x1EAB; if (a == 0x1EA0 and b == 0x302) return 0x1EAC; if (a == 0x1EA1 and b == 0x302) return 0x1EAD; if (a == 0x102 and b == 0x301) return 0x1EAE; if (a == 0x103 and b == 0x301) return 0x1EAF; if (a == 0x102 and b == 0x300) return 0x1EB0; if (a == 0x103 and b == 0x300) return 0x1EB1; if (a == 0x102 and b == 0x309) return 0x1EB2; if (a == 0x103 and b == 0x309) return 0x1EB3; if (a == 0x102 and b == 0x303) return 0x1EB4; if (a == 0x103 and b == 0x303) return 0x1EB5; if (a == 0x1EA0 and b == 0x306) return 0x1EB6; if (a == 0x1EA1 and b == 0x306) return 0x1EB7; if (a == 0x45 and b == 0x323) return 0x1EB8; if (a == 0x65 and b == 0x323) return 0x1EB9; if (a == 0x45 and b == 0x309) return 0x1EBA; if (a == 0x65 and b == 0x309) return 0x1EBB; if (a == 0x45 and b == 0x303) return 0x1EBC; if (a == 0x65 and b == 0x303) return 0x1EBD; if (a == 0xCA and b == 0x301) return 0x1EBE; if (a == 0xEA and b == 0x301) return 0x1EBF; if (a == 0xCA and b == 0x300) return 0x1EC0; if (a == 0xEA and b == 0x300) return 0x1EC1; if (a == 0xCA and b == 0x309) return 0x1EC2; if (a == 0xEA and b == 0x309) return 0x1EC3; if (a == 0xCA and b == 0x303) return 0x1EC4; if (a == 0xEA and b == 0x303) return 0x1EC5; if (a == 0x1EB8 and b == 0x302) return 0x1EC6; if (a == 0x1EB9 and b == 0x302) return 0x1EC7; if (a == 0x49 and b == 0x309) return 0x1EC8; if (a == 0x69 and b == 0x309) return 0x1EC9; if (a == 0x49 and b == 0x323) return 0x1ECA; if (a == 0x69 and b == 0x323) return 0x1ECB; if (a == 0x4F and b == 0x323) return 0x1ECC; if (a == 0x6F and b == 0x323) return 0x1ECD; if (a == 0x4F and b == 0x309) return 0x1ECE; if (a == 0x6F and b == 0x309) return 0x1ECF; if (a == 0xD4 and b == 0x301) return 0x1ED0; if (a == 0xF4 and b == 0x301) return 0x1ED1; if (a == 0xD4 and b == 0x300) return 0x1ED2; if (a == 0xF4 and b == 0x300) return 0x1ED3; if (a == 0xD4 and b == 0x309) return 0x1ED4; if (a == 0xF4 and b == 0x309) return 0x1ED5; if (a == 0xD4 and b == 0x303) return 0x1ED6; if (a == 0xF4 and b == 0x303) return 0x1ED7; if (a == 0x1ECC and b == 0x302) return 0x1ED8; if (a == 0x1ECD and b == 0x302) return 0x1ED9; if (a == 0x1A0 and b == 0x301) return 0x1EDA; if (a == 0x1A1 and b == 0x301) return 0x1EDB; if (a == 0x1A0 and b == 0x300) return 0x1EDC; if (a == 0x1A1 and b == 0x300) return 0x1EDD; if (a == 0x1A0 and b == 0x309) return 0x1EDE; if (a == 0x1A1 and b == 0x309) return 0x1EDF; if (a == 0x1A0 and b == 0x303) return 0x1EE0; if (a == 0x1A1 and b == 0x303) return 0x1EE1; if (a == 0x1A0 and b == 0x323) return 0x1EE2; if (a == 0x1A1 and b == 0x323) return 0x1EE3; if (a == 0x55 and b == 0x323) return 0x1EE4; if (a == 
0x75 and b == 0x323) return 0x1EE5; if (a == 0x55 and b == 0x309) return 0x1EE6; if (a == 0x75 and b == 0x309) return 0x1EE7; if (a == 0x1AF and b == 0x301) return 0x1EE8; if (a == 0x1B0 and b == 0x301) return 0x1EE9; if (a == 0x1AF and b == 0x300) return 0x1EEA; if (a == 0x1B0 and b == 0x300) return 0x1EEB; if (a == 0x1AF and b == 0x309) return 0x1EEC; if (a == 0x1B0 and b == 0x309) return 0x1EED; if (a == 0x1AF and b == 0x303) return 0x1EEE; if (a == 0x1B0 and b == 0x303) return 0x1EEF; if (a == 0x1AF and b == 0x323) return 0x1EF0; if (a == 0x1B0 and b == 0x323) return 0x1EF1; if (a == 0x59 and b == 0x300) return 0x1EF2; if (a == 0x79 and b == 0x300) return 0x1EF3; if (a == 0x59 and b == 0x323) return 0x1EF4; if (a == 0x79 and b == 0x323) return 0x1EF5; if (a == 0x59 and b == 0x309) return 0x1EF6; if (a == 0x79 and b == 0x309) return 0x1EF7; if (a == 0x59 and b == 0x303) return 0x1EF8; if (a == 0x79 and b == 0x303) return 0x1EF9; if (a == 0x3B1 and b == 0x313) return 0x1F00; if (a == 0x3B1 and b == 0x314) return 0x1F01; if (a == 0x1F00 and b == 0x300) return 0x1F02; if (a == 0x1F01 and b == 0x300) return 0x1F03; if (a == 0x1F00 and b == 0x301) return 0x1F04; if (a == 0x1F01 and b == 0x301) return 0x1F05; if (a == 0x1F00 and b == 0x342) return 0x1F06; if (a == 0x1F01 and b == 0x342) return 0x1F07; if (a == 0x391 and b == 0x313) return 0x1F08; if (a == 0x391 and b == 0x314) return 0x1F09; if (a == 0x1F08 and b == 0x300) return 0x1F0A; if (a == 0x1F09 and b == 0x300) return 0x1F0B; if (a == 0x1F08 and b == 0x301) return 0x1F0C; if (a == 0x1F09 and b == 0x301) return 0x1F0D; if (a == 0x1F08 and b == 0x342) return 0x1F0E; if (a == 0x1F09 and b == 0x342) return 0x1F0F; if (a == 0x3B5 and b == 0x313) return 0x1F10; if (a == 0x3B5 and b == 0x314) return 0x1F11; if (a == 0x1F10 and b == 0x300) return 0x1F12; if (a == 0x1F11 and b == 0x300) return 0x1F13; if (a == 0x1F10 and b == 0x301) return 0x1F14; if (a == 0x1F11 and b == 0x301) return 0x1F15; if (a == 0x395 and b == 0x313) return 0x1F18; if (a == 0x395 and b == 0x314) return 0x1F19; if (a == 0x1F18 and b == 0x300) return 0x1F1A; if (a == 0x1F19 and b == 0x300) return 0x1F1B; if (a == 0x1F18 and b == 0x301) return 0x1F1C; if (a == 0x1F19 and b == 0x301) return 0x1F1D; if (a == 0x3B7 and b == 0x313) return 0x1F20; if (a == 0x3B7 and b == 0x314) return 0x1F21; if (a == 0x1F20 and b == 0x300) return 0x1F22; if (a == 0x1F21 and b == 0x300) return 0x1F23; if (a == 0x1F20 and b == 0x301) return 0x1F24; if (a == 0x1F21 and b == 0x301) return 0x1F25; if (a == 0x1F20 and b == 0x342) return 0x1F26; if (a == 0x1F21 and b == 0x342) return 0x1F27; if (a == 0x397 and b == 0x313) return 0x1F28; if (a == 0x397 and b == 0x314) return 0x1F29; if (a == 0x1F28 and b == 0x300) return 0x1F2A; if (a == 0x1F29 and b == 0x300) return 0x1F2B; if (a == 0x1F28 and b == 0x301) return 0x1F2C; if (a == 0x1F29 and b == 0x301) return 0x1F2D; if (a == 0x1F28 and b == 0x342) return 0x1F2E; if (a == 0x1F29 and b == 0x342) return 0x1F2F; if (a == 0x3B9 and b == 0x313) return 0x1F30; if (a == 0x3B9 and b == 0x314) return 0x1F31; if (a == 0x1F30 and b == 0x300) return 0x1F32; if (a == 0x1F31 and b == 0x300) return 0x1F33; if (a == 0x1F30 and b == 0x301) return 0x1F34; if (a == 0x1F31 and b == 0x301) return 0x1F35; if (a == 0x1F30 and b == 0x342) return 0x1F36; if (a == 0x1F31 and b == 0x342) return 0x1F37; if (a == 0x399 and b == 0x313) return 0x1F38; if (a == 0x399 and b == 0x314) return 0x1F39; if (a == 0x1F38 and b == 0x300) return 0x1F3A; if (a == 0x1F39 and b == 0x300) return 
0x1F3B; if (a == 0x1F38 and b == 0x301) return 0x1F3C; if (a == 0x1F39 and b == 0x301) return 0x1F3D; if (a == 0x1F38 and b == 0x342) return 0x1F3E; if (a == 0x1F39 and b == 0x342) return 0x1F3F; if (a == 0x3BF and b == 0x313) return 0x1F40; if (a == 0x3BF and b == 0x314) return 0x1F41; if (a == 0x1F40 and b == 0x300) return 0x1F42; if (a == 0x1F41 and b == 0x300) return 0x1F43; if (a == 0x1F40 and b == 0x301) return 0x1F44; if (a == 0x1F41 and b == 0x301) return 0x1F45; if (a == 0x39F and b == 0x313) return 0x1F48; if (a == 0x39F and b == 0x314) return 0x1F49; if (a == 0x1F48 and b == 0x300) return 0x1F4A; if (a == 0x1F49 and b == 0x300) return 0x1F4B; if (a == 0x1F48 and b == 0x301) return 0x1F4C; if (a == 0x1F49 and b == 0x301) return 0x1F4D; if (a == 0x3C5 and b == 0x313) return 0x1F50; if (a == 0x3C5 and b == 0x314) return 0x1F51; if (a == 0x1F50 and b == 0x300) return 0x1F52; if (a == 0x1F51 and b == 0x300) return 0x1F53; if (a == 0x1F50 and b == 0x301) return 0x1F54; if (a == 0x1F51 and b == 0x301) return 0x1F55; if (a == 0x1F50 and b == 0x342) return 0x1F56; if (a == 0x1F51 and b == 0x342) return 0x1F57; if (a == 0x3A5 and b == 0x314) return 0x1F59; if (a == 0x1F59 and b == 0x300) return 0x1F5B; if (a == 0x1F59 and b == 0x301) return 0x1F5D; if (a == 0x1F59 and b == 0x342) return 0x1F5F; if (a == 0x3C9 and b == 0x313) return 0x1F60; if (a == 0x3C9 and b == 0x314) return 0x1F61; if (a == 0x1F60 and b == 0x300) return 0x1F62; if (a == 0x1F61 and b == 0x300) return 0x1F63; if (a == 0x1F60 and b == 0x301) return 0x1F64; if (a == 0x1F61 and b == 0x301) return 0x1F65; if (a == 0x1F60 and b == 0x342) return 0x1F66; if (a == 0x1F61 and b == 0x342) return 0x1F67; if (a == 0x3A9 and b == 0x313) return 0x1F68; if (a == 0x3A9 and b == 0x314) return 0x1F69; if (a == 0x1F68 and b == 0x300) return 0x1F6A; if (a == 0x1F69 and b == 0x300) return 0x1F6B; if (a == 0x1F68 and b == 0x301) return 0x1F6C; if (a == 0x1F69 and b == 0x301) return 0x1F6D; if (a == 0x1F68 and b == 0x342) return 0x1F6E; if (a == 0x1F69 and b == 0x342) return 0x1F6F; if (a == 0x3B1 and b == 0x300) return 0x1F70; if (a == 0x3B5 and b == 0x300) return 0x1F72; if (a == 0x3B7 and b == 0x300) return 0x1F74; if (a == 0x3B9 and b == 0x300) return 0x1F76; if (a == 0x3BF and b == 0x300) return 0x1F78; if (a == 0x3C5 and b == 0x300) return 0x1F7A; if (a == 0x3C9 and b == 0x300) return 0x1F7C; if (a == 0x1F00 and b == 0x345) return 0x1F80; if (a == 0x1F01 and b == 0x345) return 0x1F81; if (a == 0x1F02 and b == 0x345) return 0x1F82; if (a == 0x1F03 and b == 0x345) return 0x1F83; if (a == 0x1F04 and b == 0x345) return 0x1F84; if (a == 0x1F05 and b == 0x345) return 0x1F85; if (a == 0x1F06 and b == 0x345) return 0x1F86; if (a == 0x1F07 and b == 0x345) return 0x1F87; if (a == 0x1F08 and b == 0x345) return 0x1F88; if (a == 0x1F09 and b == 0x345) return 0x1F89; if (a == 0x1F0A and b == 0x345) return 0x1F8A; if (a == 0x1F0B and b == 0x345) return 0x1F8B; if (a == 0x1F0C and b == 0x345) return 0x1F8C; if (a == 0x1F0D and b == 0x345) return 0x1F8D; if (a == 0x1F0E and b == 0x345) return 0x1F8E; if (a == 0x1F0F and b == 0x345) return 0x1F8F; if (a == 0x1F20 and b == 0x345) return 0x1F90; if (a == 0x1F21 and b == 0x345) return 0x1F91; if (a == 0x1F22 and b == 0x345) return 0x1F92; if (a == 0x1F23 and b == 0x345) return 0x1F93; if (a == 0x1F24 and b == 0x345) return 0x1F94; if (a == 0x1F25 and b == 0x345) return 0x1F95; if (a == 0x1F26 and b == 0x345) return 0x1F96; if (a == 0x1F27 and b == 0x345) return 0x1F97; if (a == 0x1F28 and b == 0x345) return 
0x1F98; if (a == 0x1F29 and b == 0x345) return 0x1F99; if (a == 0x1F2A and b == 0x345) return 0x1F9A; if (a == 0x1F2B and b == 0x345) return 0x1F9B; if (a == 0x1F2C and b == 0x345) return 0x1F9C; if (a == 0x1F2D and b == 0x345) return 0x1F9D; if (a == 0x1F2E and b == 0x345) return 0x1F9E; if (a == 0x1F2F and b == 0x345) return 0x1F9F; if (a == 0x1F60 and b == 0x345) return 0x1FA0; if (a == 0x1F61 and b == 0x345) return 0x1FA1; if (a == 0x1F62 and b == 0x345) return 0x1FA2; if (a == 0x1F63 and b == 0x345) return 0x1FA3; if (a == 0x1F64 and b == 0x345) return 0x1FA4; if (a == 0x1F65 and b == 0x345) return 0x1FA5; if (a == 0x1F66 and b == 0x345) return 0x1FA6; if (a == 0x1F67 and b == 0x345) return 0x1FA7; if (a == 0x1F68 and b == 0x345) return 0x1FA8; if (a == 0x1F69 and b == 0x345) return 0x1FA9; if (a == 0x1F6A and b == 0x345) return 0x1FAA; if (a == 0x1F6B and b == 0x345) return 0x1FAB; if (a == 0x1F6C and b == 0x345) return 0x1FAC; if (a == 0x1F6D and b == 0x345) return 0x1FAD; if (a == 0x1F6E and b == 0x345) return 0x1FAE; if (a == 0x1F6F and b == 0x345) return 0x1FAF; if (a == 0x3B1 and b == 0x306) return 0x1FB0; if (a == 0x3B1 and b == 0x304) return 0x1FB1; if (a == 0x1F70 and b == 0x345) return 0x1FB2; if (a == 0x3B1 and b == 0x345) return 0x1FB3; if (a == 0x3AC and b == 0x345) return 0x1FB4; if (a == 0x3B1 and b == 0x342) return 0x1FB6; if (a == 0x1FB6 and b == 0x345) return 0x1FB7; if (a == 0x391 and b == 0x306) return 0x1FB8; if (a == 0x391 and b == 0x304) return 0x1FB9; if (a == 0x391 and b == 0x300) return 0x1FBA; if (a == 0x391 and b == 0x345) return 0x1FBC; if (a == 0xA8 and b == 0x342) return 0x1FC1; if (a == 0x1F74 and b == 0x345) return 0x1FC2; if (a == 0x3B7 and b == 0x345) return 0x1FC3; if (a == 0x3AE and b == 0x345) return 0x1FC4; if (a == 0x3B7 and b == 0x342) return 0x1FC6; if (a == 0x1FC6 and b == 0x345) return 0x1FC7; if (a == 0x395 and b == 0x300) return 0x1FC8; if (a == 0x397 and b == 0x300) return 0x1FCA; if (a == 0x397 and b == 0x345) return 0x1FCC; if (a == 0x1FBF and b == 0x300) return 0x1FCD; if (a == 0x1FBF and b == 0x301) return 0x1FCE; if (a == 0x1FBF and b == 0x342) return 0x1FCF; if (a == 0x3B9 and b == 0x306) return 0x1FD0; if (a == 0x3B9 and b == 0x304) return 0x1FD1; if (a == 0x3CA and b == 0x300) return 0x1FD2; if (a == 0x3B9 and b == 0x342) return 0x1FD6; if (a == 0x3CA and b == 0x342) return 0x1FD7; if (a == 0x399 and b == 0x306) return 0x1FD8; if (a == 0x399 and b == 0x304) return 0x1FD9; if (a == 0x399 and b == 0x300) return 0x1FDA; if (a == 0x1FFE and b == 0x300) return 0x1FDD; if (a == 0x1FFE and b == 0x301) return 0x1FDE; if (a == 0x1FFE and b == 0x342) return 0x1FDF; if (a == 0x3C5 and b == 0x306) return 0x1FE0; if (a == 0x3C5 and b == 0x304) return 0x1FE1; if (a == 0x3CB and b == 0x300) return 0x1FE2; if (a == 0x3C1 and b == 0x313) return 0x1FE4; if (a == 0x3C1 and b == 0x314) return 0x1FE5; if (a == 0x3C5 and b == 0x342) return 0x1FE6; if (a == 0x3CB and b == 0x342) return 0x1FE7; if (a == 0x3A5 and b == 0x306) return 0x1FE8; if (a == 0x3A5 and b == 0x304) return 0x1FE9; if (a == 0x3A5 and b == 0x300) return 0x1FEA; if (a == 0x3A1 and b == 0x314) return 0x1FEC; if (a == 0xA8 and b == 0x300) return 0x1FED; if (a == 0x1F7C and b == 0x345) return 0x1FF2; if (a == 0x3C9 and b == 0x345) return 0x1FF3; if (a == 0x3CE and b == 0x345) return 0x1FF4; if (a == 0x3C9 and b == 0x342) return 0x1FF6; if (a == 0x1FF6 and b == 0x345) return 0x1FF7; if (a == 0x39F and b == 0x300) return 0x1FF8; if (a == 0x3A9 and b == 0x300) return 0x1FFA; if (a == 0x3A9 
and b == 0x345) return 0x1FFC; if (a == 0x2190 and b == 0x338) return 0x219A; if (a == 0x2192 and b == 0x338) return 0x219B; if (a == 0x2194 and b == 0x338) return 0x21AE; if (a == 0x21D0 and b == 0x338) return 0x21CD; if (a == 0x21D4 and b == 0x338) return 0x21CE; if (a == 0x21D2 and b == 0x338) return 0x21CF; if (a == 0x2203 and b == 0x338) return 0x2204; if (a == 0x2208 and b == 0x338) return 0x2209; if (a == 0x220B and b == 0x338) return 0x220C; if (a == 0x2223 and b == 0x338) return 0x2224; if (a == 0x2225 and b == 0x338) return 0x2226; if (a == 0x223C and b == 0x338) return 0x2241; if (a == 0x2243 and b == 0x338) return 0x2244; if (a == 0x2245 and b == 0x338) return 0x2247; if (a == 0x2248 and b == 0x338) return 0x2249; if (a == 0x3D and b == 0x338) return 0x2260; if (a == 0x2261 and b == 0x338) return 0x2262; if (a == 0x224D and b == 0x338) return 0x226D; if (a == 0x3C and b == 0x338) return 0x226E; if (a == 0x3E and b == 0x338) return 0x226F; if (a == 0x2264 and b == 0x338) return 0x2270; if (a == 0x2265 and b == 0x338) return 0x2271; if (a == 0x2272 and b == 0x338) return 0x2274; if (a == 0x2273 and b == 0x338) return 0x2275; if (a == 0x2276 and b == 0x338) return 0x2278; if (a == 0x2277 and b == 0x338) return 0x2279; if (a == 0x227A and b == 0x338) return 0x2280; if (a == 0x227B and b == 0x338) return 0x2281; if (a == 0x2282 and b == 0x338) return 0x2284; if (a == 0x2283 and b == 0x338) return 0x2285; if (a == 0x2286 and b == 0x338) return 0x2288; if (a == 0x2287 and b == 0x338) return 0x2289; if (a == 0x22A2 and b == 0x338) return 0x22AC; if (a == 0x22A8 and b == 0x338) return 0x22AD; if (a == 0x22A9 and b == 0x338) return 0x22AE; if (a == 0x22AB and b == 0x338) return 0x22AF; if (a == 0x227C and b == 0x338) return 0x22E0; if (a == 0x227D and b == 0x338) return 0x22E1; if (a == 0x2291 and b == 0x338) return 0x22E2; if (a == 0x2292 and b == 0x338) return 0x22E3; if (a == 0x22B2 and b == 0x338) return 0x22EA; if (a == 0x22B3 and b == 0x338) return 0x22EB; if (a == 0x22B4 and b == 0x338) return 0x22EC; if (a == 0x22B5 and b == 0x338) return 0x22ED; if (a == 0x2ADD and b == 0x338) return 0x2ADC; if (a == 0x304B and b == 0x3099) return 0x304C; if (a == 0x304D and b == 0x3099) return 0x304E; if (a == 0x304F and b == 0x3099) return 0x3050; if (a == 0x3051 and b == 0x3099) return 0x3052; if (a == 0x3053 and b == 0x3099) return 0x3054; if (a == 0x3055 and b == 0x3099) return 0x3056; if (a == 0x3057 and b == 0x3099) return 0x3058; if (a == 0x3059 and b == 0x3099) return 0x305A; if (a == 0x305B and b == 0x3099) return 0x305C; if (a == 0x305D and b == 0x3099) return 0x305E; if (a == 0x305F and b == 0x3099) return 0x3060; if (a == 0x3061 and b == 0x3099) return 0x3062; if (a == 0x3064 and b == 0x3099) return 0x3065; if (a == 0x3066 and b == 0x3099) return 0x3067; if (a == 0x3068 and b == 0x3099) return 0x3069; if (a == 0x306F and b == 0x3099) return 0x3070; if (a == 0x306F and b == 0x309A) return 0x3071; if (a == 0x3072 and b == 0x3099) return 0x3073; if (a == 0x3072 and b == 0x309A) return 0x3074; if (a == 0x3075 and b == 0x3099) return 0x3076; if (a == 0x3075 and b == 0x309A) return 0x3077; if (a == 0x3078 and b == 0x3099) return 0x3079; if (a == 0x3078 and b == 0x309A) return 0x307A; if (a == 0x307B and b == 0x3099) return 0x307C; if (a == 0x307B and b == 0x309A) return 0x307D; if (a == 0x3046 and b == 0x3099) return 0x3094; if (a == 0x309D and b == 0x3099) return 0x309E; if (a == 0x30AB and b == 0x3099) return 0x30AC; if (a == 0x30AD and b == 0x3099) return 0x30AE; if (a == 0x30AF and b 
== 0x3099) return 0x30B0; if (a == 0x30B1 and b == 0x3099) return 0x30B2; if (a == 0x30B3 and b == 0x3099) return 0x30B4; if (a == 0x30B5 and b == 0x3099) return 0x30B6; if (a == 0x30B7 and b == 0x3099) return 0x30B8; if (a == 0x30B9 and b == 0x3099) return 0x30BA; if (a == 0x30BB and b == 0x3099) return 0x30BC; if (a == 0x30BD and b == 0x3099) return 0x30BE; if (a == 0x30BF and b == 0x3099) return 0x30C0; if (a == 0x30C1 and b == 0x3099) return 0x30C2; if (a == 0x30C4 and b == 0x3099) return 0x30C5; if (a == 0x30C6 and b == 0x3099) return 0x30C7; if (a == 0x30C8 and b == 0x3099) return 0x30C9; if (a == 0x30CF and b == 0x3099) return 0x30D0; if (a == 0x30CF and b == 0x309A) return 0x30D1; if (a == 0x30D2 and b == 0x3099) return 0x30D3; if (a == 0x30D2 and b == 0x309A) return 0x30D4; if (a == 0x30D5 and b == 0x3099) return 0x30D6; if (a == 0x30D5 and b == 0x309A) return 0x30D7; if (a == 0x30D8 and b == 0x3099) return 0x30D9; if (a == 0x30D8 and b == 0x309A) return 0x30DA; if (a == 0x30DB and b == 0x3099) return 0x30DC; if (a == 0x30DB and b == 0x309A) return 0x30DD; if (a == 0x30A6 and b == 0x3099) return 0x30F4; if (a == 0x30EF and b == 0x3099) return 0x30F7; if (a == 0x30F0 and b == 0x3099) return 0x30F8; if (a == 0x30F1 and b == 0x3099) return 0x30F9; if (a == 0x30F2 and b == 0x3099) return 0x30FA; if (a == 0x30FD and b == 0x3099) return 0x30FE; if (a == 0x5D9 and b == 0x5B4) return 0xFB1D; if (a == 0x5F2 and b == 0x5B7) return 0xFB1F; if (a == 0x5E9 and b == 0x5C1) return 0xFB2A; if (a == 0x5E9 and b == 0x5C2) return 0xFB2B; if (a == 0xFB49 and b == 0x5C1) return 0xFB2C; if (a == 0xFB49 and b == 0x5C2) return 0xFB2D; if (a == 0x5D0 and b == 0x5B7) return 0xFB2E; if (a == 0x5D0 and b == 0x5B8) return 0xFB2F; if (a == 0x5D0 and b == 0x5BC) return 0xFB30; if (a == 0x5D1 and b == 0x5BC) return 0xFB31; if (a == 0x5D2 and b == 0x5BC) return 0xFB32; if (a == 0x5D3 and b == 0x5BC) return 0xFB33; if (a == 0x5D4 and b == 0x5BC) return 0xFB34; if (a == 0x5D5 and b == 0x5BC) return 0xFB35; if (a == 0x5D6 and b == 0x5BC) return 0xFB36; if (a == 0x5D8 and b == 0x5BC) return 0xFB38; if (a == 0x5D9 and b == 0x5BC) return 0xFB39; if (a == 0x5DA and b == 0x5BC) return 0xFB3A; if (a == 0x5DB and b == 0x5BC) return 0xFB3B; if (a == 0x5DC and b == 0x5BC) return 0xFB3C; if (a == 0x5DE and b == 0x5BC) return 0xFB3E; if (a == 0x5E0 and b == 0x5BC) return 0xFB40; if (a == 0x5E1 and b == 0x5BC) return 0xFB41; if (a == 0x5E3 and b == 0x5BC) return 0xFB43; if (a == 0x5E4 and b == 0x5BC) return 0xFB44; if (a == 0x5E6 and b == 0x5BC) return 0xFB46; if (a == 0x5E7 and b == 0x5BC) return 0xFB47; if (a == 0x5E8 and b == 0x5BC) return 0xFB48; if (a == 0x5E9 and b == 0x5BC) return 0xFB49; if (a == 0x5EA and b == 0x5BC) return 0xFB4A; if (a == 0x5D5 and b == 0x5B9) return 0xFB4B; if (a == 0x5D1 and b == 0x5BF) return 0xFB4C; if (a == 0x5DB and b == 0x5BF) return 0xFB4D; if (a == 0x5E4 and b == 0x5BF) return 0xFB4E; if (a == 0x11099 and b == 0x110BA) return 0x1109A; if (a == 0x1109B and b == 0x110BA) return 0x1109C; if (a == 0x110A5 and b == 0x110BA) return 0x110AB; if (a == 0x11131 and b == 0x11127) return 0x1112E; if (a == 0x11132 and b == 0x11127) return 0x1112F; if (a == 0x11347 and b == 0x1133E) return 0x1134B; if (a == 0x11347 and b == 0x11357) return 0x1134C; if (a == 0x114B9 and b == 0x114BA) return 0x114BB; if (a == 0x114B9 and b == 0x114B0) return 0x114BC; if (a == 0x114B9 and b == 0x114BD) return 0x114BE; if (a == 0x115B8 and b == 0x115AF) return 0x115BA; if (a == 0x115B9 and b == 0x115AF) return 0x115BB; if (a 
== 0x11935 and b == 0x11930) return 0x11938; if (a == 0x1D157 and b == 0x1D165) return 0x1D15E; if (a == 0x1D158 and b == 0x1D165) return 0x1D15F; if (a == 0x1D15F and b == 0x1D16E) return 0x1D160; if (a == 0x1D15F and b == 0x1D16F) return 0x1D161; if (a == 0x1D15F and b == 0x1D170) return 0x1D162; if (a == 0x1D15F and b == 0x1D171) return 0x1D163; if (a == 0x1D15F and b == 0x1D172) return 0x1D164; if (a == 0x1D1B9 and b == 0x1D165) return 0x1D1BB; if (a == 0x1D1BA and b == 0x1D165) return 0x1D1BC; if (a == 0x1D1BB and b == 0x1D16E) return 0x1D1BD; if (a == 0x1D1BC and b == 0x1D16E) return 0x1D1BE; if (a == 0x1D1BB and b == 0x1D16F) return 0x1D1BF; if (a == 0x1D1BC and b == 0x1D16F) return 0x1D1C0; return null; }
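
// Added illustration (not part of the autogenerated table above): the long
// if-chain maps a (base code point, combining mark) pair to its canonically
// precomposed form, or null when none exists. The helper below restates two of
// those entries so the lookup shape can be exercised in isolation; the name
// `composeExample` and its u21 signature are assumptions, not the ziglyph API.
fn composeExample(a: u21, b: u21) ?u21 {
    if (a == 0x3B1 and b == 0x300) return 0x1F70; // GREEK SMALL LETTER ALPHA + VARIA
    if (a == 0x3C9 and b == 0x342) return 0x1FF6; // GREEK SMALL LETTER OMEGA + PERISPOMENI
    return null;
}

test "canonical pair lookup sketch" {
    const t = @import("std").testing;
    try t.expectEqual(@as(?u21, 0x1F70), composeExample(0x3B1, 0x300));
    try t.expectEqual(@as(?u21, null), composeExample('q', 0x300)); // no precomposed q + grave exists
}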
.gyro/ziglyph-jecolon-github.com-c37d93b6/pkg/src/autogen/canonicals.zig
const std = @import("../std.zig"); const wasi = std.os.wasi; const FDFLAG = wasi.FDFLAG; extern threadlocal var errno: c_int; pub fn _errno() *c_int { return &errno; } pub const fd_t = wasi.fd_t; pub const pid_t = c_int; pub const uid_t = u32; pub const gid_t = u32; pub const off_t = i64; pub const ino_t = wasi.ino_t; pub const mode_t = wasi.mode_t; pub const time_t = wasi.time_t; pub const timespec = wasi.timespec; pub const STDERR_FILENO = wasi.STDERR_FILENO; pub const STDIN_FILENO = wasi.STDIN_FILENO; pub const STDOUT_FILENO = wasi.STDOUT_FILENO; pub const E = wasi.E; pub const CLOCK = wasi.CLOCK; pub const S = wasi.S; pub const IOV_MAX = wasi.IOV_MAX; pub const AT = wasi.AT; pub const Stat = extern struct { dev: i32, ino: ino_t, nlink: u64, mode: mode_t, uid: uid_t, gid: gid_t, __pad0: isize, rdev: i32, size: off_t, blksize: i32, blocks: i64, atimesec: time_t, atimensec: isize, mtimesec: time_t, mtimensec: isize, ctimesec: time_t, ctimensec: isize, pub fn atime(self: @This()) timespec { return timespec{ .tv_sec = self.atimesec, .tv_nsec = self.atimensec, }; } pub fn mtime(self: @This()) timespec { return timespec{ .tv_sec = self.mtimesec, .tv_nsec = self.mtimensec, }; } pub fn ctime(self: @This()) timespec { return timespec{ .tv_sec = self.ctimesec, .tv_nsec = self.ctimensec, }; } }; /// Derived from /// https://github.com/WebAssembly/wasi-libc/blob/main/expected/wasm32-wasi/predefined-macros.txt pub const O = struct { pub const ACCMODE = (EXEC | RDWR | SEARCH); pub const APPEND = FDFLAG.APPEND; pub const CLOEXEC = (0); pub const CREAT = ((1 << 0) << 12); // = __WASI_OFLAGS_CREAT << 12 pub const DIRECTORY = ((1 << 1) << 12); // = __WASI_OFLAGS_DIRECTORY << 12 pub const DSYNC = FDFLAG.DSYNC; pub const EXCL = ((1 << 2) << 12); // = __WASI_OFLAGS_EXCL << 12 pub const EXEC = (0x02000000); pub const NOCTTY = (0); pub const NOFOLLOW = (0x01000000); pub const NONBLOCK = (1 << FDFLAG.NONBLOCK); pub const RDONLY = (0x04000000); pub const RDWR = (RDONLY | WRONLY); pub const RSYNC = (1 << FDFLAG.RSYNC); pub const SEARCH = (0x08000000); pub const SYNC = (1 << FDFLAG.SYNC); pub const TRUNC = ((1 << 3) << 12); // = __WASI_OFLAGS_TRUNC << 12 pub const TTY_INIT = (0); pub const WRONLY = (0x10000000); }; pub const SEEK = struct { pub const SET: wasi.whence_t = .SET; pub const CUR: wasi.whence_t = .CUR; pub const END: wasi.whence_t = .END; };
lib/std/c/wasi.zig
const builtin = @import("builtin"); const std = @import("std"); const Allocator = std.mem.Allocator; // ---------------------------------------------------------------------------- const TCThreadCreateInfo = @import("ffi.zig").TCThreadCreateInfo; const log = @import("debug.zig").log; const options = @import("options.zig"); const ABORTING_SHADOWSTACK = options.ABORTING_SHADOWSTACK; const setShadowStackGuard = options.setShadowStackGuard; const markEvent = @import("profiler.zig").markEvent; const ENABLE_PROFILER = @import("profiler.zig").ACTIVE; // ---------------------------------------------------------------------------- export var g_shadow_stack_top: [*]usize = undefined; const MPU_GRANULARITY = 32; const STACK_ALIGN = MPU_GRANULARITY; const STACK_SIZE = 16; /// Bundles the state of a single instance of shadow stack. pub const StackState = struct { top: [*]usize, frames: []usize, const Self = @This(); /// Construct a `StackState` by allocating memory from `allocator`. pub fn new(allocator: *Allocator, create_info: ?*const TCThreadCreateInfo) !Self { // TODO: make stack size variable // Add two guard pages before and after the stack. MPU is used to detect // stack overflow/underflow by prohibiting memory access to the guard // pages. const gp_ents = MPU_GRANULARITY / @sizeOf(usize); var frames = try allocator.alignedAlloc(usize, STACK_ALIGN, STACK_SIZE + gp_ents * 2); // Exclude the guard pages frames = frames[gp_ents .. frames.len - gp_ents]; for (frames) |*frame| { frame.* = 0; } return fromSlice(frames); } /// Release the memory allocated for `self`. `self` must have been created /// by `new(allocator, _)`. pub fn destroy(self: *const Self, allocator: *Allocator) void { allocator.free(self.frames); } fn fromSlice(frames: []usize) Self { return Self{ .frames = frames, .top = @ptrCast([*]usize, &frames[0]), }; } }; pub fn saveState(state: *StackState) void { state.top = g_shadow_stack_top; } pub fn loadState(state: *const StackState) void { g_shadow_stack_top = state.top; // Configure MPU regions for the stack guard pages const start = @ptrToInt(&state.frames[0]); const end = start + state.frames.len * @sizeOf(usize); setShadowStackGuard(start, end); log(.Trace, "shadowstack.loadState({?x})\n", .{state}); } export fn TCShadowStackMismatch() noreturn { @panic("Shadow stack: Return target mismatch"); } export fn TCShadowStackLogPush() void { markEvent(.ShadowPush); } export fn TCShadowStackLogAssertReturn() void { markEvent(.ShadowAssertReturn); } export fn TCShadowStackLogAssert() void { markEvent(.ShadowAssert); } // Non-Secure application interface // ---------------------------------------------------------------------------- export fn __TCPrivateShadowPush() callconv(.Naked) noreturn { @setRuntimeSafety(false); asm volatile ( \\ sg ); if (ENABLE_PROFILER) { asm volatile ( \\ push {r0, r1, r2, r3, lr} \\ bl TCShadowStackLogPush \\ pop {r0, r1, r2, r3, lr} ); } // r12 = continuation // lr = trustworthy return target of the caller with bit[0] cleared // kill: r12 // // assume(lr != 0); // g_shadow_stack_top.* = lr; // g_shadow_stack_top += 1; // asm volatile ( \\ .syntax unified \\ \\ ldr r5, .L_g_shadow_stack_top_const1 // Get &g_shadow_stack_top \\ ldr r4, [r5] // Get g_shadow_stack_top \\ bic r12, #1 // Mark that `r12` is a Non-Secure address. 
\\ str lr, [r4], #4 // g_shadow_stack_top[0] = lr, g_shadow_stack_top + 1 \\ str r4, [r5] // g_shadow_stack_top = (g_shadow_stack_top + 1) \\ \\ bxns r12 \\ \\ .align 2 \\ .L_g_shadow_stack_top_const1: .word g_shadow_stack_top ); unreachable; } export fn __TCPrivateShadowAssertReturn() callconv(.Naked) noreturn { @setRuntimeSafety(false); asm volatile ( \\ sg ); if (ENABLE_PROFILER) { asm volatile ( \\ push {r0, r1, r2, r3, lr} \\ bl TCShadowStackLogAssertReturn \\ pop {r0, r1, r2, r3, lr} ); } // lr = non-trustworthy return target of the caller with bit[0] cleared // kill: r12 if (comptime ABORTING_SHADOWSTACK) { // if (g_shadow_stack_top[-1] != lr) { panic(); } // g_shadow_stack_top -= 1; // bxns(lr) // asm volatile ( \\ .syntax unified \\ \\ push {r0, r1} \\ ldr r12, .L_g_shadow_stack_top_const2 // Get &g_shadow_stack_top \\ ldr r0, [r12] // Get g_shadow_stack_top \\ ldr r1, [r0, #-4]! // g_shadow_stack_top - 1, Load g_shadow_stack_top[-1] \\ str r0, [r12] // g_shadow_stack_top = (g_shadow_stack_top - 1) \\ cmp r1, lr // g_shadow_stack_top[-1] != lr \\ pop {r0, r1} \\ bne .L_mismatch_trampoline // if (g_shadow_stack_top[-1] != lr) { ... } \\ \\ bxns lr \\ \\ .align 2 \\ .L_mismatch_trampoline: b TCShadowStackMismatch \\ .L_g_shadow_stack_top_const2: .word g_shadow_stack_top ); } else { // ABORTING_SHADOWSTACK // // lr = g_shadow_stack_top[-1]; // g_shadow_stack_top -= 1; // bxns(lr) // asm volatile ( \\ .syntax unified \\ \\ push {r0} \\ ldr r12, .L_g_shadow_stack_top_const2 // Get &g_shadow_stack_top \\ ldr r0, [r12] // Get g_shadow_stack_top \\ ldr lr, [r0, #-4]! // g_shadow_stack_top - 1, load g_shadow_stack_top[-1] \\ str r0, [r12] // g_shadow_stack_top = (g_shadow_stack_top - 1) \\ pop {r0} \\ \\ bxns lr \\ \\ .align 2 \\ .L_g_shadow_stack_top_const2: .word g_shadow_stack_top ); } // ABORTING_SHADOWSTACK unreachable; } export fn __TCPrivateShadowAssertReturnFast() callconv(.Naked) noreturn { @setRuntimeSafety(false); asm volatile ( \\ sg ); if (ENABLE_PROFILER) { asm volatile ( \\ push {r0, r1, r2, r3, lr} \\ bl TCShadowStackLogAssertReturn \\ pop {r0, r1, r2, r3, lr} ); } // Exactly the same as `__TCPrivateShadowAssertReturn` except that additional registers // are killed. // lr = non-trustworthy return target of the caller with bit[0] cleared // kill: r12 if (comptime ABORTING_SHADOWSTACK) { // [kill: r2, r3] // if (g_shadow_stack_top[-1] != lr) { panic(); } // g_shadow_stack_top -= 1; // bxns(lr) // asm volatile ( \\ .syntax unified \\ \\ ldr r12, .L_g_shadow_stack_top_const5 // Get &g_shadow_stack_top \\ ldr r2, [r12] // Get g_shadow_stack_top \\ ldr r3, [r2, #-4]! // g_shadow_stack_top - 1, Load g_shadow_stack_top[-1] \\ str r2, [r12] // g_shadow_stack_top = (g_shadow_stack_top - 1) \\ cmp r3, lr // g_shadow_stack_top[-1] != lr \\ bne .L_mismatch_trampoline2 // if (g_shadow_stack_top[-1] != lr) { ... } \\ \\ bxns lr \\ \\ .align 2 \\ .L_mismatch_trampoline2: b TCShadowStackMismatch \\ .L_g_shadow_stack_top_const5: .word g_shadow_stack_top ); } else { // ABORTING_SHADOWSTACK // // [kill: r3] // lr = g_shadow_stack_top[-1]; // g_shadow_stack_top -= 1; // bxns(lr) // asm volatile ( \\ .syntax unified \\ \\ ldr r12, .L_g_shadow_stack_top_const4 // Get &g_shadow_stack_top \\ ldr r3, [r12] // Get g_shadow_stack_top \\ ldr lr, [r3, #-4]! 
// g_shadow_stack_top - 1, load g_shadow_stack_top[-1] \\ str r3, [r12] // g_shadow_stack_top = (g_shadow_stack_top - 1) \\ \\ bxns lr \\ \\ .align 2 \\ .L_g_shadow_stack_top_const4: .word g_shadow_stack_top ); } // ABORTING_SHADOWSTACK unreachable; } export fn __TCPrivateShadowAssert() callconv(.Naked) noreturn { @setRuntimeSafety(false); asm volatile ( \\ sg ); if (ENABLE_PROFILER) { asm volatile ( \\ push {r0, r1, r2, r3, lr} \\ bl TCShadowStackLogAssert \\ pop {r0, r1, r2, r3, lr} ); } // r12 = continuation // lr = non-trustworthy return target of the caller with bit[0] cleared // kill: r12 if (comptime ABORTING_SHADOWSTACK) { // if (g_shadow_stack_top[-1] != lr) { panic(); } // g_shadow_stack_top -= 1; // bxns(r12) // asm volatile ( \\ .syntax unified \\ \\ push {r0, r1, r2} \\ ldr r2, .L_g_shadow_stack_top_const3 // Get &g_shadow_stack_top \\ bic r12, #1 // Mark that `r12` is a Non-Secure address. \\ ldr r0, [r2] // Get g_shadow_stack_top \\ ldr r1, [r0, #-4]! // g_shadow_stack_top - 1, Load g_shadow_stack_top[-1] \\ str r0, [r2] // g_shadow_stack_top = (g_shadow_stack_top - 1) \\ cmp r1, lr // g_shadow_stack_top[-1] != lr \\ pop {r0, r1, r2} \\ bne .L_mismatch_trampoline3 // if (g_shadow_stack_top[-1] != lr) { ... } \\ \\ // Calling a secure gateway automatically clears LR[0]. It's useful \\ // for doing `bxns lr` in Secure code, but when used in Non-Secure \\ // mode, it just causes SecureFault. \\ orr lr, #1 \\ \\ bxns r12 \\ \\ .align 2 \\ .L_mismatch_trampoline3: b TCShadowStackMismatch \\ .L_g_shadow_stack_top_const3: .word g_shadow_stack_top ); } else { // ABORTING_SHADOWSTACK // // lr = g_shadow_stack_top[-1]; // g_shadow_stack_top -= 1; // bxns(r12) // asm volatile ( \\ .syntax unified \\ \\ push {r0, r2} \\ ldr r2, .L_g_shadow_stack_top_const3 // Get &g_shadow_stack_top \\ bic r12, #1 // Mark that `r12` is a Non-Secure address. \\ ldr r0, [r2] // Get g_shadow_stack_top \\ ldr lr, [r0, #-4]! // g_shadow_stack_top - 1, load g_shadow_stack_top[-1] \\ str r0, [r2] // g_shadow_stack_top = (g_shadow_stack_top - 1) \\ pop {r0, r2} \\ \\ // Calling a secure gateway automatically clears LR[0]. It's useful \\ // for doing `bxns lr` in Secure code, but when used in Non-Secure \\ // mode, it just causes SecureFault. \\ orr lr, #1 \\ \\ bxns r12 \\ \\ .align 2 \\ .L_g_shadow_stack_top_const3: .word g_shadow_stack_top ); } // ABORTING_SHADOWSTACK unreachable; } // Export the gateway functions to Non-Secure comptime { @export(__TCPrivateShadowPush, .{ .name = "__acle_se___TCPrivateShadowPush", .linkage = .Strong, .section = ".gnu.sgstubs" }); @export(__TCPrivateShadowAssertReturn, .{ .name = "__acle_se___TCPrivateShadowAssertReturn", .linkage = .Strong, .section = ".gnu.sgstubs" }); @export(__TCPrivateShadowAssertReturnFast, .{ .name = "__acle_se___TCPrivateShadowAssertReturnFast", .linkage = .Strong, .section = ".gnu.sgstubs" }); @export(__TCPrivateShadowAssert, .{ .name = "__acle_se___TCPrivateShadowAssert", .linkage = .Strong, .section = ".gnu.sgstubs" }); }
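
// Added illustration (not part of the original file): the guard-window
// bookkeeping performed by `StackState.new` above, restated in isolation.
// `gp_ents` extra entries are allocated on each side of the stack and then
// sliced off, so the MPU guard regions surround the usable window. This is a
// host-side sketch using the testing allocator; the real module targets
// Armv8-M and its code is not normally built or tested on the host.
test "shadow stack guard window sketch" {
    const gp_ents = MPU_GRANULARITY / @sizeOf(usize);
    const full = try std.testing.allocator.alignedAlloc(usize, STACK_ALIGN, STACK_SIZE + gp_ents * 2);
    defer std.testing.allocator.free(full);
    const usable = full[gp_ents .. full.len - gp_ents];
    std.debug.assert(usable.len == STACK_SIZE);
}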
src/monitor/shadowstack.zig
const std = @import("std"); const fs = std.fs; const Image = @import("../../image.zig").Image; const upaya = @import("../../upaya_cli.zig"); const math = upaya.math; const stb = upaya.stb; pub const PyxelEdit = struct { name: []const u8, canvas: CanvasJSON, version: []const u8, layers: []Layer, animations: []AnimationJSON, pub fn initFromFile(allocator: *std.mem.Allocator, file: []const u8) !PyxelEdit { @setEvalBranchQuota(2000); var zip_file = @ptrCast([*c]const u8, file); var zip = upaya.zip.zip_open(zip_file, 0, 'r'); var buf: ?*c_void = null; var size: u64 = 0; _ = upaya.zip.zip_entry_open(zip, "docData.json"); _ = upaya.zip.zip_entry_read(zip, &buf, &size); var content: []const u8 = @ptrCast([*]const u8, buf)[0..size]; const layersText = "layers\": {"; const layersEndText = "\"numLayers"; const indexLayersStart = std.mem.indexOfPos(u8, content, 0, layersText).? + layersText.len; const indexLayersEnd = std.mem.indexOfPos(u8, content, indexLayersStart, layersEndText).? - 7; const layersContent = content[indexLayersStart..indexLayersEnd]; const options = std.json.ParseOptions{ .allocator = allocator, .duplicate_field_behavior = .UseFirst, .ignore_unknown_fields = true, .allow_trailing_data = true }; const pyxelEditJson = try std.json.parse(PyxelEditJSON, &std.json.TokenStream.init(content), options); defer std.json.parseFree(PyxelEditJSON, pyxelEditJson, options); var layers: std.ArrayList(Layer) = std.ArrayList(Layer).init(upaya.mem.allocator); var i: usize = 0; while (i < pyxelEditJson.canvas.numLayers) : (i += 1) { const startText = try std.fmt.allocPrint(upaya.mem.allocator, "\"{d}\": ", .{i}); if (std.mem.indexOfPos(u8, layersContent, 0, startText)) |start| { if (std.mem.indexOfPos(u8, layersContent, start + startText.len, " },")) |end| { const layerJSON = try std.json.parse(LayerJSON, &std.json.TokenStream.init(layersContent[start + startText.len .. end + 2]), options); //defer std.json.parseFree(LayerJSON, layerJSON, options); const layer: Layer = .{ .type = layerJSON.type, .alpha = layerJSON.alpha, .name = layerJSON.name, .hidden = layerJSON.hidden, .blendMode = layerJSON.blendMode, .soloed = layerJSON.soloed, .muted = layerJSON.muted, .collapsed = layerJSON.collapsed, .parentIndex = layerJSON.parentIndex, .texture = undefined, }; try layers.append(layer); //std.debug.print("{s}", .{layersContent[start + startText.len - 1 .. end + 1]}); } else { var end = layersContent.len; const layerJSON = try std.json.parse(LayerJSON, &std.json.TokenStream.init(layersContent[start + startText.len .. end]), options); //defer std.json.parseFree(LayerJSON, layerJSON, options); const layer: Layer = .{ .type = layerJSON.type, .alpha = layerJSON.alpha, .name = layerJSON.name, .hidden = layerJSON.hidden, .blendMode = layerJSON.blendMode, .soloed = layerJSON.soloed, .muted = layerJSON.muted, .collapsed = layerJSON.collapsed, .parentIndex = layerJSON.parentIndex, .texture = undefined, }; //defer std.json.parseFree(Layer, layer, options); try layers.append(layer); } } else break; } const animationsText = "animations\": "; const indexAnimationStart = std.mem.indexOfPos(u8, content, 0, animationsText).? 
+ animationsText.len; const indexAnimationEnd = size - 4; const animationsContent = content[indexAnimationStart..indexAnimationEnd]; var animations: std.ArrayList(AnimationJSON) = std.ArrayList(AnimationJSON).init(upaya.mem.allocator); i = 0; while (true) : (i += 1) { const startText = try std.fmt.allocPrint(upaya.mem.allocator, "\"{d}\": ", .{i}); if (std.mem.indexOfPos(u8, animationsContent, 0, startText)) |start| { if (std.mem.indexOfPos(u8, animationsContent, start + startText.len, " },")) |end| { const animationsJSON = try std.json.parse(AnimationJSON, &std.json.TokenStream.init(animationsContent[start + startText.len .. end + 2]), options); //defer std.json.parseFree(LayerJSON, layerJSON, options); try animations.append(animationsJSON); //std.debug.print("{s}", .{animationsContent[start + startText.len - 1 .. end + 1]}); } else { var end = animationsContent.len; const animationsJSON = try std.json.parse(AnimationJSON, &std.json.TokenStream.init(animationsContent[start + startText.len .. end]), options); //defer std.json.parseFree(LayerJSON, layerJSON, options); //defer std.json.parseFree(Layer, layer, options); try animations.append(animationsJSON); } } else { break; } } var pyxelEdit: PyxelEdit = .{ .name = pyxelEditJson.name, .canvas = pyxelEditJson.canvas, .version = pyxelEditJson.version, .layers = layers.items, .animations = animations.items, }; _ = upaya.zip.zip_entry_close(zip); i = 0; while (i < pyxelEdit.layers.len) : (i += 1) { var pngBuf: ?*c_void = null; var pngSize: u64 = 0; const png = try std.fmt.allocPrint(upaya.mem.allocator, "layer{d}.png\u{0}", .{i}); _ = upaya.zip.zip_entry_open(zip, @ptrCast([*c]const u8, png)); _ = upaya.zip.zip_entry_read(zip, &pngBuf, &pngSize); pyxelEdit.layers[i].texture = upaya.Image.initFromData(@ptrCast([*c]const u8, pngBuf), pngSize); _ = upaya.zip.zip_entry_close(zip); //upaya.zip.zip_close(zip); } upaya.zip.zip_close(zip); return pyxelEdit; } pub const PyxelEditJSON = struct { name: []const u8, canvas: CanvasJSON, version: []const u8, }; pub const CanvasJSON = struct { height: i32, tileHeight: i32, numLayers: i32, currentLayerIndex: i32, width: i32, tileWidth: i32, }; pub const Layer = struct { type: []const u8, alpha: u8, name: []const u8, hidden: bool, blendMode: []const u8, soloed: bool, muted: bool, collapsed: bool, parentIndex: i32, texture: upaya.Image, }; pub const LayerJSON = struct { type: []const u8, alpha: u8, name: []const u8, hidden: bool, blendMode: []const u8, soloed: bool, muted: bool, collapsed: bool, parentIndex: i32, }; // pub const Tileset = struct { // tilesWide: i32, // tileHeight: i32, // numTiles: i32, // tileWidth: i32, // fixedWidth: bool, // }; // pub const Palette = struct { height: i32, width: i32, numColors: i32, colors: struct { // @"0": ?*u32, // @"1": ?*u32, // @"2": ?*u32, // @"3": ?*u32, // @"4": ?*u32, // @"5": ?*u32, // @"6": ?*u32, // @"7": ?*u32, // } }; pub const AnimationJSON = struct { frameDuration: i32, length: i32, frameDurationMultipliers: []i32, baseTile: i32, name: []const u8, }; };
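
// Added illustration (not part of the original file): `initFromFile` above
// carves the layer objects out of docData.json by slicing between the
// "layers\": {" marker and the "\"numLayers" marker, then parsing each
// numbered entry on its own. The test below restates just that slicing step on
// a made-up JSON fragment; the field values are invented for illustration.
test "docData.json layer-slice sketch" {
    const content =
        \\{"layers": {"0": { "name": "bg" } }, "numLayers": 1}
    ;
    const layersText = "layers\": {";
    const layersEndText = "\"numLayers";
    const start = std.mem.indexOfPos(u8, content, 0, layersText).? + layersText.len;
    const end = std.mem.indexOfPos(u8, content, start, layersEndText).?;
    std.debug.assert(std.mem.indexOf(u8, content[start..end], "\"0\":") != null);
}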
src/utils/importers/pyxeledit.zig
const std = @import("std"); const Allocator = std.mem.Allocator; const List = std.ArrayList; const Map = std.AutoHashMap; const StrMap = std.StringHashMap; const BitSet = std.DynamicBitSet; const Str = []const u8; const util = @import("util.zig"); const gpa = util.gpa; const data = @embedFile("../data/day04.txt"); pub fn main() !void { const input = outer: { var iter = tokenize(u8, data, "\n\r "); const numbers = blk: { // drawing numbers var numbers = std.ArrayList(u8).init(gpa); const line1 = iter.next().?; var line1_tokens = tokenize(u8, line1, ","); while (line1_tokens.next()) |s| { const n = try parseInt(u8, s, 10); try numbers.append(n); } break :blk numbers.toOwnedSlice(); }; const boards = blk: { // boards var boards = std.ArrayList([5][5]u8).init(gpa); var index: usize = 0; var board: [5][5]u8 = undefined; while(iter.next()) |line| { const i: usize = (index / 5) % 5; const j: usize = index % 5; const n = try parseInt(u8, line, 10); board[i][j] = n; if (i==4 and j==4) try boards.append(board); index += 1; } break :blk boards.toOwnedSlice(); }; break :outer .{.numbers = numbers, .boards = boards}; }; const numbers = input.numbers; const boards = input.boards; { // part 1 var winning_board_index: usize = 0; var winning_number_index: usize = 0; loop: for (numbers) |_, index| { for (boards) |board, board_index| { if (isWinner(numbers[0..index+1], board)) { winning_board_index = board_index; winning_number_index = index; break :loop; } } } const win_board = boards[winning_board_index]; const win_nums = numbers[0..winning_number_index+1]; // put all numbers from winning board into one array var board_nums: [25]u8 = undefined; { var index: usize = 0; for (win_board) |line| { for (line) |n| { board_nums[index] = n; index += 1; } } } var sum: u32 = 0; for (board_nums) |n| { for (win_nums) |m| { if (n == m) { break; } } else { sum += n; } } print("{}\n", .{sum * win_nums[win_nums.len-1]}); } { // part 2 var max_board_index: usize = 0; var max_index: usize = 0; for (boards) |board, board_index| { for (numbers) |_, index| { if (isWinner(numbers[0..index+1], board)) { if (index > max_index) { max_board_index = board_index; max_index = index; } break; } } } const win_board = boards[max_board_index]; const win_nums = numbers[0..max_index+1]; // put all numbers from winning board into one array var board_nums: [25]u8 = undefined; { var index: usize = 0; for (win_board) |line| { for (line) |n| { board_nums[index] = n; index += 1; } } } var sum: u32 = 0; for (board_nums) |n| { for (win_nums) |m| { if (n == m) { break; } } else { sum += n; } } print("{}\n", .{sum * win_nums[win_nums.len-1]}); } } fn isWinner (numbers: []u8, board: [5][5]u8) bool { if (numbers.len < 5) return false; { // check each row var i: usize = 0; while (i < 5) { var j: usize = 0; var allMatch = true; while (j < 5) { const n = board[i][j]; for (numbers) |m| { if (m == n) break; } else { allMatch = false; } j += 1; } if (allMatch) return true; i += 1; } } { // check each column var j: usize = 0; while (j < 5) { var i: usize = 0; var allMatch = true; while (i < 5) { const n = board[i][j]; for (numbers) |m| { if (m == n) break; } else { allMatch = false; } i += 1; } if (allMatch) return true; j += 1; } } return false; } // Useful stdlib functions const tokenize = std.mem.tokenize; const split = std.mem.split; const indexOf = std.mem.indexOfScalar; const indexOfAny = std.mem.indexOfAny; const indexOfStr = std.mem.indexOfPosLinear; const lastIndexOf = std.mem.lastIndexOfScalar; const lastIndexOfAny = std.mem.lastIndexOfAny; const 
lastIndexOfStr = std.mem.lastIndexOfLinear; const trim = std.mem.trim; const sliceMin = std.mem.min; const sliceMax = std.mem.max; const parseInt = std.fmt.parseInt; const parseFloat = std.fmt.parseFloat; const min = std.math.min; const min3 = std.math.min3; const max = std.math.max; const max3 = std.math.max3; const print = std.debug.print; const assert = std.debug.assert; const sort = std.sort.sort; const asc = std.sort.asc; const desc = std.sort.desc;
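
// Added sketch (not part of the original solution): a quick check of the win
// condition implemented by `isWinner` above, using a made-up board.
test "isWinner detects a completed row" {
    const board = [5][5]u8{
        .{ 1, 2, 3, 4, 5 },
        .{ 6, 7, 8, 9, 10 },
        .{ 11, 12, 13, 14, 15 },
        .{ 16, 17, 18, 19, 20 },
        .{ 21, 22, 23, 24, 25 },
    };
    var second_row = [_]u8{ 6, 7, 8, 9, 10 };
    var too_few = [_]u8{ 6, 7, 8 };
    try std.testing.expect(isWinner(second_row[0..], board));
    try std.testing.expect(!isWinner(too_few[0..], board));
}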
src/day04.zig
const std = @import("std"); const vk = @import("vulkan"); const c = @import("c.zig"); const resources = @import("resources"); const GraphicsContext = @import("graphics_context.zig").GraphicsContext; const Swapchain = @import("swapchain.zig").Swapchain; const Allocator = std.mem.Allocator; const za = @import("zalgebra"); const Mat4 = za.Mat4; const Vec3 = za.Vec3; const Vec2 = za.Vec2; const assert = std.debug.assert; const app_name = "vulkan-zig triangle example"; const Mesh = struct { index_offset: u32, vertex_offset: u32, num_indices: u32, num_vertices: u32, }; const Vertex = struct { const binding_description = vk.VertexInputBindingDescription{ .binding = 0, .stride = @sizeOf(Vertex), .input_rate = .vertex, }; const attribute_description = [_]vk.VertexInputAttributeDescription{ .{ .binding = 0, .location = 0, .format = .r32g32b32_sfloat, .offset = @offsetOf(Vertex, "pos"), }, .{ .binding = 0, .location = 1, .format = .r32g32b32_sfloat, .offset = @offsetOf(Vertex, "color"), }, .{ .binding = 0, .location = 2, .format = .r32g32_sfloat, .offset = @offsetOf(Vertex, "tex_coord"), }, }; pos: Vec3, color: Vec3 = Vec3.zero(), tex_coord: Vec2, }; const UniformBufferObject = struct { model: Mat4, view: Mat4, proj: Mat4, }; const CameraPos = struct { pitch: f32, yaw: f32, pos: Vec3, quat: za.Quat = za.Quat.zero(), const rotate_speed: f32 = 85; const move_speed: f32 = 12; pub fn getMatrix(self: *CameraPos) Mat4 { const target = self.quat.rotateVec(Vec3.forward()); return za.lookAt(self.pos, self.pos.add(target), Vec3.up()); } fn moveCamera(self: *CameraPos, window: *c.GLFWwindow, dt: f32) void { var x_dir: f32 = 0; var y_dir: f32 = 0; if (c.glfwGetKey(window, c.GLFW_KEY_J) == c.GLFW_PRESS) y_dir += dt; if (c.glfwGetKey(window, c.GLFW_KEY_K) == c.GLFW_PRESS) y_dir -= dt; if (c.glfwGetKey(window, c.GLFW_KEY_H) == c.GLFW_PRESS) x_dir += dt; if (c.glfwGetKey(window, c.GLFW_KEY_L) == c.GLFW_PRESS) x_dir -= dt; // limit pitch values between about +/- 85ish degrees self.yaw += x_dir * rotate_speed; self.pitch += y_dir * rotate_speed; self.pitch = std.math.clamp(self.pitch, -85, 85); self.yaw = std.math.mod(f32, self.yaw, 360) catch unreachable; var move_dir = Vec3.zero(); if (c.glfwGetKey(window, c.GLFW_KEY_W) == c.GLFW_PRESS) move_dir.z += dt; if (c.glfwGetKey(window, c.GLFW_KEY_S) == c.GLFW_PRESS) move_dir.z -= dt; if (c.glfwGetKey(window, c.GLFW_KEY_A) == c.GLFW_PRESS) move_dir.x += dt; if (c.glfwGetKey(window, c.GLFW_KEY_D) == c.GLFW_PRESS) move_dir.x -= dt; if (c.glfwGetKey(window, c.GLFW_KEY_SPACE) == c.GLFW_PRESS) move_dir.y += dt; if (c.glfwGetKey(window, c.GLFW_KEY_LEFT_CONTROL) == c.GLFW_PRESS) move_dir.y -= dt; self.quat = za.Quat.fromEulerAngle(Vec3.new(self.pitch, self.yaw, 0)); const translation = self.quat.rotateVec(move_dir.scale(move_speed)); self.pos = self.pos.add(translation); } }; const BufferMemory = struct { buffer: vk.Buffer, memory: vk.DeviceMemory, pub fn init( gc: GraphicsContext, size: vk.DeviceSize, usage: vk.BufferUsageFlags, properties: vk.MemoryPropertyFlags, ) !BufferMemory { var bm: BufferMemory = undefined; bm.buffer = try gc.vkd.createBuffer(gc.dev, &.{ .flags = .{}, .size = size, .usage = usage, .sharing_mode = .exclusive, .queue_family_index_count = 0, .p_queue_family_indices = undefined, }, null); const mem_reqs = gc.vkd.getBufferMemoryRequirements(gc.dev, bm.buffer); bm.memory = try gc.allocate(mem_reqs, properties); try gc.vkd.bindBufferMemory(gc.dev, bm.buffer, bm.memory, 0); return bm; } pub fn deinit(self: BufferMemory, gc: GraphicsContext) void { 
gc.vkd.freeMemory(gc.dev, self.memory, null); gc.vkd.destroyBuffer(gc.dev, self.buffer, null); } }; const TextureImage = struct { image: vk.Image, memory: vk.DeviceMemory, view: vk.ImageView, sampler: vk.Sampler, pub fn init( gc: GraphicsContext, width: u32, height: u32, format: vk.Format, tiling: vk.ImageTiling, usage: vk.ImageUsageFlags, properties: vk.MemoryPropertyFlags, ) !TextureImage { var bm: TextureImage = undefined; const ici = vk.ImageCreateInfo{ .flags = .{}, .image_type = .@"2d", .format = format, .extent = .{ .width = width, .height = height, .depth = 1, }, .mip_levels = 1, .array_layers = 1, .samples = .{ .@"1_bit" = true }, .tiling = tiling, .usage = usage, .sharing_mode = .exclusive, .queue_family_index_count = 0, .p_queue_family_indices = undefined, .initial_layout = .@"undefined", }; bm.image = try gc.vkd.createImage(gc.dev, &ici, null); errdefer gc.vkd.destroyImage(gc.dev, bm.image, null); const mem_reqs = gc.vkd.getImageMemoryRequirements(gc.dev, bm.image); bm.memory = try gc.allocate(mem_reqs, properties); errdefer gc.vkd.freeMemory(gc.dev, bm.memory, null); try gc.vkd.bindImageMemory(gc.dev, bm.image, bm.memory, 0); bm.view = try gc.vkd.createImageView(gc.dev, &.{ .flags = .{}, .image = bm.image, .view_type = .@"2d", .format = format, .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, .subresource_range = .{ .aspect_mask = .{ .color_bit = true }, .base_mip_level = 0, .level_count = 1, .base_array_layer = 0, .layer_count = 1, }, }, null); errdefer gc.vkd.destroyImageView(gc.dev, bm.view, null); const sci = vk.SamplerCreateInfo{ .flags = .{}, .mag_filter = .linear, .min_filter = .linear, .mipmap_mode = .linear, .address_mode_u = .repeat, .address_mode_v = .repeat, .address_mode_w = .repeat, .mip_lod_bias = 0, .anisotropy_enable = vk.TRUE, .max_anisotropy = gc.props.limits.max_sampler_anisotropy, .compare_enable = vk.FALSE, .compare_op = .always, .min_lod = 0, .max_lod = 0, .border_color = .int_opaque_black, .unnormalized_coordinates = vk.FALSE, }; bm.sampler = try gc.vkd.createSampler(gc.dev, &sci, null); return bm; } pub fn deinit(self: TextureImage, gc: GraphicsContext) void { gc.vkd.freeMemory(gc.dev, self.memory, null); gc.vkd.destroyImage(gc.dev, self.image, null); gc.vkd.destroyImageView(gc.dev, self.view, null); gc.vkd.destroySampler(gc.dev, self.sampler, null); } }; const DepthImage = struct { image: vk.Image, memory: vk.DeviceMemory, view: vk.ImageView, pub fn init( gc: GraphicsContext, width: u32, height: u32, format: vk.Format, tiling: vk.ImageTiling, usage: vk.ImageUsageFlags, properties: vk.MemoryPropertyFlags, ) !DepthImage { var bm: DepthImage = undefined; const ici = vk.ImageCreateInfo{ .flags = .{}, .image_type = .@"2d", .format = format, .extent = .{ .width = width, .height = height, .depth = 1, }, .mip_levels = 1, .array_layers = 1, .samples = .{ .@"1_bit" = true }, .tiling = tiling, .usage = usage, .sharing_mode = .exclusive, .queue_family_index_count = 0, .p_queue_family_indices = undefined, .initial_layout = .@"undefined", }; bm.image = try gc.vkd.createImage(gc.dev, &ici, null); errdefer gc.vkd.destroyImage(gc.dev, bm.image, null); const mem_reqs = gc.vkd.getImageMemoryRequirements(gc.dev, bm.image); bm.memory = try gc.allocate(mem_reqs, properties); errdefer gc.vkd.freeMemory(gc.dev, bm.memory, null); try gc.vkd.bindImageMemory(gc.dev, bm.image, bm.memory, 0); bm.view = try gc.vkd.createImageView(gc.dev, &.{ .flags = .{}, .image = bm.image, .view_type = .@"2d", .format = format, .components = .{ .r = .identity, .g 
= .identity, .b = .identity, .a = .identity }, .subresource_range = .{ // NOTE: different compare to Swapchain image and TextureImage .aspect_mask = .{ .depth_bit = true }, .base_mip_level = 0, .level_count = 1, .base_array_layer = 0, .layer_count = 1, }, }, null); return bm; } pub fn deinit(self: DepthImage, gc: GraphicsContext) void { gc.vkd.freeMemory(gc.dev, self.memory, null); gc.vkd.destroyImage(gc.dev, self.image, null); gc.vkd.destroyImageView(gc.dev, self.view, null); } pub fn findDepthFormat(gc: GraphicsContext) ?vk.Format { return gc.findSupportedFormat( &.{ .d32_sfloat, .d32_sfloat_s8_uint, .d24_unorm_s8_uint }, .optimal, .{ .depth_stencil_attachment_bit = true }, ); } pub fn hasStencilComponent(format: vk.Format) bool { return format == .d32_sfloat_s8_uint or format == .d24_unorm_s8_uint; } }; pub fn main() !void { if (c.glfwInit() != c.GLFW_TRUE) return error.GlfwInitFailed; defer c.glfwTerminate(); var extent = vk.Extent2D{ .width = 800, .height = 600 }; c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); const window = c.glfwCreateWindow( @intCast(c_int, extent.width), @intCast(c_int, extent.height), app_name, null, null, ) orelse return error.WindowInitFailed; defer c.glfwDestroyWindow(window); const allocator = std.heap.page_allocator; const gc = try GraphicsContext.init(allocator, app_name, window); defer gc.deinit(); std.debug.print("Using device: {s}\n", .{gc.deviceName()}); var swapchain = try Swapchain.init(&gc, allocator, extent); defer swapchain.deinit(); const render_pass = try createRenderPass(gc, swapchain); defer gc.vkd.destroyRenderPass(gc.dev, render_pass, null); const descriptor_layout = try createDescriptorSetLayout(gc); defer gc.vkd.destroyDescriptorSetLayout(gc.dev, descriptor_layout, null); const pipeline_layout = try gc.vkd.createPipelineLayout(gc.dev, &.{ .flags = .{}, .set_layout_count = 1, .p_set_layouts = @ptrCast([*]const vk.DescriptorSetLayout, &descriptor_layout), .push_constant_range_count = 0, .p_push_constant_ranges = undefined, }, null); defer gc.vkd.destroyPipelineLayout(gc.dev, pipeline_layout, null); var pipeline = try createPipeline(&gc, pipeline_layout, render_pass); defer gc.vkd.destroyPipeline(gc.dev, pipeline, null); var depth_image = try createDepthResources(gc, swapchain.extent); defer depth_image.deinit(gc); var framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain, depth_image); defer destroyFramebuffers(&gc, allocator, framebuffers); const pool = try gc.vkd.createCommandPool(gc.dev, &.{ .flags = .{}, .queue_family_index = gc.graphics_queue.family, }, null); defer gc.vkd.destroyCommandPool(gc.dev, pool, null); const model_path = "assets/untitled.gltf"; const texture_path = "assets/viking_room.png"; var indices = std.ArrayList(u32).init(allocator); defer indices.deinit(); var vertices = std.ArrayList(Vertex).init(allocator); var meshs = std.ArrayList(Mesh).init(allocator); defer vertices.deinit(); // var arena = std.heap.ArenaAllocator.init(allocator); loadScene(allocator, &meshs, &vertices, &indices, model_path); // arena.deinit(); const vertex_buffer = try createVertexBuffer(gc, pool, vertices.items); defer vertex_buffer.deinit(gc); const index_buffer = try createIndexBuffer(gc, pool, indices.items); defer index_buffer.deinit(gc); var unibufs = try createUniformBuffer(gc, allocator, framebuffers); defer destroyUniformBuffers(gc, allocator, unibufs); const texture = try createTextureImage(gc, pool, texture_path); defer texture.deinit(gc); var descriptor_pool = try createDescriptorPool(gc, framebuffers); 
defer gc.vkd.destroyDescriptorPool(gc.dev, descriptor_pool, null); var descriptor_sets = try createDescriptorSets( gc, allocator, descriptor_pool, descriptor_layout, unibufs, texture, ); defer allocator.free(descriptor_sets); var cmdbufs = try createCommandBuffers( &gc, pool, allocator, vertex_buffer.buffer, index_buffer.buffer, swapchain.extent, render_pass, pipeline, framebuffers, pipeline_layout, descriptor_sets, meshs.items, ); defer destroyCommandBuffers(&gc, pool, allocator, cmdbufs); var update_timer = try std.time.Timer.start(); var camera = CameraPos{ .pos = Vec3.new(0, 0, -2), .pitch = 0, .yaw = 0, }; while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { const dt = @intToFloat(f32, update_timer.lap()) / @intToFloat(f32, std.time.ns_per_s); const cmdbuf = cmdbufs[swapchain.image_index]; const unibuf = unibufs[swapchain.image_index]; camera.moveCamera(window, dt); try updateUniformBuffer( gc, unibuf, swapchain.extent, camera.getMatrix(), ); //TODO: chapter 2 descriptor set const state = swapchain.present(cmdbuf) catch |err| switch (err) { error.OutOfDateKHR => Swapchain.PresentState.suboptimal, else => |narrow| return narrow, }; var w: c_int = undefined; var h: c_int = undefined; c.glfwGetWindowSize(window, &w, &h); if (state == .suboptimal or extent.width != @intCast(u32, w) or extent.height != @intCast(u32, h)) { extent.width = @intCast(u32, w); extent.height = @intCast(u32, h); try swapchain.recreate(extent); depth_image.deinit(gc); depth_image = try createDepthResources(gc, swapchain.extent); destroyFramebuffers(&gc, allocator, framebuffers); framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain, depth_image); destroyUniformBuffers(gc, allocator, unibufs); unibufs = try createUniformBuffer(gc, allocator, framebuffers); gc.vkd.destroyDescriptorPool(gc.dev, descriptor_pool, null); descriptor_pool = try createDescriptorPool(gc, framebuffers); allocator.free(descriptor_sets); descriptor_sets = try createDescriptorSets( gc, allocator, descriptor_pool, descriptor_layout, unibufs, texture, ); destroyCommandBuffers(&gc, pool, allocator, cmdbufs); cmdbufs = try createCommandBuffers( &gc, pool, allocator, vertex_buffer.buffer, index_buffer.buffer, swapchain.extent, render_pass, pipeline, framebuffers, pipeline_layout, descriptor_sets, meshs.items, ); } c.glfwPollEvents(); } try swapchain.waitForAllFences(); } fn updateUniformBuffer(gc: GraphicsContext, buffer: BufferMemory, extent: vk.Extent2D, view: Mat4) !void { var proj = za.perspective( 60.0, @intToFloat(f32, extent.width) / @intToFloat(f32, extent.height), 0.1, 100.0, ); proj.data[1][1] *= -1; const ubo = UniformBufferObject{ .proj = proj, .view = view, .model = Mat4.identity(), }; { const data = try gc.vkd.mapMemory(gc.dev, buffer.memory, 0, @sizeOf(UniformBufferObject), .{}); defer gc.vkd.unmapMemory(gc.dev, buffer.memory); const gpu_memory = @ptrCast(*UniformBufferObject, @alignCast(@alignOf(UniformBufferObject), data)); gpu_memory.* = ubo; } } fn createIndexBuffer(gc: GraphicsContext, pool: vk.CommandPool, indices: []const u32) !BufferMemory { const size = @sizeOf(u32) * indices.len; var buffer = try BufferMemory.init( gc, size, .{ .transfer_dst_bit = true, .index_buffer_bit = true }, .{ .device_local_bit = true }, ); var stage_buffer = try BufferMemory.init( gc, size, .{ .transfer_src_bit = true }, .{ .host_coherent_bit = true, .host_visible_bit = true }, ); defer stage_buffer.deinit(gc); // Copy vertices to stage buffer { const data = try gc.vkd.mapMemory(gc.dev, stage_buffer.memory, 0, 
vk.WHOLE_SIZE, .{}); defer gc.vkd.unmapMemory(gc.dev, stage_buffer.memory); const gpu_memory = @ptrCast([*]u32, @alignCast(@alignOf(u32), data)); for (indices) |indice, i| { gpu_memory[i] = indice; } } // Copy containt form stage buffer to vertex buffer try copyBuffer(gc, pool, buffer.buffer, stage_buffer.buffer, size); return buffer; } fn createVertexBuffer(gc: GraphicsContext, pool: vk.CommandPool, vertices: []const Vertex) !BufferMemory { const size = @sizeOf(Vertex) * vertices.len; var vertex_buffer = try BufferMemory.init( gc, size, .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, .{ .device_local_bit = true }, ); var stage_buffer = try BufferMemory.init( gc, size, .{ .transfer_src_bit = true }, .{ .host_coherent_bit = true, .host_visible_bit = true }, ); defer stage_buffer.deinit(gc); // Copy vertices to stage buffer { const data = try gc.vkd.mapMemory(gc.dev, stage_buffer.memory, 0, vk.WHOLE_SIZE, .{}); defer gc.vkd.unmapMemory(gc.dev, stage_buffer.memory); const gpu_vertices = @ptrCast([*]Vertex, @alignCast(@alignOf(Vertex), data)); for (vertices) |vertex, i| { gpu_vertices[i] = vertex; } } // Copy containt form stage buffer to vertex buffer try copyBuffer(gc, pool, vertex_buffer.buffer, stage_buffer.buffer, size); return vertex_buffer; } fn copyBuffer(gc: GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void { const cmdbuf = try beginSingleTimeCommand(gc, pool); const region = vk.BufferCopy{ .src_offset = 0, .dst_offset = 0, .size = size, }; gc.vkd.cmdCopyBuffer(cmdbuf, src, dst, 1, @ptrCast([*]const vk.BufferCopy, &region)); try endSingleTimeCommands(gc, pool, cmdbuf); } fn createCommandBuffers( gc: *const GraphicsContext, pool: vk.CommandPool, allocator: Allocator, buffer: vk.Buffer, index_buffer: vk.Buffer, extent: vk.Extent2D, render_pass: vk.RenderPass, pipeline: vk.Pipeline, framebuffers: []vk.Framebuffer, pipeline_layout: vk.PipelineLayout, descriptor_sets: []vk.DescriptorSet, meshs: []const Mesh, ) ![]vk.CommandBuffer { const cmdbufs = try allocator.alloc(vk.CommandBuffer, framebuffers.len); errdefer allocator.free(cmdbufs); try gc.vkd.allocateCommandBuffers(gc.dev, &.{ .command_pool = pool, .level = .primary, .command_buffer_count = @truncate(u32, cmdbufs.len), }, cmdbufs.ptr); errdefer gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(u32, cmdbufs.len), cmdbufs.ptr); const clear = [_]vk.ClearValue{ .{ .color = .{ .float_32 = .{ 0, 0, 0, 1 } }, }, .{ .depth_stencil = .{ .depth = 1, .stencil = 0 }, }, }; const viewport = vk.Viewport{ .x = 0, .y = 0, .width = @intToFloat(f32, extent.width), .height = @intToFloat(f32, extent.height), .min_depth = 0, .max_depth = 1, }; const scissor = vk.Rect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = extent, }; for (cmdbufs) |cmdbuf, i| { try gc.vkd.beginCommandBuffer(cmdbuf, &.{ .flags = .{}, .p_inheritance_info = null, }); gc.vkd.cmdSetViewport(cmdbuf, 0, 1, @ptrCast([*]const vk.Viewport, &viewport)); gc.vkd.cmdSetScissor(cmdbuf, 0, 1, @ptrCast([*]const vk.Rect2D, &scissor)); // This needs to be a separate definition - see https://github.com/ziglang/zig/issues/7627. 
const render_area = vk.Rect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = extent, }; gc.vkd.cmdBeginRenderPass(cmdbuf, &.{ .render_pass = render_pass, .framebuffer = framebuffers[i], .render_area = render_area, .clear_value_count = @truncate(u32, clear.len), .p_clear_values = @ptrCast([*]const vk.ClearValue, &clear), }, .@"inline"); gc.vkd.cmdBindPipeline(cmdbuf, .graphics, pipeline); const offset = [_]vk.DeviceSize{0}; gc.vkd.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast([*]const vk.Buffer, &buffer), &offset); gc.vkd.cmdBindIndexBuffer(cmdbuf, index_buffer, 0, .uint32); gc.vkd.cmdBindDescriptorSets( cmdbuf, .graphics, pipeline_layout, 0, 1, @ptrCast([*]const vk.DescriptorSet, &descriptor_sets[i]), 0, undefined, ); for (meshs) |mesh| { gc.vkd.cmdDrawIndexed(cmdbuf, mesh.num_indices, 1, mesh.index_offset, @intCast(i32, mesh.vertex_offset), 0); } gc.vkd.cmdEndRenderPass(cmdbuf); try gc.vkd.endCommandBuffer(cmdbuf); } return cmdbufs; } fn destroyCommandBuffers( gc: *const GraphicsContext, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer, ) void { gc.vkd.freeCommandBuffers(gc.dev, pool, @truncate(u32, cmdbufs.len), cmdbufs.ptr); allocator.free(cmdbufs); } fn createFramebuffers( gc: *const GraphicsContext, allocator: Allocator, render_pass: vk.RenderPass, swapchain: Swapchain, depth_image: DepthImage, ) ![]vk.Framebuffer { const framebuffers = try allocator.alloc(vk.Framebuffer, swapchain.swap_images.len); errdefer allocator.free(framebuffers); var i: usize = 0; errdefer for (framebuffers[0..i]) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null); for (framebuffers) |*fb| { const attachments = [_]vk.ImageView{ swapchain.swap_images[i].view, depth_image.view, }; fb.* = try gc.vkd.createFramebuffer(gc.dev, &.{ .flags = .{}, .render_pass = render_pass, .attachment_count = @truncate(u32, attachments.len), .p_attachments = @ptrCast([*]const vk.ImageView, &attachments), .width = swapchain.extent.width, .height = swapchain.extent.height, .layers = 1, }, null); i += 1; } return framebuffers; } fn destroyFramebuffers(gc: *const GraphicsContext, allocator: Allocator, framebuffers: []const vk.Framebuffer) void { for (framebuffers) |fb| gc.vkd.destroyFramebuffer(gc.dev, fb, null); allocator.free(framebuffers); } fn createRenderPass(gc: GraphicsContext, swapchain: Swapchain) !vk.RenderPass { const attachments = [_]vk.AttachmentDescription{ // color attachment .{ .flags = .{}, .format = swapchain.surface_format.format, .samples = .{ .@"1_bit" = true }, .load_op = .clear, .store_op = .store, .stencil_load_op = .dont_care, .stencil_store_op = .dont_care, .initial_layout = .@"undefined", .final_layout = .present_src_khr, }, //depth_attachment .{ .flags = .{}, .format = DepthImage.findDepthFormat(gc).?, .samples = .{ .@"1_bit" = true }, .load_op = .clear, .store_op = .dont_care, .stencil_load_op = .dont_care, .stencil_store_op = .dont_care, .initial_layout = .@"undefined", .final_layout = .depth_stencil_attachment_optimal, }, }; const color_attachment_ref = vk.AttachmentReference{ .attachment = 0, .layout = .color_attachment_optimal, }; const depth_attachment_ref = vk.AttachmentReference{ .attachment = 1, .layout = .depth_stencil_attachment_optimal, }; const subpass = vk.SubpassDescription{ .flags = .{}, .pipeline_bind_point = .graphics, .input_attachment_count = 0, .p_input_attachments = undefined, .color_attachment_count = 1, .p_color_attachments = @ptrCast([*]const vk.AttachmentReference, &color_attachment_ref), .p_resolve_attachments = null, .p_depth_stencil_attachment = 
&depth_attachment_ref, .preserve_attachment_count = 0, .p_preserve_attachments = undefined, }; const sd = vk.SubpassDependency{ .src_subpass = vk.SUBPASS_EXTERNAL, .dst_subpass = 0, .src_stage_mask = .{ .color_attachment_output_bit = true, .early_fragment_tests_bit = true }, .dst_stage_mask = .{ .color_attachment_output_bit = true, .early_fragment_tests_bit = true }, .src_access_mask = .{}, .dst_access_mask = .{ .color_attachment_write_bit = true, .depth_stencil_attachment_write_bit = true }, .dependency_flags = .{}, }; return try gc.vkd.createRenderPass(gc.dev, &.{ .flags = .{}, .attachment_count = @truncate(u32, attachments.len), .p_attachments = @ptrCast([*]const vk.AttachmentDescription, &attachments), .subpass_count = 1, .p_subpasses = @ptrCast([*]const vk.SubpassDescription, &subpass), .dependency_count = 1, .p_dependencies = @ptrCast([*]const vk.SubpassDependency, &sd), }, null); } fn createPipeline( gc: *const GraphicsContext, layout: vk.PipelineLayout, render_pass: vk.RenderPass, ) !vk.Pipeline { const vert = try gc.vkd.createShaderModule(gc.dev, &.{ .flags = .{}, .code_size = resources.triangle_vert.len, .p_code = @ptrCast([*]const u32, resources.triangle_vert), }, null); defer gc.vkd.destroyShaderModule(gc.dev, vert, null); const frag = try gc.vkd.createShaderModule(gc.dev, &.{ .flags = .{}, .code_size = resources.triangle_frag.len, .p_code = @ptrCast([*]const u32, resources.triangle_frag), }, null); defer gc.vkd.destroyShaderModule(gc.dev, frag, null); const pssci = [_]vk.PipelineShaderStageCreateInfo{ .{ .flags = .{}, .stage = .{ .vertex_bit = true }, .module = vert, .p_name = "main", // There is one more (optional) member, pSpecializationInfo, which we won't be using here, // but is worth discussing. It allows you to specify values for shader constants. // You can use a single shader module where its behavior can be configured at pipeline // creation by specifying different values for the constants used in it. This is more efficient // than configuring the shader using variables at render time, because the compiler can // do optimizations like eliminating if statements that depend on these values .p_specialization_info = null, }, .{ .flags = .{}, .stage = .{ .fragment_bit = true }, .module = frag, .p_name = "main", .p_specialization_info = null, }, }; const pvisci = vk.PipelineVertexInputStateCreateInfo{ .flags = .{}, .vertex_binding_description_count = 1, .p_vertex_binding_descriptions = @ptrCast([*]const vk.VertexInputBindingDescription, &Vertex.binding_description), .vertex_attribute_description_count = Vertex.attribute_description.len, .p_vertex_attribute_descriptions = &Vertex.attribute_description, }; const piasci = vk.PipelineInputAssemblyStateCreateInfo{ .flags = .{}, .topology = .triangle_list, .primitive_restart_enable = vk.FALSE, }; const pvsci = vk.PipelineViewportStateCreateInfo{ .flags = .{}, .viewport_count = 1, .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport .scissor_count = 1, .p_scissors = undefined, // set in createCommandBuffers with cmdSetScissor }; // https://vulkan-tutorial.com/en/Drawing_a_triangle/Graphics_pipeline_basics/Fixed_functions#page_Rasterizer const prsci = vk.PipelineRasterizationStateCreateInfo{ .flags = .{}, // If depthClampEnable is set to VK_TRUE, then fragments that are beyond the near and far planes // are clamped to them as opposed to discarding them. This is useful in some special cases like shadow maps. 
// Using this requires enabling a GPU feature .depth_clamp_enable = vk.FALSE, // If rasterizerDiscardEnable is set to VK_TRUE, then geometry never passes through the rasterizer stage. // This basically disables any output to the framebuffer. .rasterizer_discard_enable = vk.FALSE, .polygon_mode = .fill, .cull_mode = .{ .back_bit = true }, .front_face = .counter_clockwise, .depth_bias_enable = vk.FALSE, .depth_bias_constant_factor = 0, .depth_bias_clamp = 0, .depth_bias_slope_factor = 0, .line_width = 1, }; const pmsci = vk.PipelineMultisampleStateCreateInfo{ .flags = .{}, .rasterization_samples = .{ .@"1_bit" = true }, .sample_shading_enable = vk.FALSE, .min_sample_shading = 1, .p_sample_mask = null, .alpha_to_coverage_enable = vk.FALSE, .alpha_to_one_enable = vk.FALSE, }; const pcbas = vk.PipelineColorBlendAttachmentState{ .blend_enable = vk.FALSE, .src_color_blend_factor = .one, .dst_color_blend_factor = .zero, .color_blend_op = .add, .src_alpha_blend_factor = .one, .dst_alpha_blend_factor = .zero, .alpha_blend_op = .add, .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, }; const pcbsci = vk.PipelineColorBlendStateCreateInfo{ .flags = .{}, .logic_op_enable = vk.FALSE, .logic_op = .copy, .attachment_count = 1, .p_attachments = @ptrCast([*]const vk.PipelineColorBlendAttachmentState, &pcbas), .blend_constants = [_]f32{ 0, 0, 0, 0 }, }; const dynstate = [_]vk.DynamicState{ .viewport, .scissor }; const pdsci = vk.PipelineDynamicStateCreateInfo{ .flags = .{}, .dynamic_state_count = dynstate.len, .p_dynamic_states = &dynstate, }; const pdssci = vk.PipelineDepthStencilStateCreateInfo{ .flags = .{}, .depth_test_enable = vk.TRUE, .depth_write_enable = vk.TRUE, .depth_compare_op = .less, .depth_bounds_test_enable = vk.FALSE, .stencil_test_enable = vk.FALSE, .front = undefined, .back = undefined, .min_depth_bounds = 0, .max_depth_bounds = 1, }; const gpci = vk.GraphicsPipelineCreateInfo{ .flags = .{}, .stage_count = 2, .p_stages = &pssci, .p_vertex_input_state = &pvisci, .p_input_assembly_state = &piasci, .p_tessellation_state = null, .p_viewport_state = &pvsci, .p_rasterization_state = &prsci, .p_multisample_state = &pmsci, .p_depth_stencil_state = &pdssci, .p_color_blend_state = &pcbsci, .p_dynamic_state = &pdsci, .layout = layout, .render_pass = render_pass, .subpass = 0, .base_pipeline_handle = .null_handle, .base_pipeline_index = -1, }; var pipeline: vk.Pipeline = undefined; _ = try gc.vkd.createGraphicsPipelines( gc.dev, .null_handle, 1, @ptrCast([*]const vk.GraphicsPipelineCreateInfo, &gpci), null, @ptrCast([*]vk.Pipeline, &pipeline), ); return pipeline; } fn createDescriptorSetLayout(gc: GraphicsContext) !vk.DescriptorSetLayout { const dslb = [2]vk.DescriptorSetLayoutBinding{ .{ .binding = 0, .descriptor_type = .uniform_buffer, .descriptor_count = 1, .stage_flags = .{ .vertex_bit = true }, .p_immutable_samplers = null, }, .{ .binding = 1, .descriptor_type = .combined_image_sampler, .descriptor_count = 1, .stage_flags = .{ .fragment_bit = true }, .p_immutable_samplers = null, }, }; return try gc.vkd.createDescriptorSetLayout(gc.dev, &.{ .flags = .{}, .binding_count = @truncate(u32, dslb.len), .p_bindings = @ptrCast([*]const vk.DescriptorSetLayoutBinding, &dslb), }, null); } fn createDescriptorSets( gc: GraphicsContext, allocator: Allocator, descriptor_pool: vk.DescriptorPool, layout: vk.DescriptorSetLayout, unibufs: []const BufferMemory, texture: TextureImage, ) ![]vk.DescriptorSet { const size = @truncate(u32, unibufs.len); var layouts = try 
allocator.alloc(@TypeOf(layout), size); defer allocator.free(layouts); for (layouts) |*l| { l.* = layout; } const dsai = vk.DescriptorSetAllocateInfo{ .descriptor_pool = descriptor_pool, .descriptor_set_count = size, .p_set_layouts = @ptrCast([*]const vk.DescriptorSetLayout, layouts), }; var sets = try allocator.alloc(vk.DescriptorSet, size); try gc.vkd.allocateDescriptorSets(gc.dev, &dsai, sets.ptr); for (unibufs) |unibuf, i| { const dbi = vk.DescriptorBufferInfo{ .buffer = unibuf.buffer, .offset = 0, .range = @sizeOf(UniformBufferObject), }; const dii = vk.DescriptorImageInfo{ .sampler = texture.sampler, .image_view = texture.view, .image_layout = .shader_read_only_optimal, }; const wds = [2]vk.WriteDescriptorSet{ .{ .dst_set = sets[i], .dst_binding = 0, .dst_array_element = 0, .descriptor_count = 1, .descriptor_type = .uniform_buffer, .p_image_info = undefined, .p_buffer_info = @ptrCast([*]const vk.DescriptorBufferInfo, &dbi), .p_texel_buffer_view = undefined, }, .{ .dst_set = sets[i], .dst_binding = 1, .dst_array_element = 0, .descriptor_count = 1, .descriptor_type = .combined_image_sampler, .p_image_info = @ptrCast([*]const vk.DescriptorImageInfo, &dii), .p_buffer_info = undefined, .p_texel_buffer_view = undefined, }, }; gc.vkd.updateDescriptorSets( gc.dev, @truncate(u32, wds.len), @ptrCast([*]const vk.WriteDescriptorSet, &wds), 0, undefined, ); } return sets; } fn createUniformBuffer(gc: GraphicsContext, allocator: Allocator, framebuffers: []const vk.Framebuffer) ![]BufferMemory { const unibufs = try allocator.alloc(BufferMemory, framebuffers.len); errdefer allocator.free(unibufs); const size = @sizeOf(UniformBufferObject); for (unibufs) |*buf| { buf.* = try BufferMemory.init( gc, size, .{ .uniform_buffer_bit = true }, .{ .host_coherent_bit = true, .host_visible_bit = true }, ); } return unibufs; } fn destroyUniformBuffers(gc: GraphicsContext, allocator: Allocator, bufs: []BufferMemory) void { for (bufs) |b| { b.deinit(gc); } allocator.free(bufs); } fn createDescriptorPool(gc: GraphicsContext, framebuffers: []const vk.Framebuffer) !vk.DescriptorPool { const size = @truncate(u32, framebuffers.len); var pool_size = [2]vk.DescriptorPoolSize{ .{ .@"type" = .uniform_buffer, .descriptor_count = size, }, .{ .@"type" = .combined_image_sampler, .descriptor_count = size, }, }; const dpci = vk.DescriptorPoolCreateInfo{ .flags = .{}, .max_sets = size, .pool_size_count = @truncate(u32, pool_size.len), .p_pool_sizes = @ptrCast([*]const vk.DescriptorPoolSize, &pool_size), }; return try gc.vkd.createDescriptorPool(gc.dev, &dpci, null); } pub fn createTextureImage(gc: GraphicsContext, pool: vk.CommandPool, path: []const u8) !TextureImage { var tex_width: i32 = 0; var tex_height: i32 = 0; var tex_channels: i32 = 0; const pixels = c.stbi_load(@ptrCast([*c]const u8, path), &tex_width, &tex_height, &tex_channels, c.STBI_rgb_alpha); const size = @intCast(vk.DeviceSize, tex_width) * @intCast(vk.DeviceSize, tex_height) * 4; assert(pixels != null and size > 0); defer c.stbi_image_free(pixels); var stage_buffer = try BufferMemory.init( gc, size, .{ .transfer_src_bit = true }, .{ .host_coherent_bit = true, .host_visible_bit = true }, ); defer stage_buffer.deinit(gc); { const data = try gc.vkd.mapMemory(gc.dev, stage_buffer.memory, 0, size, .{}); defer gc.vkd.unmapMemory(gc.dev, stage_buffer.memory); const gpu_memory = @ptrCast([*]u8, @alignCast(@alignOf(u8), data)); for (pixels[0..size]) |p, i| { gpu_memory[i] = p; } } const width = @intCast(u32, tex_width); const height = @intCast(u32, tex_height); const 
image = try TextureImage.init( gc, width, height, .r8g8b8a8_srgb, .optimal, .{ .transfer_dst_bit = true, .sampled_bit = true }, .{ .device_local_bit = true }, ); // TODO: continue with layout transitions and do not deinit the image here try transitionImageLayout(gc, pool, image.image, .r8g8b8a8_srgb, .@"undefined", .transfer_dst_optimal); try copyBufferToImage(gc, pool, stage_buffer.buffer, image.image, width, height); try transitionImageLayout( gc, pool, image.image, .r8g8b8a8_srgb, .transfer_dst_optimal, .shader_read_only_optimal, ); return image; } fn transitionImageLayout( gc: GraphicsContext, pool: vk.CommandPool, image: vk.Image, format: vk.Format, old_layout: vk.ImageLayout, new_layout: vk.ImageLayout, ) !void { // TODO: format _ = format; const TransferType = enum { @"undefined", transfer_dst_optimal, unsupported, }; const cmdbuf = try beginSingleTimeCommand(gc, pool); const transfer_type: TransferType = blk: { if (old_layout == .@"undefined" and new_layout == .transfer_dst_optimal) break :blk .@"undefined"; if (old_layout == .transfer_dst_optimal and new_layout == .shader_read_only_optimal) break :blk .transfer_dst_optimal; break :blk .unsupported; }; assert(transfer_type != .unsupported); const imb = vk.ImageMemoryBarrier{ .src_access_mask = if (transfer_type == .@"undefined") .{} else .{ .transfer_write_bit = true }, .dst_access_mask = if (transfer_type == .@"undefined") .{ .transfer_write_bit = true } else .{ .shader_read_bit = true }, .old_layout = old_layout, .new_layout = new_layout, .src_queue_family_index = vk.QUEUE_FAMILY_IGNORED, .dst_queue_family_index = vk.QUEUE_FAMILY_IGNORED, .image = image, .subresource_range = .{ .aspect_mask = .{ .color_bit = true }, .base_mip_level = 0, .level_count = 1, .base_array_layer = 0, .layer_count = 1, }, }; gc.vkd.cmdPipelineBarrier( cmdbuf, if (transfer_type == .@"undefined") .{ .top_of_pipe_bit = true } else .{ .transfer_bit = true }, if (transfer_type == .@"undefined") .{ .transfer_bit = true } else .{ .fragment_shader_bit = true }, .{}, 0, undefined, 0, undefined, 1, @ptrCast([*]const vk.ImageMemoryBarrier, &imb), ); try endSingleTimeCommands(gc, pool, cmdbuf); } fn copyBufferToImage( gc: GraphicsContext, pool: vk.CommandPool, buffer: vk.Buffer, image: vk.Image, width: u32, height: u32, ) !void { const cmdbuf = try beginSingleTimeCommand(gc, pool); const bic = vk.BufferImageCopy{ .buffer_offset = 0, .buffer_row_length = 0, .buffer_image_height = 0, .image_subresource = .{ .aspect_mask = .{ .color_bit = true }, .mip_level = 0, .base_array_layer = 0, .layer_count = 1, }, .image_offset = .{ .x = 0, .y = 0, .z = 0 }, .image_extent = .{ .width = width, .height = height, .depth = 1 }, }; gc.vkd.cmdCopyBufferToImage( cmdbuf, buffer, image, .transfer_dst_optimal, 1, @ptrCast([*]const vk.BufferImageCopy, &bic), ); try endSingleTimeCommands(gc, pool, cmdbuf); } fn beginSingleTimeCommand( gc: GraphicsContext, pool: vk.CommandPool, ) !vk.CommandBuffer { var cmdbuf: vk.CommandBuffer = undefined; try gc.vkd.allocateCommandBuffers(gc.dev, &.{ .command_pool = pool, .level = .primary, .command_buffer_count = 1, }, @ptrCast([*]vk.CommandBuffer, &cmdbuf)); try gc.vkd.beginCommandBuffer(cmdbuf, &.{ .flags = .{ .one_time_submit_bit = true }, .p_inheritance_info = null, }); return cmdbuf; } fn endSingleTimeCommands(gc: GraphicsContext, pool: vk.CommandPool, cmdbuf: vk.CommandBuffer) !void { defer gc.vkd.freeCommandBuffers(gc.dev, pool, 1, @ptrCast([*]const vk.CommandBuffer, &cmdbuf)); try gc.vkd.endCommandBuffer(cmdbuf); const si = vk.SubmitInfo{ .wait_semaphore_count
= 0, .p_wait_semaphores = undefined, .p_wait_dst_stage_mask = undefined, .command_buffer_count = 1, .p_command_buffers = @ptrCast([*]const vk.CommandBuffer, &cmdbuf), .signal_semaphore_count = 0, .p_signal_semaphores = undefined, }; try gc.vkd.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast([*]const vk.SubmitInfo, &si), .null_handle); try gc.vkd.queueWaitIdle(gc.graphics_queue.handle); } fn createDepthResources(gc: GraphicsContext, extent: vk.Extent2D) !DepthImage { const depth_format = DepthImage.findDepthFormat(gc).?; const depth_image = try DepthImage.init( gc, extent.width, extent.height, depth_format, .optimal, .{ .depth_stencil_attachment_bit = true }, .{ .device_local_bit = true }, ); return depth_image; // TODO: Explicitly transitioning the depth image // try transitionImageLayout(gc, pool, image.image, .r8g8b8a8_srgb, .@"undefined", .transfer_dst_optimal); } fn loadScene( arena: Allocator, all_meshes: *std.ArrayList(Mesh), all_vertices: *std.ArrayList(Vertex), all_indices: *std.ArrayList(u32), path: []const u8, ) void { var indices = std.ArrayList(u32).init(arena); var positions = std.ArrayList(Vec3).init(arena); // var normals = std.ArrayList(Vec3).init(arena); var texcoords0 = std.ArrayList(Vec2).init(arena); // var tangents = std.ArrayList(Vec4).init(arena); const data = parseAndLoadGltfFile(path); defer c.cgltf_free(data); const num_meshes = @intCast(u32, data.meshes_count); var mesh_index: u32 = 0; while (mesh_index < num_meshes) : (mesh_index += 1) { const num_prims = @intCast(u32, data.meshes[mesh_index].primitives_count); var prim_index: u32 = 0; while (prim_index < num_prims) : (prim_index += 1) { const pre_indices_len = indices.items.len; const pre_positions_len = positions.items.len; appendMeshPrimitive(data, mesh_index, prim_index, &indices, &positions, null, &texcoords0); all_meshes.append(.{ .index_offset = @intCast(u32, pre_indices_len), .vertex_offset = @intCast(u32, pre_positions_len), .num_indices = @intCast(u32, indices.items.len - pre_indices_len), .num_vertices = @intCast(u32, positions.items.len - pre_positions_len), }) catch unreachable; } } all_indices.ensureTotalCapacity(indices.items.len) catch unreachable; for (indices.items) |index| { all_indices.appendAssumeCapacity(index); } all_vertices.ensureTotalCapacity(positions.items.len) catch unreachable; for (positions.items) |_, index| { all_vertices.appendAssumeCapacity(.{ // .pos = positions.items[index].scale(0.08), // NOTE(mziulek): Sponza requires scaling. .pos = positions.items[index], // .normal = normals.items[index], .tex_coord = texcoords0.items[index], // .tangent = tangents.items[index], }); } } fn parseAndLoadGltfFile(gltf_path: []const u8) *c.cgltf_data { var data: *c.cgltf_data = undefined; const options = std.mem.zeroes(c.cgltf_options); // Parse. { const result = c.cgltf_parse_file(&options, gltf_path.ptr, @ptrCast([*c][*c]c.cgltf_data, &data)); assert(result == c.cgltf_result_success); } // Load. 
{ const result = c.cgltf_load_buffers(&options, data, gltf_path.ptr); assert(result == c.cgltf_result_success); } return data; } fn appendMeshPrimitive( data: *c.cgltf_data, mesh_index: u32, prim_index: u32, indices: *std.ArrayList(u32), positions: *std.ArrayList(Vec3), normals: ?*std.ArrayList(Vec3), texcoords0: ?*std.ArrayList(Vec2), // tangents: ?*std.ArrayList(Vec4), ) void { assert(mesh_index < data.meshes_count); assert(prim_index < data.meshes[mesh_index].primitives_count); const num_vertices: u32 = @intCast(u32, data.meshes[mesh_index].primitives[prim_index].attributes[0].data.*.count); const num_indices: u32 = @intCast(u32, data.meshes[mesh_index].primitives[prim_index].indices.*.count); // Indices. { indices.ensureTotalCapacity(indices.items.len + num_indices) catch unreachable; const accessor = data.meshes[mesh_index].primitives[prim_index].indices; assert(accessor.*.buffer_view != null); assert(accessor.*.stride == accessor.*.buffer_view.*.stride or accessor.*.buffer_view.*.stride == 0); assert((accessor.*.stride * accessor.*.count) == accessor.*.buffer_view.*.size); assert(accessor.*.buffer_view.*.buffer.*.data != null); const data_addr = @alignCast(4, @ptrCast([*]const u8, accessor.*.buffer_view.*.buffer.*.data) + accessor.*.offset + accessor.*.buffer_view.*.offset); if (accessor.*.stride == 1) { assert(accessor.*.component_type == c.cgltf_component_type_r_8u); const src = @ptrCast([*]const u8, data_addr); var i: u32 = 0; while (i < num_indices) : (i += 1) { indices.appendAssumeCapacity(src[i]); } } else if (accessor.*.stride == 2) { assert(accessor.*.component_type == c.cgltf_component_type_r_16u); const src = @ptrCast([*]const u16, data_addr); var i: u32 = 0; while (i < num_indices) : (i += 1) { indices.appendAssumeCapacity(src[i]); } } else if (accessor.*.stride == 4) { assert(accessor.*.component_type == c.cgltf_component_type_r_32u); const src = @ptrCast([*]const u32, data_addr); var i: u32 = 0; while (i < num_indices) : (i += 1) { indices.appendAssumeCapacity(src[i]); } } else { unreachable; } } // Attributes. 
{ positions.resize(positions.items.len + num_vertices) catch unreachable; if (normals != null) normals.?.resize(normals.?.items.len + num_vertices) catch unreachable; if (texcoords0 != null) texcoords0.?.resize(texcoords0.?.items.len + num_vertices) catch unreachable; // if (tangents != null) tangents.?.resize(tangents.?.items.len + num_vertices) catch unreachable; const num_attribs: u32 = @intCast(u32, data.meshes[mesh_index].primitives[prim_index].attributes_count); var attrib_index: u32 = 0; while (attrib_index < num_attribs) : (attrib_index += 1) { const attrib = &data.meshes[mesh_index].primitives[prim_index].attributes[attrib_index]; const accessor = attrib.data; assert(accessor.*.buffer_view != null); assert(accessor.*.stride == accessor.*.buffer_view.*.stride or accessor.*.buffer_view.*.stride == 0); assert((accessor.*.stride * accessor.*.count) == accessor.*.buffer_view.*.size); assert(accessor.*.buffer_view.*.buffer.*.data != null); const data_addr = @ptrCast([*]const u8, accessor.*.buffer_view.*.buffer.*.data) + accessor.*.offset + accessor.*.buffer_view.*.offset; if (attrib.*.type == c.cgltf_attribute_type_position) { assert(accessor.*.type == c.cgltf_type_vec3); assert(accessor.*.component_type == c.cgltf_component_type_r_32f); @memcpy( @ptrCast([*]u8, &positions.items[positions.items.len - num_vertices]), data_addr, accessor.*.count * accessor.*.stride, ); } else if (attrib.*.type == c.cgltf_attribute_type_normal and normals != null) { assert(accessor.*.type == c.cgltf_type_vec3); assert(accessor.*.component_type == c.cgltf_component_type_r_32f); @memcpy( @ptrCast([*]u8, &normals.?.items[normals.?.items.len - num_vertices]), data_addr, accessor.*.count * accessor.*.stride, ); } else if (attrib.*.type == c.cgltf_attribute_type_texcoord and texcoords0 != null) { assert(accessor.*.type == c.cgltf_type_vec2); assert(accessor.*.component_type == c.cgltf_component_type_r_32f); @memcpy( @ptrCast([*]u8, &texcoords0.?.items[texcoords0.?.items.len - num_vertices]), data_addr, accessor.*.count * accessor.*.stride, ); } // else if (attrib.*.type == c.cgltf_attribute_type_tangent and tangents != null) { // assert(accessor.*.type == c.cgltf_type_vec4); // assert(accessor.*.component_type == c.cgltf_component_type_r_32f); // @memcpy( // @ptrCast([*]u8, &tangents.?.items[tangents.?.items.len - num_vertices]), // data_addr, // accessor.*.count * accessor.*.stride, // ); // } } } }
src/triangle.zig
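The staging upload in createTextureImage above copies the decoded stbi pixels into mapped memory one byte at a time. A minimal, self-contained sketch of the same byte-for-byte copy, using std.mem.copy instead of a hand-written loop (the buffers below are illustrative stand-ins, not the real mapped Vulkan memory; assumes a ~0.9-era Zig toolchain):

const std = @import("std");

test "staging copy: byte-for-byte upload into a mapped buffer" {
    // Stand-ins for the decoded stbi pixels and the mapped staging memory.
    const pixels = [_]u8{ 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf1 };
    var mapped: [pixels.len]u8 = undefined;

    // Same effect as the element-wise loop in createTextureImage.
    std.mem.copy(u8, mapped[0..], pixels[0..]);

    try std.testing.expectEqualSlices(u8, pixels[0..], mapped[0..]);
}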
const std = @import("std"); const builtin = @import("builtin"); const panic = std.debug.panic; usingnamespace @import("c.zig"); // settings const SCR_WIDTH: u32 = 1920; const SCR_HEIGHT: u32 = 1080; pub fn main() void { const ok = glfwInit(); if (ok == 0) { panic("Failed to initialise GLFW\n", .{}); } defer glfwTerminate(); glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3); glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); if (builtin.os.tag == .macosx) { glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); } // glfw: initialize and configure var window = glfwCreateWindow(SCR_WIDTH, SCR_HEIGHT, "Learn OpenGL", null, null); if (window == null) { panic("Failed to create GLFW window\n", .{}); } glfwMakeContextCurrent(window); const resizeCallback = glfwSetFramebufferSizeCallback(window, framebuffer_size_callback); // glad: load all OpenGL function pointers if (gladLoadGLLoader(@ptrCast(GLADloadproc, glfwGetProcAddress)) == 0) { panic("Failed to initialise GLAD\n", .{}); } // render loop while (glfwWindowShouldClose(window) == 0) { // input processInput(window); // render glClearColor(0.2, 0.3, 0.3, 1.0); glClear(GL_COLOR_BUFFER_BIT); // glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.) glfwSwapBuffers(window); glfwPollEvents(); } } // process all input: query GLFW whether relevant keys are pressed/released this frame and react accordingly pub fn processInput(window: ?*GLFWwindow) callconv(.C) void { if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS) glfwSetWindowShouldClose(window, 1); } // glfw: whenever the window size changed (by OS or user resize) this callback function executes pub fn framebuffer_size_callback(window: ?*GLFWwindow, width: c_int, height: c_int) callconv(.C) void { // make sure the viewport matches the new window dimensions; note that width and // height will be significantly larger than specified on retina displays. glViewport(0, 0, width, height); }
src/1_1_hello_window.zig
const std = @import("std"); const root = @import("root"); const Request = @import("Request.zig"); const Response = @import("Response.zig"); const MimeType = @import("MimeType.zig"); const net = std.net; const Allocator = std.mem.Allocator; const atomic = std.atomic; /// Scoped logging, prepending [lemon_pie] to our logs. const log = std.log.scoped(.lemon_pie); /// User API function signature of a request handler pub const Handle = fn handle(*Response, Request) anyerror!void; /// Allows users to set the max buffer size before we allocate memory on the heap to store our data const max_buffer_size = blk: { const given = if (@hasDecl(root, "buffer_size")) root.buffer_size else 1024 * 64; // 64kB (16 pages) break :blk std.math.min(given, 1024 * 1024 * 16); // max stack size (16MB) }; /// Initializes a new `Server` instance and starts listening for new connections. /// On exit, it will clean up any resources that were allocated. /// /// This will use default options when initializing a `Server`, apart from the address. /// /// If more options are required, such as the maximum number of connections, the ability to quit, etc, /// then initialize a new `Server` manually by calling `run` and providing the options. pub fn listenAndServe( gpa: *Allocator, /// Address the server will bind to. address: net.Address, /// User-defined handler that provides access to a `Response` /// and a parsed-and-validated `Request`. comptime handler: Handle, ) !void { try (Server.init()).run(gpa, address, .{ .reuse_address = true }, handler); } /// The server handles the connection between the host and the clients. /// Ensures requests are valid before dispatching to the user and provides /// safe response handling. pub const Server = struct { should_quit: atomic.Atomic(bool), /// Options to control the server pub const Options = struct { /// When false, disallows reusing an address until full exit. reuse_address: bool = false, /// Maximum number of connections before clients receive "connection refused". max_connections: u32 = 128, }; /// Initializes a new `Server` pub fn init() Server { return .{ .should_quit = atomic.Atomic(bool).init(false) }; } /// Tell the server to shut down gracefully pub fn shutdown(self: *Server) void { self.should_quit.store(true, .SeqCst); } /// Starts the server by listening for new connections. /// At shutdown, it will clean up any resources that were allocated. pub fn run( self: *Server, /// Allocations are required when the response body is larger /// than `max_buffer_size`. gpa: *Allocator, /// Address to listen on address: net.Address, /// Options to fine-tune the server comptime options: Options, /// User-defined handler that provides access to a `Response` /// and a parsed-and-validated `Request`.
comptime handler: Handle, ) !void { var clients: [options.max_connections]Client(handler) = undefined; var client_count: u32 = 0; // initialize our tcp server and start listening var stream = net.StreamServer.init(.{ .reuse_address = options.reuse_address, .kernel_backlog = options.max_connections, }); try stream.listen(address); // Make sure to await any open connections defer while (client_count > 0) : (client_count -= 1) { await clients[client_count - 1].frame; } else stream.deinit(); // main loop, awaits new connections and dispatches them while (!self.should_quit.load(.SeqCst)) { var connection = stream.accept() catch |err| switch (err) { error.ConnectionResetByPeer, error.ConnectionAborted, => { log.err("Could not accept connection: '{s}'", .{@errorName(err)}); continue; }, else => |e| return e, }; // initialize our client in the clients array, so the frame awaited on shutdown // is the frame that actually runs. clients[client_count] = .{ .stream = connection.stream, .frame = undefined, }; const client = &clients[client_count]; client_count += 1; client.frame = async client.run(gpa); } } }; /// Generates a generic `Client` type, providing access to a user-defined `Handle` fn Client(comptime handler: Handle) type { return struct { const Self = @This(); /// Reference to its own run function's frame, ensures its /// lifetime extends to `Client`'s own lifetime. frame: @Frame(run), /// Connection with the client stream: net.Stream, /// Wraps `serve` by catching its error and logging it to stderr. /// Will also print the error trace in debug modes. /// NOTE: It will not shut down the server on an error, it will simply close /// the connection with the client. fn run(self: *Self, gpa: *Allocator) void { self.serve(gpa) catch |err| { log.err("An error occurred handling the request: '{s}'", .{@errorName(err)}); if (@errorReturnTrace()) |trace| { std.debug.dumpStackTrace(trace.*); } }; } /// Handles the request and response of a transaction. /// It will first parse and validate the request. /// On success it will call the user-defined handle to allow /// the user to customize the response. `serve` ensures the response /// is sent to the client by checking if `is_flushed` is set on the `Response`. fn serve(self: *Self, gpa: *Allocator) !void { // After each transaction, we close the connection. defer self.stream.close(); const buffered_writer = std.io.bufferedWriter(self.stream.writer()); var body_writer = std.ArrayList(u8).init(gpa); defer body_writer.deinit(); var response = Response{ .buffered_writer = buffered_writer, .body = body_writer.writer(), }; var request_buf: [1026]u8 = undefined; const request = Request.parse(self.stream.reader(), &request_buf) catch |err| switch (err) { error.EndOfStream, error.ConnectionResetByPeer, error.ConnectionTimedOut, => return, // connection was closed/timed out. error.BufferTooSmall => unreachable, error.MissingCRLF, error.MissingUri, error.UriTooLong, => { // Client has sent an invalid request. Send a response to inform them and close // the connection. try response.writeHeader(.bad_request, "Malformed request"); return; }, else => |e| { // Unhandleable error, simply return it and log it. // But do attempt to send a temporary failure response. try response.writeHeader(.temporary_failure, "Unexpected error. Retry later."); return e; }, }; // call user-defined handle. handler(&response, request) catch |err| { // An error occurred in the user's function. As we do not know the reason it failed, // simply tell the client to try again later. try response.writeHeader(.temporary_failure, "Unexpected error.
Retry later."); return err; }; // Ensure the response is sent to the client. if (!response.is_flushed) { try response.flush(MimeType.fromExtension(".gmi")); } } }; } test "Full transaction" { // We rely on multi-threading for the test if (std.builtin.single_threaded) return error.SkipZigTest; const ally = std.testing.allocator; const addr = try net.Address.parseIp("0.0.0.0", 8081); var server = Server.init(); const ServerThread = struct { var _addr: net.Address = undefined; fn index(response: *Response, req: Request) !void { _ = req; try response.body.writeAll("Hello, world!"); } fn runServer(ctx: *Server) !void { try ctx.run(ally, _addr, .{ .reuse_address = true }, index); } }; ServerThread._addr = addr; const thread = try std.Thread.spawn(.{}, ServerThread.runServer, .{&server}); errdefer server.shutdown(); var stream = while (true) { var conn = net.tcpConnectToAddress(addr) catch |err| switch (err) { error.ConnectionRefused => continue, else => |e| return e, }; break conn; } else unreachable; errdefer stream.close(); // tell server to shutdown // Will finish the current request and then shutdown server.shutdown(); try stream.writer().writeAll("gemini://localhost\r\n"); var buf: [1024]u8 = undefined; const len = try stream.reader().read(&buf); stream.close(); thread.join(); const content = buf[0..len]; try std.testing.expectEqualStrings("20", buf[0..2]); const body_index = std.mem.indexOf(u8, content, "\r\n").?; try std.testing.expectEqualStrings("Hello, world!", content[body_index + 2 ..]); }
src/server.zig
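The max_buffer_size block in the file above lets the root file override a default buffer size and clamps the result. A small sketch of that comptime-override pattern, with the root module replaced by an arbitrary container so it can run under zig test (bufferSize, Overrides and fallback are illustrative names, not part of the file; assumes a ~0.9-era toolchain):

const std = @import("std");

/// Prefer a `buffer_size` declaration on the given container, otherwise use the
/// fallback, then clamp to 16 MiB, the same shape as `max_buffer_size` above.
fn bufferSize(comptime Overrides: type, comptime fallback: usize) usize {
    const given = if (@hasDecl(Overrides, "buffer_size")) Overrides.buffer_size else fallback;
    return std.math.min(given, 1024 * 1024 * 16);
}

test "override is respected and clamped" {
    const WithOverride = struct {
        pub const buffer_size: usize = 1024;
    };
    const WithoutOverride = struct {};
    const HugeOverride = struct {
        pub const buffer_size: usize = 1024 * 1024 * 1024;
    };

    try std.testing.expect(bufferSize(WithOverride, 64 * 1024) == 1024);
    try std.testing.expect(bufferSize(WithoutOverride, 64 * 1024) == 64 * 1024);
    try std.testing.expect(bufferSize(HugeOverride, 64 * 1024) == 16 * 1024 * 1024);
}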
const View = @import("View.zig"); pub const AttachMode = enum { top, bottom, }; /// A specialized doubly-linked stack that allows for filtered iteration /// over the nodes. T must be View or *View. pub fn ViewStack(comptime T: type) type { if (!(T == View or T == *View)) { @compileError("ViewStack: T must be View or *View"); } return struct { const Self = @This(); pub const Node = struct { /// Previous/next nodes in the stack prev: ?*Node, next: ?*Node, /// The view stored in this node view: T, }; /// Top/bottom nodes in the stack first: ?*Node = null, last: ?*Node = null, /// Add a node to the top of the stack. pub fn push(self: *Self, new_node: *Node) void { // Set the prev/next pointers of the new node new_node.prev = null; new_node.next = self.first; if (self.first) |first| { // If the list is not empty, set the prev pointer of the current // first node to the new node. first.prev = new_node; } else { // If the list is empty set the last pointer to the new node. self.last = new_node; } // Set the first pointer to the new node self.first = new_node; } /// Add a node to the bottom of the stack. pub fn append(self: *Self, new_node: *Node) void { // Set the prev/next pointers of the new node new_node.prev = self.last; new_node.next = null; if (self.last) |last| { // If the list is not empty, set the next pointer of the current // first node to the new node. last.next = new_node; } else { // If the list is empty set the first pointer to the new node. self.first = new_node; } // Set the last pointer to the new node self.last = new_node; } /// Attach a node into the viewstack based on the attach mode pub fn attach(self: *Self, new_node: *Node, mode: AttachMode) void { switch (mode) { .top => self.push(new_node), .bottom => self.append(new_node), } } /// Remove a node from the view stack. This removes it from the stack of /// all views as well as the stack of visible ones. pub fn remove(self: *Self, target_node: *Node) void { // Set the previous node/list head to the next pointer if (target_node.prev) |prev_node| { prev_node.next = target_node.next; } else { self.first = target_node.next; } // Set the next node/list tail to the previous pointer if (target_node.next) |next_node| { next_node.prev = target_node.prev; } else { self.last = target_node.prev; } } /// Swap the nodes a and b. /// pointers to Node.T will point to the same data as before pub fn swap(self: *Self, a: *Node, b: *Node) void { // Set self.first and self.last const first = self.first; const last = self.last; if (a == first) { self.first = b; } else if (a == last) { self.last = b; } if (b == first) { self.first = a; } else if (b == last) { self.last = a; } // This is so complicated to make sure everything works when a and b are neighbors const a_next = if (b.next == a) b else b.next; const a_prev = if (b.prev == a) b else b.prev; const b_next = if (a.next == b) a else a.next; const b_prev = if (a.prev == b) a else a.prev; a.next = a_next; a.prev = a_prev; b.next = b_next; b.prev = b_prev; // Update all neighbors if (a.next) |next| { next.prev = a; } if (a.prev) |prev| { prev.next = a; } if (b.next) |next| { next.prev = b; } if (b.prev) |prev| { prev.next = b; } } const Direction = enum { forward, reverse, }; fn Iter(comptime Context: type) type { return struct { it: ?*Node, dir: Direction, context: Context, filter: fn (*View, Context) bool, /// Returns the next node in iteration order which passes the /// filter, or null if done. 
pub fn next(self: *@This()) ?*View { return while (self.it) |node| : (self.it = if (self.dir == .forward) node.next else node.prev) { const view = if (T == View) &node.view else node.view; if (self.filter(view, self.context)) { self.it = if (self.dir == .forward) node.next else node.prev; break view; } } else null; } }; } /// Return a filtered iterator over the stack given a start node, /// iteration direction, and filter function. Views for which the /// filter function returns false will be skipped. pub fn iter( start: ?*Node, dir: Direction, context: anytype, filter: fn (*View, @TypeOf(context)) bool, ) Iter(@TypeOf(context)) { return .{ .it = start, .dir = dir, .context = context, .filter = filter }; } }; } test "push/remove (*View)" { const testing = @import("std").testing; const allocator = testing.allocator; var views = ViewStack(*View){}; const one = try allocator.create(ViewStack(*View).Node); defer allocator.destroy(one); const two = try allocator.create(ViewStack(*View).Node); defer allocator.destroy(two); const three = try allocator.create(ViewStack(*View).Node); defer allocator.destroy(three); const four = try allocator.create(ViewStack(*View).Node); defer allocator.destroy(four); const five = try allocator.create(ViewStack(*View).Node); defer allocator.destroy(five); views.push(three); // {3} views.push(one); // {1, 3} views.push(four); // {4, 1, 3} views.push(five); // {5, 4, 1, 3} views.push(two); // {2, 5, 4, 1, 3} // Simple insertion { var it = views.first; try testing.expect(it == two); it = it.?.next; try testing.expect(it == five); it = it.?.next; try testing.expect(it == four); it = it.?.next; try testing.expect(it == one); it = it.?.next; try testing.expect(it == three); it = it.?.next; try testing.expect(it == null); try testing.expect(views.first == two); try testing.expect(views.last == three); } // Removal of first views.remove(two); { var it = views.first; try testing.expect(it == five); it = it.?.next; try testing.expect(it == four); it = it.?.next; try testing.expect(it == one); it = it.?.next; try testing.expect(it == three); it = it.?.next; try testing.expect(it == null); try testing.expect(views.first == five); try testing.expect(views.last == three); } // Removal of last views.remove(three); { var it = views.first; try testing.expect(it == five); it = it.?.next; try testing.expect(it == four); it = it.?.next; try testing.expect(it == one); it = it.?.next; try testing.expect(it == null); try testing.expect(views.first == five); try testing.expect(views.last == one); } // Remove from middle views.remove(four); { var it = views.first; try testing.expect(it == five); it = it.?.next; try testing.expect(it == one); it = it.?.next; try testing.expect(it == null); try testing.expect(views.first == five); try testing.expect(views.last == one); } // Reinsertion views.push(two); views.push(three); views.push(four); { var it = views.first; try testing.expect(it == four); it = it.?.next; try testing.expect(it == three); it = it.?.next; try testing.expect(it == two); it = it.?.next; try testing.expect(it == five); it = it.?.next; try testing.expect(it == one); it = it.?.next; try testing.expect(it == null); try testing.expect(views.first == four); try testing.expect(views.last == one); } // Clear views.remove(four); views.remove(two); views.remove(three); views.remove(one); views.remove(five); try testing.expect(views.first == null); try testing.expect(views.last == null); } test "iteration (View)" { const std = @import("std"); const testing = std.testing; const 
allocator = testing.allocator; const filters = struct { fn all(view: *View, context: void) bool { return true; } fn none(view: *View, context: void) bool { return false; } fn current(view: *View, filter_tags: u32) bool { return view.current.tags & filter_tags != 0; } }; var views = ViewStack(View){}; const one_a_pb = try allocator.create(ViewStack(View).Node); defer allocator.destroy(one_a_pb); one_a_pb.view.current.tags = 1 << 0; one_a_pb.view.pending.tags = 1 << 1; const two_a = try allocator.create(ViewStack(View).Node); defer allocator.destroy(two_a); two_a.view.current.tags = 1 << 0; two_a.view.pending.tags = 1 << 0; const three_b_pa = try allocator.create(ViewStack(View).Node); defer allocator.destroy(three_b_pa); three_b_pa.view.current.tags = 1 << 1; three_b_pa.view.pending.tags = 1 << 0; const four_b = try allocator.create(ViewStack(View).Node); defer allocator.destroy(four_b); four_b.view.current.tags = 1 << 1; four_b.view.pending.tags = 1 << 1; const five_b = try allocator.create(ViewStack(View).Node); defer allocator.destroy(five_b); five_b.view.current.tags = 1 << 1; five_b.view.pending.tags = 1 << 1; views.push(three_b_pa); // {3} views.push(one_a_pb); // {1, 3} views.push(four_b); // {4, 1, 3} views.push(five_b); // {5, 4, 1, 3} views.push(two_a); // {2, 5, 4, 1, 3} // Iteration over all views { var it = ViewStack(View).iter(views.first, .forward, {}, filters.all); try testing.expect(it.next() == &two_a.view); try testing.expect(it.next() == &five_b.view); try testing.expect(it.next() == &four_b.view); try testing.expect(it.next() == &one_a_pb.view); try testing.expect(it.next() == &three_b_pa.view); try testing.expect(it.next() == null); } // Iteration over no views { var it = ViewStack(View).iter(views.first, .forward, {}, filters.none); try testing.expect(it.next() == null); } // Iteration over 'a' tags { var it = ViewStack(View).iter(views.first, .forward, @as(u32, 1 << 0), filters.current); try testing.expect(it.next() == &two_a.view); try testing.expect(it.next() == &one_a_pb.view); try testing.expect(it.next() == null); } // Iteration over 'b' tags { var it = ViewStack(View).iter(views.first, .forward, @as(u32, 1 << 1), filters.current); try testing.expect(it.next() == &five_b.view); try testing.expect(it.next() == &four_b.view); try testing.expect(it.next() == &three_b_pa.view); try testing.expect(it.next() == null); } // Reverse iteration over all views { var it = ViewStack(View).iter(views.last, .reverse, {}, filters.all); try testing.expect(it.next() == &three_b_pa.view); try testing.expect(it.next() == &one_a_pb.view); try testing.expect(it.next() == &four_b.view); try testing.expect(it.next() == &five_b.view); try testing.expect(it.next() == &two_a.view); try testing.expect(it.next() == null); } // Reverse iteration over no views { var it = ViewStack(View).iter(views.last, .reverse, {}, filters.none); try testing.expect(it.next() == null); } // Reverse iteration over 'a' tags { var it = ViewStack(View).iter(views.last, .reverse, @as(u32, 1 << 0), filters.current); try testing.expect(it.next() == &one_a_pb.view); try testing.expect(it.next() == &two_a.view); try testing.expect(it.next() == null); } // Reverse iteration over 'b' tags { var it = ViewStack(View).iter(views.last, .reverse, @as(u32, 1 << 1), filters.current); try testing.expect(it.next() == &three_b_pa.view); try testing.expect(it.next() == &four_b.view); try testing.expect(it.next() == &five_b.view); try testing.expect(it.next() == null); } // Swap, then iterate { var view_a = views.first orelse 
unreachable; var view_b = view_a.next orelse unreachable; ViewStack(View).swap(&views, view_a, view_b); // {2, 5, 4, 1, 3} -> {5, 2, 4, 1, 3} view_a = views.last orelse unreachable; view_b = view_a.prev orelse unreachable; ViewStack(View).swap(&views, view_a, view_b); // {5, 2, 4, 1, 3} -> {5, 2, 4, 3, 1} view_a = views.last orelse unreachable; view_b = views.first orelse unreachable; ViewStack(View).swap(&views, view_a, view_b); // {5, 2, 4, 3, 1} -> {1, 2, 4, 3, 5} view_a = views.first orelse unreachable; view_b = views.last orelse unreachable; ViewStack(View).swap(&views, view_a, view_b); // {1, 2, 4, 3, 5} -> {5, 2, 4, 3, 1} view_a = views.first orelse unreachable; view_a = view_a.next orelse unreachable; view_b = view_a.next orelse unreachable; view_b = view_b.next orelse unreachable; ViewStack(View).swap(&views, view_a, view_b); // {5, 2, 4, 3, 1} -> {5, 3, 4, 2, 1} var it = ViewStack(View).iter(views.first, .forward, {}, filters.all); try testing.expect(it.next() == &five_b.view); try testing.expect(it.next() == &three_b_pa.view); try testing.expect(it.next() == &four_b.view); try testing.expect(it.next() == &two_a.view); try testing.expect(it.next() == &one_a_pb.view); try testing.expect(it.next() == null); it = ViewStack(View).iter(views.last, .reverse, {}, filters.all); try testing.expect(it.next() == &one_a_pb.view); try testing.expect(it.next() == &two_a.view); try testing.expect(it.next() == &four_b.view); try testing.expect(it.next() == &three_b_pa.view); try testing.expect(it.next() == &five_b.view); try testing.expect(it.next() == null); } }
source/river-0.1.0/river/view_stack.zig
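ViewStack.iter above walks a linked stack and skips nodes rejected by a caller-supplied filter function plus context. The same shape can be exercised without any View or compositor types using a slice-backed iterator (FilterIter below is illustrative only; it mirrors the fn-typed filter field the file itself uses and targets the same ~0.9-era Zig):

const std = @import("std");

fn FilterIter(comptime Context: type) type {
    return struct {
        items: []const u32,
        index: usize = 0,
        context: Context,
        filter: fn (u32, Context) bool,

        /// Returns the next item accepted by the filter, or null when exhausted.
        pub fn next(self: *@This()) ?u32 {
            while (self.index < self.items.len) : (self.index += 1) {
                const item = self.items[self.index];
                if (self.filter(item, self.context)) {
                    self.index += 1;
                    return item;
                }
            }
            return null;
        }
    };
}

test "filtered iteration skips rejected items" {
    const Filters = struct {
        fn aboveThreshold(item: u32, threshold: u32) bool {
            return item > threshold;
        }
    };

    var it = FilterIter(u32){
        .items = &[_]u32{ 1, 5, 2, 7 },
        .context = 3,
        .filter = Filters.aboveThreshold,
    };

    try std.testing.expect(it.next().? == 5);
    try std.testing.expect(it.next().? == 7);
    try std.testing.expect(it.next() == null);
}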
const std = @import("std"); const builtin = @import("builtin"); const assert = std.debug.assert; const ast = std.zig.ast; const Token = std.zig.Token; usingnamespace @import("clang.zig"); pub const Mode = enum { import, translate, }; // TODO merge with Type.Fn.CallingConvention const CallingConvention = builtin.TypeInfo.CallingConvention; pub const ClangErrMsg = Stage2ErrorMsg; pub const Error = error{OutOfMemory}; const TypeError = Error || error{UnsupportedType}; const TransError = Error || error{UnsupportedTranslation}; const DeclTable = std.HashMap(usize, void, addrHash, addrEql); fn addrHash(x: usize) u32 { switch (@typeInfo(usize).Int.bits) { 32 => return x, // pointers are usually aligned so we ignore the bits that are probably all 0 anyway // usually the larger bits of addr space are unused so we just chop em off 64 => return @truncate(u32, x >> 4), else => @compileError("unreachable"), } } fn addrEql(a: usize, b: usize) bool { return a == b; } const Scope = struct { id: Id, parent: ?*Scope, const Id = enum { Switch, Var, Block, Root, While, }; const Switch = struct { base: Scope, }; const Var = struct { base: Scope, c_name: []const u8, zig_name: []const u8, }; const Block = struct { base: Scope, block_node: *ast.Node.Block, /// Don't forget to set rbrace token later fn create(c: *Context, parent: *Scope, lbrace_tok: ast.TokenIndex) !*Block { const block = try c.a().create(Block); block.* = Block{ .base = Scope{ .id = Id.Block, .parent = parent, }, .block_node = try c.a().create(ast.Node.Block), }; block.block_node.* = ast.Node.Block{ .base = ast.Node{ .id = ast.Node.Id.Block }, .label = null, .lbrace = lbrace_tok, .statements = ast.Node.Block.StatementList.init(c.a()), .rbrace = undefined, }; return block; } }; const Root = struct { base: Scope, }; const While = struct { base: Scope, }; }; const TransResult = struct { node: *ast.Node, node_scope: *Scope, child_scope: *Scope, }; const Context = struct { tree: *ast.Tree, source_buffer: *std.Buffer, err: Error, source_manager: *ZigClangSourceManager, decl_table: DeclTable, global_scope: *Scope.Root, mode: Mode, fn a(c: *Context) *std.mem.Allocator { return &c.tree.arena_allocator.allocator; } /// Convert a null-terminated C string to a slice allocated in the arena fn str(c: *Context, s: [*]const u8) ![]u8 { return std.mem.dupe(c.a(), u8, std.mem.toSliceConst(u8, s)); } /// Convert a clang source location to a file:line:column string fn locStr(c: *Context, loc: ZigClangSourceLocation) ![]u8 { const spelling_loc = ZigClangSourceManager_getSpellingLoc(c.source_manager, loc); const filename_c = ZigClangSourceManager_getFilename(c.source_manager, spelling_loc); const filename = if (filename_c) |s| try c.str(s) else ([]const u8)("(no file)"); const line = ZigClangSourceManager_getSpellingLineNumber(c.source_manager, spelling_loc); const column = ZigClangSourceManager_getSpellingColumnNumber(c.source_manager, spelling_loc); return std.fmt.allocPrint(c.a(), "{}:{}:{}", filename, line, column); } }; pub fn translate( backing_allocator: *std.mem.Allocator, args_begin: [*]?[*]const u8, args_end: [*]?[*]const u8, mode: Mode, errors: *[]ClangErrMsg, resources_path: [*]const u8, ) !*ast.Tree { const ast_unit = ZigClangLoadFromCommandLine( args_begin, args_end, &errors.ptr, &errors.len, resources_path, ) orelse { if (errors.len == 0) return error.OutOfMemory; return error.SemanticAnalyzeFail; }; defer ZigClangASTUnit_delete(ast_unit); var tree_arena = std.heap.ArenaAllocator.init(backing_allocator); errdefer tree_arena.deinit(); const tree = try 
tree_arena.allocator.create(ast.Tree); tree.* = ast.Tree{ .source = undefined, // need to use Buffer.toOwnedSlice later .root_node = undefined, .arena_allocator = tree_arena, .tokens = undefined, // can't reference the allocator yet .errors = undefined, // can't reference the allocator yet }; const arena = &tree.arena_allocator.allocator; // now we can reference the allocator tree.tokens = ast.Tree.TokenList.init(arena); tree.errors = ast.Tree.ErrorList.init(arena); tree.root_node = try arena.create(ast.Node.Root); tree.root_node.* = ast.Node.Root{ .base = ast.Node{ .id = ast.Node.Id.Root }, .decls = ast.Node.Root.DeclList.init(arena), .doc_comments = null, // initialized with the eof token at the end .eof_token = undefined, }; var source_buffer = try std.Buffer.initSize(arena, 0); var context = Context{ .tree = tree, .source_buffer = &source_buffer, .source_manager = ZigClangASTUnit_getSourceManager(ast_unit), .err = undefined, .decl_table = DeclTable.init(arena), .global_scope = try arena.create(Scope.Root), .mode = mode, }; context.global_scope.* = Scope.Root{ .base = Scope{ .id = Scope.Id.Root, .parent = null, }, }; if (!ZigClangASTUnit_visitLocalTopLevelDecls(ast_unit, &context, declVisitorC)) { return context.err; } _ = try appendToken(&context, .Eof, ""); tree.source = source_buffer.toOwnedSlice(); if (false) { std.debug.warn("debug source:\n{}\n==EOF==\ntokens:\n", tree.source); var i: usize = 0; while (i < tree.tokens.len) : (i += 1) { const token = tree.tokens.at(i); std.debug.warn("{}\n", token); } } return tree; } extern fn declVisitorC(context: ?*c_void, decl: *const ZigClangDecl) bool { const c = @ptrCast(*Context, @alignCast(@alignOf(Context), context)); declVisitor(c, decl) catch |err| { c.err = err; return false; }; return true; } fn declVisitor(c: *Context, decl: *const ZigClangDecl) Error!void { switch (ZigClangDecl_getKind(decl)) { .Function => { return visitFnDecl(c, @ptrCast(*const ZigClangFunctionDecl, decl)); }, .Typedef => { try emitWarning(c, ZigClangDecl_getLocation(decl), "TODO implement translate-c for typedefs"); }, .Enum => { try emitWarning(c, ZigClangDecl_getLocation(decl), "TODO implement translate-c for enums"); }, .Record => { try emitWarning(c, ZigClangDecl_getLocation(decl), "TODO implement translate-c for structs"); }, .Var => { try emitWarning(c, ZigClangDecl_getLocation(decl), "TODO implement translate-c for variables"); }, else => { const decl_name = try c.str(ZigClangDecl_getDeclKindName(decl)); try emitWarning(c, ZigClangDecl_getLocation(decl), "ignoring {} declaration", decl_name); }, } } fn visitFnDecl(c: *Context, fn_decl: *const ZigClangFunctionDecl) Error!void { if (try c.decl_table.put(@ptrToInt(fn_decl), {})) |_| return; // Avoid processing this decl twice const rp = makeRestorePoint(c); const fn_name = try c.str(ZigClangDecl_getName_bytes_begin(@ptrCast(*const ZigClangDecl, fn_decl))); const fn_decl_loc = ZigClangFunctionDecl_getLocation(fn_decl); const fn_qt = ZigClangFunctionDecl_getType(fn_decl); const fn_type = ZigClangQualType_getTypePtr(fn_qt); var scope = &c.global_scope.base; const has_body = ZigClangFunctionDecl_hasBody(fn_decl); const storage_class = ZigClangFunctionDecl_getStorageClass(fn_decl); const decl_ctx = FnDeclContext{ .fn_name = fn_name, .has_body = has_body, .storage_class = storage_class, .scope = &scope, .is_export = switch (storage_class) { .None => has_body, .Extern, .Static => false, .PrivateExtern => return failDecl(c, fn_decl_loc, fn_name, "unsupported storage class: private extern"), .Auto => unreachable, // 
Not legal on functions .Register => unreachable, // Not legal on functions }, }; const proto_node = switch (ZigClangType_getTypeClass(fn_type)) { .FunctionProto => blk: { const fn_proto_type = @ptrCast(*const ZigClangFunctionProtoType, fn_type); break :blk transFnProto(rp, fn_proto_type, fn_decl_loc, decl_ctx) catch |err| switch (err) { error.UnsupportedType => { return failDecl(c, fn_decl_loc, fn_name, "unable to resolve prototype of function"); }, error.OutOfMemory => |e| return e, }; }, .FunctionNoProto => blk: { const fn_no_proto_type = @ptrCast(*const ZigClangFunctionType, fn_type); break :blk transFnNoProto(rp, fn_no_proto_type, fn_decl_loc, decl_ctx) catch |err| switch (err) { error.UnsupportedType => { return failDecl(c, fn_decl_loc, fn_name, "unable to resolve prototype of function"); }, error.OutOfMemory => |e| return e, }; }, else => unreachable, }; if (!decl_ctx.has_body) { const semi_tok = try appendToken(c, .Semicolon, ";"); return addTopLevelDecl(c, fn_name, &proto_node.base); } // actual function definition with body const body_stmt = ZigClangFunctionDecl_getBody(fn_decl); const result = transStmt(rp, scope, body_stmt, .unused, .r_value) catch |err| switch (err) { error.OutOfMemory => |e| return e, error.UnsupportedTranslation => return failDecl(c, fn_decl_loc, fn_name, "unable to translate function"), }; assert(result.node.id == ast.Node.Id.Block); proto_node.body_node = result.node; return addTopLevelDecl(c, fn_name, &proto_node.base); } const ResultUsed = enum { used, unused, }; const LRValue = enum { l_value, r_value, }; fn transStmt( rp: RestorePoint, scope: *Scope, stmt: *const ZigClangStmt, result_used: ResultUsed, lrvalue: LRValue, ) !TransResult { const sc = ZigClangStmt_getStmtClass(stmt); switch (sc) { .CompoundStmtClass => return transCompoundStmt(rp, scope, @ptrCast(*const ZigClangCompoundStmt, stmt)), else => { return revertAndWarn( rp, error.UnsupportedTranslation, ZigClangStmt_getBeginLoc(stmt), "TODO implement translation of stmt class {}", @tagName(sc), ); }, } } fn transCompoundStmtInline( rp: RestorePoint, parent_scope: *Scope, stmt: *const ZigClangCompoundStmt, block_node: *ast.Node.Block, ) TransError!TransResult { var it = ZigClangCompoundStmt_body_begin(stmt); const end_it = ZigClangCompoundStmt_body_end(stmt); var scope = parent_scope; while (it != end_it) : (it += 1) { const result = try transStmt(rp, scope, it.*, .unused, .r_value); scope = result.child_scope; try block_node.statements.push(result.node); } return TransResult{ .node = &block_node.base, .child_scope = scope, .node_scope = scope, }; } fn transCompoundStmt(rp: RestorePoint, scope: *Scope, stmt: *const ZigClangCompoundStmt) !TransResult { const lbrace_tok = try appendToken(rp.c, .LBrace, "{"); const block_scope = try Scope.Block.create(rp.c, scope, lbrace_tok); const inline_result = try transCompoundStmtInline(rp, &block_scope.base, stmt, block_scope.block_node); block_scope.block_node.rbrace = try appendToken(rp.c, .RBrace, "}"); return TransResult{ .node = &block_scope.block_node.base, .node_scope = inline_result.node_scope, .child_scope = inline_result.child_scope, }; } fn addTopLevelDecl(c: *Context, name: []const u8, decl_node: *ast.Node) !void { try c.tree.root_node.decls.push(decl_node); } fn transQualType(rp: RestorePoint, qt: ZigClangQualType, source_loc: ZigClangSourceLocation) TypeError!*ast.Node { return transType(rp, ZigClangQualType_getTypePtr(qt), source_loc); } fn qualTypeCanon(qt: ZigClangQualType) *const ZigClangType { const canon = 
ZigClangQualType_getCanonicalType(qt); return ZigClangQualType_getTypePtr(canon); } const RestorePoint = struct { c: *Context, token_index: ast.TokenIndex, src_buf_index: usize, fn activate(self: RestorePoint) void { self.c.tree.tokens.shrink(self.token_index); self.c.source_buffer.shrink(self.src_buf_index); } }; fn makeRestorePoint(c: *Context) RestorePoint { return RestorePoint{ .c = c, .token_index = c.tree.tokens.len, .src_buf_index = c.source_buffer.len(), }; } fn transType(rp: RestorePoint, ty: *const ZigClangType, source_loc: ZigClangSourceLocation) TypeError!*ast.Node { switch (ZigClangType_getTypeClass(ty)) { .Builtin => { const builtin_ty = @ptrCast(*const ZigClangBuiltinType, ty); switch (ZigClangBuiltinType_getKind(builtin_ty)) { .Void => return appendIdentifier(rp.c, "c_void"), .Bool => return appendIdentifier(rp.c, "bool"), .Char_U, .UChar, .Char_S, .Char8 => return appendIdentifier(rp.c, "u8"), .SChar => return appendIdentifier(rp.c, "i8"), .UShort => return appendIdentifier(rp.c, "c_ushort"), .UInt => return appendIdentifier(rp.c, "c_uint"), .ULong => return appendIdentifier(rp.c, "c_ulong"), .ULongLong => return appendIdentifier(rp.c, "c_ulonglong"), .Short => return appendIdentifier(rp.c, "c_short"), .Int => return appendIdentifier(rp.c, "c_int"), .Long => return appendIdentifier(rp.c, "c_long"), .LongLong => return appendIdentifier(rp.c, "c_longlong"), .UInt128 => return appendIdentifier(rp.c, "u128"), .Int128 => return appendIdentifier(rp.c, "i128"), .Float => return appendIdentifier(rp.c, "f32"), .Double => return appendIdentifier(rp.c, "f64"), .Float128 => return appendIdentifier(rp.c, "f128"), .Float16 => return appendIdentifier(rp.c, "f16"), .LongDouble => return appendIdentifier(rp.c, "c_longdouble"), else => return revertAndWarn(rp, error.UnsupportedType, source_loc, "unsupported builtin type"), } }, .FunctionProto => { const fn_proto_ty = @ptrCast(*const ZigClangFunctionProtoType, ty); const fn_proto = try transFnProto(rp, fn_proto_ty, source_loc, null); return &fn_proto.base; }, else => { const type_name = rp.c.str(ZigClangType_getTypeClassName(ty)); return revertAndWarn(rp, error.UnsupportedType, source_loc, "unsupported type: '{}'", type_name); }, } } const FnDeclContext = struct { fn_name: []const u8, has_body: bool, storage_class: ZigClangStorageClass, scope: **Scope, is_export: bool, }; fn transCC( rp: RestorePoint, fn_ty: *const ZigClangFunctionType, source_loc: ZigClangSourceLocation, ) !CallingConvention { const clang_cc = ZigClangFunctionType_getCallConv(fn_ty); switch (clang_cc) { .C => return CallingConvention.C, .X86StdCall => return CallingConvention.Stdcall, else => return revertAndWarn(rp, error.UnsupportedType, source_loc, "unsupported calling convention: {}", @tagName(clang_cc)), } } fn transFnProto( rp: RestorePoint, fn_proto_ty: *const ZigClangFunctionProtoType, source_loc: ZigClangSourceLocation, fn_decl_context: ?FnDeclContext, ) !*ast.Node.FnProto { const fn_ty = @ptrCast(*const ZigClangFunctionType, fn_proto_ty); const cc = try transCC(rp, fn_ty, source_loc); const is_var_args = ZigClangFunctionProtoType_isVariadic(fn_proto_ty); const param_count: usize = ZigClangFunctionProtoType_getNumParams(fn_proto_ty); var i: usize = 0; while (i < param_count) : (i += 1) { return revertAndWarn(rp, error.UnsupportedType, source_loc, "TODO: implement parameters for FunctionProto in transType"); } return finishTransFnProto(rp, fn_ty, source_loc, fn_decl_context, is_var_args, cc); } fn transFnNoProto( rp: RestorePoint, fn_ty: *const ZigClangFunctionType, 
source_loc: ZigClangSourceLocation, fn_decl_context: ?FnDeclContext, ) !*ast.Node.FnProto { const cc = try transCC(rp, fn_ty, source_loc); const is_var_args = if (fn_decl_context) |ctx| !ctx.is_export else true; return finishTransFnProto(rp, fn_ty, source_loc, fn_decl_context, is_var_args, cc); } fn finishTransFnProto( rp: RestorePoint, fn_ty: *const ZigClangFunctionType, source_loc: ZigClangSourceLocation, fn_decl_context: ?FnDeclContext, is_var_args: bool, cc: CallingConvention, ) !*ast.Node.FnProto { const is_export = if (fn_decl_context) |ctx| ctx.is_export else false; // TODO check for always_inline attribute // TODO check for align attribute // pub extern fn name(...) T const pub_tok = try appendToken(rp.c, .Keyword_pub, "pub"); const cc_tok = if (cc == .Stdcall) try appendToken(rp.c, .Keyword_stdcallcc, "stdcallcc") else null; const extern_export_inline_tok = if (is_export) try appendToken(rp.c, .Keyword_export, "export") else if (cc == .C) try appendToken(rp.c, .Keyword_extern, "extern") else null; const fn_tok = try appendToken(rp.c, .Keyword_fn, "fn"); const name_tok = if (fn_decl_context) |ctx| try appendToken(rp.c, .Identifier, ctx.fn_name) else null; const lparen_tok = try appendToken(rp.c, .LParen, "("); const var_args_tok = if (is_var_args) try appendToken(rp.c, .Ellipsis3, "...") else null; const rparen_tok = try appendToken(rp.c, .RParen, ")"); const return_type_node = blk: { if (ZigClangFunctionType_getNoReturnAttr(fn_ty)) { break :blk try appendIdentifier(rp.c, "noreturn"); } else { const return_qt = ZigClangFunctionType_getReturnType(fn_ty); if (ZigClangType_isVoidType(qualTypeCanon(return_qt))) { break :blk try appendIdentifier(rp.c, "void"); } else { break :blk transQualType(rp, return_qt, source_loc) catch |err| switch (err) { error.UnsupportedType => { try emitWarning(rp.c, source_loc, "unsupported function proto return type"); return err; }, error.OutOfMemory => |e| return e, }; } } }; const fn_proto = try rp.c.a().create(ast.Node.FnProto); fn_proto.* = ast.Node.FnProto{ .base = ast.Node{ .id = ast.Node.Id.FnProto }, .doc_comments = null, .visib_token = pub_tok, .fn_token = fn_tok, .name_token = name_tok, .params = ast.Node.FnProto.ParamList.init(rp.c.a()), .return_type = ast.Node.FnProto.ReturnType{ .Explicit = return_type_node }, .var_args_token = null, // TODO this field is broken in the AST data model .extern_export_inline_token = extern_export_inline_tok, .cc_token = cc_tok, .async_attr = null, .body_node = null, .lib_name = null, .align_expr = null, .section_expr = null, }; if (is_var_args) { const var_arg_node = try rp.c.a().create(ast.Node.ParamDecl); var_arg_node.* = ast.Node.ParamDecl{ .base = ast.Node{ .id = ast.Node.Id.ParamDecl }, .doc_comments = null, .comptime_token = null, .noalias_token = null, .name_token = null, .type_node = undefined, .var_args_token = var_args_tok, }; try fn_proto.params.push(&var_arg_node.base); } return fn_proto; } fn revertAndWarn( rp: RestorePoint, err: var, source_loc: ZigClangSourceLocation, comptime format: []const u8, args: ..., ) (@typeOf(err) || error{OutOfMemory}) { rp.activate(); try emitWarning(rp.c, source_loc, format, args); return err; } fn emitWarning(c: *Context, loc: ZigClangSourceLocation, comptime format: []const u8, args: ...) !void { _ = try appendTokenFmt(c, .LineComment, "// {}: warning: " ++ format, c.locStr(loc), args); } fn failDecl(c: *Context, loc: ZigClangSourceLocation, name: []const u8, comptime format: []const u8, args: ...) 
!void { // const name = @compileError(msg); const const_tok = try appendToken(c, .Keyword_const, "const"); const name_tok = try appendToken(c, .Identifier, name); const eq_tok = try appendToken(c, .Equal, "="); const builtin_tok = try appendToken(c, .Builtin, "@compileError"); const lparen_tok = try appendToken(c, .LParen, "("); const msg_tok = try appendTokenFmt(c, .StringLiteral, "\"" ++ format ++ "\"", args); const rparen_tok = try appendToken(c, .RParen, ")"); const semi_tok = try appendToken(c, .Semicolon, ";"); const msg_node = try c.a().create(ast.Node.StringLiteral); msg_node.* = ast.Node.StringLiteral{ .base = ast.Node{ .id = ast.Node.Id.StringLiteral }, .token = msg_tok, }; const call_node = try c.a().create(ast.Node.BuiltinCall); call_node.* = ast.Node.BuiltinCall{ .base = ast.Node{ .id = ast.Node.Id.BuiltinCall }, .builtin_token = builtin_tok, .params = ast.Node.BuiltinCall.ParamList.init(c.a()), .rparen_token = rparen_tok, }; try call_node.params.push(&msg_node.base); const var_decl_node = try c.a().create(ast.Node.VarDecl); var_decl_node.* = ast.Node.VarDecl{ .base = ast.Node{ .id = ast.Node.Id.VarDecl }, .doc_comments = null, .visib_token = null, .thread_local_token = null, .name_token = name_tok, .eq_token = eq_tok, .mut_token = const_tok, .comptime_token = null, .extern_export_token = null, .lib_name = null, .type_node = null, .align_node = null, .section_node = null, .init_node = &call_node.base, .semicolon_token = semi_tok, }; try c.tree.root_node.decls.push(&var_decl_node.base); } fn appendToken(c: *Context, token_id: Token.Id, bytes: []const u8) !ast.TokenIndex { return appendTokenFmt(c, token_id, "{}", bytes); } fn appendTokenFmt(c: *Context, token_id: Token.Id, comptime format: []const u8, args: ...) !ast.TokenIndex { const S = struct { fn callback(context: *Context, bytes: []const u8) error{OutOfMemory}!void { return context.source_buffer.append(bytes); } }; const start_index = c.source_buffer.len(); errdefer c.source_buffer.shrink(start_index); try std.fmt.format(c, error{OutOfMemory}, S.callback, format, args); const end_index = c.source_buffer.len(); const token_index = c.tree.tokens.len; const new_token = try c.tree.tokens.addOne(); errdefer c.tree.tokens.shrink(token_index); new_token.* = Token{ .id = token_id, .start = start_index, .end = end_index, }; try c.source_buffer.appendByte('\n'); return token_index; } fn appendIdentifier(c: *Context, name: []const u8) !*ast.Node { const token_index = try appendToken(c, .Identifier, name); const identifier = try c.a().create(ast.Node.Identifier); identifier.* = ast.Node.Identifier{ .base = ast.Node{ .id = ast.Node.Id.Identifier }, .token = token_index, }; return &identifier.base; } pub fn freeErrors(errors: []ClangErrMsg) void { ZigClangErrorMsg_delete(errors.ptr, errors.len); }
src-self-hosted/translate_c.zig
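The translate_c.zig code above handles untranslatable declarations by emitting a `const <name> = @compileError("...");` declaration token by token through `failDecl`. As a rough standalone illustration of the shape of that output only (the real code builds AST nodes, not strings, and this sketch uses a newer `std.fmt` `{s}` specifier than that file's era; `renderFailDecl` is a hypothetical helper):

const std = @import("std");

// Hypothetical stand-in for the shape of failDecl's output: the rendered
// result is a plain `const <name> = @compileError("...");` declaration.
fn renderFailDecl(buf: []u8, name: []const u8, msg: []const u8) ![]const u8 {
    return std.fmt.bufPrint(buf, "const {s} = @compileError(\"{s}\");\n", .{ name, msg });
}

test "rendered decl shape" {
    var buf: [128]u8 = undefined;
    const out = try renderFailDecl(&buf, "foo", "unable to translate macro");
    try std.testing.expectEqualStrings("const foo = @compileError(\"unable to translate macro\");\n", out);
}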
const x86_64 = @import("../index.zig"); const bitjuggle = @import("bitjuggle"); const std = @import("std"); const port = x86_64.structures.port.Portu8; /// Command sent to begin PIC initialization. const CMD_INIT: u8 = 0x11; /// Command sent to acknowledge an interrupt. const CMD_END_INTERRUPT: u8 = 0x20; /// The mode in which we want to run our PICs. const MODE_8086: u8 = 0x01; const DEFAULT_PRIMARY_MASK: u8 = blk: { var temp = PicPrimaryInterruptMask.allMasked(); temp.chain = false; break :blk temp.toU8(); }; const DEFAULT_SECONDARY_MASK: u8 = PicSecondaryInterruptMask.allMasked().toU8(); const PRIMARY_COMMAND_PORT: port = port.init(0x20); const PRIMARY_DATA_PORT: port = port.init(0x21); const SECONDARY_COMMAND_PORT: port = port.init(0xA0); const SECONDARY_DATA_PORT: port = port.init(0xA1); pub const SimplePic = struct { primary_interrupt_offset: u8, secondary_interrupt_offset: u8, /// Initialize both our PICs. We initialize them together, at the same /// time, because it's traditional to do so, and because I/O operations /// might not be instantaneous on older processors. /// /// NOTE: All interrupts start masked, except the connection from primary to secondary. pub fn init(primary_interrupt_offset: u8, secondary_interrupt_offset: u8) SimplePic { // We need to add a delay between writes to our PICs, especially on // older motherboards. But we don't necessarily have any kind of // timers yet, because most of them require interrupts. Various // older versions of Linux and other PC operating systems have // worked around this by writing garbage data to port 0x80, which // allegedly takes long enough to make everything work on most // hardware. const wait_port = port.init(0x80); // Tell each PIC that we're going to send it a three-byte // initialization sequence on its data port. PRIMARY_COMMAND_PORT.write(CMD_INIT); wait_port.write(0); SECONDARY_COMMAND_PORT.write(CMD_INIT); wait_port.write(0); // Byte 1: Set up our base offsets. PRIMARY_DATA_PORT.write(primary_interrupt_offset); wait_port.write(0); SECONDARY_DATA_PORT.write(secondary_interrupt_offset); wait_port.write(0); // Byte 2: Configure chaining between PIC1 and PIC2. PRIMARY_DATA_PORT.write(4); wait_port.write(0); SECONDARY_DATA_PORT.write(2); wait_port.write(0); // Byte 3: Set our mode. 
PRIMARY_DATA_PORT.write(MODE_8086); wait_port.write(0); SECONDARY_DATA_PORT.write(MODE_8086); wait_port.write(0); // Set the default interrupt masks PRIMARY_DATA_PORT.write(DEFAULT_PRIMARY_MASK); SECONDARY_DATA_PORT.write(DEFAULT_SECONDARY_MASK); return .{ .primary_interrupt_offset = primary_interrupt_offset, .secondary_interrupt_offset = secondary_interrupt_offset, }; } fn handlesInterrupt(offset: u8, interrupt_id: u8) bool { return offset <= interrupt_id and interrupt_id < offset + 8; } /// Figure out which (if any) PICs in our chain need to know about this interrupt pub fn notifyEndOfInterrupt(self: SimplePic, interrupt_id: u8) void { if (handlesInterrupt(self.secondary_interrupt_offset, interrupt_id)) { SECONDARY_COMMAND_PORT.write(CMD_END_INTERRUPT); PRIMARY_COMMAND_PORT.write(CMD_END_INTERRUPT); } else if (handlesInterrupt(self.primary_interrupt_offset, interrupt_id)) { PRIMARY_COMMAND_PORT.write(CMD_END_INTERRUPT); } } pub fn rawGetPrimaryInterruptMask() PicPrimaryInterruptMask { return PicPrimaryInterruptMask.fromU8(PRIMARY_DATA_PORT.read()); } pub fn rawSetPrimaryInterruptMask(mask: PicPrimaryInterruptMask) void { PRIMARY_DATA_PORT.write(mask.toU8()); } pub fn rawGetSecondaryInterruptMask() PicSecondaryInterruptMask { return PicSecondaryInterruptMask.fromU8(SECONDARY_DATA_PORT.read()); } pub fn rawSetSecondaryInterruptMask(mask: PicSecondaryInterruptMask) void { SECONDARY_DATA_PORT.write(mask.toU8()); } pub fn isInterruptMasked(self: SimplePic, interrupt: PicInterrupt) bool { _ = self; return switch (interrupt) { // Primary .Timer => rawGetPrimaryInterruptMask().timer, .Keyboard => rawGetPrimaryInterruptMask().keyboard, .Chain => rawGetPrimaryInterruptMask().chain, .SerialPort2 => rawGetPrimaryInterruptMask().serial_port_2, .SerialPort1 => rawGetPrimaryInterruptMask().serial_port_1, .ParallelPort23 => rawGetPrimaryInterruptMask().parallel_port_23, .FloppyDisk => rawGetPrimaryInterruptMask().floppy_disk, .ParallelPort1 => rawGetPrimaryInterruptMask().parallel_port_1, // Secondary .RealTimeClock => rawGetSecondaryInterruptMask().real_time_clock, .Acpi => rawGetSecondaryInterruptMask().acpi, .Available1 => rawGetSecondaryInterruptMask().available_1, .Available2 => rawGetSecondaryInterruptMask().available_2, .Mouse => rawGetSecondaryInterruptMask().mouse, .CoProcessor => rawGetSecondaryInterruptMask().co_processor, .PrimaryAta => rawGetSecondaryInterruptMask().primary_ata, .SecondaryAta => rawGetSecondaryInterruptMask().secondary_ata, }; } fn isPrimaryPic(interrupt: PicInterrupt) bool { return switch (interrupt) { .Timer, .Keyboard, .Chain, .SerialPort2, .SerialPort1, .ParallelPort23, .FloppyDisk, .ParallelPort1 => true, else => false, }; } pub fn setInterruptMask(self: SimplePic, interrupt: PicInterrupt, mask: bool) void { _ = self; if (isPrimaryPic(interrupt)) { var current_mask = rawGetPrimaryInterruptMask(); switch (interrupt) { .Timer => current_mask.timer = mask, .Keyboard => current_mask.keyboard = mask, .Chain => current_mask.chain = mask, .SerialPort2 => current_mask.serial_port_2 = mask, .SerialPort1 => current_mask.serial_port_1 = mask, .ParallelPort23 => current_mask.parallel_port_23 = mask, .FloppyDisk => current_mask.floppy_disk = mask, .ParallelPort1 => current_mask.parallel_port_1 = mask, else => unreachable, } rawSetPrimaryInterruptMask(current_mask); } else { var current_mask = rawGetSecondaryInterruptMask(); switch (interrupt) { .RealTimeClock => current_mask.real_time_clock = mask, .Acpi => current_mask.acpi = mask, .Available1 => current_mask.available_1 = mask, 
.Available2 => current_mask.available_2 = mask, .Mouse => current_mask.mouse = mask, .CoProcessor => current_mask.co_processor = mask, .PrimaryAta => current_mask.primary_ata = mask, .SecondaryAta => current_mask.secondary_ata = mask, else => unreachable, } rawSetSecondaryInterruptMask(current_mask); } } comptime { std.testing.refAllDecls(@This()); } }; pub const PicInterrupt = enum { Timer, Keyboard, Chain, SerialPort2, SerialPort1, ParallelPort23, FloppyDisk, ParallelPort1, RealTimeClock, Acpi, Available1, Available2, Mouse, CoProcessor, PrimaryAta, SecondaryAta, }; pub const PicPrimaryInterruptMask = packed struct { timer: bool, keyboard: bool, chain: bool, serial_port_2: bool, serial_port_1: bool, parallel_port_23: bool, floppy_disk: bool, parallel_port_1: bool, pub fn noneMasked() PicPrimaryInterruptMask { return fromU8(0); } pub fn allMasked() PicPrimaryInterruptMask { return fromU8(0b11111111); } pub fn toU8(value: PicPrimaryInterruptMask) u8 { return @bitCast(u8, value); } pub fn fromU8(value: u8) PicPrimaryInterruptMask { return @bitCast(PicPrimaryInterruptMask, value); } test { std.testing.refAllDecls(@This()); try std.testing.expectEqual(@bitSizeOf(u8), @bitSizeOf(PicPrimaryInterruptMask)); try std.testing.expectEqual(@sizeOf(u8), @sizeOf(PicPrimaryInterruptMask)); } }; pub const PicSecondaryInterruptMask = packed struct { real_time_clock: bool, acpi: bool, available_1: bool, available_2: bool, mouse: bool, co_processor: bool, primary_ata: bool, secondary_ata: bool, pub fn noneMasked() PicSecondaryInterruptMask { return fromU8(0); } pub fn allMasked() PicSecondaryInterruptMask { return fromU8(0b11111111); } pub fn toU8(value: PicSecondaryInterruptMask) u8 { return @bitCast(u8, value); } pub fn fromU8(value: u8) PicSecondaryInterruptMask { return @bitCast(PicSecondaryInterruptMask, value); } test { std.testing.refAllDecls(@This()); try std.testing.expectEqual(@bitSizeOf(u8), @bitSizeOf(PicSecondaryInterruptMask)); try std.testing.expectEqual(@sizeOf(u8), @sizeOf(PicSecondaryInterruptMask)); } }; comptime { std.testing.refAllDecls(@This()); }
src/additional/pic8259.zig
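The pic8259.zig driver above stores each PIC's interrupt-mask register as a packed struct of eight bools and converts it to and from the raw byte with `@bitCast`, relying on the first field landing in the least significant bit. A minimal standalone sketch of that round trip, with field names copied from `PicPrimaryInterruptMask` and the same packed-struct layout assumption the file makes:

const std = @import("std");

// Standalone sketch of the interrupt-mask representation: a packed struct of
// eight bools maps field-by-field onto the 8-bit IMR value, first field in
// the least significant bit.
const Mask = packed struct {
    timer: bool,
    keyboard: bool,
    chain: bool,
    serial_port_2: bool,
    serial_port_1: bool,
    parallel_port_23: bool,
    floppy_disk: bool,
    parallel_port_1: bool,
};

test "mask round-trips through u8" {
    var m = @bitCast(Mask, @as(u8, 0b1111_1111)); // everything masked
    m.chain = false; // unmask the cascade line so the secondary PIC can signal
    try std.testing.expectEqual(@as(u8, 0b1111_1011), @bitCast(u8, m));
}

Clearing only `chain` reproduces the file's DEFAULT_PRIMARY_MASK: every line stays masked except the cascade from the secondary PIC.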
const std = @import("std"); const webgpu = @import("./webgpu.zig"); pub const Instance = struct { pub const VTable = struct { destroy_fn: fn(*Instance) void, create_surface_fn: fn(*Instance, webgpu.SurfaceDescriptor) CreateSurfaceError!*Surface, request_adapter_fn: fn(*Instance, webgpu.RequestAdapterOptions) RequestAdapterError!*Adapter, }; __vtable: *const VTable, pub const CreateError = error { OutOfMemory, Failed, }; pub inline fn create(comptime B: type, descriptor: webgpu.InstanceDescriptor) CreateError!*Instance { return B.create(descriptor); } pub inline fn destroy(instance: *Instance) void { instance.__vtable.destroy_fn(instance); } pub const CreateSurfaceError = error { OutOfMemory, Failed, }; pub inline fn createSurface(instance: *Instance, descriptor: webgpu.SurfaceDescriptor) CreateSurfaceError!*Surface { return instance.__vtable.create_surface_fn(instance, descriptor); } pub const RequestAdapterError = error { OutOfMemory, Unavailable, Failed, Unknown, }; pub inline fn requestAdapter(instance: *Instance, options: webgpu.RequestAdapterOptions) RequestAdapterError!*Adapter { return instance.__vtable.request_adapter_fn(instance, options); } }; pub const Adapter = struct { pub const VTable = struct { request_device_fn: fn(*Adapter, descriptor: webgpu.DeviceDescriptor) RequestDeviceError!*webgpu.Device, }; __vtable: *const VTable, instance: *Instance, features: webgpu.Features, limits: webgpu.Limits, adapter_type: webgpu.AdapterType, backend_type: webgpu.BackendType, vendor_id: u32, device_id: u32, name: [:0]const u8, pub const RequestDeviceError = error { OutOfMemory, Failed, }; pub inline fn requestDevice(adapter: *Adapter, descriptor: webgpu.DeviceDescriptor) RequestDeviceError!*webgpu.Device { return adapter.__vtable.request_device_fn(adapter, descriptor); } }; pub const Surface = struct { pub const VTable = struct { destroy_fn: fn(*Surface) void, get_preferred_format_fn: fn(*Surface) webgpu.TextureFormat, }; __vtable: *const VTable, instance: *Instance, pub inline fn destroy(surface: *Surface) void { surface.__vtable.destroy_fn(surface); } pub inline fn getPreferredFormat(surface: *Surface) webgpu.TextureFormat { return surface.__vtable.get_preferred_format_fn(surface); } };
src/instance.zig
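instance.zig above implements its WebGPU objects as hand-rolled interfaces: each struct carries a `__vtable: *const VTable` of plain function pointers and exposes thin `inline` wrappers that forward to it. A minimal sketch of the same pattern with a made-up `Greeter` interface; recovering the backend with `@fieldParentPtr` is one common way to implement such a vtable, a choice the file above leaves to each backend:

const std = @import("std");

// Minimal sketch of the hand-rolled interface pattern: the vtable holds plain
// function pointers and the inline wrapper just forwards to it.
const Greeter = struct {
    pub const VTable = struct {
        greet_fn: fn (*Greeter) u32,
    };

    __vtable: *const VTable,

    pub inline fn greet(self: *Greeter) u32 {
        return self.__vtable.greet_fn(self);
    }
};

// One possible backend: it embeds the interface and recovers itself with
// @fieldParentPtr inside the vtable function.
const Fixed = struct {
    interface: Greeter = .{ .__vtable = &vtable },
    value: u32 = 42,

    const vtable = Greeter.VTable{ .greet_fn = greet };

    fn greet(iface: *Greeter) u32 {
        const self = @fieldParentPtr(Fixed, "interface", iface);
        return self.value;
    }
};

test "vtable dispatch" {
    var impl = Fixed{};
    try std.testing.expectEqual(@as(u32, 42), impl.interface.greet());
}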
const std = @import("std"); const streql = std.ascii.eqlIgnoreCase; var byte_num : u16 = 0; pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const allocator = gpa.allocator(); const args = try std.process.argsAlloc(allocator); defer std.process.argsFree(allocator, args); var source = try std.fs.cwd().readFileAlloc(allocator, args[1], 0x100_0000); // 16 MiB defer allocator.free(source); var buf = try allocator.alloc(u8, 0x10000); // 64 KiB buffer defer allocator.free(buf); try assemble(source, buf); try std.fs.cwd().writeFile(args[2], buf[0..byte_num]); } fn assemble(source : []u8, buf : []u8) !void { var lines = std.mem.tokenize(u8, source, "\n"); var line_number : u32 = 0; var byte : u8 = undefined; var reg : u8 = undefined; while (lines.next()) |line| { line_number +=1; var line_len : usize = (std.mem.indexOf(u8, line, ";") orelse line.len); var tokens = std.mem.tokenize(u8, line[0..line_len], std.ascii.spaces[0..]); if (tokens.next()) |token| { if (streql(token, ".bbyte")) { while (tokens.next()) |token_num| { byte = try std.fmt.parseInt(u8, token_num, 2); extendBuf(buf, byte); } } else if(streql(token, ".obyte")) { while (tokens.next()) |token_num| { byte = try std.fmt.parseInt(u8, token_num, 8); extendBuf(buf, byte); } } else if (streql(token, ".dbyte")) { while (tokens.next()) |token_num| { byte = try std.fmt.parseInt(u8, token_num, 10); extendBuf(buf, byte); } } else if (streql(token, ".xbyte")) { while (tokens.next()) |token_num| { byte = try std.fmt.parseInt(u8, token_num, 16); extendBuf(buf, byte); } } else { if (streql(token, "sub")) { byte = 0; } else if (streql(token, "subr")) { byte = (0b01 << 6); } else if (streql(token, "leq")) { byte = (0b10 << 6); } else if (streql(token, "leqr")) { byte = (0b11 << 6); } if (tokens.next()) |token_arg| { reg = regId(token_arg) catch |err|{ std.log.err("Line {d}: {s} at {s}", .{line_number, @errorName(err), token_arg}); return err; }; byte += reg << 3; } else { std.log.err("Line {d}: not enough arguments", .{line_number}); return error.notEnoughTokens; } if (tokens.next()) |token_arg| { reg = regId(token_arg) catch |err|{ std.log.err("Line {d}: {s} at {s}", .{line_number, @errorName(err), token_arg}); return err; }; byte += reg; } else { std.log.err("Line {d}: not enough arguments", .{line_number}); return error.notEnoughTokens; } if (tokens.next()) |token_extra| { std.log.err("Line {d}: too many arguments at {s}", .{line_number, token_extra}); return error.tooManyTokens; } extendBuf(buf, byte); } } } } fn regId(name : []const u8) !u3 { if (streql(name, "r0")) { return 0; } else if (streql(name, "r1")) { return 1; } else if (streql(name, "r2")) { return 2; } else if (streql(name, "pc") or streql(name, "r3")) { return 3; } else if (streql(name, "(r0)")) { return 4; } else if (streql(name, "(r1)")) { return 5; } else if (streql(name, "(r2)")) { return 6; } else if (streql(name, "(pc)") or streql(name, "(r3)")) { return 7; } else { return error.badRegisterName; } } fn extendBuf(buf : []u8, value : u8) void { buf[byte_num] = value; byte_num += 1; }
assem/src/main.zig
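The small assembler above packs each instruction into one byte: the opcode sits in the top two bits (`0b01 << 6` for `subr`, `0b10 << 6` for `leq`, and so on), the first register operand is shifted into bits 3..5, and the second occupies bits 0..2, with register ids taken from `regId`. A sketch of that encoding using a hypothetical `encode` helper:

const std = @import("std");

// Sketch of the one-byte instruction layout assembled above: opcode in the
// top two bits, first register operand in bits 3..5, second in bits 0..2.
fn encode(opcode: u2, reg_a: u3, reg_b: u3) u8 {
    return (@as(u8, opcode) << 6) | (@as(u8, reg_a) << 3) | reg_b;
}

test "encode 'leq r1 (r2)'" {
    // leq is 0b10, r1 is 1 and (r2) is 6 in the regId table above.
    try std.testing.expectEqual(@as(u8, 0b10_001_110), encode(0b10, 1, 6));
}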
const std = @import("std"); const generator = @import("./generator.zig"); const extractFn = @import("./_.zig").extractFn; fn initializer(comptime Self: type, comptime G: type, comptime F: type, comptime stateful: bool) type { return if (stateful) struct { pub const Mapper = F; pub fn init(inner: G, f_state: F) Self { return Self{ .inner = inner, .state = f_state, }; } } else struct { pub fn init(inner: G) Self { return Self{ .inner = inner, }; } }; } /// `Map` creates a generator that maps yielded values from wrapped generator `G` of type `I` to type `O` pub fn Map(comptime G: type, comptime I: type, comptime O: type, comptime F: type, comptime stateful: bool) type { const f = if (stateful) extractFn(F, fn (*F, I) O) else extractFn(F, fn (I) O); return generator.Generator(struct { pub const Inner = G; inner: G, state: if (stateful) F else void = undefined, pub usingnamespace initializer(@This(), G, F, stateful); pub fn generate(self: *@This(), handle: *generator.Handle(O)) !void { while (try self.inner.next()) |v| { if (stateful) try handle.yield(f(&self.state, v)) else try handle.yield(f(v)); } } }, I); } test { const expect = std.testing.expect; const ty = struct { pub fn generate(_: *@This(), handle: *generator.Handle(u8)) !void { try handle.yield(0); try handle.yield(1); try handle.yield(2); } }; const G = Map(generator.Generator(ty, u8), u8, u8, struct { pub fn incr(i: u8) u8 { return i + 1; } }, false); var g = G.init(G.Context.init(G.Context.Inner.init(ty{}))); try expect((try g.next()).? == 1); try expect((try g.next()).? == 2); try expect((try g.next()).? == 3); try expect((try g.next()) == null); try expect(g.state == .Returned); } test "stateful" { const expect = std.testing.expect; const ty = struct { pub fn generate(_: *@This(), handle: *generator.Handle(u8)) !void { try handle.yield(0); try handle.yield(1); try handle.yield(2); } }; const G = Map(generator.Generator(ty, u8), u8, u8, struct { n: u8 = 0, pub fn incr(self: *@This(), i: u8) u8 { self.n += 1; return i + self.n; } }, true); var g = G.init(G.Context.init(G.Context.Inner.init(ty{}), G.Context.Mapper{})); try expect((try g.next()).? == 1); try expect((try g.next()).? == 3); try expect((try g.next()).? == 5); try expect((try g.next()) == null); try expect(g.state == .Returned); }
src/map.zig
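map.zig above selects between a stateless `fn (I) O` mapper and a stateful `fn (*F, I) O` mapper with a comptime `stateful` flag; because the flag is comptime-known, only the chosen call form is analyzed. A stripped-down sketch of that idea without the generator machinery (`Adder` is invented for illustration but mirrors the incr mappers in the file's tests; in map.zig the flag also selects the state field type and the init signature):

const std = @import("std");

// Stripped-down sketch of the comptime `stateful` switch: the comptime flag
// selects which behaviour is compiled in.
fn Adder(comptime stateful: bool) type {
    return struct {
        n: u8 = 0, // only meaningful when stateful

        fn apply(self: *@This(), i: u8) u8 {
            if (stateful) {
                self.n += 1;
                return i + self.n;
            }
            return i + 1;
        }
    };
}

test "stateless vs stateful mapping" {
    var plain = Adder(false){};
    try std.testing.expectEqual(@as(u8, 1), plain.apply(0));
    try std.testing.expectEqual(@as(u8, 2), plain.apply(1));

    var counting = Adder(true){};
    try std.testing.expectEqual(@as(u8, 1), counting.apply(0));
    try std.testing.expectEqual(@as(u8, 3), counting.apply(1));
}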
const std = @import("std"); const stdx = @import("stdx"); const ft = @import("freetype"); const stbtt = @import("stbtt"); const graphics = @import("graphics.zig"); const FontId = graphics.FontId; const OpenTypeFont = graphics.OpenTypeFont; const log = std.log.scoped(.font); pub const FontType = enum(u1) { /// Scalable font. Outline = 0, /// Scalable at fixed steps. Bitmap = 1, }; /// Duped info about a font without doing a lookup. pub const FontDesc = struct { font_type: FontType, /// Only defined for Bitmap font. bmfont_scaler: BitmapFontScaler, }; pub const BitmapFontScaler = struct { /// Direct mapping from requested font size to the final font size and the render font size. mapping: [64]struct { bmfont_idx: u8, final_font_size: u16, render_font_size: u16, }, }; // Contains rendering metadata about one font face. // Contains the backing bitmap font size to scale to user requested font size. pub const Font = struct { const Self = @This(); id: FontId, font_type: FontType, name: []const u8, impl: switch (graphics.FontRendererBackend) { .Freetype => *ft.Face, // Only define for Outline font. .Stbtt => stbtt.fontinfo, }, ot_font: OpenTypeFont, data: []const u8, /// Only defined for Bitmap font. bmfont_scaler: BitmapFontScaler, bmfont_strikes: []const BitmapFontStrike, pub fn initTTF(self: *Self, alloc: std.mem.Allocator, id: FontId, data: []const u8) void { switch (graphics.FontRendererBackend) { .Freetype => { // Dupe font data since we will be continually querying data from it. const own_data = alloc.dupe(u8, data) catch @panic("error"); const ot_font = OpenTypeFont.init(alloc, own_data, 0) catch @panic("error"); const family_name = ot_font.allocFontFamilyName(alloc) orelse @panic("error"); self.* = .{ .id = id, .font_type = .Outline, .ot_font = ot_font, .impl = undefined, .name = family_name, .data = own_data, .bmfont_scaler = undefined, .bmfont_strikes = undefined, }; FreetypeBackend.initFont(graphics.ft_library, &self.impl, own_data, 0); }, .Stbtt => { // Dupe font data since we will be continually querying data from it. 
const own_data = alloc.dupe(u8, data) catch @panic("error"); const ot_font = OpenTypeFont.init(alloc, own_data, 0) catch @panic("error"); var stbtt_font: stbtt.fontinfo = undefined; if (ot_font.hasGlyphOutlines()) { stbtt.InitFont(&stbtt_font, own_data, 0) catch @panic("failed to load font"); } const family_name = ot_font.allocFontFamilyName(alloc) orelse @panic("error"); self.* = .{ .id = id, .font_type = .Outline, .ot_font = ot_font, .impl = stbtt_font, .name = family_name, .data = own_data, .bmfont_scaler = undefined, .bmfont_strikes = undefined, }; }, } } pub fn initOTB(self: *Self, alloc: std.mem.Allocator, id: FontId, data: []const graphics.BitmapFontData) void { const strikes = alloc.alloc(BitmapFontStrike, data.len) catch @panic("error"); var last_size: u8 = 0; for (data) |it, i| { if (it.size <= last_size) { @panic("Expected ascending font size."); } const own_data = alloc.dupe(u8, it.data) catch @panic("error"); strikes[i] = .{ .impl = undefined, .ot_font = OpenTypeFont.init(alloc, own_data, 0) catch @panic("failed to load font"), .data = own_data, }; switch (graphics.FontRendererBackend) { .Freetype => { FreetypeBackend.initFont(graphics.ft_library, &strikes[i].impl, own_data, 0); }, .Stbtt => { stbtt.InitFont(&strikes[i].impl, own_data, 0) catch @panic("failed to load font"); }, } } const family_name = strikes[0].ot_font.allocFontFamilyName(alloc) orelse unreachable; self.* = .{ .id = id, .font_type = .Bitmap, .ot_font = undefined, .impl = undefined, .name = family_name, .data = undefined, .bmfont_scaler = undefined, .bmfont_strikes = strikes, }; // Build BitmapFontScaler. self.bmfont_scaler.mapping = undefined; var cur_bm_idx: u8 = 0; var scale: u16 = 1; for (self.bmfont_scaler.mapping) |_, i| { var bmdata = data[cur_bm_idx]; if (i > bmdata.size) { if (cur_bm_idx < data.len-1) { cur_bm_idx += 1; bmdata = data[cur_bm_idx]; scale = 1; } else if (i % bmdata.size == 0) { // Increment the scaling factor of the current bitmap font. 
scale = @intCast(u16, i) / bmdata.size; } } self.bmfont_scaler.mapping[i] = .{ .bmfont_idx = cur_bm_idx, .final_font_size = bmdata.size * scale, .render_font_size = bmdata.size, }; } } pub fn deinit(self: Self, alloc: std.mem.Allocator) void { switch (self.font_type) { .Outline => { self.ot_font.deinit(); alloc.free(self.data); }, .Bitmap => { for (self.bmfont_strikes) |font| { font.deinit(alloc); } alloc.free(self.bmfont_strikes); }, } alloc.free(self.name); } pub fn getOtFontBySize(self: Self, font_size: u16) OpenTypeFont { switch (self.font_type) { .Outline => { return self.ot_font; }, .Bitmap => { if (font_size > self.bmfont_scaler.mapping.len) { const mapping = self.bmfont_scaler.mapping[self.bmfont_scaler.mapping.len-1]; return self.bmfont_strikes[mapping.bmfont_idx].ot_font; } else { const mapping = self.bmfont_scaler.mapping[font_size]; return self.bmfont_strikes[mapping.bmfont_idx].ot_font; } }, } } pub fn getBitmapFontBySize(self: Self, font_size: u16) BitmapFontStrike { if (font_size > self.bmfont_scaler.mapping.len) { const mapping = self.bmfont_scaler.mapping[self.bmfont_scaler.mapping.len-1]; return self.bmfont_strikes[mapping.bmfont_idx]; } else { const mapping = self.bmfont_scaler.mapping[font_size]; return self.bmfont_strikes[mapping.bmfont_idx]; } } pub fn getKernAdvance(self: Self, prev_glyph_id: u16, glyph_id: u16) i32 { switch (graphics.FontRendererBackend) { .Stbtt => { const res = stbtt.stbtt_GetGlyphKernAdvance(&self.impl, prev_glyph_id, glyph_id); if (res != 0) { log.debug("kerning: {}", .{res}); } return res; }, .Freetype => { var res: ft.FT_Vector = undefined; // Return unscaled so it's not dependent on the current font size setting. const err = ft.FT_Get_Kerning(self.impl, prev_glyph_id, glyph_id, ft.FT_KERNING_UNSCALED, &res); if (err != 0) { log.debug("freetype error {}: {s}", .{err, ft.FT_Error_String(err)}); return 0; } return @intCast(i32, res.x); } } } }; pub const BitmapFontStrike = struct { /// This is only used to get kern values. Once that is implemented in ttf.zig, this won't be needed anymore. impl: switch (graphics.FontRendererBackend) { .Stbtt => stbtt.fontinfo, .Freetype => *ft.Face, }, ot_font: OpenTypeFont, data: []const u8, const Self = @This(); fn deinit(self: Self, alloc: std.mem.Allocator) void { self.ot_font.deinit(); alloc.free(self.data); } pub fn getKernAdvance(self: Self, prev_glyph_id: u16, glyph_id: u16) i32 { switch (graphics.FontRendererBackend) { .Stbtt => return stbtt.stbtt_GetGlyphKernAdvance(&self.impl, prev_glyph_id, glyph_id), .Freetype => { var res: ft.FT_Vector = undefined; // Return unscaled so it's not dependent on the current font size setting. const err = ft.FT_Get_Kerning(self.impl, prev_glyph_id, glyph_id, ft.FT_KERNING_UNSCALED, &res); if (err != 0) { log.debug("freetype error {}: {s}", .{err, ft.FT_Error_String(err)}); return 0; } return @intCast(i32, res.x); }, } } }; const FreetypeBackend = struct { pub fn initFont(lib: ft.FT_Library, face: **ft.Face, data: []const u8, face_idx: u32) void { const err = ft.FT_New_Memory_Face(lib, data.ptr, @intCast(c_long, data.len), @intCast(c_long, face_idx), @ptrCast([*c][*c]ft.Face, face)); if (err != 0) { stdx.panicFmt("freetype error {}: {s}", .{err, ft.FT_Error_String(err)}); } face.*.glyph[0].format = ft.FT_GLYPH_FORMAT_BITMAP; } };
graphics/src/font.zig
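For bitmap fonts, font.zig above precomputes a table that maps every requested size to a strike index plus the final and render sizes, scaling the largest strike by whole-number factors when the request exceeds it. The sketch below is a simplified on-the-fly version of that selection (a hypothetical `pickStrike`, not the 64-entry mapping the file actually builds), assuming strikes are listed in ascending size as `initOTB` requires:

const std = @import("std");

// Simplified sketch of bitmap strike selection: choose the largest strike not
// exceeding the request, and scale the largest strike by a whole-number
// factor for oversized requests.
fn pickStrike(sizes: []const u16, request: u16) struct { idx: usize, scale: u16 } {
    var idx: usize = 0;
    for (sizes) |size, i| {
        if (size <= request) idx = i;
    }
    const base = sizes[idx];
    const scale = if (idx == sizes.len - 1 and request >= base) request / base else 1;
    return .{ .idx = idx, .scale = scale };
}

test "strike selection" {
    const sizes = [_]u16{ 8, 16 };
    try std.testing.expectEqual(@as(usize, 0), pickStrike(&sizes, 8).idx);
    try std.testing.expectEqual(@as(usize, 1), pickStrike(&sizes, 20).idx);
    // An oversized request reuses the largest strike at an integer scale.
    try std.testing.expectEqual(@as(u16, 2), pickStrike(&sizes, 32).scale);
}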
const Liveness = @This(); const std = @import("std"); const trace = @import("tracy.zig").trace; const log = std.log.scoped(.liveness); const assert = std.debug.assert; const Allocator = std.mem.Allocator; const Air = @import("Air.zig"); const Log2Int = std.math.Log2Int; /// This array is split into sets of 4 bits per AIR instruction. /// The MSB (0bX000) is whether the instruction is unreferenced. /// The LSB (0b000X) is the first operand, and so on, up to 3 operands. A set bit means the /// operand dies after this instruction. /// Instructions which need more data to track liveness have special handling via the /// `special` table. tomb_bits: []usize, /// Sparse table of specially handled instructions. The value is an index into the `extra` /// array. The meaning of the data depends on the AIR tag. /// * `cond_br` - points to a `CondBr` in `extra` at this index. /// * `switch_br` - points to a `SwitchBr` in `extra` at this index. /// * `asm`, `call`, `aggregate_init` - the value is a set of bits which are the extra tomb /// bits of operands. /// The main tomb bits are still used and the extra ones are starting with the lsb of the /// value here. special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), /// Auxiliary data. The way this data is interpreted is determined contextually. extra: []const u32, /// Trailing is the set of instructions whose lifetimes end at the start of the then branch, /// followed by the set of instructions whose lifetimes end at the start of the else branch. pub const CondBr = struct { then_death_count: u32, else_death_count: u32, }; /// Trailing is: /// * For each case in the same order as in the AIR: /// - case_death_count: u32 /// - Air.Inst.Index for each `case_death_count`: set of instructions whose lifetimes /// end at the start of this case. /// * Air.Inst.Index for each `else_death_count`: set of instructions whose lifetimes /// end at the start of the else case. 
pub const SwitchBr = struct { else_death_count: u32, }; pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness { const tracy = trace(@src()); defer tracy.end(); var a: Analysis = .{ .gpa = gpa, .air = air, .table = .{}, .tomb_bits = try gpa.alloc( usize, (air.instructions.len * bpi + @bitSizeOf(usize) - 1) / @bitSizeOf(usize), ), .extra = .{}, .special = .{}, }; errdefer gpa.free(a.tomb_bits); errdefer a.special.deinit(gpa); defer a.extra.deinit(gpa); defer a.table.deinit(gpa); std.mem.set(usize, a.tomb_bits, 0); const main_body = air.getMainBody(); try a.table.ensureTotalCapacity(gpa, @intCast(u32, main_body.len)); try analyzeWithContext(&a, null, main_body); return Liveness{ .tomb_bits = a.tomb_bits, .special = a.special, .extra = a.extra.toOwnedSlice(gpa), }; } pub fn getTombBits(l: Liveness, inst: Air.Inst.Index) Bpi { const usize_index = (inst * bpi) / @bitSizeOf(usize); return @truncate(Bpi, l.tomb_bits[usize_index] >> @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi)); } pub fn isUnused(l: Liveness, inst: Air.Inst.Index) bool { const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); return (l.tomb_bits[usize_index] & mask) != 0; } pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); return (l.tomb_bits[usize_index] & mask) != 0; } pub fn clearOperandDeath(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) void { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); l.tomb_bits[usize_index] &= ~mask; } /// Higher level API. pub const CondBrSlices = struct { then_deaths: []const Air.Inst.Index, else_deaths: []const Air.Inst.Index, }; pub fn getCondBr(l: Liveness, inst: Air.Inst.Index) CondBrSlices { var index: usize = l.special.get(inst) orelse return .{ .then_deaths = &.{}, .else_deaths = &.{}, }; const then_death_count = l.extra[index]; index += 1; const else_death_count = l.extra[index]; index += 1; const then_deaths = l.extra[index..][0..then_death_count]; index += then_death_count; return .{ .then_deaths = then_deaths, .else_deaths = l.extra[index..][0..else_death_count], }; } /// Indexed by case number as they appear in AIR. /// Else is the last element. pub const SwitchBrTable = struct { deaths: []const []const Air.Inst.Index, }; /// Caller owns the memory. 
pub fn getSwitchBr(l: Liveness, gpa: Allocator, inst: Air.Inst.Index, cases_len: u32) Allocator.Error!SwitchBrTable { var index: usize = l.special.get(inst) orelse return SwitchBrTable{ .deaths = &.{}, }; const else_death_count = l.extra[index]; index += 1; var deaths = std.ArrayList([]const Air.Inst.Index).init(gpa); defer deaths.deinit(); try deaths.ensureTotalCapacity(cases_len + 1); var case_i: u32 = 0; while (case_i < cases_len - 1) : (case_i += 1) { const case_death_count: u32 = l.extra[index]; index += 1; const case_deaths = l.extra[index..][0..case_death_count]; index += case_death_count; deaths.appendAssumeCapacity(case_deaths); } { // Else const else_deaths = l.extra[index..][0..else_death_count]; deaths.appendAssumeCapacity(else_deaths); } return SwitchBrTable{ .deaths = deaths.toOwnedSlice(), }; } pub fn deinit(l: *Liveness, gpa: Allocator) void { gpa.free(l.tomb_bits); gpa.free(l.extra); l.special.deinit(gpa); l.* = undefined; } /// How many tomb bits per AIR instruction. pub const bpi = 4; pub const Bpi = std.meta.Int(.unsigned, bpi); pub const OperandInt = std.math.Log2Int(Bpi); /// In-progress data; on successful analysis converted into `Liveness`. const Analysis = struct { gpa: Allocator, air: Air, table: std.AutoHashMapUnmanaged(Air.Inst.Index, void), tomb_bits: []usize, special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { const usize_index = (inst * bpi) / @bitSizeOf(usize); a.tomb_bits[usize_index] |= @as(usize, tomb_bits) << @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi); } fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 { const fields = std.meta.fields(@TypeOf(extra)); try a.extra.ensureUnusedCapacity(a.gpa, fields.len); return addExtraAssumeCapacity(a, extra); } fn addExtraAssumeCapacity(a: *Analysis, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); const result = @intCast(u32, a.extra.items.len); inline for (fields) |field| { a.extra.appendAssumeCapacity(switch (field.field_type) { u32 => @field(extra, field.name), else => @compileError("bad field type"), }); } return result; } }; fn analyzeWithContext( a: *Analysis, new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), body: []const Air.Inst.Index, ) Allocator.Error!void { var i: usize = body.len; if (new_set) |ns| { // We are only interested in doing this for instructions which are born // before a conditional branch, so after obtaining the new set for // each branch we prune the instructions which were born within. while (i != 0) { i -= 1; const inst = body[i]; _ = ns.remove(inst); try analyzeInst(a, new_set, inst); } } else { while (i != 0) { i -= 1; const inst = body[i]; try analyzeInst(a, new_set, inst); } } } fn analyzeInst( a: *Analysis, new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index, ) Allocator.Error!void { const gpa = a.gpa; const table = &a.table; const inst_tags = a.air.instructions.items(.tag); const inst_datas = a.air.instructions.items(.data); // No tombstone for this instruction means it is never referenced, // and its birth marks its own death. 
Very metal 🤘 const main_tomb = !table.contains(inst); switch (inst_tags[inst]) { .add, .addwrap, .add_sat, .sub, .subwrap, .sub_sat, .mul, .mulwrap, .mul_sat, .div_float, .div_trunc, .div_floor, .div_exact, .rem, .mod, .ptr_add, .ptr_sub, .bit_and, .bit_or, .xor, .cmp_lt, .cmp_lte, .cmp_eq, .cmp_gte, .cmp_gt, .cmp_neq, .bool_and, .bool_or, .store, .array_elem_val, .slice_elem_val, .ptr_elem_val, .shl, .shl_exact, .shl_sat, .shr, .shr_exact, .atomic_store_unordered, .atomic_store_monotonic, .atomic_store_release, .atomic_store_seq_cst, .set_union_tag, .min, .max, => { const o = inst_datas[inst].bin_op; return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none }); }, .arg, .alloc, .ret_ptr, .constant, .const_ty, .breakpoint, .dbg_stmt, .dbg_inline_begin, .dbg_inline_end, .dbg_block_begin, .dbg_block_end, .unreach, .fence, .ret_addr, .frame_addr, .wasm_memory_size, => return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }), .not, .bitcast, .load, .fpext, .fptrunc, .intcast, .trunc, .optional_payload, .optional_payload_ptr, .optional_payload_ptr_set, .errunion_payload_ptr_set, .wrap_optional, .unwrap_errunion_payload, .unwrap_errunion_err, .unwrap_errunion_payload_ptr, .unwrap_errunion_err_ptr, .wrap_errunion_payload, .wrap_errunion_err, .slice_ptr, .slice_len, .ptr_slice_len_ptr, .ptr_slice_ptr_ptr, .struct_field_ptr_index_0, .struct_field_ptr_index_1, .struct_field_ptr_index_2, .struct_field_ptr_index_3, .array_to_slice, .float_to_int, .int_to_float, .get_union_tag, .clz, .ctz, .popcount, .byte_swap, .bit_reverse, .splat, => { const o = inst_datas[inst].ty_op; return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none }); }, .is_null, .is_non_null, .is_null_ptr, .is_non_null_ptr, .is_err, .is_non_err, .is_err_ptr, .is_non_err_ptr, .ptrtoint, .bool_to_int, .ret, .ret_load, .tag_name, .error_name, .sqrt, .sin, .cos, .exp, .exp2, .log, .log2, .log10, .fabs, .floor, .ceil, .round, .trunc_float, => { const operand = inst_datas[inst].un_op; return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none }); }, .dbg_var_ptr, .dbg_var_val, => { const operand = inst_datas[inst].pl_op.operand; return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none }); }, .prefetch => { const prefetch = inst_datas[inst].prefetch; return trackOperands(a, new_set, inst, main_tomb, .{ prefetch.ptr, .none, .none }); }, .call, .call_always_tail, .call_never_tail, .call_never_inline => { const inst_data = inst_datas[inst].pl_op; const callee = inst_data.operand; const extra = a.air.extraData(Air.Call, inst_data.payload); const args = @bitCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..extra.data.args_len]); if (args.len + 1 <= bpi - 1) { var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1); buf[0] = callee; std.mem.copy(Air.Inst.Ref, buf[1..], args); return trackOperands(a, new_set, inst, main_tomb, buf); } var extra_tombs: ExtraTombs = .{ .analysis = a, .new_set = new_set, .inst = inst, .main_tomb = main_tomb, }; try extra_tombs.feed(callee); for (args) |arg| { try extra_tombs.feed(arg); } return extra_tombs.finish(); }, .shuffle => { const extra = a.air.extraData(Air.Shuffle, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.a, extra.b, .none }); }, .reduce => { const reduce = inst_datas[inst].reduce; return trackOperands(a, new_set, inst, main_tomb, .{ reduce.operand, .none, .none }); }, .aggregate_init => { const ty_pl = inst_datas[inst].ty_pl; const aggregate_ty = a.air.getRefType(ty_pl.ty); 
const len = @intCast(usize, aggregate_ty.arrayLen()); const elements = @bitCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]); if (elements.len <= bpi - 1) { var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1); std.mem.copy(Air.Inst.Ref, &buf, elements); return trackOperands(a, new_set, inst, main_tomb, buf); } var extra_tombs: ExtraTombs = .{ .analysis = a, .new_set = new_set, .inst = inst, .main_tomb = main_tomb, }; for (elements) |elem| { try extra_tombs.feed(elem); } return extra_tombs.finish(); }, .union_init => { const extra = a.air.extraData(Air.UnionInit, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.init, .none, .none }); }, .struct_field_ptr, .struct_field_val => { const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_operand, .none, .none }); }, .field_parent_ptr => { const extra = a.air.extraData(Air.FieldParentPtr, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.field_ptr, .none, .none }); }, .ptr_elem_ptr, .slice_elem_ptr, .slice => { const extra = a.air.extraData(Air.Bin, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, .none }); }, .cmpxchg_strong, .cmpxchg_weak => { const extra = a.air.extraData(Air.Cmpxchg, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.ptr, extra.expected_value, extra.new_value }); }, .mul_add => { const pl_op = inst_datas[inst].pl_op; const extra = a.air.extraData(Air.Bin, pl_op.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, pl_op.operand }); }, .atomic_load => { const ptr = inst_datas[inst].atomic_load.ptr; return trackOperands(a, new_set, inst, main_tomb, .{ ptr, .none, .none }); }, .atomic_rmw => { const pl_op = inst_datas[inst].pl_op; const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.operand, .none }); }, .memset, .memcpy, .add_with_overflow, .sub_with_overflow, .mul_with_overflow, .shl_with_overflow, => { const pl_op = inst_datas[inst].pl_op; const extra = a.air.extraData(Air.Bin, pl_op.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs }); }, .br => { const br = inst_datas[inst].br; return trackOperands(a, new_set, inst, main_tomb, .{ br.operand, .none, .none }); }, .assembly => { const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload); var extra_i: usize = extra.end; const outputs = @bitCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.outputs_len]); extra_i += outputs.len; const inputs = @bitCast([]const Air.Inst.Ref, a.air.extra[extra_i..][0..extra.data.inputs_len]); extra_i += inputs.len; simple: { var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1); var buf_index: usize = 0; for (outputs) |output| { if (output != .none) { if (buf_index >= buf.len) break :simple; buf[buf_index] = output; buf_index += 1; } } if (buf_index + inputs.len > buf.len) break :simple; std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs); return trackOperands(a, new_set, inst, main_tomb, buf); } var extra_tombs: ExtraTombs = .{ .analysis = a, .new_set = new_set, .inst = inst, .main_tomb = main_tomb, }; for (outputs) |output| { if (output != .none) { try extra_tombs.feed(output); } } for (inputs) |input| { try extra_tombs.feed(input); } return 
extra_tombs.finish(); }, .block => { const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = a.air.extra[extra.end..][0..extra.data.body_len]; try analyzeWithContext(a, new_set, body); return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }); }, .loop => { const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = a.air.extra[extra.end..][0..extra.data.body_len]; try analyzeWithContext(a, new_set, body); return; // Loop has no operands and it is always unreferenced. }, .cond_br => { // Each death that occurs inside one branch, but not the other, needs // to be added as a death immediately upon entering the other branch. const inst_data = inst_datas[inst].pl_op; const condition = inst_data.operand; const extra = a.air.extraData(Air.CondBr, inst_data.payload); const then_body = a.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; var then_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; defer then_table.deinit(gpa); try analyzeWithContext(a, &then_table, then_body); // Reset the table back to its state from before the branch. { var it = then_table.keyIterator(); while (it.next()) |key| { assert(table.remove(key.*)); } } var else_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; defer else_table.deinit(gpa); try analyzeWithContext(a, &else_table, else_body); var then_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa); defer then_entry_deaths.deinit(); var else_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa); defer else_entry_deaths.deinit(); { var it = else_table.keyIterator(); while (it.next()) |key| { const else_death = key.*; if (!then_table.contains(else_death)) { try then_entry_deaths.append(else_death); } } } // This loop is the same, except it's for the then branch, and it additionally // has to put its items back into the table to undo the reset. { var it = then_table.keyIterator(); while (it.next()) |key| { const then_death = key.*; if (!else_table.contains(then_death)) { try else_entry_deaths.append(then_death); } try table.put(gpa, then_death, {}); } } // Now we have to correctly populate new_set. if (new_set) |ns| { try ns.ensureUnusedCapacity(gpa, @intCast(u32, then_table.count() + else_table.count())); var it = then_table.keyIterator(); while (it.next()) |key| { _ = ns.putAssumeCapacity(key.*, {}); } it = else_table.keyIterator(); while (it.next()) |key| { _ = ns.putAssumeCapacity(key.*, {}); } } const then_death_count = @intCast(u32, then_entry_deaths.items.len); const else_death_count = @intCast(u32, else_entry_deaths.items.len); try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(Air.CondBr).len + then_death_count + else_death_count); const extra_index = a.addExtraAssumeCapacity(CondBr{ .then_death_count = then_death_count, .else_death_count = else_death_count, }); a.extra.appendSliceAssumeCapacity(then_entry_deaths.items); a.extra.appendSliceAssumeCapacity(else_entry_deaths.items); try a.special.put(gpa, inst, extra_index); // Continue on with the instruction analysis. The following code will find the condition // instruction, and the deaths flag for the CondBr instruction will indicate whether the // condition's lifetime ends immediately before entering any branch. 
return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none }); }, .switch_br => { const pl_op = inst_datas[inst].pl_op; const condition = pl_op.operand; const switch_br = a.air.extraData(Air.SwitchBr, pl_op.payload); const Table = std.AutoHashMapUnmanaged(Air.Inst.Index, void); const case_tables = try gpa.alloc(Table, switch_br.data.cases_len + 1); // +1 for else defer gpa.free(case_tables); std.mem.set(Table, case_tables, .{}); defer for (case_tables) |*ct| ct.deinit(gpa); var air_extra_index: usize = switch_br.end; for (case_tables[0..switch_br.data.cases_len]) |*case_table| { const case = a.air.extraData(Air.SwitchBr.Case, air_extra_index); const case_body = a.air.extra[case.end + case.data.items_len ..][0..case.data.body_len]; air_extra_index = case.end + case.data.items_len + case_body.len; try analyzeWithContext(a, case_table, case_body); // Reset the table back to its state from before the case. var it = case_table.keyIterator(); while (it.next()) |key| { assert(table.remove(key.*)); } } { // else const else_table = &case_tables[case_tables.len - 1]; const else_body = a.air.extra[air_extra_index..][0..switch_br.data.else_body_len]; try analyzeWithContext(a, else_table, else_body); // Reset the table back to its state from before the case. var it = else_table.keyIterator(); while (it.next()) |key| { assert(table.remove(key.*)); } } const List = std.ArrayListUnmanaged(Air.Inst.Index); const case_deaths = try gpa.alloc(List, case_tables.len); // includes else defer gpa.free(case_deaths); std.mem.set(List, case_deaths, .{}); defer for (case_deaths) |*cd| cd.deinit(gpa); var total_deaths: u32 = 0; for (case_tables) |*ct, i| { total_deaths += ct.count(); var it = ct.keyIterator(); while (it.next()) |key| { const case_death = key.*; for (case_tables) |*ct_inner, j| { if (i == j) continue; if (!ct_inner.contains(case_death)) { // instruction is not referenced in this case try case_deaths[j].append(gpa, case_death); } } // undo resetting the table try table.put(gpa, case_death, {}); } } // Now we have to correctly populate new_set. if (new_set) |ns| { try ns.ensureUnusedCapacity(gpa, total_deaths); for (case_tables) |*ct| { var it = ct.keyIterator(); while (it.next()) |key| { _ = ns.putAssumeCapacity(key.*, {}); } } } const else_death_count = @intCast(u32, case_deaths[case_deaths.len - 1].items.len); const extra_index = try a.addExtra(SwitchBr{ .else_death_count = else_death_count, }); for (case_deaths[0 .. 
case_deaths.len - 1]) |*cd| { const case_death_count = @intCast(u32, cd.items.len); try a.extra.ensureUnusedCapacity(gpa, 1 + case_death_count + else_death_count); a.extra.appendAssumeCapacity(case_death_count); a.extra.appendSliceAssumeCapacity(cd.items); } a.extra.appendSliceAssumeCapacity(case_deaths[case_deaths.len - 1].items); try a.special.put(gpa, inst, extra_index); return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none }); }, .wasm_memory_grow => { const pl_op = inst_datas[inst].pl_op; return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, .none, .none }); }, } } fn trackOperands( a: *Analysis, new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index, main_tomb: bool, operands: [bpi - 1]Air.Inst.Ref, ) Allocator.Error!void { const table = &a.table; const gpa = a.gpa; var tomb_bits: Bpi = @boolToInt(main_tomb); var i = operands.len; while (i > 0) { i -= 1; tomb_bits <<= 1; const op_int = @enumToInt(operands[i]); if (op_int < Air.Inst.Ref.typed_value_map.len) continue; const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len); const prev = try table.fetchPut(gpa, operand, {}); if (prev == null) { // Death. tomb_bits |= 1; if (new_set) |ns| try ns.putNoClobber(gpa, operand, {}); } } a.storeTombBits(inst, tomb_bits); } const ExtraTombs = struct { analysis: *Analysis, new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index, main_tomb: bool, bit_index: usize = 0, tomb_bits: Bpi = 0, big_tomb_bits: u32 = 0, fn feed(et: *ExtraTombs, op_ref: Air.Inst.Ref) !void { const this_bit_index = et.bit_index; assert(this_bit_index < 32); // TODO mechanism for when there are greater than 32 operands et.bit_index += 1; const gpa = et.analysis.gpa; const op_int = @enumToInt(op_ref); if (op_int < Air.Inst.Ref.typed_value_map.len) return; const op_index: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len); const prev = try et.analysis.table.fetchPut(gpa, op_index, {}); if (prev == null) { // Death. if (et.new_set) |ns| try ns.putNoClobber(gpa, op_index, {}); if (this_bit_index < bpi - 1) { et.tomb_bits |= @as(Bpi, 1) << @intCast(OperandInt, this_bit_index); } else { const big_bit_index = this_bit_index - (bpi - 1); et.big_tomb_bits |= @as(u32, 1) << @intCast(u5, big_bit_index); } } } fn finish(et: *ExtraTombs) !void { et.tomb_bits |= @as(Bpi, @boolToInt(et.main_tomb)) << (bpi - 1); et.analysis.storeTombBits(et.inst, et.tomb_bits); try et.analysis.special.put(et.analysis.gpa, et.inst, et.big_tomb_bits); } };
src/Liveness.zig
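Liveness.zig above packs four "tomb" bits per AIR instruction into a flat `[]usize`; the word index and bit offset arithmetic below is the same one `getTombBits`, `isUnused`, and `operandDies` perform, wrapped in hypothetical `setTomb`/`getTomb` helpers for a self-contained sketch (the array-length formula mirrors the allocation in `analyze`):

const std = @import("std");

// Instruction `inst` lives in word `inst * bpi / @bitSizeOf(usize)` at bit
// offset `(inst % (bits_per_word / bpi)) * bpi`.
const bpi = 4;

fn setTomb(bits: []usize, inst: usize, tomb: u4) void {
    const word = (inst * bpi) / @bitSizeOf(usize);
    const shift = @intCast(std.math.Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi);
    bits[word] |= @as(usize, tomb) << shift;
}

fn getTomb(bits: []const usize, inst: usize) u4 {
    const word = (inst * bpi) / @bitSizeOf(usize);
    const shift = @intCast(std.math.Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi);
    return @truncate(u4, bits[word] >> shift);
}

test "four tomb bits per instruction" {
    const num_insts = 18;
    var bits = [_]usize{0} ** ((num_insts * bpi + @bitSizeOf(usize) - 1) / @bitSizeOf(usize));
    setTomb(&bits, 0, 0b1001); // unreferenced instruction whose first operand dies
    setTomb(&bits, 17, 0b0010); // second operand dies here
    try std.testing.expectEqual(@as(u4, 0b1001), getTomb(&bits, 0));
    try std.testing.expectEqual(@as(u4, 0b0010), getTomb(&bits, 17));
}

As the doc comment at the top of the file says, bit 3 of each 4-bit group marks an unreferenced instruction and bits 0..2 mark which of up to three operands die at it.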
const root = @import("@build"); const std = @import("std"); const builtin = @import("builtin"); const io = std.io; const fmt = std.fmt; const Builder = std.build.Builder; const mem = std.mem; const process = std.process; const ArrayList = std.ArrayList; const warn = std.debug.warn; const File = std.fs.File; pub fn main() !void { // Here we use an ArenaAllocator backed by a DirectAllocator because a build is a short-lived, // one shot program. We don't need to waste time freeing memory and finding places to squish // bytes into. So we free everything all at once at the very end. var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); defer arena.deinit(); const allocator = &arena.allocator; var args = try process.argsAlloc(allocator); defer process.argsFree(allocator, args); // skip my own exe name var arg_idx: usize = 1; const zig_exe = nextArg(args, &arg_idx) orelse { warn("Expected first argument to be path to zig compiler\n", .{}); return error.InvalidArgs; }; const build_root = nextArg(args, &arg_idx) orelse { warn("Expected second argument to be build root directory path\n", .{}); return error.InvalidArgs; }; const cache_root = nextArg(args, &arg_idx) orelse { warn("Expected third argument to be cache root directory path\n", .{}); return error.InvalidArgs; }; const builder = try Builder.create(allocator, zig_exe, build_root, cache_root); defer builder.destroy(); var targets = ArrayList([]const u8).init(allocator); const stderr_stream = io.getStdErr().outStream(); const stdout_stream = io.getStdOut().outStream(); while (nextArg(args, &arg_idx)) |arg| { if (mem.startsWith(u8, arg, "-D")) { const option_contents = arg[2..]; if (option_contents.len == 0) { warn("Expected option name after '-D'\n\n", .{}); return usageAndErr(builder, false, stderr_stream); } if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| { const option_name = option_contents[0..name_end]; const option_value = option_contents[name_end + 1 ..]; if (try builder.addUserInputOption(option_name, option_value)) return usageAndErr(builder, false, stderr_stream); } else { if (try builder.addUserInputFlag(option_contents)) return usageAndErr(builder, false, stderr_stream); } } else if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "--verbose")) { builder.verbose = true; } else if (mem.eql(u8, arg, "--help")) { return usage(builder, false, stdout_stream); } else if (mem.eql(u8, arg, "--prefix")) { builder.install_prefix = nextArg(args, &arg_idx) orelse { warn("Expected argument after --prefix\n\n", .{}); return usageAndErr(builder, false, stderr_stream); }; } else if (mem.eql(u8, arg, "--search-prefix")) { const search_prefix = nextArg(args, &arg_idx) orelse { warn("Expected argument after --search-prefix\n\n", .{}); return usageAndErr(builder, false, stderr_stream); }; builder.addSearchPrefix(search_prefix); } else if (mem.eql(u8, arg, "--override-lib-dir")) { builder.override_lib_dir = nextArg(args, &arg_idx) orelse { warn("Expected argument after --override-lib-dir\n\n", .{}); return usageAndErr(builder, false, stderr_stream); }; } else if (mem.eql(u8, arg, "--verbose-tokenize")) { builder.verbose_tokenize = true; } else if (mem.eql(u8, arg, "--verbose-ast")) { builder.verbose_ast = true; } else if (mem.eql(u8, arg, "--verbose-link")) { builder.verbose_link = true; } else if (mem.eql(u8, arg, "--verbose-ir")) { builder.verbose_ir = true; } else if (mem.eql(u8, arg, "--verbose-llvm-ir")) { builder.verbose_llvm_ir = true; } else if (mem.eql(u8, arg, "--verbose-cimport")) { builder.verbose_cimport = 
true; } else if (mem.eql(u8, arg, "--verbose-cc")) { builder.verbose_cc = true; } else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) { builder.verbose_llvm_cpu_features = true; } else if (mem.eql(u8, arg, "--")) { builder.args = argsRest(args, arg_idx); break; } else { warn("Unrecognized argument: {}\n\n", .{arg}); return usageAndErr(builder, false, stderr_stream); } } else { try targets.append(arg); } } builder.resolveInstallPrefix(); try runBuild(builder); if (builder.validateUserInputDidItFail()) return usageAndErr(builder, true, stderr_stream); builder.make(targets.span()) catch |err| { switch (err) { error.InvalidStepName => { return usageAndErr(builder, true, stderr_stream); }, error.UncleanExit => process.exit(1), else => return err, } }; } fn runBuild(builder: *Builder) anyerror!void { switch (@typeInfo(@typeInfo(@TypeOf(root.build)).Fn.return_type.?)) { .Void => root.build(builder), .ErrorUnion => try root.build(builder), else => @compileError("expected return type of build to be 'void' or '!void'"), } } fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void { // run the build script to collect the options if (!already_ran_build) { builder.setInstallPrefix(null); builder.resolveInstallPrefix(); try runBuild(builder); } try out_stream.print( \\Usage: {} build [steps] [options] \\ \\Steps: \\ , .{builder.zig_exe}); const allocator = builder.allocator; for (builder.top_level_steps.span()) |top_level_step| { const name = if (&top_level_step.step == builder.default_step) try fmt.allocPrint(allocator, "{} (default)", .{top_level_step.step.name}) else top_level_step.step.name; try out_stream.print(" {s:22} {}\n", .{ name, top_level_step.description }); } try out_stream.writeAll( \\ \\General Options: \\ --help Print this help and exit \\ --verbose Print commands before executing them \\ --prefix [path] Override default install prefix \\ --search-prefix [path] Add a path to look for binaries, libraries, headers \\ \\Project-Specific Options: \\ ); if (builder.available_options_list.items.len == 0) { try out_stream.print(" (none)\n", .{}); } else { for (builder.available_options_list.span()) |option| { const name = try fmt.allocPrint(allocator, " -D{}=[{}]", .{ option.name, Builder.typeIdName(option.type_id), }); defer allocator.free(name); try out_stream.print("{s:24} {}\n", .{ name, option.description }); } } try out_stream.writeAll( \\ \\Advanced Options: \\ --build-file [file] Override path to build.zig \\ --cache-dir [path] Override path to zig cache directory \\ --override-lib-dir [arg] Override path to Zig lib directory \\ --verbose-tokenize Enable compiler debug output for tokenization \\ --verbose-ast Enable compiler debug output for parsing into an AST \\ --verbose-link Enable compiler debug output for linking \\ --verbose-ir Enable compiler debug output for Zig IR \\ --verbose-llvm-ir Enable compiler debug output for LLVM IR \\ --verbose-cimport Enable compiler debug output for C imports \\ --verbose-cc Enable compiler debug output for C compilation \\ --verbose-llvm-cpu-features Enable compiler debug output for LLVM CPU features \\ ); } fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: anytype) void { usage(builder, already_ran_build, out_stream) catch {}; process.exit(1); } fn nextArg(args: [][]const u8, idx: *usize) ?[]const u8 { if (idx.* >= args.len) return null; defer idx.* += 1; return args[idx.*]; } fn argsRest(args: [][]const u8, idx: usize) ?[][]const u8 { if (idx >= args.len) return null; return args[idx..]; }
lib/std/special/build_runner.zig
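The build runner above recognizes project options of the form `-Dname` and `-Dname=value`, splitting on the first `=` and treating the bare form as a flag. A small standalone sketch of just that parsing step (the `UserOption` union and `parseD` name are invented for illustration; the real runner reports an empty `-D` as a usage error instead of returning null):

const std = @import("std");

// A bare `-Dname` becomes a flag; `-Dname=value` is split on the first '='.
const UserOption = union(enum) {
    flag: []const u8,
    pair: struct { name: []const u8, value: []const u8 },
};

fn parseD(arg: []const u8) ?UserOption {
    if (!std.mem.startsWith(u8, arg, "-D")) return null;
    const contents = arg[2..];
    if (contents.len == 0) return null;
    if (std.mem.indexOfScalar(u8, contents, '=')) |eq| {
        return UserOption{ .pair = .{ .name = contents[0..eq], .value = contents[eq + 1 ..] } };
    }
    return UserOption{ .flag = contents };
}

test "-D option parsing" {
    try std.testing.expectEqualStrings("fast", parseD("-Dfast").?.flag);
    const pair = parseD("-Dtarget=x86_64-linux").?.pair;
    try std.testing.expectEqualStrings("target", pair.name);
    try std.testing.expectEqualStrings("x86_64-linux", pair.value);
    try std.testing.expect(parseD("build") == null);
}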
const std = @import("std"); const fs = std.fs; const io = std.io; const info = std.log.info; const print = std.debug.print; const fmt = std.fmt; const utils = @import("utils.zig"); var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const SEEBACK: usize = 25; const sort_by = std.sort.asc(usize); pub fn main() !void { const begin = @divTrunc(std.time.nanoTimestamp(), 1000); // setup // defer _ = gpa.deinit(); var allo = &gpa.allocator; var lines: std.mem.TokenIterator = try utils.readInputLines(allo, "./input1"); defer allo.free(lines.buffer); var nums = allo.alloc(usize, 0) catch unreachable; defer allo.free(nums); while (lines.next()) |line| { nums = allo.realloc(nums, nums.len + 1) catch unreachable; nums[nums.len - 1] = fmt.parseUnsigned(usize, line, 10) catch unreachable; } std.sort.sort(usize, nums, {}, sort_by); // part 1 // var jmp_1: u32 = 0; var jmp_3: u32 = 1; // last jump always exists var prev: usize = 0; const target = for (nums) |num| { const jmp = num - prev; info("num: {} jmp: {}", .{ num, jmp }); switch (jmp) { 1 => jmp_1 += 1, 3 => jmp_3 += 1, else => unreachable, } prev = num; } else blk: { break :blk prev + 3; }; print("part1: 1_jumps: {}, 3_jumps: {} => a*b={}\n", .{ jmp_1, jmp_3, jmp_1 * jmp_3 }); // part 2 nums = allo.realloc(nums, nums.len + 1) catch unreachable; nums[nums.len - 1] = target; var p2: usize = 1; var ones_seq: usize = 1; // count first 0 too prev = 0; for (nums) |num| { // p2 const jmp = num - prev; switch (jmp) { 1 => ones_seq += 1, 3 => { info("jmp!", .{}); p2 *= switch (ones_seq) { 1 => @intCast(usize, 1), 2 => @intCast(usize, 1), 3 => @intCast(usize, 2), 4 => @intCast(usize, 4), 5 => @intCast(usize, 7), else => unreachable, }; ones_seq = 1; }, else => unreachable, } info("[{}/{}] num: {}", .{ p2, ones_seq, num }); prev = num; } print("part2: {}\n", .{p2}); const delta = @divTrunc(std.time.nanoTimestamp(), 1000) - begin; print("all done in {} microseconds\n", .{delta}); }
day_10/src/main.zig
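Part 2 above exploits the fact that 3-jolt jumps are forced, so the total number of adapter arrangements is the product of a fixed factor per maximal run of 1-jolt steps; the file's `ones_seq` counter includes the run's starting adapter, so counter values 1..5 map to factors 1, 1, 2, 4, 7. A tiny sketch of that lookup with a hypothetical `runFactor` helper:

const std = @import("std");

// A counter value of n means n - 1 consecutive 1-jolt steps; the factor is
// the number of ways to drop optional adapters inside that run.
fn runFactor(ones_seq: usize) usize {
    return switch (ones_seq) {
        1, 2 => 1,
        3 => 2,
        4 => 4,
        5 => 7,
        else => unreachable,
    };
}

test "run factors multiply across runs" {
    try std.testing.expectEqual(@as(usize, 1), runFactor(2));
    try std.testing.expectEqual(@as(usize, 2), runFactor(3));
    try std.testing.expectEqual(@as(usize, 7), runFactor(5));
    // Two independent runs, each with one droppable adapter: 2 * 2 arrangements.
    try std.testing.expectEqual(@as(usize, 4), runFactor(3) * runFactor(3));
}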
const rl = @import("raylib"); const print = @import("std").debug.print; const disco = @import("disco"); pub fn main() anyerror!void { // Initialization //-------------------------------------------------------------------------------------- const screenWidth = 1366; const screenHeight = 768; rl.InitWindow(screenWidth, screenHeight, "Raylib Experiments"); const monitor_count = rl.GetMonitorCount(); print("Monitors = {}\n", .{monitor_count}); print("Screen = {}x{}\n", .{ rl.GetScreenWidth(), rl.GetScreenHeight() }); rl.SetTargetFPS(60); // Set our game to run at 60 frames-per-second // rl.InitAudioDevice(); // const track = rl.LoadSound("sounds/exorcism.opus"); // const track = rl.LoadSound("sounds/vril.mp4"); // const track = rl.LoadSound("sounds/detroit.mp3"); // const track = rl.LoadSound("sounds/backhome.ogg"); // const track = rl.LoadSound("sounds/cherrypie.ogg"); // print("track = {}\n", .{track}); // rl.PlaySound(track); //-------------------------------------------------------------------------------------- // Lets discover some servers disco.discover(); //-------------------------------------------------------------------------------------- var mx: c_int = 0; var my: c_int = 0; // Main game loop while (!rl.WindowShouldClose()) { // Detect window close button or ESC key // Update //---------------------------------------------------------------------------------- // TODO: Update your variables here //---------------------------------------------------------------------------------- const mouse_x = rl.GetMouseX(); const mouse_y = rl.GetMouseY(); const mp = rl.GetMousePosition(); const md = rl.GetMouseDelta(); if (mouse_x != mx or mouse_y != my) { mx = mouse_x; my = mouse_y; print("mouse {}:{} {}->{}\n", .{ mx, my, mp, md }); } // Draw //---------------------------------------------------------------------------------- rl.BeginDrawing(); rl.ClearBackground(rl.WHITE); rl.DrawText("Looking for Game Servers ...", screenWidth / 4, screenHeight / 2, 48, rl.BLUE); //rl.DrawText("Start Game", screenWidth / 2 - 100, screenHeight / 2, 48, rl.BLUE); rl.DrawFPS(20, 20); rl.EndDrawing(); //---------------------------------------------------------------------------------- disco.discover(); } // De-Initialization //-------------------------------------------------------------------------------------- // rl.StopSound(track); // rl.CloseAudioDevice(); rl.CloseWindow(); // Close window and OpenGL context //-------------------------------------------------------------------------------------- }
src/client/main.zig
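The main loop above calls `disco.discover()` on every frame. If that call does blocking or network work, a simple frame-count throttle keeps it off the per-frame hot path. This is only a sketch against the same `disco` module imported above; `maybeDiscover` is a hypothetical helper, not an API the file defines.

// Hypothetical helper, shown only to illustrate throttling the per-frame call.
const disco = @import("disco");

pub fn maybeDiscover(frames_since_last: *u32, target_fps: u32) void {
    frames_since_last.* += 1;
    if (frames_since_last.* >= target_fps) { // roughly once per second at the configured FPS
        disco.discover();
        frames_since_last.* = 0;
    }
}

Inside the loop, `maybeDiscover(&frames, 60);` would then replace the unconditional call.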
const clap = @import("../clap.zig"); const std = @import("std"); const debug = std.debug; const heap = std.heap; const io = std.io; const mem = std.mem; const testing = std.testing; /// Deprecated: Use `parseEx` instead pub fn ComptimeClap( comptime Id: type, comptime params: []const clap.Param(Id), ) type { comptime var flags: usize = 0; comptime var single_options: usize = 0; comptime var multi_options: usize = 0; comptime var converted_params: []const clap.Param(usize) = &.{}; for (params) |param| { var index: usize = 0; if (param.names.long != null or param.names.short != null) { const ptr = switch (param.takes_value) { .none => &flags, .one => &single_options, .many => &multi_options, }; index = ptr.*; ptr.* += 1; } converted_params = converted_params ++ [_]clap.Param(usize){.{ .id = index, .names = param.names, .takes_value = param.takes_value, }}; } return struct { multi_options: [multi_options][]const []const u8, single_options: [single_options][]const u8, single_options_is_set: std.PackedIntArray(u1, single_options), flags: std.PackedIntArray(u1, flags), pos: []const []const u8, allocator: *mem.Allocator, pub fn parse(iter: anytype, opt: clap.ParseOptions) !@This() { const allocator = opt.allocator; var multis = [_]std.ArrayList([]const u8){undefined} ** multi_options; for (multis) |*multi| multi.* = std.ArrayList([]const u8).init(allocator); var pos = std.ArrayList([]const u8).init(allocator); var res = @This(){ .multi_options = .{undefined} ** multi_options, .single_options = .{undefined} ** single_options, .single_options_is_set = std.PackedIntArray(u1, single_options).init( .{0} ** single_options, ), .flags = std.PackedIntArray(u1, flags).init(.{0} ** flags), .pos = undefined, .allocator = allocator, }; var stream = clap.StreamingClap(usize, @typeInfo(@TypeOf(iter)).Pointer.child){ .params = converted_params, .iter = iter, .diagnostic = opt.diagnostic, }; while (try stream.next()) |arg| { const param = arg.param; if (param.names.long == null and param.names.short == null) { try pos.append(arg.value.?); } else if (param.takes_value == .one) { debug.assert(res.single_options.len != 0); if (res.single_options.len != 0) { res.single_options[param.id] = arg.value.?; res.single_options_is_set.set(param.id, 1); } } else if (param.takes_value == .many) { debug.assert(multis.len != 0); if (multis.len != 0) try multis[param.id].append(arg.value.?); } else { debug.assert(res.flags.len() != 0); if (res.flags.len() != 0) res.flags.set(param.id, 1); } } for (multis) |*multi, i| res.multi_options[i] = multi.toOwnedSlice(); res.pos = pos.toOwnedSlice(); return res; } pub fn deinit(parser: @This()) void { for (parser.multi_options) |o| parser.allocator.free(o); parser.allocator.free(parser.pos); } pub fn flag(parser: @This(), comptime name: []const u8) bool { const param = comptime findParam(name); if (param.takes_value != .none) @compileError(name ++ " is an option and not a flag."); return parser.flags.get(param.id) != 0; } pub fn option(parser: @This(), comptime name: []const u8) ?[]const u8 { const param = comptime findParam(name); if (param.takes_value == .none) @compileError(name ++ " is a flag and not an option."); if (param.takes_value == .many) @compileError(name ++ " takes many options, not one."); if (parser.single_options_is_set.get(param.id) == 0) return null; return parser.single_options[param.id]; } pub fn options(parser: @This(), comptime name: []const u8) []const []const u8 { const param = comptime findParam(name); if (param.takes_value == .none) @compileError(name ++ " is a 
flag and not an option."); if (param.takes_value == .one) @compileError(name ++ " takes one option, not multiple."); return parser.multi_options[param.id]; } pub fn positionals(parser: @This()) []const []const u8 { return parser.pos; } fn findParam(comptime name: []const u8) clap.Param(usize) { comptime { for (converted_params) |param| { if (param.names.short) |s| { if (mem.eql(u8, name, "-" ++ [_]u8{s})) return param; } if (param.names.long) |l| { if (mem.eql(u8, name, "--" ++ l)) return param; } } @compileError(name ++ " is not a parameter."); } } }; } test "" { const params = comptime &.{ clap.parseParam("-a, --aa") catch unreachable, clap.parseParam("-b, --bb") catch unreachable, clap.parseParam("-c, --cc <V>") catch unreachable, clap.parseParam("-d, --dd <V>...") catch unreachable, clap.parseParam("<P>") catch unreachable, }; var iter = clap.args.SliceIterator{ .args = &.{ "-a", "-c", "0", "something", "-d", "a", "--dd", "b", }, }; var args = try clap.parseEx(clap.Help, params, &iter, .{ .allocator = testing.allocator }); defer args.deinit(); try testing.expect(args.flag("-a")); try testing.expect(args.flag("--aa")); try testing.expect(!args.flag("-b")); try testing.expect(!args.flag("--bb")); try testing.expectEqualStrings("0", args.option("-c").?); try testing.expectEqualStrings("0", args.option("--cc").?); try testing.expectEqual(@as(usize, 1), args.positionals().len); try testing.expectEqualStrings("something", args.positionals()[0]); try testing.expectEqualSlices([]const u8, &.{ "a", "b" }, args.options("-d")); try testing.expectEqualSlices([]const u8, &.{ "a", "b" }, args.options("--dd")); } test "empty" { var iter = clap.args.SliceIterator{ .args = &.{} }; var args = try clap.parseEx(u8, &.{}, &iter, .{ .allocator = testing.allocator }); defer args.deinit(); } fn testErr( comptime params: []const clap.Param(u8), args_strings: []const []const u8, expected: []const u8, ) !void { var diag = clap.Diagnostic{}; var iter = clap.args.SliceIterator{ .args = args_strings }; _ = clap.parseEx(u8, params, &iter, .{ .allocator = testing.allocator, .diagnostic = &diag, }) catch |err| { var buf: [1024]u8 = undefined; var fbs = io.fixedBufferStream(&buf); diag.report(fbs.writer(), err) catch return error.TestFailed; try testing.expectEqualStrings(expected, fbs.getWritten()); return; }; try testing.expect(false); } test "errors" { const params = [_]clap.Param(u8){ .{ .id = 0, .names = .{ .short = 'a', .long = "aa" }, }, .{ .id = 1, .names = .{ .short = 'c', .long = "cc" }, .takes_value = .one, }, }; try testErr(&params, &.{"q"}, "Invalid argument 'q'\n"); try testErr(&params, &.{"-q"}, "Invalid argument '-q'\n"); try testErr(&params, &.{"--q"}, "Invalid argument '--q'\n"); try testErr(&params, &.{"--q=1"}, "Invalid argument '--q'\n"); try testErr(&params, &.{"-a=1"}, "The argument '-a' does not take a value\n"); try testErr(&params, &.{"--aa=1"}, "The argument '--aa' does not take a value\n"); try testErr(&params, &.{"-c"}, "The argument '-c' requires a value but none was supplied\n"); try testErr( &params, &.{"--cc"}, "The argument '--cc' requires a value but none was supplied\n", ); }
clap/comptime.zig
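The tests above already exercise the parser through `clap.parseEx` and `clap.args.SliceIterator`. For orientation, here is a minimal sketch of the same flow with hypothetical parameter names; it only uses APIs that appear in this file, and the import path is illustrative rather than the package's actual layout.

// Illustrative sketch of the parseEx flow exercised by the tests above.
const std = @import("std");
const clap = @import("clap.zig"); // adjust to however the package is exposed

pub fn demo(allocator: *std.mem.Allocator) !void {
    const params = comptime &.{
        clap.parseParam("-v, --verbose") catch unreachable,
        clap.parseParam("-n, --count <N>") catch unreachable,
    };
    var iter = clap.args.SliceIterator{ .args = &.{ "--verbose", "-n", "3" } };
    var args = try clap.parseEx(clap.Help, params, &iter, .{ .allocator = allocator });
    defer args.deinit();

    if (args.flag("--verbose")) std.debug.print("verbose on\n", .{});
    if (args.option("--count")) |n| std.debug.print("count = {s}\n", .{n});
}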
const std = @import("std"); const math = std.math; const meta = std.meta; const mem = std.mem; const expectEqual = std.testing.expectEqual; const root = @import("main.zig"); const generic_vector = @import("generic_vector.zig"); const quat = @import("quaternion.zig"); const Vec3 = generic_vector.Vec3; const Vec3_f64 = generic_vector.Vec3_f64; const GenericVector = generic_vector.GenericVector; const Quaternion = quat.Quaternion; const Quat = quat.Quat; pub const Mat4 = Mat4x4(f32); pub const Mat4_f64 = Mat4x4(f64); pub const perspective = Mat4.perspective; pub const orthographic = Mat4.orthographic; pub const lookAt = Mat4.lookAt; /// A column-major 4x4 matrix /// Note: Column-major means accessing data like m.data[COLUMN][ROW]. pub fn Mat4x4(comptime T: type) type { if (@typeInfo(T) != .Float) { @compileError("Mat4x4 not implemented for " ++ @typeName(T)); } const Vector3 = GenericVector(3, T); const Vector4 = GenericVector(4, T); return extern struct { data: [4][4]T, const Self = @This(); /// Shorthand for identity matrix. pub fn identity() Self { return .{ .data = .{ .{ 1, 0, 0, 0 }, .{ 0, 1, 0, 0 }, .{ 0, 0, 1, 0 }, .{ 0, 0, 0, 1 }, }, }; } /// Shorthand for matrix with all zeros. pub fn zero() Self { return Self.set(0); } /// Set all mat4 values to given value. pub fn set(value: T) Self { const data: [16]T = .{value} ** 16; return Self.fromSlice(&data); } /// Construct new 4x4 matrix from given slice. pub fn fromSlice(data: *const [16]T) Self { return .{ .data = .{ data[0..4].*, data[4..8].*, data[8..12].*, data[12..16].*, }, }; } /// Negate the given matrix. pub fn negate(self: Self) Self { var result = self; for (result.data) |_, column| { for (result.data[column]) |_, row| { result.data[column][row] = -result.data[column][row]; } } return result; } /// Transpose the given matrix. pub fn transpose(self: Self) Self { var result = self; for (result.data) |_, column| { var row: usize = column; while (row < 4) : (row += 1) { std.mem.swap(T, &result.data[column][row], &result.data[row][column]); } } return result; } /// Return a pointer to the inner data of the matrix. pub fn getData(self: *const Self) *const T { return @ptrCast(*const T, &self.data); } /// Return true if two matrices are equals. pub fn eql(left: Self, right: Self) bool { return meta.eql(left, right); } pub fn mulByVec4(self: Self, v: Vector4) Vector4 { const x = (self.data[0][0] * v.x()) + (self.data[1][0] * v.y()) + (self.data[2][0] * v.z()) + (self.data[3][0] * v.w()); const y = (self.data[0][1] * v.x()) + (self.data[1][1] * v.y()) + (self.data[2][1] * v.z()) + (self.data[3][1] * v.w()); const z = (self.data[0][2] * v.x()) + (self.data[1][2] * v.y()) + (self.data[2][2] * v.z()) + (self.data[3][2] * v.w()); const w = (self.data[0][3] * v.x()) + (self.data[1][3] * v.y()) + (self.data[2][3] * v.z()) + (self.data[3][3] * v.w()); return Vector4.new(x, y, z, w); } /// Construct 4x4 translation matrix by multiplying identity matrix and /// given translation vector. pub fn fromTranslate(axis: Vector3) Self { var result = Self.identity(); result.data[3][0] = axis.data[0]; result.data[3][1] = axis.data[1]; result.data[3][2] = axis.data[2]; return result; } /// Make a translation between the given matrix and the given axis. pub fn translate(self: Self, axis: Vector3) Self { const trans_mat = Self.fromTranslate(axis); return Self.mul(trans_mat, self); } /// Get translation Vec3 from current matrix. 
pub fn extractTranslation(self: Self) Vector3 { return Vector3.new(self.data[3][0], self.data[3][1], self.data[3][2]); } /// Construct a 4x4 matrix from given axis and angle (in degrees). pub fn fromRotation(angle_in_degrees: T, axis: Vector3) Self { var result = Self.identity(); const norm_axis = axis.norm(); const sin_theta = @sin(root.toRadians(angle_in_degrees)); const cos_theta = @cos(root.toRadians(angle_in_degrees)); const cos_value = 1 - cos_theta; const x = norm_axis.x(); const y = norm_axis.y(); const z = norm_axis.z(); result.data[0][0] = (x * x * cos_value) + cos_theta; result.data[0][1] = (x * y * cos_value) + (z * sin_theta); result.data[0][2] = (x * z * cos_value) - (y * sin_theta); result.data[1][0] = (y * x * cos_value) - (z * sin_theta); result.data[1][1] = (y * y * cos_value) + cos_theta; result.data[1][2] = (y * z * cos_value) + (x * sin_theta); result.data[2][0] = (z * x * cos_value) + (y * sin_theta); result.data[2][1] = (z * y * cos_value) - (x * sin_theta); result.data[2][2] = (z * z * cos_value) + cos_theta; return result; } pub fn rotate(self: Self, angle_in_degrees: T, axis: Vector3) Self { const rotation_mat = Self.fromRotation(angle_in_degrees, axis); return Self.mul(self, rotation_mat); } /// Construct a rotation matrix from euler angles (X * Y * Z). /// Order matters because matrix multiplication are NOT commutative. pub fn fromEulerAngles(euler_angle: Vector3) Self { const x = Self.fromRotation(euler_angle.x(), Vector3.right()); const y = Self.fromRotation(euler_angle.y(), Vector3.up()); const z = Self.fromRotation(euler_angle.z(), Vector3.forward()); return z.mul(y.mul(x)); } /// Ortho normalize given matrix. pub fn orthoNormalize(self: Self) Self { const column_1 = Vector3.new(self.data[0][0], self.data[0][1], self.data[0][2]).norm(); const column_2 = Vector3.new(self.data[1][0], self.data[1][1], self.data[1][2]).norm(); const column_3 = Vector3.new(self.data[2][0], self.data[2][1], self.data[2][2]).norm(); var result = self; result.data[0][0] = column_1.x(); result.data[0][1] = column_1.y(); result.data[0][2] = column_1.z(); result.data[1][0] = column_2.x(); result.data[1][1] = column_2.y(); result.data[1][2] = column_2.z(); result.data[2][0] = column_3.x(); result.data[2][1] = column_3.y(); result.data[2][2] = column_3.z(); return result; } /// Return the rotation as Euler angles in degrees. /// Taken from <NAME> at Insomniac Games (and `glm` as the same function). 
/// For more details: https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2012/07/euler-angles1.pdf pub fn extractEulerAngles(self: Self) Vector3 { const m = self.orthoNormalize(); const theta_x = math.atan2(T, m.data[1][2], m.data[2][2]); const c2 = @sqrt(math.pow(T, m.data[0][0], 2) + math.pow(T, m.data[0][1], 2)); const theta_y = math.atan2(T, -m.data[0][2], @sqrt(c2)); const s1 = @sin(theta_x); const c1 = @cos(theta_x); const theta_z = math.atan2(T, s1 * m.data[2][0] - c1 * m.data[1][0], c1 * m.data[1][1] - s1 * m.data[2][1]); return Vector3.new(root.toDegrees(theta_x), root.toDegrees(theta_y), root.toDegrees(theta_z)); } pub fn fromScale(axis: Vector3) Self { var result = Self.identity(); result.data[0][0] = axis.x(); result.data[1][1] = axis.y(); result.data[2][2] = axis.z(); return result; } pub fn scale(self: Self, axis: Vector3) Self { const scale_mat = Self.fromScale(axis); return Self.mul(scale_mat, self); } pub fn extractScale(self: Self) Vector3 { const scale_x = Vector3.new(self.data[0][0], self.data[0][1], self.data[0][2]); const scale_y = Vector3.new(self.data[1][0], self.data[1][1], self.data[1][2]); const scale_z = Vector3.new(self.data[2][0], self.data[2][1], self.data[2][2]); return Vector3.new(scale_x.length(), scale_y.length(), scale_z.length()); } /// Construct a perspective 4x4 matrix. /// Note: Field of view is given in degrees. /// Also for more details https://www.khronos.org/registry/OpenGL-Refpages/gl2.1/xhtml/gluPerspective.xml. pub fn perspective(fovy_in_degrees: T, aspect_ratio: T, z_near: T, z_far: T) Self { var result = Self.identity(); const f = 1 / @tan(root.toRadians(fovy_in_degrees) * 0.5); result.data[0][0] = f / aspect_ratio; result.data[1][1] = f; result.data[2][2] = (z_near + z_far) / (z_near - z_far); result.data[2][3] = -1; result.data[3][2] = 2 * z_far * z_near / (z_near - z_far); result.data[3][3] = 0; return result; } /// Construct an orthographic 4x4 matrix. pub fn orthographic(left: T, right: T, bottom: T, top: T, z_near: T, z_far: T) Self { var result = Self.zero(); result.data[0][0] = 2 / (right - left); result.data[1][1] = 2 / (top - bottom); result.data[2][2] = 2 / (z_near - z_far); result.data[3][3] = 1; result.data[3][0] = (left + right) / (left - right); result.data[3][1] = (bottom + top) / (bottom - top); result.data[3][2] = (z_far + z_near) / (z_near - z_far); return result; } /// Right-handed lookAt function. pub fn lookAt(eye: Vector3, target: Vector3, up: Vector3) Self { const f = Vector3.sub(target, eye).norm(); const s = Vector3.cross(f, up).norm(); const u = Vector3.cross(s, f); var result: Self = undefined; result.data[0][0] = s.x(); result.data[0][1] = u.x(); result.data[0][2] = -f.x(); result.data[0][3] = 0; result.data[1][0] = s.y(); result.data[1][1] = u.y(); result.data[1][2] = -f.y(); result.data[1][3] = 0; result.data[2][0] = s.z(); result.data[2][1] = u.z(); result.data[2][2] = -f.z(); result.data[2][3] = 0; result.data[3][0] = -Vector3.dot(s, eye); result.data[3][1] = -Vector3.dot(u, eye); result.data[3][2] = Vector3.dot(f, eye); result.data[3][3] = 1; return result; } /// Matrices' multiplication. /// Produce a new matrix from given two matrices. pub fn mul(left: Self, right: Self) Self { var result = Self.identity(); for (result.data) |_, column| { for (result.data[column]) |_, row| { var sum: T = 0; for (left.data) |_, left_column| { sum += left.data[left_column][row] * right.data[column][left_column]; } result.data[column][row] = sum; } } return result; } /// Construct inverse 4x4 from given matrix. 
/// Note: This is not the most efficient way to do this. /// TODO: Make it more efficient. pub fn inv(self: Self) Self { var inv_mat: Self = undefined; var s: [6]T = undefined; var c: [6]T = undefined; s[0] = self.data[0][0] * self.data[1][1] - self.data[1][0] * self.data[0][1]; s[1] = self.data[0][0] * self.data[1][2] - self.data[1][0] * self.data[0][2]; s[2] = self.data[0][0] * self.data[1][3] - self.data[1][0] * self.data[0][3]; s[3] = self.data[0][1] * self.data[1][2] - self.data[1][1] * self.data[0][2]; s[4] = self.data[0][1] * self.data[1][3] - self.data[1][1] * self.data[0][3]; s[5] = self.data[0][2] * self.data[1][3] - self.data[1][2] * self.data[0][3]; c[0] = self.data[2][0] * self.data[3][1] - self.data[3][0] * self.data[2][1]; c[1] = self.data[2][0] * self.data[3][2] - self.data[3][0] * self.data[2][2]; c[2] = self.data[2][0] * self.data[3][3] - self.data[3][0] * self.data[2][3]; c[3] = self.data[2][1] * self.data[3][2] - self.data[3][1] * self.data[2][2]; c[4] = self.data[2][1] * self.data[3][3] - self.data[3][1] * self.data[2][3]; c[5] = self.data[2][2] * self.data[3][3] - self.data[3][2] * self.data[2][3]; const determ = 1 / (s[0] * c[5] - s[1] * c[4] + s[2] * c[3] + s[3] * c[2] - s[4] * c[1] + s[5] * c[0]); inv_mat.data[0][0] = (self.data[1][1] * c[5] - self.data[1][2] * c[4] + self.data[1][3] * c[3]) * determ; inv_mat.data[0][1] = (-self.data[0][1] * c[5] + self.data[0][2] * c[4] - self.data[0][3] * c[3]) * determ; inv_mat.data[0][2] = (self.data[3][1] * s[5] - self.data[3][2] * s[4] + self.data[3][3] * s[3]) * determ; inv_mat.data[0][3] = (-self.data[2][1] * s[5] + self.data[2][2] * s[4] - self.data[2][3] * s[3]) * determ; inv_mat.data[1][0] = (-self.data[1][0] * c[5] + self.data[1][2] * c[2] - self.data[1][3] * c[1]) * determ; inv_mat.data[1][1] = (self.data[0][0] * c[5] - self.data[0][2] * c[2] + self.data[0][3] * c[1]) * determ; inv_mat.data[1][2] = (-self.data[3][0] * s[5] + self.data[3][2] * s[2] - self.data[3][3] * s[1]) * determ; inv_mat.data[1][3] = (self.data[2][0] * s[5] - self.data[2][2] * s[2] + self.data[2][3] * s[1]) * determ; inv_mat.data[2][0] = (self.data[1][0] * c[4] - self.data[1][1] * c[2] + self.data[1][3] * c[0]) * determ; inv_mat.data[2][1] = (-self.data[0][0] * c[4] + self.data[0][1] * c[2] - self.data[0][3] * c[0]) * determ; inv_mat.data[2][2] = (self.data[3][0] * s[4] - self.data[3][1] * s[2] + self.data[3][3] * s[0]) * determ; inv_mat.data[2][3] = (-self.data[2][0] * s[4] + self.data[2][1] * s[2] - self.data[2][3] * s[0]) * determ; inv_mat.data[3][0] = (-self.data[1][0] * c[3] + self.data[1][1] * c[1] - self.data[1][2] * c[0]) * determ; inv_mat.data[3][1] = (self.data[0][0] * c[3] - self.data[0][1] * c[1] + self.data[0][2] * c[0]) * determ; inv_mat.data[3][2] = (-self.data[3][0] * s[3] + self.data[3][1] * s[1] - self.data[3][2] * s[0]) * determ; inv_mat.data[3][3] = (self.data[2][0] * s[3] - self.data[2][1] * s[1] + self.data[2][2] * s[0]) * determ; return inv_mat; } /// Return 4x4 matrix from given all transform components; `translation`, `rotation` and `scale`. /// The final order is T * R * S. /// Note: `rotation` could be `Vec3` (Euler angles) or a `quat`. 
pub fn recompose(translation: Vector3, rotation: anytype, scalar: Vector3) Self { const t = Self.fromTranslate(translation); const s = Self.fromScale(scalar); const r = switch (@TypeOf(rotation)) { Quaternion(T) => Quaternion(T).toMat4(rotation), Vector3 => Self.fromEulerAngles(rotation), else => @compileError("Recompose not implemented for " ++ @typeName(@TypeOf(rotation))), }; return t.mul(r.mul(s)); } /// Return `translation`, `rotation` and `scale` components from given matrix. /// For now, the rotation returned is a quaternion. If you want to get Euler angles /// from it, just do: `returned_quat.extractEulerAngles()`. /// Note: We ortho nornalize the given matrix before extracting the rotation. pub fn decompose(self: Self) struct { t: Vector3, r: Quaternion(T), s: Vector3 } { const t = self.extractTranslation(); const s = self.extractScale(); const r = Quaternion(T).fromMat4(self.orthoNormalize()); return .{ .t = t, .r = r, .s = s, }; } /// Print the 4x4 to stderr. pub fn debugPrint(self: Self) void { const print = std.debug.print; print("({d}, {d}, {d}, {d})\n", .{ self.data[0][0], self.data[1][0], self.data[2][0], self.data[3][0], }); print("({d}, {d}, {d}, {d})\n", .{ self.data[0][1], self.data[1][1], self.data[2][1], self.data[3][1], }); print("({d}, {d}, {d}, {d})\n", .{ self.data[0][2], self.data[1][2], self.data[2][2], self.data[3][2], }); print("({d}, {d}, {d}, {d})\n", .{ self.data[0][3], self.data[1][3], self.data[2][3], self.data[3][3], }); } /// Cast a type to another type. /// It's like builtins: @intCast, @floatCast, @intToFloat, @floatToInt. pub fn cast(self: Self, comptime dest_type: type) Mat4x4(dest_type) { const dest_info = @typeInfo(dest_type); if (dest_info != .Float) { std.debug.panic("Error, dest type should be float.\n", .{}); } var result: Mat4x4(dest_type) = undefined; for (result.data) |_, column| { for (result.data[column]) |_, row| { result.data[column][row] = @floatCast(dest_type, self.data[column][row]); } } return result; } }; } test "zalgebra.Mat4.eql" { const a = Mat4.identity(); const b = Mat4.identity(); const c = Mat4.zero(); try expectEqual(Mat4.eql(a, b), true); try expectEqual(Mat4.eql(a, c), false); } test "zalgebra.Mat4.set" { const a = Mat4.set(12); const b = Mat4{ .data = .{ .{ 12, 12, 12, 12 }, .{ 12, 12, 12, 12 }, .{ 12, 12, 12, 12 }, .{ 12, 12, 12, 12 }, }, }; try expectEqual(a, b); } test "zalgebra.Mat4.negate" { const a = Mat4{ .data = .{ .{ 1, 2, 3, 4 }, .{ 5, -6, 7, 8 }, .{ 9, 10, 11, -12 }, .{ 13, 14, 15, 16 }, }, }; const a_negated = Mat4{ .data = .{ .{ -1, -2, -3, -4 }, .{ -5, 6, -7, -8 }, .{ -9, -10, -11, 12 }, .{ -13, -14, -15, -16 }, }, }; try expectEqual(a.negate(), a_negated); } test "zalgebra.Mat4.transpose" { const a = Mat4{ .data = .{ .{ 1, 2, 3, 4 }, .{ 5, 6, 7, 8 }, .{ 9, 10, 11, 12 }, .{ 13, 14, 15, 16 }, }, }; const b = Mat4{ .data = .{ .{ 1, 5, 9, 13 }, .{ 2, 6, 10, 14 }, .{ 3, 7, 11, 15 }, .{ 4, 8, 12, 16 }, }, }; try expectEqual(a.transpose(), b); } test "zalgebra.Mat4.fromSlice" { const data = [_]f32{ 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 }; const result = Mat4.fromSlice(&data); try expectEqual(result, Mat4.identity()); } test "zalgebra.Mat4.fromTranslate" { const a = Mat4.fromTranslate(Vec3.new(2, 3, 4)); try expectEqual(a, Mat4{ .data = .{ .{ 1, 0, 0, 0 }, .{ 0, 1, 0, 0 }, .{ 0, 0, 1, 0 }, .{ 2, 3, 4, 1 }, }, }); } test "zalgebra.Mat4.translate" { const a = Mat4.fromTranslate(Vec3.new(2, 3, 2)); const result = Mat4.translate(a, Vec3.new(2, 3, 4)); try expectEqual(result, Mat4{ .data = .{ .{ 1, 0, 0, 0 }, 
.{ 0, 1, 0, 0 }, .{ 0, 0, 1, 0 }, .{ 4, 6, 6, 1 }, }, }); } test "zalgebra.Mat4.fromScale" { const a = Mat4.fromScale(Vec3.new(2, 3, 4)); try expectEqual(a, Mat4{ .data = .{ .{ 2, 0, 0, 0 }, .{ 0, 3, 0, 0 }, .{ 0, 0, 4, 0 }, .{ 0, 0, 0, 1 }, }, }); } test "zalgebra.Mat4.scale" { const a = Mat4.fromScale(Vec3.new(2, 3, 4)); const result = Mat4.scale(a, Vec3.set(2)); try expectEqual(result, Mat4{ .data = .{ .{ 4, 0, 0, 0 }, .{ 0, 6, 0, 0 }, .{ 0, 0, 8, 0 }, .{ 0, 0, 0, 1 }, }, }); } test "zalgebra.Mat4.inv" { const a: Mat4 = .{ .data = .{ .{ 2, 0, 0, 4 }, .{ 0, 2, 0, 0 }, .{ 0, 0, 2, 0 }, .{ 4, 0, 0, 2 }, }, }; try expectEqual(a.inv(), Mat4{ .data = .{ .{ -0.1666666716337204, 0, 0, 0.3333333432674408 }, .{ 0, 0.5, 0, 0 }, .{ 0, 0, 0.5, 0 }, .{ 0.3333333432674408, 0, 0, -0.1666666716337204 }, }, }); } test "zalgebra.Mat4.extractTranslation" { var a = Mat4.fromTranslate(Vec3.new(2, 3, 2)); a = a.translate(Vec3.new(2, 3, 2)); try expectEqual(a.extractTranslation(), Vec3.new(4, 6, 4)); } test "zalgebra.Mat4.extractEulerAngles" { const a = Mat4.fromEulerAngles(Vec3.new(45, -5, 20)); try expectEqual(a.extractEulerAngles(), Vec3.new(45.000003814697266, -4.99052524, 19.999998092651367)); } test "zalgebra.Mat4.extractScale" { var a = Mat4.fromScale(Vec3.new(2, 4, 8)); a = a.scale(Vec3.new(2, 4, 8)); try expectEqual(a.extractScale(), Vec3.new(4, 16, 64)); } test "zalgebra.Mat4.recompose" { const result = Mat4.recompose( Vec3.set(2), Vec3.new(45, 5, 0), Vec3.one(), ); try expectEqual(result, Mat4{ .data = .{ .{ 0.9961947202682495, 0, -0.08715573698282242, 0 }, .{ 0.06162841245532036, 0.7071067690849304, 0.704416036605835, 0 }, .{ 0.06162841245532036, -0.7071067690849304, 0.704416036605835, 0 }, .{ 2, 2, 2, 1 }, } }); } test "zalgebra.Mat4.decompose" { const a = Mat4.recompose( Vec3.new(10, 5, 5), Vec3.new(45, 5, 0), Vec3.set(1), ); const result = a.decompose(); try expectEqual(result.t, Vec3.new(10, 5, 5)); try expectEqual(result.s, Vec3.set(1)); try expectEqual(result.r.extractEulerAngles(), Vec3.new(45, 5, 0.00000010712935250012379)); } test "zalgebra.Mat4.cast" { const a = Mat4{ .data = .{ .{ 0.9961947202682495, 0, -0.08715573698282242, 0 }, .{ 0.06162841245532036, 0.7071067690849304, 0.704416036605835, 0 }, .{ 0.06162841245532036, -0.7071067690849304, 0.704416036605835, 0 }, .{ 2, 2, 2, 1 }, } }; const a_f64 = Mat4_f64{ .data = .{ .{ 0.9961947202682495, 0, -0.08715573698282242, 0 }, .{ 0.06162841245532036, 0.7071067690849304, 0.704416036605835, 0 }, .{ 0.06162841245532036, -0.7071067690849304, 0.704416036605835, 0 }, .{ 2, 2, 2, 1 }, } }; try expectEqual(a.cast(f64), a_f64); try expectEqual(a_f64.cast(f32), a); }
src/mat4.zig
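A short usage sketch for the matrix type above: composing a model-view-projection matrix from `perspective`, `lookAt`, and the translate/rotate/scale helpers. The `za` root import is an assumption about how the package re-exports `Mat4` and `Vec3`; everything else comes from this file or its `generic_vector` sibling.

// Illustrative usage sketch, not part of the library.
const za = @import("main.zig"); // hypothetical root that re-exports Mat4 and Vec3

pub fn buildMvp(aspect_ratio: f32) za.Mat4 {
    const projection = za.Mat4.perspective(45.0, aspect_ratio, 0.1, 100.0);
    const view = za.Mat4.lookAt(
        za.Vec3.new(0, 0, 3), // eye
        za.Vec3.new(0, 0, 0), // target
        za.Vec3.new(0, 1, 0), // up
    );
    const model = za.Mat4.identity()
        .translate(za.Vec3.new(0.5, 0, 0))
        .rotate(45.0, za.Vec3.new(0, 1, 0))
        .scale(za.Vec3.set(2));
    // matrices are column-major; the projection is applied last
    return projection.mul(view.mul(model));
}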
pub const ED_BASE = @as(i32, 4096); pub const DEV_PORT_SIM = @as(u32, 1); pub const DEV_PORT_COM1 = @as(u32, 2); pub const DEV_PORT_COM2 = @as(u32, 3); pub const DEV_PORT_COM3 = @as(u32, 4); pub const DEV_PORT_COM4 = @as(u32, 5); pub const DEV_PORT_DIAQ = @as(u32, 6); pub const DEV_PORT_ARTI = @as(u32, 7); pub const DEV_PORT_1394 = @as(u32, 8); pub const DEV_PORT_USB = @as(u32, 9); pub const DEV_PORT_MIN = @as(u32, 1); pub const DEV_PORT_MAX = @as(u32, 9); pub const ED_TOP = @as(u32, 1); pub const ED_MIDDLE = @as(u32, 2); pub const ED_BOTTOM = @as(u32, 4); pub const ED_LEFT = @as(u32, 256); pub const ED_CENTER = @as(u32, 512); pub const ED_RIGHT = @as(u32, 1024); pub const ED_AUDIO_ALL = @as(u32, 268435456); pub const ED_AUDIO_1 = @as(i32, 1); pub const ED_AUDIO_2 = @as(i32, 2); pub const ED_AUDIO_3 = @as(i32, 4); pub const ED_AUDIO_4 = @as(i32, 8); pub const ED_AUDIO_5 = @as(i32, 16); pub const ED_AUDIO_6 = @as(i32, 32); pub const ED_AUDIO_7 = @as(i32, 64); pub const ED_AUDIO_8 = @as(i32, 128); pub const ED_AUDIO_9 = @as(i32, 256); pub const ED_AUDIO_10 = @as(i32, 512); pub const ED_AUDIO_11 = @as(i32, 1024); pub const ED_AUDIO_12 = @as(i32, 2048); pub const ED_AUDIO_13 = @as(i32, 4096); pub const ED_AUDIO_14 = @as(i32, 8192); pub const ED_AUDIO_15 = @as(i32, 16384); pub const ED_AUDIO_16 = @as(i32, 32768); pub const ED_AUDIO_17 = @as(i32, 65536); pub const ED_AUDIO_18 = @as(i32, 131072); pub const ED_AUDIO_19 = @as(i32, 262144); pub const ED_AUDIO_20 = @as(i32, 524288); pub const ED_AUDIO_21 = @as(i32, 1048576); pub const ED_AUDIO_22 = @as(i32, 2097152); pub const ED_AUDIO_23 = @as(i32, 4194304); pub const ED_AUDIO_24 = @as(i32, 8388608); pub const ED_VIDEO = @as(i32, 33554432); pub const CLSID_DeviceIoControl = Guid.initString("12d3e372-874b-457d-9fdf-73977778686c"); //-------------------------------------------------------------------------------- // Section: Types (3) //-------------------------------------------------------------------------------- const IID_IDeviceRequestCompletionCallback_Value = @import("../zig.zig").Guid.initString("999bad24-9acd-45bb-8669-2a2fc0288b04"); pub const IID_IDeviceRequestCompletionCallback = &IID_IDeviceRequestCompletionCallback_Value; pub const IDeviceRequestCompletionCallback = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Invoke: fn( self: *const IDeviceRequestCompletionCallback, requestResult: HRESULT, bytesReturned: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDeviceRequestCompletionCallback_Invoke(self: *const T, requestResult: HRESULT, bytesReturned: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDeviceRequestCompletionCallback.VTable, self.vtable).Invoke(@ptrCast(*const IDeviceRequestCompletionCallback, self), requestResult, bytesReturned); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IDeviceIoControl_Value = @import("../zig.zig").Guid.initString("9eefe161-23ab-4f18-9b49-991b586ae970"); pub const IID_IDeviceIoControl = &IID_IDeviceIoControl_Value; pub const IDeviceIoControl = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, DeviceIoControlSync: fn( self: *const IDeviceIoControl, ioControlCode: u32, inputBuffer: ?[*:0]u8, inputBufferSize: u32, outputBuffer: ?[*:0]u8, outputBufferSize: u32, bytesReturned: ?*u32, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, DeviceIoControlAsync: fn( self: *const IDeviceIoControl, ioControlCode: u32, inputBuffer: ?[*:0]u8, inputBufferSize: u32, outputBuffer: ?[*:0]u8, outputBufferSize: u32, requestCompletionCallback: ?*IDeviceRequestCompletionCallback, cancelContext: ?*usize, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CancelOperation: fn( self: *const IDeviceIoControl, cancelContext: usize, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDeviceIoControl_DeviceIoControlSync(self: *const T, ioControlCode: u32, inputBuffer: ?[*:0]u8, inputBufferSize: u32, outputBuffer: ?[*:0]u8, outputBufferSize: u32, bytesReturned: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IDeviceIoControl.VTable, self.vtable).DeviceIoControlSync(@ptrCast(*const IDeviceIoControl, self), ioControlCode, inputBuffer, inputBufferSize, outputBuffer, outputBufferSize, bytesReturned); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDeviceIoControl_DeviceIoControlAsync(self: *const T, ioControlCode: u32, inputBuffer: ?[*:0]u8, inputBufferSize: u32, outputBuffer: ?[*:0]u8, outputBufferSize: u32, requestCompletionCallback: ?*IDeviceRequestCompletionCallback, cancelContext: ?*usize) callconv(.Inline) HRESULT { return @ptrCast(*const IDeviceIoControl.VTable, self.vtable).DeviceIoControlAsync(@ptrCast(*const IDeviceIoControl, self), ioControlCode, inputBuffer, inputBufferSize, outputBuffer, outputBufferSize, requestCompletionCallback, cancelContext); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDeviceIoControl_CancelOperation(self: *const T, cancelContext: usize) callconv(.Inline) HRESULT { return @ptrCast(*const IDeviceIoControl.VTable, self.vtable).CancelOperation(@ptrCast(*const IDeviceIoControl, self), cancelContext); } };} pub usingnamespace MethodMixin(@This()); }; const IID_ICreateDeviceAccessAsync_Value = @import("../zig.zig").Guid.initString("3474628f-683d-42d2-abcb-db018c6503bc"); pub const IID_ICreateDeviceAccessAsync = &IID_ICreateDeviceAccessAsync_Value; pub const ICreateDeviceAccessAsync = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Cancel: fn( self: *const ICreateDeviceAccessAsync, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Wait: fn( self: *const ICreateDeviceAccessAsync, timeout: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Close: fn( self: *const ICreateDeviceAccessAsync, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetResult: fn( self: *const ICreateDeviceAccessAsync, riid: ?*const Guid, deviceAccess: ?*?*anyopaque, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ICreateDeviceAccessAsync_Cancel(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const ICreateDeviceAccessAsync.VTable, self.vtable).Cancel(@ptrCast(*const ICreateDeviceAccessAsync, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ICreateDeviceAccessAsync_Wait(self: *const T, timeout: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ICreateDeviceAccessAsync.VTable, 
self.vtable).Wait(@ptrCast(*const ICreateDeviceAccessAsync, self), timeout); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ICreateDeviceAccessAsync_Close(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const ICreateDeviceAccessAsync.VTable, self.vtable).Close(@ptrCast(*const ICreateDeviceAccessAsync, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ICreateDeviceAccessAsync_GetResult(self: *const T, riid: ?*const Guid, deviceAccess: ?*?*anyopaque) callconv(.Inline) HRESULT { return @ptrCast(*const ICreateDeviceAccessAsync.VTable, self.vtable).GetResult(@ptrCast(*const ICreateDeviceAccessAsync, self), riid, deviceAccess); } };} pub usingnamespace MethodMixin(@This()); }; //-------------------------------------------------------------------------------- // Section: Functions (1) //-------------------------------------------------------------------------------- pub extern "deviceaccess" fn CreateDeviceAccessInstance( deviceInterfacePath: ?[*:0]const u16, desiredAccess: u32, createAsync: ?*?*ICreateDeviceAccessAsync, ) callconv(@import("std").os.windows.WINAPI) HRESULT; //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (4) //-------------------------------------------------------------------------------- const Guid = @import("../zig.zig").Guid; const HRESULT = @import("../foundation.zig").HRESULT; const IUnknown = @import("../system/com.zig").IUnknown; const PWSTR = @import("../foundation.zig").PWSTR; test { @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
win32/devices/device_access.zig
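A hedged usage sketch for the projected interfaces above: opening a device for I/O control via `CreateDeviceAccessInstance`, waiting on the async object, and querying `IDeviceIoControl`. The error names, timeout value, and import path are placeholders; only functions and constants declared in this file are assumed.

// Illustrative only; not part of the generated bindings.
const device_access = @import("win32/devices/device_access.zig"); // illustrative import path

pub fn openDeviceIoControl(
    device_path: [*:0]const u16,
    desired_access: u32,
) !*device_access.IDeviceIoControl {
    var async_op: ?*device_access.ICreateDeviceAccessAsync = null;
    if (device_access.CreateDeviceAccessInstance(device_path, desired_access, &async_op) < 0 or async_op == null)
        return error.CreateDeviceAccessFailed;
    const op = async_op.?;
    defer _ = op.ICreateDeviceAccessAsync_Close();

    if (op.ICreateDeviceAccessAsync_Wait(0xFFFF_FFFF) < 0) // wait "forever"; a real caller may want a bound
        return error.DeviceAccessWaitFailed;

    var raw: ?*anyopaque = null;
    if (op.ICreateDeviceAccessAsync_GetResult(device_access.IID_IDeviceIoControl, &raw) < 0 or raw == null)
        return error.DeviceAccessGetResultFailed;
    return @ptrCast(*device_access.IDeviceIoControl, @alignCast(@alignOf(device_access.IDeviceIoControl), raw.?));
}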
const std = @import("../std.zig"); const meta = std.meta; const testing = std.testing; const mem = std.mem; const assert = std.debug.assert; const Type = std.builtin.Type; /// This is useful for saving memory when allocating an object that has many /// optional components. The optional objects are allocated sequentially in /// memory, and a single integer is used to represent each optional object /// and whether it is present based on each corresponding bit. pub fn TrailerFlags(comptime Fields: type) type { return struct { bits: Int, pub const Int = meta.Int(.unsigned, bit_count); pub const bit_count = @typeInfo(Fields).Struct.fields.len; pub const FieldEnum = std.meta.FieldEnum(Fields); pub const ActiveFields = std.enums.EnumFieldStruct(FieldEnum, bool, false); pub const FieldValues = blk: { comptime var fields: [bit_count]Type.StructField = undefined; inline for (@typeInfo(Fields).Struct.fields) |struct_field, i| { fields[i] = Type.StructField{ .name = struct_field.name, .field_type = ?struct_field.field_type, .default_value = &@as(?struct_field.field_type, null), .is_comptime = false, .alignment = @alignOf(?struct_field.field_type), }; } break :blk @Type(.{ .Struct = .{ .layout = .Auto, .fields = &fields, .decls = &.{}, .is_tuple = false, }, }); }; pub const Self = @This(); pub fn has(self: Self, comptime field: FieldEnum) bool { const field_index = @enumToInt(field); return (self.bits & (1 << field_index)) != 0; } pub fn get(self: Self, p: [*]align(@alignOf(Fields)) const u8, comptime field: FieldEnum) ?Field(field) { if (!self.has(field)) return null; return self.ptrConst(p, field).*; } pub fn setFlag(self: *Self, comptime field: FieldEnum) void { const field_index = @enumToInt(field); self.bits |= 1 << field_index; } /// `fields` is a boolean struct where each active field is set to `true` pub fn init(fields: ActiveFields) Self { var self: Self = .{ .bits = 0 }; inline for (@typeInfo(Fields).Struct.fields) |field, i| { if (@field(fields, field.name)) self.bits |= 1 << i; } return self; } /// `fields` is a struct with each field set to an optional value pub fn setMany(self: Self, p: [*]align(@alignOf(Fields)) u8, fields: FieldValues) void { inline for (@typeInfo(Fields).Struct.fields) |field, i| { if (@field(fields, field.name)) |value| self.set(p, @intToEnum(FieldEnum, i), value); } } pub fn set( self: Self, p: [*]align(@alignOf(Fields)) u8, comptime field: FieldEnum, value: Field(field), ) void { self.ptr(p, field).* = value; } pub fn ptr(self: Self, p: [*]align(@alignOf(Fields)) u8, comptime field: FieldEnum) *Field(field) { if (@sizeOf(Field(field)) == 0) return undefined; const off = self.offset(field); return @ptrCast(*Field(field), @alignCast(@alignOf(Field(field)), p + off)); } pub fn ptrConst(self: Self, p: [*]align(@alignOf(Fields)) const u8, comptime field: FieldEnum) *const Field(field) { if (@sizeOf(Field(field)) == 0) return undefined; const off = self.offset(field); return @ptrCast(*const Field(field), @alignCast(@alignOf(Field(field)), p + off)); } pub fn offset(self: Self, comptime field: FieldEnum) usize { var off: usize = 0; inline for (@typeInfo(Fields).Struct.fields) |field_info, i| { const active = (self.bits & (1 << i)) != 0; if (i == @enumToInt(field)) { assert(active); return mem.alignForwardGeneric(usize, off, @alignOf(field_info.field_type)); } else if (active) { off = mem.alignForwardGeneric(usize, off, @alignOf(field_info.field_type)); off += @sizeOf(field_info.field_type); } } } pub fn Field(comptime field: FieldEnum) type { return 
@typeInfo(Fields).Struct.fields[@enumToInt(field)].field_type; } pub fn sizeInBytes(self: Self) usize { var off: usize = 0; inline for (@typeInfo(Fields).Struct.fields) |field, i| { if (@sizeOf(field.field_type) == 0) continue; if ((self.bits & (1 << i)) != 0) { off = mem.alignForwardGeneric(usize, off, @alignOf(field.field_type)); off += @sizeOf(field.field_type); } } return off; } }; } test "TrailerFlags" { const Flags = TrailerFlags(struct { a: i32, b: bool, c: u64, }); try testing.expectEqual(u2, meta.Tag(Flags.FieldEnum)); var flags = Flags.init(.{ .b = true, .c = true, }); const slice = try testing.allocator.allocAdvanced(u8, 8, flags.sizeInBytes(), .exact); defer testing.allocator.free(slice); flags.set(slice.ptr, .b, false); flags.set(slice.ptr, .c, 12345678); try testing.expect(flags.get(slice.ptr, .a) == null); try testing.expect(!flags.get(slice.ptr, .b).?); try testing.expect(flags.get(slice.ptr, .c).? == 12345678); flags.setMany(slice.ptr, .{ .b = true, .c = 5678, }); try testing.expect(flags.get(slice.ptr, .a) == null); try testing.expect(flags.get(slice.ptr, .b).?); try testing.expect(flags.get(slice.ptr, .c).? == 5678); }
lib/std/meta/trailer_flags.zig
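The test above shows the raw pointer interface. As a hedged sketch of the layout pattern this type supports, here is a header-plus-trailer size computation; `Header` and `Extras` are illustrative names, and the sketch assumes `TrailerFlags` is re-exported as `std.meta.TrailerFlags`, as in this std tree.

// Illustrative sketch: a fixed header followed by only the optional fields that are present.
const std = @import("std");
const TrailerFlags = std.meta.TrailerFlags;

const Extras = struct { line: u32, column: u32, note: []const u8 };
const Header = struct { flags: TrailerFlags(Extras), len: u32 };

/// Bytes needed for a Header followed by the active optional fields.
fn totalSize(flags: TrailerFlags(Extras)) usize {
    return std.mem.alignForward(@sizeOf(Header), @alignOf(Extras)) + flags.sizeInBytes();
}

test "illustrative trailer layout" {
    const flags = TrailerFlags(Extras).init(.{ .line = true, .note = true });
    try std.testing.expect(totalSize(flags) >= @sizeOf(Header));
}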
const std = @import("std"); const abort = std.os.abort; const assert = std.debug.assert; const expect = std.testing.expect; // defined in C as: // typedef unsigned int gcc_word __attribute__((mode(word))); const gcc_word = usize; comptime { assert(std.builtin.link_libc); } /// public entrypoint for generated code using EmulatedTLS pub fn __emutls_get_address(control: *emutls_control) callconv(.C) *c_void { return control.getPointer(); } /// Simple allocator interface, to avoid pulling in the while /// std allocator implementation. const simple_allocator = struct { /// Allocate a memory chunk for requested type. Return a pointer on the data. pub fn alloc(comptime T: type) *T { return @ptrCast(*T, @alignCast( @alignOf(T), advancedAlloc(@alignOf(T), @sizeOf(T)), )); } /// Allocate a slice of T, with len elements. pub fn allocSlice(comptime T: type, len: usize) []T { return @ptrCast([*]T, @alignCast( @alignOf(T), advancedAlloc(@alignOf(T), @sizeOf(T) * len), ))[0 .. len - 1]; } /// Allocate a memory chunk. pub fn advancedAlloc(alignment: u29, size: usize) [*]u8 { const minimal_alignment = std.math.max(@alignOf(usize), alignment); var aligned_ptr: ?*c_void = undefined; if (std.c.posix_memalign(&aligned_ptr, minimal_alignment, size) != 0) { abort(); } return @ptrCast([*]u8, aligned_ptr); } /// Resize a slice. pub fn reallocSlice(comptime T: type, slice: []T, len: usize) []T { var c_ptr: *c_void = @ptrCast(*c_void, slice.ptr); var new_array: [*]T = @ptrCast([*]T, @alignCast( @alignOf(T), std.c.realloc(c_ptr, @sizeOf(T) * len) orelse abort(), )); return new_array[0..len]; } /// Free a memory chunk allocated with simple_allocator. pub fn free(ptr: anytype) void { std.c.free(@ptrCast(*c_void, ptr)); } }; /// Simple array of ?ObjectPointer with automatic resizing and /// automatic storage allocation. const ObjectArray = struct { const ObjectPointer = *c_void; // content of the array slots: []?ObjectPointer, /// create a new ObjectArray with n slots. must call deinit() to deallocate. pub fn init(n: usize) *ObjectArray { var array = simple_allocator.alloc(ObjectArray); errdefer simple_allocator.free(array); array.* = ObjectArray{ .slots = simple_allocator.allocSlice(?ObjectPointer, n), }; errdefer simple_allocator.free(array.slots); for (array.slots) |*object| { object.* = null; } return array; } /// deallocate the ObjectArray. pub fn deinit(self: *ObjectArray) void { // deallocated used objects in the array for (self.slots) |*object| { simple_allocator.free(object.*); } simple_allocator.free(self.slots); simple_allocator.free(self); } /// resize the ObjectArray if needed. pub fn ensureLength(self: *ObjectArray, new_len: usize) *ObjectArray { const old_len = self.slots.len; if (old_len > new_len) { return self; } // reallocate self.slots = simple_allocator.reallocSlice(?ObjectPointer, self.slots, new_len); // init newly added slots for (self.slots[old_len..]) |*object| { object.* = null; } return self; } /// Retrieve the pointer at request index, using control to initialize it if needed. pub fn getPointer(self: *ObjectArray, index: usize, control: *emutls_control) ObjectPointer { if (self.slots[index] == null) { // initialize the slot const size = control.size; const alignment = @truncate(u29, control.alignment); var data = simple_allocator.advancedAlloc(alignment, size); errdefer simple_allocator.free(data); if (control.default_value) |value| { // default value: copy the content to newly allocated object. @memcpy(data, @ptrCast([*]u8, value), size); } else { // no default: return zeroed memory. 
@memset(data, 0, size); } self.slots[index] = @ptrCast(*c_void, data); } return self.slots[index].?; } }; // Global stucture for Thread Storage. // It provides thread-safety for on-demand storage of Thread Objects. const current_thread_storage = struct { var key: std.c.pthread_key_t = undefined; var init_once = std.once(current_thread_storage.init); /// Return a per thread ObjectArray with at least the expected index. pub fn getArray(index: usize) *ObjectArray { if (current_thread_storage.getspecific()) |array| { // we already have a specific. just ensure the array is // big enough for the wanted index. return array.ensureLength(index); } // no specific. we need to create a new array. // make it to contains at least 16 objects (to avoid too much // reallocation at startup). const size = std.math.max(16, index); // create a new array and store it. var array: *ObjectArray = ObjectArray.init(size); current_thread_storage.setspecific(array); return array; } /// Return casted thread specific value. fn getspecific() ?*ObjectArray { return @ptrCast( ?*ObjectArray, @alignCast( @alignOf(ObjectArray), std.c.pthread_getspecific(current_thread_storage.key), ), ); } /// Set casted thread specific value. fn setspecific(new: ?*ObjectArray) void { if (std.c.pthread_setspecific(current_thread_storage.key, @ptrCast(*c_void, new)) != 0) { abort(); } } /// Initialize pthread_key_t. fn init() void { if (std.c.pthread_key_create(&current_thread_storage.key, current_thread_storage.deinit) != 0) { abort(); } } /// Invoked by pthread specific destructor. the passed argument is the ObjectArray pointer. fn deinit(arrayPtr: *c_void) callconv(.C) void { var array = @ptrCast( *ObjectArray, @alignCast(@alignOf(ObjectArray), arrayPtr), ); array.deinit(); } }; const emutls_control = extern struct { // A emutls_control value is a global value across all // threads. The threads shares the index of TLS variable. The data // array (containing address of allocated variables) is thread // specific and stored using pthread_setspecific(). // size of the object in bytes size: gcc_word, // alignment of the object in bytes alignment: gcc_word, object: extern union { // data[index-1] is the object address / 0 = uninit index: usize, // object address, when in single thread env (not used) address: *c_void, }, // null or non-zero initial value for the object default_value: ?*c_void, // global Mutex used to serialize control.index initialization. var mutex: std.c.pthread_mutex_t = std.c.PTHREAD_MUTEX_INITIALIZER; // global counter for keeping track of requested indexes. // access should be done with mutex held. var next_index: usize = 1; /// Simple wrapper for global lock. fn lock() void { if (std.c.pthread_mutex_lock(&emutls_control.mutex) != 0) { abort(); } } /// Simple wrapper for global unlock. fn unlock() void { if (std.c.pthread_mutex_unlock(&emutls_control.mutex) != 0) { abort(); } } /// Helper to retrieve nad initialize global unique index per emutls variable. pub fn getIndex(self: *emutls_control) usize { // Two threads could race against the same emutls_control. // Use atomic for reading coherent value lockless. const index_lockless = @atomicLoad(usize, &self.object.index, .Acquire); if (index_lockless != 0) { // index is already initialized, return it. return index_lockless; } // index is uninitialized: take global lock to avoid possible race. 
emutls_control.lock(); defer emutls_control.unlock(); const index_locked = self.object.index; if (index_locked != 0) { // we lost a race, but index is already initialized: nothing particular to do. return index_locked; } // Store a new index atomically (for having coherent index_lockless reading). @atomicStore(usize, &self.object.index, emutls_control.next_index, .Release); // Increment the next available index emutls_control.next_index += 1; return self.object.index; } /// Simple helper for testing purpose. pub fn init(comptime T: type, default_value: ?*T) emutls_control { return emutls_control{ .size = @sizeOf(T), .alignment = @alignOf(T), .object = .{ .index = 0 }, .default_value = @ptrCast(?*c_void, default_value), }; } /// Get the pointer on allocated storage for emutls variable. pub fn getPointer(self: *emutls_control) *c_void { // ensure current_thread_storage initialization is done current_thread_storage.init_once.call(); const index = self.getIndex(); var array = current_thread_storage.getArray(index); return array.getPointer(index - 1, self); } /// Testing helper for retrieving typed pointer. pub fn get_typed_pointer(self: *emutls_control, comptime T: type) *T { assert(self.size == @sizeOf(T)); assert(self.alignment == @alignOf(T)); return @ptrCast( *T, @alignCast(@alignOf(T), self.getPointer()), ); } }; test "simple_allocator" { var data1: *[64]u8 = simple_allocator.alloc([64]u8); defer simple_allocator.free(data1); for (data1) |*c| { c.* = 0xff; } var data2: [*]u8 = simple_allocator.advancedAlloc(@alignOf(u8), 64); defer simple_allocator.free(data2); for (data2[0..63]) |*c| { c.* = 0xff; } } test "__emutls_get_address zeroed" { var ctl = emutls_control.init(usize, null); try expect(ctl.object.index == 0); // retrieve a variable from ctl var x = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl))); try expect(ctl.object.index != 0); // index has been allocated for this ctl try expect(x.* == 0); // storage has been zeroed // modify the storage x.* = 1234; // retrieve a variable from ctl (same ctl) var y = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl))); try expect(y.* == 1234); // same content that x.* try expect(x == y); // same pointer } test "__emutls_get_address with default_value" { var value: usize = 5678; // default value var ctl = emutls_control.init(usize, &value); try expect(ctl.object.index == 0); var x: *usize = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl))); try expect(ctl.object.index != 0); try expect(x.* == 5678); // storage initialized with default value // modify the storage x.* = 9012; try expect(value == 5678); // the default value didn't change var y = @ptrCast(*usize, @alignCast(@alignOf(usize), __emutls_get_address(&ctl))); try expect(y.* == 9012); // the modified storage persists } test "test default_value with differents sizes" { const testType = struct { fn _testType(comptime T: type, value: T) !void { var def: T = value; var ctl = emutls_control.init(T, &def); var x = ctl.get_typed_pointer(T); try expect(x.* == value); } }._testType; try testType(usize, 1234); try testType(u32, 1234); try testType(i16, -12); try testType(f64, -12.0); try testType( @TypeOf("012345678901234567890123456789"), "012345678901234567890123456789", ); }
lib/std/special/compiler_rt/emutls.zig
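For context, a hedged sketch of what this runtime backs: with emulated TLS enabled, a C declaration such as `__thread int counter = 5;` gets one control structure, and every access to the variable goes through `__emutls_get_address`. The sketch is written as if it sat next to the tests above, so it reuses `emutls_control`, `expect`, and `get_typed_pointer` from this file; the variable names are illustrative.

test "illustrative: the C pattern `__thread int counter = 5;`" {
    var default_counter: i32 = 5;
    var counter_control = emutls_control.init(i32, &default_counter);

    // Every access goes through __emutls_get_address; get_typed_pointer is the
    // typed convenience wrapper defined above.
    var c = counter_control.get_typed_pointer(i32);
    try expect(c.* == 5); // default value was copied into this thread's storage
    c.* += 1;
    try expect(counter_control.get_typed_pointer(i32).* == 6); // same per-thread storage
    try expect(default_counter == 5); // the template value itself is untouched
}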
const std = @import("std"); const assert = std.debug.assert; const vk = @import("vk"); const vma_config = @import("vma_config.zig"); pub const config = if (std.builtin.mode == .Debug) vma_config.debugConfig else vma_config.releaseConfig; // callbacks use vk.CallConv, but the vma functions may not. pub const CallConv = .C; /// \struct Allocator /// \brief Represents main object of this library initialized. /// /// Fill structure #AllocatorCreateInfo and call function create() to create it. /// Call function destroy() to destroy it. /// /// It is recommended to create just one object of this type per `Device` object, /// right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed. pub const Allocator = extern enum(usize) { Null = 0, _, /// Creates Allocator object. pub fn create(createInfo: AllocatorCreateInfo) !Allocator { var result: Allocator = undefined; const rc = vmaCreateAllocator(&createInfo, &result); if (@enumToInt(rc) >= 0) return result; return error.VMACreateFailed; } /// Destroys allocator object. /// fn (Allocator) void pub const destroy = vmaDestroyAllocator; /// PhysicalDeviceProperties are fetched from physicalDevice by the allocator. /// You can access it here, without fetching it again on your own. pub fn getPhysicalDeviceProperties(allocator: Allocator) *const vk.PhysicalDeviceProperties { var properties: *const vk.PhysicalDeviceProperties = undefined; vmaGetPhysicalDeviceProperties(allocator, &properties); return properties; } /// PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator. /// You can access it here, without fetching it again on your own. pub fn getMemoryProperties(allocator: Allocator) *const vk.PhysicalDeviceMemoryProperties { var properties: *const vk.PhysicalDeviceMemoryProperties = undefined; vmaGetMemoryProperties(allocator, &properties); return properties; } /// \brief Given Memory Type Index, returns Property Flags of this memory type. /// /// This is just a convenience function. Same information can be obtained using /// GetMemoryProperties(). pub fn getMemoryTypeProperties(allocator: Allocator, memoryTypeIndex: u32) vk.MemoryPropertyFlags { var flags: vk.MemoryPropertyFlags align(4) = undefined; vmaGetMemoryTypeProperties(allocator, memoryTypeIndex, &flags); return flags; } /// \brief Sets index of the current frame. /// /// This function must be used if you make allocations with /// #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and /// #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flags to inform the allocator /// when a new frame begins. Allocations queried using GetAllocationInfo() cannot /// become lost in the current frame. /// fn setCurrentFrameIndex(self: Allocator, frameIndex: u32) void pub const setCurrentFrameIndex = vmaSetCurrentFrameIndex; /// \brief Retrieves statistics from current state of the Allocator. /// /// This function is called "calculate" not "get" because it has to traverse all /// internal data structures, so it may be quite slow. For faster but more brief statistics /// suitable to be called every frame or every allocation, use GetBudget(). /// /// Note that when using allocator from multiple threads, returned information may immediately /// become outdated. pub fn calculateStats(allocator: Allocator) Stats { var stats: Stats = undefined; vmaCalculateStats(allocator, &stats); return stats; } /// \brief Retrieves information about current memory budget for all memory heaps. 
/// /// \param[out] pBudget Must point to array with number of elements at least equal to number of memory heaps in physical device used. /// /// This function is called "get" not "calculate" because it is very fast, suitable to be called /// every frame or every allocation. For more detailed statistics use CalculateStats(). /// /// Note that when using allocator from multiple threads, returned information may immediately /// become outdated. pub fn getBudget(allocator: Allocator) Budget { var budget: Budget = undefined; vmaGetBudget(allocator, &budget); return budget; } // pub usingnamespace if (config.statsStringEnabled) // struct { // /// Builds and returns statistics as string in JSON format. // /// @param[out] ppStatsString Must be freed using FreeStatsString() function. // pub fn buildStatsString(allocator: Allocator, detailedMap: bool) [*:0]u8 { // var string: [*:0]u8 = undefined; // vmaBuildStatsString(allocator, &string, @boolToInt(detailedMap)); // return string; // } // pub const freeStatsString = vmaFreeStatsString; // } // else // struct {}; /// \brief Helps to find memoryTypeIndex, given memoryTypeBits and AllocationCreateInfo. /// /// This algorithm tries to find a memory type that: /// /// - Is allowed by memoryTypeBits. /// - Contains all the flags from pAllocationCreateInfo->requiredFlags. /// - Matches intended usage. /// - Has as many flags from pAllocationCreateInfo->preferredFlags as possible. /// /// \return Returns error.VK_FEATURE_NOT_PRESENT if not found. Receiving such result /// from this function or any other allocating function probably means that your /// device doesn't support any memory type with requested features for the specific /// type of resource you want to use it for. Please check parameters of your /// resource, like image layout (OPTIMAL versus LINEAR) or mip level count. pub fn findMemoryTypeIndex(allocator: Allocator, memoryTypeBits: u32, allocationCreateInfo: AllocationCreateInfo) !u32 { var index: u32 = undefined; const rc = vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocationCreateInfo, &index); if (@enumToInt(rc) >= 0) return index; if (rc == .ERROR_FEATURE_NOT_PRESENT) return error.VK_FEATURE_NOT_PRESENT; return error.VK_UNDOCUMENTED_ERROR; } /// \brief Helps to find memoryTypeIndex, given vk.BufferCreateInfo and AllocationCreateInfo. /// /// It can be useful e.g. to determine value to be used as PoolCreateInfo::memoryTypeIndex. /// It internally creates a temporary, dummy buffer that never has memory bound. /// It is just a convenience function, equivalent to calling: /// /// - `vkCreateBuffer` /// - `vkGetBufferMemoryRequirements` /// - `FindMemoryTypeIndex` /// - `vkDestroyBuffer` pub fn findMemoryTypeIndexForBufferInfo( allocator: Allocator, bufferCreateInfo: vk.BufferCreateInfo, allocationCreateInfo: AllocationCreateInfo, ) !u32 { var index: u32 = undefined; const rc = vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufferCreateInfo, &allocationCreateInfo, &index); if (@enumToInt(rc) >= 0) return index; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS, .ERROR_FEATURE_NOT_PRESENT => error.VK_FEATURE_NOT_PRESENT, else => error.VK_UNDOCUMENTED_ERROR, }; } /// \brief Helps to find memoryTypeIndex, given vk.ImageCreateInfo and AllocationCreateInfo. /// /// It can be useful e.g. to determine value to be used as PoolCreateInfo::memoryTypeIndex. 
/// It internally creates a temporary, dummy image that never has memory bound. /// It is just a convenience function, equivalent to calling: /// /// - `vkCreateImage` /// - `vkGetImageMemoryRequirements` /// - `FindMemoryTypeIndex` /// - `vkDestroyImage` pub fn findMemoryTypeIndexForImageInfo( allocator: Allocator, imageCreateInfo: vk.ImageCreateInfo, allocationCreateInfo: AllocationCreateInfo, ) !u32 { var index: u32 = undefined; const rc = vmaFindMemoryTypeIndexForImageInfo(allocator, &imageCreateInfo, &allocationCreateInfo, &index); if (@enumToInt(rc) >= 0) return index; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_FEATURE_NOT_PRESENT => error.VK_FEATURE_NOT_PRESENT, else => error.VK_UNDOCUMENTED_ERROR, }; } /// \brief Allocates Vulkan device memory and creates #Pool object. /// /// @param allocator Allocator object. /// @param pCreateInfo Parameters of pool to create. /// @param[out] pPool Handle to created pool. pub fn createPool(allocator: Allocator, createInfo: PoolCreateInfo) !Pool { var pool: Pool = undefined; const rc = vmaCreatePool(allocator, &createInfo, &pool); if (@enumToInt(rc) >= 0) return pool; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_TOO_MANY_OBJECTS => error.VK_TOO_MANY_OBJECTS, .ERROR_INVALID_EXTERNAL_HANDLE => error.VK_INVALID_EXTERNAL_HANDLE, .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS, .ERROR_MEMORY_MAP_FAILED => error.VK_MEMORY_MAP_FAILED, else => error.VK_UNDOCUMENTED_ERROR, }; } /// \brief Destroys #Pool object and frees Vulkan device memory. /// fn destroyPool(self: Allocator, pool: Pool) void pub const destroyPool = vmaDestroyPool; /// \brief Retrieves statistics of existing #Pool object. /// /// @param allocator Allocator object. /// @param pool Pool object. /// @param[out] pPoolStats Statistics of specified pool. pub fn getPoolStats(allocator: Allocator, pool: Pool) PoolStats { var stats: PoolStats = undefined; vmaGetPoolStats(allocator, pool, &stats); return stats; } /// \brief Marks all allocations in given pool as lost if they are not used in current frame or PoolCreateInfo::frameInUseCount back from now. /// /// @param allocator Allocator object. /// @param pool Pool. pub fn makePoolAllocationsLost(allocator: Allocator, pool: Pool) void { vmaMakePoolAllocationsLost(allocator, pool, null); } /// \brief Marks all allocations in given pool as lost if they are not used in current frame or PoolCreateInfo::frameInUseCount back from now. /// /// @param allocator Allocator object. /// @param pool Pool. /// @return the number of allocations that were marked as lost. pub fn makePoolAllocationsLostAndCount(allocator: Allocator, pool: Pool) usize { var count: usize = undefined; vmaMakePoolAllocationsLost(allocator, pool, &count); return count; } /// \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. /// /// Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, /// `VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is /// `HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). /// /// Possible return values: /// /// - `error.VK_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool. 
/// - `vk.SUCCESS` - corruption detection has been performed and succeeded. /// - `error.VK_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruptions around one of the allocations. /// `VMA_ASSERT` is also fired in that case. /// - Other value: Error returned by Vulkan, e.g. memory mapping failure. pub fn checkPoolCorruption(allocator: Allocator, pool: Pool) !void { const rc = vmaCheckPoolCorruption(allocator, pool); if (@enumToInt(rc) >= 0) return; return switch (rc) { .ERROR_FEATURE_NOT_PRESENT => error.VMA_CORRUPTION_DETECTION_DISABLED, .ERROR_VALIDATION_FAILED_EXT => error.VMA_CORRUPTION_DETECTED, .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_MEMORY_MAP_FAILED => error.VK_MEMORY_MAP_FAILED, else => error.VK_UNDOCUMENTED_ERROR, }; } /// \brief Retrieves name of a custom pool. /// /// After the call `ppName` is either null or points to an internally-owned null-terminated string /// containing name of the pool that was previously set. The pointer becomes invalid when the pool is /// destroyed or its name is changed using SetPoolName(). pub fn getPoolName(allocator: Allocator, pool: Pool) ?[*:0]const u8 { var name: ?[*:0]const u8 = undefined; vmaGetPoolName(allocator, pool, &name); return name; } /// \brief Sets name of a custom pool. /// /// `pName` can be either null or pointer to a null-terminated string with new name for the pool. /// Function makes internal copy of the string, so it can be changed or freed immediately after this call. /// fn setPoolName(self: Allocator, pool: Pool, name: ?[*:0]const u8) pub const setPoolName = vmaSetPoolName; /// \brief General purpose memory allocation. /// /// @param[out] pAllocation Handle to allocated memory. /// @param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function GetAllocationInfo(). /// /// You should free the memory using FreeMemory() or FreeMemoryPages(). /// /// It is recommended to use AllocateMemoryForBuffer(), AllocateMemoryForImage(), /// CreateBuffer(), CreateImage() instead whenever possible. pub fn allocateMemory(allocator: Allocator, vkMemoryRequirements: vk.MemoryRequirements, createInfo: AllocationCreateInfo) !Allocation { return allocateMemoryAndGetInfo(allocator, vkMemoryRequirements, createInfo, null); } pub fn allocateMemoryAndGetInfo(allocator: Allocator, vkMemoryRequirements: vk.MemoryRequirements, createInfo: AllocationCreateInfo, outInfo: ?*AllocationInfo) !Allocation { var result: Allocation = undefined; const rc = vmaAllocateMemory(allocator, &vkMemoryRequirements, &createInfo, &result, outInfo); if (@enumToInt(rc) >= 0) return result; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_TOO_MANY_OBJECTS => error.VK_TOO_MANY_OBJECTS, .ERROR_INVALID_EXTERNAL_HANDLE => error.VK_INVALID_EXTERNAL_HANDLE, .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS, .ERROR_MEMORY_MAP_FAILED => error.VK_MEMORY_MAP_FAILED, .ERROR_FRAGMENTED_POOL => error.VK_FRAGMENTED_POOL, .ERROR_OUT_OF_POOL_MEMORY => error.VK_OUT_OF_POOL_MEMORY, else => error.VK_UNDOCUMENTED_ERROR, }; } /// \brief General purpose memory allocation for multiple allocation objects at once. /// /// @param allocator Allocator object. /// @param pVkMemoryRequirements Memory requirements for each allocation. /// @param pCreateInfo Creation parameters for each alloction. 
/// @param allocationCount Number of allocations to make. /// @param[out] pAllocations Pointer to array that will be filled with handles to created allocations. /// @param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations. /// /// You should free the memory using FreeMemory() or FreeMemoryPages(). /// /// Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding. /// It is just a general purpose allocation function able to make multiple allocations at once. /// It may be internally optimized to be more efficient than calling AllocateMemory() `allocationCount` times. /// /// All allocations are made using same parameters. All of them are created out of the same memory pool and type. /// If any allocation fails, all allocations already made within this function call are also freed, so that when /// returned result is not `vk.SUCCESS`, `pAllocation` array is always entirely filled with `.Null`. pub fn allocateMemoryPages(allocator: Allocator, vkMemoryRequirements: vk.MemoryRequirements, createInfo: AllocationCreateInfo, outAllocations: []Allocation) !void { const rc = vmaAllocateMemoryPages(allocator, &vkMemoryRequirements, &createInfo, outAllocations.len, outAllocations.ptr, null); if (@enumToInt(rc) >= 0) return; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_TOO_MANY_OBJECTS => error.VK_TOO_MANY_OBJECTS, .ERROR_INVALID_EXTERNAL_HANDLE => error.VK_INVALID_EXTERNAL_HANDLE, .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS, .ERROR_MEMORY_MAP_FAILED => error.VK_MEMORY_MAP_FAILED, .ERROR_FRAGMENTED_POOL => error.VK_FRAGMENTED_POOL, .ERROR_OUT_OF_POOL_MEMORY => error.VK_OUT_OF_POOL_MEMORY, else => error.VK_UNDOCUMENTED_ERROR, }; } pub fn allocateMemoryPagesAndGetInfo(allocator: Allocator, vkMemoryRequirements: vk.MemoryRequirements, createInfo: AllocationCreateInfo, outAllocations: []Allocation, outInfo: []AllocationInfo) !void { assert(outAllocations.len == outInfo.len); const rc = vmaAllocateMemoryPages(allocator, &vkMemoryRequirements, &createInfo, outAllocations.len, outAllocations.ptr, outInfo.ptr); if (@enumToInt(rc) >= 0) return; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_TOO_MANY_OBJECTS => error.VK_TOO_MANY_OBJECTS, .ERROR_INVALID_EXTERNAL_HANDLE => error.VK_INVALID_EXTERNAL_HANDLE, .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS, .ERROR_MEMORY_MAP_FAILED => error.VK_MEMORY_MAP_FAILED, .ERROR_FRAGMENTED_POOL => error.VK_FRAGMENTED_POOL, .ERROR_OUT_OF_POOL_MEMORY => error.VK_OUT_OF_POOL_MEMORY, else => error.VK_UNDOCUMENTED_ERROR, }; } /// @param[out] pAllocation Handle to allocated memory. /// @param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function GetAllocationInfo(). /// /// You should free the memory using FreeMemory(). 
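    ///
    /// A minimal usage sketch (hypothetical; `buffer` is assumed to be a `vk.Buffer` created
    /// beforehand, and it still has to be bound with BindBufferMemory()):
    ///
    ///     const allocation = try allocateMemoryForBuffer(allocator, buffer, .{ .usage = .gpuOnly });
    ///     errdefer freeMemory(allocator, allocation);
    ///     try bindBufferMemory(allocator, allocation, buffer);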
pub fn allocateMemoryForBuffer(allocator: Allocator, buffer: vk.Buffer, createInfo: AllocationCreateInfo) !Allocation { return allocateMemoryForBufferAndGetInfo(allocator, buffer, createInfo, null); } pub fn allocateMemoryForBufferAndGetInfo(allocator: Allocator, buffer: vk.Buffer, createInfo: AllocationCreateInfo, outInfo: ?*AllocationInfo) !Allocation { var result: Allocation = undefined; const rc = vmaAllocateMemoryForBuffer(allocator, buffer, &createInfo, &result, outInfo); if (@enumToInt(rc) >= 0) return result; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_TOO_MANY_OBJECTS => error.VK_TOO_MANY_OBJECTS, .ERROR_INVALID_EXTERNAL_HANDLE => error.VK_INVALID_EXTERNAL_HANDLE, .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS, .ERROR_MEMORY_MAP_FAILED => error.VK_MEMORY_MAP_FAILED, .ERROR_FRAGMENTED_POOL => error.VK_FRAGMENTED_POOL, .ERROR_OUT_OF_POOL_MEMORY => error.VK_OUT_OF_POOL_MEMORY, else => error.VK_UNDOCUMENTED_ERROR, }; } /// Function similar to AllocateMemoryForBuffer(). pub fn allocateMemoryForImage(allocator: Allocator, image: vk.Image, createInfo: AllocationCreateInfo) !Allocation { return allocateMemoryForImageAndGetInfo(allocator, image, createInfo, null); } pub fn allocateMemoryForImageAndGetInfo(allocator: Allocator, image: vk.Image, createInfo: AllocationCreateInfo, outInfo: ?*AllocationInfo) !Allocation { var result: Allocation = undefined; const rc = vmaAllocateMemoryForImage(allocator, image, &createInfo, &result, outInfo); if (@enumToInt(rc) >= 0) return result; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_TOO_MANY_OBJECTS => error.VK_TOO_MANY_OBJECTS, .ERROR_INVALID_EXTERNAL_HANDLE => error.VK_INVALID_EXTERNAL_HANDLE, .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS, .ERROR_MEMORY_MAP_FAILED => error.VK_MEMORY_MAP_FAILED, .ERROR_FRAGMENTED_POOL => error.VK_FRAGMENTED_POOL, .ERROR_OUT_OF_POOL_MEMORY => error.VK_OUT_OF_POOL_MEMORY, else => error.VK_UNDOCUMENTED_ERROR, }; } /// \brief Frees memory previously allocated using AllocateMemory(), AllocateMemoryForBuffer(), or AllocateMemoryForImage(). /// /// Passing `.Null` as `allocation` is valid. Such function call is just skipped. /// fn freeMemory(allocator: Allocator, allocation: Allocation) void pub const freeMemory = vmaFreeMemory; /// \brief Frees memory and destroys multiple allocations. /// /// Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding. /// It is just a general purpose function to free memory and destroy allocations made using e.g. AllocateMemory(), /// AllocateMemoryPages() and other functions. /// It may be internally optimized to be more efficient than calling FreeMemory() `allocationCount` times. /// /// Allocations in `pAllocations` array can come from any memory pools and types. /// Passing `.Null` as elements of `pAllocations` array is valid. Such entries are just skipped. pub fn freeMemoryPages(allocator: Allocator, allocations: []Allocation) void { vmaFreeMemoryPages(allocator, allocations.len, allocations.ptr); } /// \brief Returns current information about specified allocation and atomically marks it as used in current frame. /// /// Current paramters of given allocation are returned in `pAllocationInfo`. 
    ///
    /// This function also atomically "touches" allocation - marks it as used in current frame,
    /// just like TouchAllocation().
    /// If the allocation is in lost state, `pAllocationInfo->deviceMemory == .Null`.
    ///
    /// This function uses atomics and doesn't lock any mutex, so it should be quite efficient;
    /// still, you should avoid calling it too often.
    ///
    /// - You can retrieve same AllocationInfo structure while creating your resource, from function
    /// CreateBuffer(), CreateImage(). You can remember it if you are sure parameters don't change
    /// (e.g. due to defragmentation or allocation becoming lost).
    /// - If you just want to check if allocation is not lost, TouchAllocation() will work faster.
    pub fn getAllocationInfo(allocator: Allocator, allocation: Allocation) AllocationInfo {
        var info: AllocationInfo = undefined;
        vmaGetAllocationInfo(allocator, allocation, &info);
        return info;
    }

    /// \brief Returns `true` if allocation is not lost and atomically marks it as used in current frame.
    ///
    /// If the allocation has been created with #.canBecomeLost flag,
    /// this function returns `true` if it's not in lost state, so it can still be used.
    /// It then also atomically "touches" the allocation - marks it as used in current frame,
    /// so that you can be sure it won't become lost in current frame or next `frameInUseCount` frames.
    ///
    /// If the allocation is in lost state, the function returns `false`.
    /// Memory of such allocation, as well as buffer or image bound to it, should not be used.
    /// Lost allocation and the buffer/image still need to be destroyed.
    ///
    /// If the allocation has been created without #.canBecomeLost flag,
    /// this function always returns `true`.
    pub fn touchAllocation(allocator: Allocator, allocation: Allocation) bool {
        return vmaTouchAllocation(allocator, allocation) != 0;
    }

    /// \brief Sets pUserData in given allocation to new value.
    ///
    /// If the allocation was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT,
    /// pUserData must be either null, or pointer to a null-terminated string. The function
    /// makes local copy of the string and sets it as allocation's `pUserData`. String
    /// passed as pUserData doesn't need to be valid for whole lifetime of the allocation -
    /// you can free it after this call. String previously pointed by allocation's
    /// pUserData is freed from memory.
    ///
    /// If the flag was not used, the value of pointer `pUserData` is just copied to
    /// allocation's `pUserData`. It is opaque, so you can use it however you want - e.g.
    /// as a pointer, ordinal number or some handle to your own data.
    /// fn setAllocationUserData(allocator: Allocator, allocation: Allocation, pUserData: ?*c_void) void
    pub const setAllocationUserData = vmaSetAllocationUserData;

    /// \brief Creates new allocation that is in lost state from the beginning.
    ///
    /// It can be useful if you need a dummy, non-null allocation.
    ///
    /// You still need to destroy created object using FreeMemory().
    ///
    /// Returned allocation is not tied to any specific memory pool or memory type and
    /// not bound to any image or buffer. It has size = 0. It cannot be turned into
    /// a real, non-empty allocation.
    pub fn createLostAllocation(allocator: Allocator) Allocation {
        var allocation: Allocation = undefined;
        vmaCreateLostAllocation(allocator, &allocation);
        return allocation;
    }

    /// \brief Maps memory represented by given allocation and returns pointer to it.
    ///
    /// Maps memory represented by given allocation to make it accessible to CPU code.
    /// When succeeded, `*ppData` contains pointer to first byte of this memory.
    /// If the allocation is part of bigger `vk.DeviceMemory` block, the pointer is
    /// correctly offset to the beginning of region assigned to this particular
    /// allocation.
    ///
    /// Mapping is internally reference-counted and synchronized, so although the raw Vulkan
    /// function `vkMapMemory()` cannot be used to map the same block of `vk.DeviceMemory`
    /// multiple times simultaneously, it is safe to call this function on allocations
    /// assigned to the same memory block. Actual Vulkan memory will be mapped on first
    /// mapping and unmapped on last unmapping.
    ///
    /// If the function succeeded, you must call UnmapMemory() to unmap the
    /// allocation when mapping is no longer needed or before freeing the allocation, at
    /// the latest.
    ///
    /// It is also safe to call this function multiple times on the same allocation. You
    /// must call UnmapMemory() same number of times as you called MapMemory().
    ///
    /// It is also safe to call this function on allocation created with
    /// #VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
    /// You must still call UnmapMemory() same number of times as you called
    /// MapMemory(). You must not call UnmapMemory() additional time to free the
    /// "0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
    ///
    /// This function fails when used on allocation made in memory type that is not
    /// `HOST_VISIBLE`.
    ///
    /// This function always fails when called for allocation that was created with
    /// #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocations cannot be
    /// mapped.
    ///
    /// This function doesn't automatically flush or invalidate caches.
    /// If the allocation is made from a memory type that is not `HOST_COHERENT`,
    /// you also need to use InvalidateAllocation() / FlushAllocation(), as required by Vulkan specification.
    pub fn mapMemory(allocator: Allocator, allocation: Allocation, comptime T: type) ![*]T {
        var data: *c_void = undefined;
        const rc = vmaMapMemory(allocator, allocation, &data);
        if (@enumToInt(rc) >= 0) return @intToPtr([*]T, @ptrToInt(data));
        return switch (rc) {
            .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY,
            .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY,
            .ERROR_MEMORY_MAP_FAILED => error.VK_MEMORY_MAP_FAILED,
            else => error.VK_UNDOCUMENTED_ERROR,
        };
    }

    /// \brief Unmaps memory represented by given allocation, mapped previously using MapMemory().
    ///
    /// For details, see description of MapMemory().
    ///
    /// This function doesn't automatically flush or invalidate caches.
    /// If the allocation is made from a memory type that is not `HOST_COHERENT`,
    /// you also need to use InvalidateAllocation() / FlushAllocation(), as required by Vulkan specification.
    /// fn unmapMemory(self: Allocator, allocation: Allocation) void
    pub const unmapMemory = vmaUnmapMemory;

    /// \brief Flushes memory of given allocation.
    ///
    /// Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
    /// It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
    /// Unmap operation doesn't do that automatically.
    ///
    /// - `offset` must be relative to the beginning of allocation.
    /// - `size` can be `vk.WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
    /// - `offset` and `size` don't have to be aligned.
    /// They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
    /// - If `size` is 0, this call is ignored.
    /// - If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
    /// this call is ignored.
    ///
    /// Warning! `offset` and `size` are relative to the contents of given `allocation`.
    /// If you mean whole allocation, you can pass 0 and `vk.WHOLE_SIZE`, respectively.
    /// Do not pass allocation's offset as `offset`!!!
    /// fn flushAllocation(allocator: Allocator, allocation: Allocation, offset: vk.DeviceSize, size: vk.DeviceSize) void
    pub const flushAllocation = vmaFlushAllocation;

    /// \brief Invalidates memory of given allocation.
    ///
    /// Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
    /// It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
    /// Map operation doesn't do that automatically.
    ///
    /// - `offset` must be relative to the beginning of allocation.
    /// - `size` can be `vk.WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
    /// - `offset` and `size` don't have to be aligned.
    /// They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
    /// - If `size` is 0, this call is ignored.
    /// - If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
    /// this call is ignored.
    ///
    /// Warning! `offset` and `size` are relative to the contents of given `allocation`.
    /// If you mean whole allocation, you can pass 0 and `vk.WHOLE_SIZE`, respectively.
    /// Do not pass allocation's offset as `offset`!!!
    /// fn invalidateAllocation(allocator: Allocator, allocation: Allocation, offset: vk.DeviceSize, size: vk.DeviceSize) void
    pub const invalidateAllocation = vmaInvalidateAllocation;

    /// \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
    ///
    /// @param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
    ///
    /// Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
    /// `VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are
    /// `HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
    ///
    /// Possible return values:
    ///
    /// - `error.VK_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
    /// - `vk.SUCCESS` - corruption detection has been performed and succeeded.
    /// - `error.VK_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruptions around one of the allocations.
    /// `VMA_ASSERT` is also fired in that case.
    /// - Other value: Error returned by Vulkan, e.g. memory mapping failure.
    pub fn checkCorruption(allocator: Allocator, memoryTypeBits: u32) !void {
        const rc = vmaCheckCorruption(allocator, memoryTypeBits);
        if (@enumToInt(rc) >= 0) return;
        return switch (rc) {
            .ERROR_FEATURE_NOT_PRESENT => error.VMA_CORRUPTION_DETECTION_DISABLED,
            .ERROR_VALIDATION_FAILED_EXT => error.VMA_CORRUPTION_DETECTED,
            .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY,
            .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY,
            .ERROR_MEMORY_MAP_FAILED => error.VK_MEMORY_MAP_FAILED,
            else => error.VK_UNDOCUMENTED_ERROR,
        };
    }

    /// \brief Begins defragmentation process.
    ///
    /// @param allocator Allocator object.
    /// @param pInfo Structure filled with parameters of defragmentation.
    /// @param[out] pStats Optional. Statistics of defragmentation. You can pass null if you are not interested in this information.
    /// @param[out] pContext Context object that must be passed to DefragmentationEnd() to finish defragmentation.
    /// @return `vk.SUCCESS` and `*pContext == null` if defragmentation finished within this function call. `vk.NOT_READY` and `*pContext != null` if defragmentation has been started and you need to call DefragmentationEnd() to finish it. Negative value in case of error.
    ///
    /// Use this function instead of old, deprecated Defragment().
    ///
    /// Warning! Between the call to DefragmentationBegin() and DefragmentationEnd():
    ///
    /// - You should not use any of allocations passed as `pInfo->pAllocations` or
    /// any allocations that belong to pools passed as `pInfo->pPools`,
    /// including calling GetAllocationInfo(), TouchAllocation(), or accessing
    /// their data.
    /// - Some mutexes protecting internal data structures may be locked, so trying to
    /// make or free any allocations, bind buffers or images, map memory, or launch
    /// another simultaneous defragmentation in between may cause stall (when done on
    /// another thread) or deadlock (when done on the same thread), unless you are
    /// 100% sure that defragmented allocations are in different pools.
    /// - Information returned via `pStats` and `pInfo->pAllocationsChanged` are undefined.
    /// They become valid after call to DefragmentationEnd().
    /// - If `pInfo->commandBuffer` is not null, you must submit that command buffer
    /// and make sure it finished execution before calling DefragmentationEnd().
    ///
    /// For more information and important limitations regarding defragmentation, see documentation chapter:
    /// [Defragmentation](@ref defragmentation).
    pub fn defragmentationBegin(allocator: Allocator, info: DefragmentationInfo2) !DefragmentationContext {
        return defragmentationBeginWithStats(allocator, info, null);
    }
    pub fn defragmentationBeginWithStats(allocator: Allocator, info: DefragmentationInfo2, stats: ?*DefragmentationStats) !DefragmentationContext {
        var context: DefragmentationContext = undefined;
        const rc = vmaDefragmentationBegin(allocator, &info, stats, &context);
        if (@enumToInt(rc) >= 0) return context; // includes NOT_READY
        return switch (rc) {
            .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY,
            .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY,
            .ERROR_TOO_MANY_OBJECTS => error.VK_TOO_MANY_OBJECTS,
            .ERROR_INVALID_EXTERNAL_HANDLE => error.VK_INVALID_EXTERNAL_HANDLE,
            .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS,
            .ERROR_MEMORY_MAP_FAILED => error.VK_MEMORY_MAP_FAILED,
            .ERROR_FRAGMENTED_POOL => error.VK_FRAGMENTED_POOL,
            .ERROR_OUT_OF_POOL_MEMORY => error.VK_OUT_OF_POOL_MEMORY,
            else => error.VK_UNDOCUMENTED_ERROR,
        };
    }

    /// \brief Ends defragmentation process.
    ///
    /// Use this function to finish defragmentation started by DefragmentationBegin().
    /// It is safe to pass `context == null`. The function then does nothing.
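    ///
    /// A hypothetical usage sketch (`defragInfo` is assumed to be a `DefragmentationInfo2`
    /// prepared elsewhere):
    ///
    ///     const ctx = try defragmentationBegin(allocator, defragInfo);
    ///     // ... if a command buffer was provided in `defragInfo`, submit it and wait for it ...
    ///     try defragmentationEnd(allocator, ctx);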
pub fn defragmentationEnd(allocator: Allocator, context: DefragmentationContext) !void { const rc = vmaDefragmentationEnd(allocator, context); if (@enumToInt(rc) >= 0) return; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_TOO_MANY_OBJECTS => error.VK_TOO_MANY_OBJECTS, .ERROR_INVALID_EXTERNAL_HANDLE => error.VK_INVALID_EXTERNAL_HANDLE, .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS, .ERROR_MEMORY_MAP_FAILED => error.VK_MEMORY_MAP_FAILED, .ERROR_FRAGMENTED_POOL => error.VK_FRAGMENTED_POOL, .ERROR_OUT_OF_POOL_MEMORY => error.VK_OUT_OF_POOL_MEMORY, else => error.VK_UNDOCUMENTED_ERROR, }; } /// \brief Binds buffer to allocation. /// /// Binds specified buffer to region of memory represented by specified allocation. /// Gets `vk.DeviceMemory` handle and offset from the allocation. /// If you want to create a buffer, allocate memory for it and bind them together separately, /// you should use this function for binding instead of standard `vkBindBufferMemory()`, /// because it ensures proper synchronization so that when a `vk.DeviceMemory` object is used by multiple /// allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously /// (which is illegal in Vulkan). /// /// It is recommended to use function createBuffer() instead of this one. pub fn bindBufferMemory(allocator: Allocator, allocation: Allocation, buffer: vk.Buffer) !void { const rc = vmaBindBufferMemory(allocator, allocation, buffer); if (@enumToInt(rc) >= 0) return; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS, else => error.VK_UNDOCUMENTED_ERROR, }; } /// \brief Binds buffer to allocation with additional parameters. /// /// @param allocationLocalOffset Additional offset to be added while binding, relative to the beginnig of the `allocation`. Normally it should be 0. /// @param pNext A chain of structures to be attached to `vk.BindBufferMemoryInfoKHR` structure used internally. Normally it should be null. /// /// This function is similar to BindBufferMemory(), but it provides additional parameters. /// /// If `pNext` is not null, #Allocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag /// or with AllocatorCreateInfo::vulkanApiVersion `== vk.API_VERSION_1_1`. Otherwise the call fails. pub fn bindBufferMemory2(allocator: Allocator, allocation: Allocation, allocationLocalOffset: vk.DeviceSize, buffer: vk.Buffer, pNext: ?*const c_void) !void { const rc = vmaBindBufferMemory2(allocator, allocation, allocationLocalOffset, buffer, pNext); if (@enumToInt(rc) >= 0) return; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS, else => error.VK_UNDOCUMENTED_ERROR, }; } /// \brief Binds image to allocation. /// /// Binds specified image to region of memory represented by specified allocation. /// Gets `vk.DeviceMemory` handle and offset from the allocation. 
/// If you want to create an image, allocate memory for it and bind them together separately, /// you should use this function for binding instead of standard `vkBindImageMemory()`, /// because it ensures proper synchronization so that when a `vk.DeviceMemory` object is used by multiple /// allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously /// (which is illegal in Vulkan). /// /// It is recommended to use function CreateImage() instead of this one. pub fn bindImageMemory(allocator: Allocator, allocation: Allocation, image: vk.Image) !void { const rc = vmaBindImageMemory(allocator, allocation, image); if (@enumToInt(rc) >= 0) return; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, else => error.VK_UNDOCUMENTED_ERROR, }; } /// \brief Binds image to allocation with additional parameters. /// /// @param allocationLocalOffset Additional offset to be added while binding, relative to the beginnig of the `allocation`. Normally it should be 0. /// @param pNext A chain of structures to be attached to `vk.BindImageMemoryInfoKHR` structure used internally. Normally it should be null. /// /// This function is similar to BindImageMemory(), but it provides additional parameters. /// /// If `pNext` is not null, #Allocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag /// or with AllocatorCreateInfo::vulkanApiVersion `== vk.API_VERSION_1_1`. Otherwise the call fails. pub fn bindImageMemory2(allocator: Allocator, allocation: Allocation, allocationLocalOffset: vk.DeviceSize, image: vk.Image, pNext: ?*const c_void) !void { const rc = vmaBindImageMemory2(allocator, allocation, allocationLocalOffset, image, pNext); if (@enumToInt(rc) >= 0) return; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS, else => error.VK_UNDOCUMENTED_ERROR, }; } /// @param[out] pBuffer Buffer that was created. /// @param[out] pAllocation Allocation that was created. /// @param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function GetAllocationInfo(). /// /// This function automatically: /// /// -# Creates buffer. /// -# Allocates appropriate memory for it. /// -# Binds the buffer with the memory. /// /// If any of these operations fail, buffer and allocation are not created, /// returned value is negative error code, *pBuffer and *pAllocation are null. /// /// If the function succeeded, you must destroy both buffer and allocation when you /// no longer need them using either convenience function DestroyBuffer() or /// separately, using `vkDestroyBuffer()` and FreeMemory(). /// /// If VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used, /// vk.KHR_dedicated_allocation extension is used internally to query driver whether /// it requires or prefers the new buffer to have dedicated allocation. If yes, /// and if dedicated allocation is possible (AllocationCreateInfo::pool is null /// and VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated /// allocation for this buffer, just like when using /// VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. 
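    ///
    /// A minimal usage sketch (hypothetical; `bufInfo` is assumed to be a `vk.BufferCreateInfo`
    /// filled in the usual way):
    ///
    ///     const result = try createBuffer(allocator, bufInfo, .{ .usage = .cpuToGpu });
    ///     defer destroyBuffer(allocator, result.buffer, result.allocation);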
pub fn createBuffer( allocator: Allocator, bufferCreateInfo: vk.BufferCreateInfo, allocationCreateInfo: AllocationCreateInfo, ) !CreateBufferResult { return createBufferAndGetInfo(allocator, bufferCreateInfo, allocationCreateInfo, null); } pub fn createBufferAndGetInfo( allocator: Allocator, bufferCreateInfo: vk.BufferCreateInfo, allocationCreateInfo: AllocationCreateInfo, outInfo: ?*AllocationInfo, ) !CreateBufferResult { var result: CreateBufferResult = undefined; const rc = vmaCreateBuffer( allocator, &bufferCreateInfo, &allocationCreateInfo, &result.buffer, &result.allocation, outInfo, ); if (@enumToInt(rc) >= 0) return result; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_TOO_MANY_OBJECTS => error.VK_TOO_MANY_OBJECTS, .ERROR_INVALID_EXTERNAL_HANDLE => error.VK_INVALID_EXTERNAL_HANDLE, .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS, .ERROR_MEMORY_MAP_FAILED => error.VK_MEMORY_MAP_FAILED, .ERROR_FRAGMENTED_POOL => error.VK_FRAGMENTED_POOL, .ERROR_OUT_OF_POOL_MEMORY => error.VK_OUT_OF_POOL_MEMORY, else => error.VK_UNDOCUMENTED_ERROR, }; } pub const CreateBufferResult = struct { buffer: vk.Buffer, allocation: Allocation, }; /// \brief Destroys Vulkan buffer and frees allocated memory. /// /// This is just a convenience function equivalent to: /// /// \code /// vkDestroyBuffer(device, buffer, allocationCallbacks); /// FreeMemory(allocator, allocation); /// \endcode /// /// It it safe to pass null as buffer and/or allocation. /// fn destroyBuffer(allocator: Allocator, buffer: vk.Buffer, allocation: Allocation) void pub const destroyBuffer = vmaDestroyBuffer; /// Function similar to CreateBuffer(). pub fn createImage( allocator: Allocator, imageCreateInfo: vk.ImageCreateInfo, allocationCreateInfo: AllocationCreateInfo, ) !CreateImageResult { return createImageAndGetInfo(allocator, imageCreateInfo, allocationCreateInfo, null); } pub fn createImageAndGetInfo( allocator: Allocator, imageCreateInfo: vk.ImageCreateInfo, allocationCreateInfo: AllocationCreateInfo, outInfo: ?*AllocationInfo, ) !CreateImageResult { var result: CreateImageResult = undefined; const rc = vmaCreateImage( allocator, &imageCreateInfo, &allocationCreateInfo, &result.image, &result.allocation, outInfo, ); if (@enumToInt(rc) >= 0) return result; return switch (rc) { .ERROR_OUT_OF_HOST_MEMORY => error.VK_OUT_OF_HOST_MEMORY, .ERROR_OUT_OF_DEVICE_MEMORY => error.VK_OUT_OF_DEVICE_MEMORY, .ERROR_TOO_MANY_OBJECTS => error.VK_TOO_MANY_OBJECTS, .ERROR_INVALID_EXTERNAL_HANDLE => error.VK_INVALID_EXTERNAL_HANDLE, .ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS => error.VK_INVALID_OPAQUE_CAPTURE_ADDRESS, .ERROR_MEMORY_MAP_FAILED => error.VK_MEMORY_MAP_FAILED, .ERROR_FRAGMENTED_POOL => error.VK_FRAGMENTED_POOL, .ERROR_OUT_OF_POOL_MEMORY => error.VK_OUT_OF_POOL_MEMORY, else => error.VK_UNDOCUMENTED_ERROR, }; } pub const CreateImageResult = struct { image: vk.Image, allocation: Allocation, }; /// \brief Destroys Vulkan image and frees allocated memory. /// /// This is just a convenience function equivalent to: /// /// \code /// vkDestroyImage(device, image, allocationCallbacks); /// FreeMemory(allocator, allocation); /// \endcode /// /// It is safe to pass null as image and/or allocation. /// fn destroyImage(self: Allocator, image: vk.Image, allocation: Allocation) void pub const destroyImage = vmaDestroyImage; }; /// Callback function called after successful vkAllocateMemory. 
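/// A hypothetical sketch of a matching callback (illustrative only, not part of the library):
///
///     fn onDeviceMemoryAllocated(
///         allocator: Allocator,
///         memoryType: u32,
///         memory: vk.DeviceMemory,
///         size: vk.DeviceSize,
///     ) callconv(vk.CallConv) void {
///         // e.g. accumulate `size` into an application-side counter of VMA device memory
///     }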
pub const PFN_AllocateDeviceMemoryFunction = fn ( allocator: Allocator, memoryType: u32, memory: vk.DeviceMemory, size: vk.DeviceSize, ) callconv(vk.CallConv) void; /// Callback function called before vkFreeMemory. pub const PFN_FreeDeviceMemoryFunction = fn ( allocator: Allocator, memoryType: u32, memory: vk.DeviceMemory, size: vk.DeviceSize, ) callconv(vk.CallConv) void; /// \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`. /// /// Provided for informative purpose, e.g. to gather statistics about number of /// allocations or total amount of memory allocated in Vulkan. /// /// Used in AllocatorCreateInfo::pDeviceMemoryCallbacks. pub const DeviceMemoryCallbacks = extern struct { pfnAllocate: ?PFN_AllocateDeviceMemoryFunction, pfnFree: ?PFN_FreeDeviceMemoryFunction, }; /// Flags for created #Allocator. pub const AllocatorCreateFlags = packed struct { /// \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you. /// /// Using this flag may increase performance because internal mutexes are not used. externallySynchronized: bool align(4) = false, /// \brief Enables usage of vk.KHR_dedicated_allocation extension. /// /// The flag works only if AllocatorCreateInfo::vulkanApiVersion `== vk.API_VERSION_1_0`. /// When it's `vk.API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. /// /// Using this extenion will automatically allocate dedicated blocks of memory for /// some buffers and images instead of suballocating place for them out of bigger /// memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT /// flag) when it is recommended by the driver. It may improve performance on some /// GPUs. /// /// You may set this flag only if you found out that following device extensions are /// supported, you enabled them while creating Vulkan device passed as /// AllocatorCreateInfo::device, and you want them to be used internally by this /// library: /// /// - vk.KHR_get_memory_requirements2 (device extension) /// - vk.KHR_dedicated_allocation (device extension) /// /// When this flag is set, you can experience following warnings reported by Vulkan /// validation layer. You can ignore them. /// /// > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer. dedicatedAllocationKHR: bool = false, /// Enables usage of vk.KHR_bind_memory2 extension. /// /// The flag works only if AllocatorCreateInfo::vulkanApiVersion `== vk.API_VERSION_1_0`. /// When it's `vk.API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. /// /// You may set this flag only if you found out that this device extension is supported, /// you enabled it while creating Vulkan device passed as AllocatorCreateInfo::device, /// and you want it to be used internally by this library. /// /// The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`, /// which allow to pass a chain of `pNext` structures while binding. /// This flag is required if you use `pNext` parameter in BindBufferMemory2() or BindImageMemory2(). bindMemory2KHR: bool = false, /// Enables usage of vk.EXT_memory_budget extension. 
/// /// You may set this flag only if you found out that this device extension is supported, /// you enabled it while creating Vulkan device passed as AllocatorCreateInfo::device, /// and you want it to be used internally by this library, along with another instance extension /// vk.KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted). /// /// The extension provides query for current memory usage and budget, which will probably /// be more accurate than an estimation used by the library otherwise. memoryBudgetEXT: bool = false, __reserved_bits_04_31: u28 = 0, pub usingnamespace vk.FlagsMixin(@This()); }; /// \brief Pointers to some Vulkan functions - a subset used by the library. /// /// Used in AllocatorCreateInfo::pVulkanFunctions. pub const VulkanFunctions = extern struct { vkGetPhysicalDeviceProperties: @TypeOf(vk.vkGetPhysicalDeviceProperties), vkGetPhysicalDeviceMemoryProperties: @TypeOf(vk.vkGetPhysicalDeviceMemoryProperties), vkAllocateMemory: @TypeOf(vk.vkAllocateMemory), vkFreeMemory: @TypeOf(vk.vkFreeMemory), vkMapMemory: @TypeOf(vk.vkMapMemory), vkUnmapMemory: @TypeOf(vk.vkUnmapMemory), vkFlushMappedMemoryRanges: @TypeOf(vk.vkFlushMappedMemoryRanges), vkInvalidateMappedMemoryRanges: @TypeOf(vk.vkInvalidateMappedMemoryRanges), vkBindBufferMemory: @TypeOf(vk.vkBindBufferMemory), vkBindImageMemory: @TypeOf(vk.vkBindImageMemory), vkGetBufferMemoryRequirements: @TypeOf(vk.vkGetBufferMemoryRequirements), vkGetImageMemoryRequirements: @TypeOf(vk.vkGetImageMemoryRequirements), vkCreateBuffer: @TypeOf(vk.vkCreateBuffer), vkDestroyBuffer: @TypeOf(vk.vkDestroyBuffer), vkCreateImage: @TypeOf(vk.vkCreateImage), vkDestroyImage: @TypeOf(vk.vkDestroyImage), vkCmdCopyBuffer: @TypeOf(vk.vkCmdCopyBuffer), dedicatedAllocation: if (config.dedicatedAllocation or config.vulkanVersion >= 1001000) DedicatedAllocationFunctions else void, bindMemory2: if (config.bindMemory2 or config.vulkanVersion >= 1001000) BindMemory2Functions else void, memoryBudget: if (config.memoryBudget or config.vulkanVersion >= 1001000) MemoryBudgetFunctions else void, const DedicatedAllocationFunctions = extern struct { vkGetBufferMemoryRequirements2: @TypeOf(vk.vkGetBufferMemoryRequirements2KHR), vkGetImageMemoryRequirements2: @TypeOf(vk.vkGetImageMemoryRequirements2KHR), }; const BindMemory2Functions = extern struct { vkBindBufferMemory2: @TypeOf(vk.vkBindBufferMemory2KHR), vkBindImageMemory2: @TypeOf(vk.vkBindImageMemory2KHR), }; const MemoryBudgetFunctions = extern struct { vkGetPhysicalDeviceMemoryProperties2: @TypeOf(vk.vkGetPhysicalDeviceMemoryProperties2KHR), }; fn isDeviceFunc(comptime FuncType: type) bool { comptime { const info = @typeInfo(FuncType).Fn; if (info.args.len == 0) return false; const arg0 = info.args[0].arg_type; return arg0 == vk.Device or arg0 == vk.Queue or arg0 == vk.CommandBuffer; } } fn loadRecursive( comptime T: type, inst: vk.Instance, device: vk.Device, vkGetInstanceProcAddr: fn(vk.Instance, [*:0]const u8) callconv(vk.CallConv) ?vk.PFN_VoidFunction, vkGetDeviceProcAddr: fn(vk.Device, [*:0]const u8) callconv(vk.CallConv) ?vk.PFN_VoidFunction, ) T { if (@typeInfo(T) != .Struct) return undefined; var value: T = undefined; inline for (@typeInfo(T).Struct.fields) |field| { if (comptime std.mem.startsWith(u8, field.name, "vk")) { if (comptime isDeviceFunc(field.field_type)) { const func = vkGetDeviceProcAddr(device, @ptrCast([*:0]const u8, field.name.ptr)); const resolved = func orelse @panic("Couldn't fetch vk device function "++field.name); 
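                    // vkGetDeviceProcAddr returned an opaque PFN_VoidFunction;
                    // cast it back to the concrete function type declared by this field.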
@field(value, field.name) = @ptrCast(field.field_type, resolved); } else { const func = vkGetInstanceProcAddr(inst, @ptrCast([*:0]const u8, field.name.ptr)); const resolved = func orelse @panic("Couldn't fetch vk instance function "++field.name); @field(value, field.name) = @ptrCast(field.field_type, resolved); } } else { @field(value, field.name) = loadRecursive(field.field_type, inst, device, vkGetInstanceProcAddr, vkGetDeviceProcAddr); } } return value; } pub fn init( inst: vk.Instance, device: vk.Device, vkGetInstanceProcAddr: fn(vk.Instance, [*:0]const u8) callconv(vk.CallConv) ?vk.PFN_VoidFunction ) VulkanFunctions { const vkGetDeviceProcAddrPtr = vkGetInstanceProcAddr(inst, "vkGetDeviceProcAddr") orelse @panic("Couldn't fetch vkGetDeviceProcAddr: vkGetInstanceProcAddr returned null."); const vkGetDeviceProcAddr = @ptrCast(fn(vk.Device, [*:0]const u8) callconv(vk.CallConv) ?vk.PFN_VoidFunction, vkGetDeviceProcAddrPtr); return loadRecursive(VulkanFunctions, inst, device, vkGetInstanceProcAddr, vkGetDeviceProcAddr); } }; /// Flags to be used in RecordSettings::flags. pub const RecordFlags = packed struct { /// \brief Enables flush after recording every function call. /// /// Enable it if you expect your application to crash, which may leave recording file truncated. /// It may degrade performance though. flushAfterCall: bool = false, __reserved_bits_01_31: u31 = 0, pub usingnamespace vk.FlagsMixin(@This()); }; /// Parameters for recording calls to VMA functions. To be used in AllocatorCreateInfo::pRecordSettings. pub const RecordSettings = extern struct { /// Flags for recording. Use #RecordFlagBits enum. flags: RecordFlags = .{}, /// \brief Path to the file that should be written by the recording. /// /// Suggested extension: "csv". /// If the file already exists, it will be overwritten. /// It will be opened for the whole time #Allocator object is alive. /// If opening this file fails, creation of the whole allocator object fails. pFilePath: [*:0]const u8, }; /// Description of a Allocator to be created. pub const AllocatorCreateInfo = extern struct { /// Flags for created allocator. Use #AllocatorCreateFlagBits enum. flags: AllocatorCreateFlags = .{}, /// Vulkan physical device. /// It must be valid throughout whole lifetime of created allocator. physicalDevice: vk.PhysicalDevice, /// Vulkan device. /// It must be valid throughout whole lifetime of created allocator. device: vk.Device, /// Preferred size of a single `vk.DeviceMemory` block to be allocated from large heaps > 1 GiB. Optional. /// Set to 0 to use default, which is currently 256 MiB. preferredLargeHeapBlockSize: vk.DeviceSize = 0, /// Custom CPU memory allocation callbacks. Optional. /// Optional, can be null. When specified, will also be used for all CPU-side memory allocations. pAllocationCallbacks: ?*const vk.AllocationCallbacks = null, /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional. /// Optional, can be null. pDeviceMemoryCallbacks: ?*const DeviceMemoryCallbacks = null, /// \brief Maximum number of additional frames that are in use at the same time as current frame. /// /// This value is used only when you make allocations with /// .canBeLost = true. Such allocation cannot become /// lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount. 
    ///
    /// For example, if you double-buffer your command buffers, so resources used for
    /// rendering in previous frame may still be in use by the GPU at the moment you
    /// allocate resources needed for the current frame, set this value to 1.
    ///
    /// If you want to allow any allocations other than used in the current frame to
    /// become lost, set this value to 0.
    frameInUseCount: u32,

    /// \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
    ///
    /// If not NULL, it must be a pointer to an array of
    /// `vk.PhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
    /// maximum number of bytes that can be allocated out of particular Vulkan memory
    /// heap.
    ///
    /// Any of the elements may be equal to `vk.WHOLE_SIZE`, which means no limit on that
    /// heap. This is also the default in case of `pHeapSizeLimit` = NULL.
    ///
    /// If there is a limit defined for a heap:
    ///
    /// - If user tries to allocate more memory from that heap using this allocator,
    /// the allocation fails with `error.VK_OUT_OF_DEVICE_MEMORY`.
    /// - If the limit is smaller than heap size reported in `vk.MemoryHeap::size`, the
    /// value of this limit will be reported instead when using GetMemoryProperties().
    ///
    /// Warning! Using this feature may not be equivalent to installing a GPU with
    /// smaller amount of memory, because graphics driver doesn't necessarily fail new
    /// allocations with `error.VK_OUT_OF_DEVICE_MEMORY` result when memory capacity is
    /// exceeded. It may return success and just silently migrate some device memory
    /// blocks to system RAM. This driver behavior can also be controlled using
    /// vk.AMD_memory_overallocation_behavior extension.
    pHeapSizeLimit: ?[*]const vk.DeviceSize = null,

    /// \brief Pointers to Vulkan functions. Can be null if you leave `VMA_STATIC_VULKAN_FUNCTIONS` defined to 1.
    ///
    /// If you leave `VMA_STATIC_VULKAN_FUNCTIONS` defined to 1 in the configuration section,
    /// you can pass null as this member, because the library will fetch pointers to
    /// Vulkan functions internally in a static way, like:
    ///
    /// vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    ///
    /// Fill this member if you want to provide your own pointers to Vulkan functions,
    /// e.g. fetched using `vkGetInstanceProcAddr()` and `vkGetDeviceProcAddr()`.
    pVulkanFunctions: ?*const VulkanFunctions = null,

    /// \brief Parameters for recording of VMA calls. Can be null.
    ///
    /// If not null, it enables recording of calls to VMA functions to a file.
    /// If support for recording is not enabled using `VMA_RECORDING_ENABLED` macro,
    /// creation of the allocator object fails with `error.VK_FEATURE_NOT_PRESENT`.
    pRecordSettings: ?*const RecordSettings = null,

    /// \brief Optional handle to Vulkan instance object.
    ///
    /// Optional, can be null. Must be set if #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT flag is used
    /// or if `vulkanApiVersion >= vk.MAKE_VERSION(1, 1, 0)`.
    instance: vk.Instance,

    /// \brief Optional. The highest version of Vulkan that the application is designed to use.
    ///
    /// It must be a value in the format as created by macro `vk.MAKE_VERSION` or a constant like: `vk.API_VERSION_1_1`, `vk.API_VERSION_1_0`.
    /// The patch version number specified is ignored. Only the major and minor versions are considered.
    /// It must be less than or equal (preferably equal) to the value passed to `vkCreateInstance` as `vk.ApplicationInfo::apiVersion`.
    /// Only versions 1.0 and 1.1 are supported by the current implementation.
/// Leaving it initialized to zero is equivalent to `vk.API_VERSION_1_0`. vulkanApiVersion: u32 = 0, }; /// \brief Calculated statistics of memory usage in entire allocator. pub const StatInfo = extern struct { /// Number of `vk.DeviceMemory` Vulkan memory blocks allocated. blockCount: u32, /// Number of #Allocation allocation objects allocated. allocationCount: u32, /// Number of free ranges of memory between allocations. unusedRangeCount: u32, /// Total number of bytes occupied by all allocations. usedBytes: vk.DeviceSize, /// Total number of bytes occupied by unused ranges. unusedBytes: vk.DeviceSize, allocationSizeMin: vk.DeviceSize, allocationSizeAvg: vk.DeviceSize, allocationSizeMax: vk.DeviceSize, unusedRangeSizeMin: vk.DeviceSize, unusedRangeSizeAvg: vk.DeviceSize, unusedRangeSizeMax: vk.DeviceSize, }; /// General statistics from current state of Allocator. pub const Stats = extern struct { memoryType: [vk.MAX_MEMORY_TYPES]StatInfo, memoryHeap: [vk.MAX_MEMORY_HEAPS]StatInfo, total: StatInfo, }; /// \brief Statistics of current memory usage and available budget, in bytes, for specific memory heap. pub const Budget = extern struct { /// \brief Sum size of all `vk.DeviceMemory` blocks allocated from particular heap, in bytes. blockBytes: vk.DeviceSize, /// \brief Sum size of all allocations created in particular heap, in bytes. /// /// Usually less or equal than `blockBytes`. /// Difference `blockBytes - allocationBytes` is the amount of memory allocated but unused - /// available for new allocations or wasted due to fragmentation. /// /// It might be greater than `blockBytes` if there are some allocations in lost state, as they account /// to this value as well. allocationBytes: vk.DeviceSize, /// \brief Estimated current memory usage of the program, in bytes. /// /// Fetched from system using `vk.EXT_memory_budget` extension if enabled. /// /// It might be different than `blockBytes` (usually higher) due to additional implicit objects /// also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or /// `vk.DeviceMemory` blocks allocated outside of this library, if any. usage: vk.DeviceSize, /// \brief Estimated amount of memory available to the program, in bytes. /// /// Fetched from system using `vk.EXT_memory_budget` extension if enabled. /// /// It might be different (most probably smaller) than `vk.MemoryHeap::size[heapIndex]` due to factors /// external to the program, like other programs also consuming system resources. /// Difference `budget - usage` is the amount of additional memory that can probably /// be allocated without problems. Exceeding the budget may result in various problems. budget: vk.DeviceSize, }; /// \struct Pool /// \brief Represents custom memory pool /// /// Fill structure PoolCreateInfo and call function CreatePool() to create it. /// Call function DestroyPool() to destroy it. /// /// For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools). pub const Pool = extern enum(usize) { Null = 0, _ }; pub const MemoryUsage = extern enum(u32) { /// No intended memory usage specified. /// Use other members of AllocationCreateInfo to specify your requirements. unknown = 0, /// Memory will be used on device only, so fast access from the device is preferred. /// It usually means device-local GPU (video) memory. /// No need to be mappable on host. /// It is roughly equivalent of `D3D12_HEAP_TYPE_DEFAULT`. /// /// Usage: /// /// - Resources written and read by device, e.g. images used as attachments. 
/// - Resources transferred from host once (immutable) or infrequently and read by /// device multiple times, e.g. textures to be sampled, vertex buffers, uniform /// (constant) buffers, and majority of other types of resources used on GPU. /// /// Allocation may still end up in `HOST_VISIBLE` memory on some implementations. /// In such case, you are free to map it. /// You can use #VMA_ALLOCATION_CREATE_MAPPED_BIT with this usage type. gpuOnly = 1, /// Memory will be mappable on host. /// It usually means CPU (system) memory. /// Guarantees to be `HOST_VISIBLE` and `HOST_COHERENT`. /// CPU access is typically uncached. Writes may be write-combined. /// Resources created in this pool may still be accessible to the device, but access to them can be slow. /// It is roughly equivalent of `D3D12_HEAP_TYPE_UPLOAD`. /// /// Usage: Staging copy of resources used as transfer source. cpuOnly = 2, /// Memory that is both mappable on host (guarantees to be `HOST_VISIBLE`) and preferably fast to access by GPU. /// CPU access is typically uncached. Writes may be write-combined. /// /// Usage: Resources written frequently by host (dynamic), read by device. E.g. textures, vertex buffers, uniform buffers updated every frame or every draw call. cpuToGpu = 3, /// Memory mappable on host (guarantees to be `HOST_VISIBLE`) and cached. /// It is roughly equivalent of `D3D12_HEAP_TYPE_READBACK`. /// /// Usage: /// /// - Resources written by device, read by host - results of some computations, e.g. screen capture, average scene luminance for HDR tone mapping. /// - Any resources read or accessed randomly on host, e.g. CPU-side copy of vertex buffer used as source of transfer, but also used for collision detection. gpuToCpu = 4, /// CPU memory - memory that is preferably not `DEVICE_LOCAL`, but also not guaranteed to be `HOST_VISIBLE`. /// /// Usage: Staging copy of resources moved from GPU memory to CPU memory as part /// of custom paging/residency mechanism, to be moved back to GPU memory when needed. cpuCopy = 5, /// Lazily allocated GPU memory having `vk.MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. /// Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. /// /// Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `vk.IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. /// /// Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. gpuLazilyAllocated = 6, }; /// Flags to be passed as AllocationCreateInfo::flags. pub const AllocationCreateFlags = packed struct { /// \brief Set this flag if the allocation should have its own memory block. /// /// Use it for special, big resources, like fullscreen images used as attachments. /// /// You should not use this flag if AllocationCreateInfo::pool is not null. dedicatedMemory: bool = false, /// \brief Set this flag to only try to allocate from existing `vk.DeviceMemory` blocks and never create new such block. /// /// If new allocation cannot be placed in any of the existing blocks, allocation /// fails with `error.VK_OUT_OF_DEVICE_MEMORY` error. /// /// You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and /// #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. /// /// If AllocationCreateInfo::pool is not null, this flag is implied and ignored. 
    neverAllocate: bool = false,

    /// \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
    ///
    /// Pointer to mapped memory will be returned through AllocationInfo::pMappedData.
    ///
    /// It is valid to use this flag for allocation made from memory type that is not
    /// `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
    /// useful if you need an allocation that is efficient to use on GPU
    /// (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
    /// support it (e.g. Intel GPU).
    ///
    /// You should not use this flag together with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
    createMapped: bool = false,

    /// Allocation created with this flag can become lost as a result of another
    /// allocation with #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag, so you
    /// must check it before use.
    ///
    /// To check if allocation is not lost, call GetAllocationInfo() and check if
    /// AllocationInfo::deviceMemory is not `.Null`.
    ///
    /// For details about supporting lost allocations, see Lost Allocations
    /// chapter of User Guide on Main Page.
    ///
    /// You should not use this flag together with #VMA_ALLOCATION_CREATE_MAPPED_BIT.
    canBecomeLost: bool = false,

    /// While creating allocation using this flag, other allocations that were
    /// created with flag #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
    ///
    /// For details about supporting lost allocations, see Lost Allocations
    /// chapter of User Guide on Main Page.
    canMakeOtherLost: bool = false,

    /// Set this flag to treat AllocationCreateInfo::pUserData as pointer to a
    /// null-terminated string. Instead of copying pointer value, a local copy of the
    /// string is made and stored in allocation's `pUserData`. The string is automatically
    /// freed together with the allocation. It is also used in BuildStatsString().
    userDataCopyString: bool = false,

    /// Allocation will be created from upper stack in a double stack pool.
    ///
    /// This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
    upperAddress: bool = false,

    /// Create both buffer/image and allocation, but don't bind them together.
    /// It is useful when you want to do the binding yourself in some more advanced way, e.g. using some extensions.
    /// The flag is meaningful only with functions that bind by default: CreateBuffer(), CreateImage().
    /// Otherwise it is ignored.
    dontBind: bool = false,

    /// Create allocation only if additional device memory required for it, if any, won't exceed
    /// memory budget. Otherwise return `error.VK_OUT_OF_DEVICE_MEMORY`.
    withinBudget: bool = false,

    __reserved_bits_09_15: u7 = 0,

    /// Allocation strategy that chooses smallest possible free range for the
    /// allocation.
    strategyBestFit: bool = false,

    /// Allocation strategy that chooses biggest possible free range for the
    /// allocation.
    strategyWorstFit: bool = false,

    /// Allocation strategy that chooses first suitable free range for the
    /// allocation.
    ///
    /// "First" doesn't necessarily mean the one with smallest offset in memory,
    /// but rather the one that is easiest and fastest to find.
    strategyFirstFit: bool = false,

    __reserved_bits_19_31: u13 = 0,

    /// Allocation strategy that tries to minimize memory usage.
    pub const STRATEGY_MIN_MEMORY = AllocationCreateFlags{ .strategyBestFit = true };

    /// Allocation strategy that tries to minimize allocation time.
pub const STRATEGY_MIN_TIME = AllocationCreateFlags{ .strategyFirstFit = true }; /// Allocation strategy that tries to minimize memory fragmentation. pub const STRATEGY_MIN_FRAGMENTATION = AllocationCreateFlags{ .strategyWorstFit = true }; /// A bit mask to extract only `STRATEGY` bits from entire set of flags. pub const STRATEGY_MASK = AllocationCreateFlags{ .strategyBestFit = true, .strategyWorstFit = true, .strategyFirstFit = true, }; pub usingnamespace vk.FlagsMixin(@This()); }; pub const AllocationCreateInfo = extern struct { /// Use #AllocationCreateFlagBits enum. flags: AllocationCreateFlags = .{}, /// \brief Intended usage of memory. /// /// You can leave #MemoryUsage.unknown if you specify memory requirements in other way. \n /// If `pool` is not null, this member is ignored. usage: MemoryUsage = .unknown, /// \brief Flags that must be set in a Memory Type chosen for an allocation. /// /// Leave 0 if you specify memory requirements in other way. \n /// If `pool` is not null, this member is ignored.*/ requiredFlags: vk.MemoryPropertyFlags = .{}, /// \brief Flags that preferably should be set in a memory type chosen for an allocation. /// /// Set to 0 if no additional flags are prefered. \n /// If `pool` is not null, this member is ignored. */ preferredFlags: vk.MemoryPropertyFlags = .{}, /// \brief Bitmask containing one bit set for every memory type acceptable for this allocation. /// /// Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if /// it meets other requirements specified by this structure, with no further /// restrictions on memory type index. \n /// If `pool` is not null, this member is ignored. memoryTypeBits: u32 = 0, /// \brief Pool that this allocation should be created in. /// /// Leave `.Null` to allocate from default pool. If not null, members: /// `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. pool: Pool = .Null, /// \brief Custom general-purpose pointer that will be stored in #Allocation, can be read as AllocationInfo::pUserData and changed using SetAllocationUserData(). /// /// If #AllocationCreateFlags.userDataCopyString is true, it must be either /// null or pointer to a null-terminated string. The string will be then copied to /// internal buffer, so it doesn't need to be valid after allocation call. pUserData: ?*c_void = null, }; /// Flags to be passed as PoolCreateInfo::flags. pub const PoolCreateFlags = packed struct { __reserved_bit_00: u1 = 0, /// \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored. /// /// This is an optional optimization flag. /// /// If you always allocate using CreateBuffer(), CreateImage(), /// AllocateMemoryForBuffer(), then you don't need to use it because allocator /// knows exact type of your allocations so it can handle Buffer-Image Granularity /// in the optimal way. /// /// If you also allocate using AllocateMemoryForImage() or AllocateMemory(), /// exact type of such allocations is not known, so allocator must be conservative /// in handling Buffer-Image Granularity, which can lead to suboptimal allocation /// (wasted memory). In that case, if you can make sure you always allocate only /// buffers and linear images or only optimal images out of this pool, use this flag /// to make allocator disregard Buffer-Image Granularity and so make allocations /// faster and more optimal. 
ignoreBufferImageGranularity: bool = false, /// \brief Enables alternative, linear allocation algorithm in this pool. /// /// Specify this flag to enable linear allocation algorithm, which always creates /// new allocations after last one and doesn't reuse space from allocations freed in /// between. It trades memory consumption for simplified algorithm and data /// structure, which has better performance and uses less memory for metadata. /// /// By using this flag, you can achieve behavior of free-at-once, stack, /// ring buffer, and double stack. For details, see documentation chapter /// \ref linear_algorithm. /// /// When using this flag, you must specify PoolCreateInfo::maxBlockCount == 1 (or 0 for default). /// /// For more details, see [Linear allocation algorithm](@ref linear_algorithm). linearAlgorithm: bool = false, /// \brief Enables alternative, buddy allocation algorithm in this pool. /// /// It operates on a tree of blocks, each having size that is a power of two and /// a half of its parent's size. Comparing to default algorithm, this one provides /// faster allocation and deallocation and decreased external fragmentation, /// at the expense of more memory wasted (internal fragmentation). /// /// For more details, see [Buddy allocation algorithm](@ref buddy_algorithm). buddyAlgorithm: bool = false, __reserved_bits_04_31: u28 = 0, /// Bit mask to extract only `ALGORITHM` bits from entire set of flags. pub const ALGORITHM_MASK = PoolCreateFlags{ .linearAlgorithm = true, .buddyAlgorithm = true, }; pub usingnamespace vk.FlagsMixin(@This()); }; /// \brief Describes parameter of created #Pool. pub const PoolCreateInfo = extern struct { /// \brief Vulkan memory type index to allocate this pool from. memoryTypeIndex: u32, /// \brief Use combination of #PoolCreateFlagBits. flags: PoolCreateFlags = .{}, /// \brief Size of a single `vk.DeviceMemory` block to be allocated as part of this pool, in bytes. Optional. /// /// Specify nonzero to set explicit, constant size of memory blocks used by this /// pool. /// /// Leave 0 to use default and let the library manage block sizes automatically. /// Sizes of particular blocks may vary. blockSize: vk.DeviceSize = 0, /// \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty. /// /// Set to 0 to have no preallocated blocks and allow the pool be completely empty. minBlockCount: usize = 0, /// \brief Maximum number of blocks that can be allocated in this pool. Optional. /// /// Set to 0 to use default, which is `SIZE_MAX`, which means no limit. /// /// Set to same value as PoolCreateInfo::minBlockCount to have fixed amount of memory allocated /// throughout whole lifetime of this pool. maxBlockCount: usize = 0, /// \brief Maximum number of additional frames that are in use at the same time as current frame. /// /// This value is used only when you make allocations with /// #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become /// lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount. /// /// For example, if you double-buffer your command buffers, so resources used for /// rendering in previous frame may still be in use by the GPU at the moment you /// allocate resources needed for the current frame, set this value to 1. /// /// If you want to allow any allocations other than used in the current frame to /// become lost, set this value to 0. frameInUseCount: u32, }; /// \brief Describes parameter of existing #Pool. 
pub const PoolStats = extern struct { /// \brief Total amount of `vk.DeviceMemory` allocated from Vulkan for this pool, in bytes. size: vk.DeviceSize, /// \brief Total number of bytes in the pool not used by any #Allocation. unusedSize: vk.DeviceSize, /// \brief Number of #Allocation objects created from this pool that were not destroyed or lost. allocationCount: usize, /// \brief Number of continuous memory ranges in the pool not used by any #Allocation. unusedRangeCount: usize, /// \brief Size of the largest continuous free memory region available for new allocation. /// /// Making a new allocation of that size is not guaranteed to succeed because of /// possible additional margin required to respect alignment and buffer/image /// granularity. unusedRangeSizeMax: vk.DeviceSize, /// \brief Number of `vk.DeviceMemory` blocks allocated for this pool. blockCount: usize, }; /// \struct Allocation /// \brief Represents single memory allocation. /// /// It may be either dedicated block of `vk.DeviceMemory` or a specific region of a bigger block of this type /// plus unique offset. /// /// There are multiple ways to create such object. /// You need to fill structure AllocationCreateInfo. /// For more information see [Choosing memory type](@ref choosing_memory_type). /// /// Although the library provides convenience functions that create Vulkan buffer or image, /// allocate memory for it and bind them together, /// binding of the allocation to a buffer or an image is out of scope of the allocation itself. /// Allocation object can exist without buffer/image bound, /// binding can be done manually by the user, and destruction of it can be done /// independently of destruction of the allocation. /// /// The object also remembers its size and some other information. /// To retrieve this information, use function GetAllocationInfo() and inspect /// returned structure AllocationInfo. /// /// Some kinds allocations can be in lost state. /// For more information, see [Lost allocations](@ref lost_allocations). pub const Allocation = extern enum(usize) { Null = 0, _ }; /// \brief Parameters of #Allocation objects, that can be retrieved using function GetAllocationInfo(). pub const AllocationInfo = extern struct { /// \brief Memory type index that this allocation was allocated from. /// /// It never changes. memoryType: u32, /// \brief Handle to Vulkan memory object. /// /// Same memory object can be shared by multiple allocations. /// /// It can change after call to Defragment() if this allocation is passed to the function, or if allocation is lost. /// /// If the allocation is lost, it is equal to `.Null`. deviceMemory: vk.DeviceMemory, /// \brief Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation. /// /// It can change after call to Defragment() if this allocation is passed to the function, or if allocation is lost. offset: vk.DeviceSize, /// \brief Size of this allocation, in bytes. /// /// It never changes, unless allocation is lost. size: vk.DeviceSize, /// \brief Pointer to the beginning of this allocation as mapped data. /// /// If the allocation hasn't been mapped using MapMemory() and hasn't been /// created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value null. /// /// It can change after call to MapMemory(), UnmapMemory(). /// It can also change after call to Defragment() if this allocation is passed to the function. 
pMappedData: ?*c_void, /// \brief Custom general-purpose pointer that was passed as AllocationCreateInfo::pUserData or set using SetAllocationUserData(). /// /// It can change after call to SetAllocationUserData() for this allocation. pUserData: ?*c_void, }; /// \struct DefragmentationContext /// \brief Represents Opaque object that represents started defragmentation process. /// /// Fill structure #DefragmentationInfo2 and call function DefragmentationBegin() to create it. /// Call function DefragmentationEnd() to destroy it. pub const DefragmentationContext = extern enum(usize) { Null = 0, _ }; /// Flags to be used in DefragmentationBegin(). None at the moment. Reserved for future use. pub const DefragmentationFlags = packed struct { __reserved_bits_0_31: u32 = 0, pub usingnamespace vk.FlagsMixin(@This()); }; /// \brief Parameters for defragmentation. /// /// To be used with function DefragmentationBegin(). pub const DefragmentationInfo2 = extern struct { /// \brief Reserved for future use. Should be 0. flags: DefragmentationFlags = .{}, /// \brief Number of allocations in `pAllocations` array. allocationCount: u32, /// \brief Pointer to array of allocations that can be defragmented. /// /// The array should have `allocationCount` elements. /// The array should not contain nulls. /// Elements in the array should be unique - same allocation cannot occur twice. /// It is safe to pass allocations that are in the lost state - they are ignored. /// All allocations not present in this array are considered non-moveable during this defragmentation. pAllocations: [*]Allocation, /// \brief Optional, output. Pointer to array that will be filled with information whether the allocation at certain index has been changed during defragmentation. /// /// The array should have `allocationCount` elements. /// You can pass null if you are not interested in this information. pAllocationsChanged: ?[*]vk.Bool32, /// \brief Numer of pools in `pPools` array. poolCount: u32, /// \brief Either null or pointer to array of pools to be defragmented. /// /// All the allocations in the specified pools can be moved during defragmentation /// and there is no way to check if they were really moved as in `pAllocationsChanged`, /// so you must query all the allocations in all these pools for new `vk.DeviceMemory` /// and offset using GetAllocationInfo() if you might need to recreate buffers /// and images bound to them. /// /// The array should have `poolCount` elements. /// The array should not contain nulls. /// Elements in the array should be unique - same pool cannot occur twice. /// /// Using this array is equivalent to specifying all allocations from the pools in `pAllocations`. /// It might be more efficient. pPools: ?[*]Pool, /// \brief Maximum total numbers of bytes that can be copied while moving allocations to different places using transfers on CPU side, like `memcpy()`, `memmove()`. /// /// `vk.WHOLE_SIZE` means no limit. maxCpuBytesToMove: vk.DeviceSize, /// \brief Maximum number of allocations that can be moved to a different place using transfers on CPU side, like `memcpy()`, `memmove()`. /// /// `UINT32_MAX` means no limit. maxCpuAllocationsToMove: u32, /// \brief Maximum total numbers of bytes that can be copied while moving allocations to different places using transfers on GPU side, posted to `commandBuffer`. /// /// `vk.WHOLE_SIZE` means no limit. 
maxGpuBytesToMove: vk.DeviceSize, /// \brief Maximum number of allocations that can be moved to a different place using transfers on GPU side, posted to `commandBuffer`. /// /// `UINT32_MAX` means no limit. maxGpuAllocationsToMove: u32, /// \brief Optional. Command buffer where GPU copy commands will be posted. /// /// If not null, it must be a valid command buffer handle that supports Transfer queue type. /// It must be in the recording state and outside of a render pass instance. /// You need to submit it and make sure it finished execution before calling DefragmentationEnd(). /// /// Passing null means that only CPU defragmentation will be performed. commandBuffer: vk.CommandBuffer, }; /// \brief Deprecated. Optional configuration parameters to be passed to function Defragment(). /// /// \deprecated This is a part of the old interface. It is recommended to use structure #DefragmentationInfo2 and function DefragmentationBegin() instead. pub const DefragmentationInfo = extern struct { /// \brief Maximum total numbers of bytes that can be copied while moving allocations to different places. /// /// Default is `vk.WHOLE_SIZE`, which means no limit. maxBytesToMove: vk.DeviceSize, /// \brief Maximum number of allocations that can be moved to different place. /// /// Default is `UINT32_MAX`, which means no limit. maxAllocationsToMove: u32, }; /// \brief Statistics returned by function Defragment(). pub const DefragmentationStats = extern struct { /// Total number of bytes that have been copied while moving allocations to different places. bytesMoved: vk.DeviceSize, /// Total number of bytes that have been released to the system by freeing empty `vk.DeviceMemory` objects. bytesFreed: vk.DeviceSize, /// Number of allocations that have been moved to different places. allocationsMoved: u32, /// Number of empty `vk.DeviceMemory` objects that have been released to the system. 
deviceMemoryBlocksFreed: u32, }; pub extern fn vmaCreateAllocator(pCreateInfo: *const AllocatorCreateInfo, pAllocator: *Allocator) callconv(CallConv) vk.Result; pub extern fn vmaDestroyAllocator(allocator: Allocator) callconv(CallConv) void; pub extern fn vmaGetPhysicalDeviceProperties( allocator: Allocator, ppPhysicalDeviceProperties: **const vk.PhysicalDeviceProperties, ) callconv(CallConv) void; pub extern fn vmaGetMemoryProperties( allocator: Allocator, ppPhysicalDeviceMemoryProperties: **const vk.PhysicalDeviceMemoryProperties, ) callconv(CallConv) void; pub extern fn vmaGetMemoryTypeProperties( allocator: Allocator, memoryTypeIndex: u32, pFlags: *align(4) vk.MemoryPropertyFlags, ) callconv(CallConv) void; pub extern fn vmaSetCurrentFrameIndex(allocator: Allocator, frameIndex: u32) callconv(CallConv) void; pub extern fn vmaCalculateStats(allocator: Allocator, pStats: *Stats) callconv(CallConv) void; pub extern fn vmaGetBudget( allocator: Allocator, pBudget: *Budget, ) callconv(CallConv) void; // pub usingnamespace if (config.statsStringEnabled) // struct { // pub extern fn vmaBuildStatsString( // allocator: Allocator, // ppStatsString: *[*:0]u8, // detailedMap: vk.Bool32, // ) callconv(CallConv) void; // pub extern fn vmaFreeStatsString( // allocator: Allocator, // pStatsString: [*:0]u8, // ) callconv(CallConv) void; // } // else // struct {}; pub extern fn vmaFindMemoryTypeIndex( allocator: Allocator, memoryTypeBits: u32, pAllocationCreateInfo: *const AllocationCreateInfo, pMemoryTypeIndex: *u32, ) callconv(CallConv) vk.Result; pub extern fn vmaFindMemoryTypeIndexForBufferInfo( allocator: Allocator, pBufferCreateInfo: *const vk.BufferCreateInfo, pAllocationCreateInfo: *const AllocationCreateInfo, pMemoryTypeIndex: *u32, ) callconv(CallConv) vk.Result; pub extern fn vmaFindMemoryTypeIndexForImageInfo( allocator: Allocator, pImageCreateInfo: *const vk.ImageCreateInfo, pAllocationCreateInfo: *const AllocationCreateInfo, pMemoryTypeIndex: *u32, ) callconv(CallConv) vk.Result; pub extern fn vmaCreatePool( allocator: Allocator, pCreateInfo: *const PoolCreateInfo, pPool: *Pool, ) callconv(CallConv) vk.Result; pub extern fn vmaDestroyPool( allocator: Allocator, pool: Pool, ) callconv(CallConv) void; pub extern fn vmaGetPoolStats( allocator: Allocator, pool: Pool, pPoolStats: *PoolStats, ) callconv(CallConv) void; pub extern fn vmaMakePoolAllocationsLost( allocator: Allocator, pool: Pool, pLostAllocationCount: ?*usize, ) callconv(CallConv) void; pub extern fn vmaCheckPoolCorruption(allocator: Allocator, pool: Pool) callconv(CallConv) vk.Result; pub extern fn vmaGetPoolName( allocator: Allocator, pool: Pool, ppName: *?[*:0]const u8, ) callconv(CallConv) void; pub extern fn vmaSetPoolName( allocator: Allocator, pool: Pool, pName: ?[*:0]const u8, ) callconv(CallConv) void; pub extern fn vmaAllocateMemory( allocator: Allocator, pVkMemoryRequirements: *const vk.MemoryRequirements, pCreateInfo: *const AllocationCreateInfo, pAllocation: *Allocation, pAllocationInfo: ?*AllocationInfo, ) callconv(CallConv) vk.Result; pub extern fn vmaAllocateMemoryPages( allocator: Allocator, pVkMemoryRequirements: *const vk.MemoryRequirements, pCreateInfo: *const AllocationCreateInfo, allocationCount: usize, pAllocations: [*]Allocation, pAllocationInfo: ?[*]AllocationInfo, ) callconv(CallConv) vk.Result; pub extern fn vmaAllocateMemoryForBuffer( allocator: Allocator, buffer: vk.Buffer, pCreateInfo: *const AllocationCreateInfo, pAllocation: *Allocation, pAllocationInfo: ?*AllocationInfo, ) callconv(CallConv) 
vk.Result; pub extern fn vmaAllocateMemoryForImage( allocator: Allocator, image: vk.Image, pCreateInfo: *const AllocationCreateInfo, pAllocation: *Allocation, pAllocationInfo: ?*AllocationInfo, ) callconv(CallConv) vk.Result; pub extern fn vmaFreeMemory( allocator: Allocator, allocation: Allocation, ) callconv(CallConv) void; pub extern fn vmaFreeMemoryPages( allocator: Allocator, allocationCount: usize, pAllocations: [*]Allocation, ) callconv(CallConv) void; /// \brief Deprecated. /// /// In version 2.2.0 it used to try to change allocation's size without moving or reallocating it. /// In current version it returns `vk.SUCCESS` only if `newSize` equals current allocation's size. /// Otherwise returns `error.VK_OUT_OF_POOL_MEMORY`, indicating that allocation's size could not be changed. pub extern fn vmaResizeAllocation( allocator: Allocator, allocation: Allocation, newSize: vk.DeviceSize, ) callconv(CallConv) vk.Result; pub extern fn vmaGetAllocationInfo( allocator: Allocator, allocation: Allocation, pAllocationInfo: *AllocationInfo, ) callconv(CallConv) void; pub extern fn vmaTouchAllocation( allocator: Allocator, allocation: Allocation, ) callconv(CallConv) vk.Bool32; pub extern fn vmaSetAllocationUserData( allocator: Allocator, allocation: Allocation, pUserData: ?*c_void, ) callconv(CallConv) void; pub extern fn vmaCreateLostAllocation( allocator: Allocator, pAllocation: *Allocation, ) callconv(CallConv) void; pub extern fn vmaMapMemory( allocator: Allocator, allocation: Allocation, ppData: **c_void, ) callconv(CallConv) vk.Result; pub extern fn vmaUnmapMemory( allocator: Allocator, allocation: Allocation, ) callconv(CallConv) void; pub extern fn vmaFlushAllocation(allocator: Allocator, allocation: Allocation, offset: vk.DeviceSize, size: vk.DeviceSize) callconv(CallConv) void; pub extern fn vmaInvalidateAllocation(allocator: Allocator, allocation: Allocation, offset: vk.DeviceSize, size: vk.DeviceSize) callconv(CallConv) void; pub extern fn vmaCheckCorruption(allocator: Allocator, memoryTypeBits: u32) callconv(CallConv) vk.Result; pub extern fn vmaDefragmentationBegin( allocator: Allocator, pInfo: *const DefragmentationInfo2, pStats: ?*DefragmentationStats, pContext: *DefragmentationContext, ) callconv(CallConv) vk.Result; pub extern fn vmaDefragmentationEnd( allocator: Allocator, context: DefragmentationContext, ) callconv(CallConv) vk.Result; /// \brief Deprecated. Compacts memory by moving allocations. /// /// @param pAllocations Array of allocations that can be moved during this compation. /// @param allocationCount Number of elements in pAllocations and pAllocationsChanged arrays. /// @param[out] pAllocationsChanged Array of boolean values that will indicate whether matching allocation in pAllocations array has been moved. This parameter is optional. Pass null if you don't need this information. /// @param pDefragmentationInfo Configuration parameters. Optional - pass null to use default values. /// @param[out] pDefragmentationStats Statistics returned by the function. Optional - pass null if you don't need this information. /// @return `vk.SUCCESS` if completed, negative error code in case of error. /// /// \deprecated This is a part of the old interface. It is recommended to use structure #DefragmentationInfo2 and function DefragmentationBegin() instead. /// /// This function works by moving allocations to different places (different /// `vk.DeviceMemory` objects and/or different offsets) in order to optimize memory /// usage. 
Only allocations that are in `pAllocations` array can be moved. All other /// allocations are considered nonmovable in this call. Basic rules: /// /// - Only allocations made in memory types that have /// `vk.MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `vk.MEMORY_PROPERTY_HOST_COHERENT_BIT` /// flags can be compacted. You may pass other allocations but it makes no sense - /// these will never be moved. /// - Custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT or /// #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag are not defragmented. Allocations /// passed to this function that come from such pools are ignored. /// - Allocations created with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT or /// created as dedicated allocations for any other reason are also ignored. /// - Both allocations made with or without #VMA_ALLOCATION_CREATE_MAPPED_BIT /// flag can be compacted. If not persistently mapped, memory will be mapped /// temporarily inside this function if needed. /// - You must not pass same #Allocation object multiple times in `pAllocations` array. /// /// The function also frees empty `vk.DeviceMemory` blocks. /// /// Warning: This function may be time-consuming, so you shouldn't call it too often /// (like after every resource creation/destruction). /// You can call it on special occasions (like when reloading a game level or /// when you just destroyed a lot of objects). Calling it every frame may be OK, but /// you should measure that on your platform. /// /// For more information, see [Defragmentation](@ref defragmentation) chapter. pub extern fn vmaDefragment( allocator: Allocator, pAllocations: *Allocation, allocationCount: usize, pAllocationsChanged: *vk.Bool32, pDefragmentationInfo: *const DefragmentationInfo, pDefragmentationStats: *DefragmentationStats, ) callconv(CallConv) vk.Result; pub extern fn vmaBindBufferMemory( allocator: Allocator, allocation: Allocation, buffer: vk.Buffer, ) callconv(CallConv) vk.Result; pub extern fn vmaBindBufferMemory2( allocator: Allocator, allocation: Allocation, allocationLocalOffset: vk.DeviceSize, buffer: vk.Buffer, pNext: ?*const c_void, ) callconv(CallConv) vk.Result; pub extern fn vmaBindImageMemory( allocator: Allocator, allocation: Allocation, image: vk.Image, ) callconv(CallConv) vk.Result; pub extern fn vmaBindImageMemory2( allocator: Allocator, allocation: Allocation, allocationLocalOffset: vk.DeviceSize, image: vk.Image, pNext: ?*const c_void, ) callconv(CallConv) vk.Result; pub extern fn vmaCreateBuffer( allocator: Allocator, pBufferCreateInfo: *const vk.BufferCreateInfo, pAllocationCreateInfo: *const AllocationCreateInfo, pBuffer: *vk.Buffer, pAllocation: *Allocation, pAllocationInfo: ?*AllocationInfo, ) callconv(CallConv) vk.Result; pub extern fn vmaDestroyBuffer( allocator: Allocator, buffer: vk.Buffer, allocation: Allocation, ) callconv(CallConv) void; pub extern fn vmaCreateImage( allocator: Allocator, pImageCreateInfo: *const vk.ImageCreateInfo, pAllocationCreateInfo: *const AllocationCreateInfo, pImage: *vk.Image, pAllocation: *Allocation, pAllocationInfo: ?*AllocationInfo, ) callconv(CallConv) vk.Result; pub extern fn vmaDestroyImage( allocator: Allocator, image: vk.Image, allocation: Allocation, ) callconv(CallConv) void;
vma.zig
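// Usage sketch for the vma.zig bindings above (not part of the original sources).
// Assumptions: the file is importable as "vma.zig", and the vk bindings it relies on
// expose `vk.Buffer`, `vk.BufferCreateInfo` and `vk.Result` as used in the extern
// declarations. A hypothetical helper that requests a device-local buffer:
const vma = @import("vma.zig");
const vk = @import("vk.zig"); // assumed module name for the Vulkan bindings

fn createGpuBuffer(
    allocator: vma.Allocator,
    buffer_info: *const vk.BufferCreateInfo,
    out_buffer: *vk.Buffer,
    out_allocation: *vma.Allocation,
) vk.Result {
    // .gpuOnly prefers DEVICE_LOCAL memory; every other AllocationCreateInfo
    // field keeps its default (no dedicated block, default pool, no user data).
    const alloc_info = vma.AllocationCreateInfo{ .usage = .gpuOnly };
    return vma.vmaCreateBuffer(allocator, buffer_info, &alloc_info, out_buffer, out_allocation, null);
}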
const std = @import("std"); const allocator = std.heap.page_allocator; const usage = \\Usage: touch [options] <file...> \\ \\Options: \\ -h, --help Print this help and exit \\ -a Change only the access time \\ -c, --no-create Do not create any file \\ -m Change only the modification time \\ ; pub fn main() !void { const stdout = std.io.getStdOut().writer(); const stderr = std.io.getStdErr().writer(); const all_args = try std.process.argsAlloc(allocator); defer std.process.argsFree(allocator, all_args); var no_create: bool = false; var atime_only: bool = false; var mtime_only: bool = false; const args = all_args[1..]; var i: usize = 0; while (i < args.len) : (i += 1) { const arg = args[i]; if (std.mem.startsWith(u8, arg, "-")) { if (std.mem.eql(u8, arg, "-h") or std.mem.eql(u8, arg, "--help")) { _ = try stdout.print("{s}", .{usage}); return; } else if (std.mem.eql(u8, arg, "-a")) { if (mtime_only) { _ = try stderr.print("error: -a and -m cannot be used at the same time\n", .{}); return; } atime_only = true; } else if (std.mem.eql(u8, arg, "-c") or std.mem.eql(u8, arg, "--no-create")) { no_create = true; } else if (std.mem.eql(u8, arg, "-m")) { if (atime_only) { _ = try stderr.print("error: -a and -m cannot be used at the same time\n", .{}); return; } mtime_only = true; } else { _ = try stderr.print("error: Unknown option: {s}\n", .{arg}); return; } } else break; } if (args.len == i) { try stderr.print("{s}", .{usage}); return; } while (i < args.len) : (i += 1) { const arg = args[i]; var f_path = try std.fs.path.resolve(allocator, &[_][]const u8{arg}); var cwd = std.fs.cwd(); var f_file: std.fs.File = undefined; if (no_create) { f_file = cwd.openFile(f_path, .{}) catch |e| switch (e) { error.PathAlreadyExists => { try stderr.print("error: PathAlreadyExists for '{s}'\n", .{arg}); return; }, error.FileNotFound => { try stderr.print("error: FileNotFound for '{s}'\n", .{arg}); return; }, error.AccessDenied => { try stderr.print("error: AccessDenied for '{s}'\n", .{arg}); return; }, else => return e, }; } else { f_file = cwd.createFile(f_path, .{ .truncate = false }) catch |e| switch (e) { error.FileNotFound => { try stderr.print("error: FileNotFound for '{s}'\n", .{arg}); return; }, error.AccessDenied => { try stderr.print("error: AccessDenied for '{s}'\n", .{arg}); return; }, else => return e, }; } defer f_file.close(); var cur_time = std.time.nanoTimestamp(); var f_stat = try f_file.stat(); if (atime_only) { try f_file.updateTimes(cur_time, f_stat.mtime); } else if (mtime_only) { try f_file.updateTimes(f_stat.atime, cur_time); } else { try f_file.updateTimes(cur_time, cur_time); } } }
src/touch.zig
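// Sketch (not part of the original file): the timestamp logic above, isolated as a
// reusable helper. std.fs.File.updateTimes takes atime and mtime in nanoseconds;
// std.time.nanoTimestamp and File.stat provide values in the same unit.
const std = @import("std");

fn touchFile(file: std.fs.File, atime_only: bool, mtime_only: bool) !void {
    const now = std.time.nanoTimestamp();
    const stat = try file.stat();
    if (atime_only) {
        try file.updateTimes(now, stat.mtime); // refresh atime, keep mtime
    } else if (mtime_only) {
        try file.updateTimes(stat.atime, now); // keep atime, refresh mtime
    } else {
        try file.updateTimes(now, now);
    }
}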
const Wwise = @import("../wwise.zig").Wwise; const ImGui = @import("../imgui.zig").ImGui; const DemoInterface = @import("demo_interface.zig").DemoInterface; const std = @import("std"); pub const LocalizationDemo = struct { allocator: std.mem.Allocator = undefined, isVisibleState: bool = false, bankID: u32 = 0, currentSelectedLanguage: usize = 0, const Self = @This(); const DemoGameObjectID = 3; const Languages = &[_][]const u8{ "English(US)", "French(Canada)" }; pub fn init(self: *Self, allocator: std.mem.Allocator) !void { self.allocator = allocator; self.currentSelectedLanguage = 0; try Wwise.setCurrentLanguage(Languages[0]); self.bankID = try Wwise.loadBankByString("Human.bnk"); try Wwise.registerGameObj(DemoGameObjectID, "LocalizationDemo"); } pub fn deinit(self: *Self) void { _ = Wwise.unloadBankByID(self.bankID); Wwise.unregisterGameObj(DemoGameObjectID); self.allocator.destroy(self); } pub fn onUI(self: *Self) !void { if (ImGui.igBegin("Localization Demo", &self.isVisibleState, ImGui.ImGuiWindowFlags_AlwaysAutoResize)) { if (ImGui.igButton("Say \"Hello\"", .{ .x = 0, .y = 0 })) { _ = try Wwise.postEvent("Play_Hello", DemoGameObjectID); } const firstLanguage = try std.cstr.addNullByte(self.allocator, Languages[self.currentSelectedLanguage]); defer self.allocator.free(firstLanguage); if (ImGui.igBeginCombo("Language", firstLanguage, 0)) { for (Languages) |lang, i| { const is_selected = (self.currentSelectedLanguage == i); const cLang = try std.cstr.addNullByte(self.allocator, lang); defer self.allocator.free(cLang); if (ImGui.igSelectable(cLang, is_selected, 0, .{ .x = 0, .y = 0 })) { self.currentSelectedLanguage = i; try Wwise.setCurrentLanguage(Languages[self.currentSelectedLanguage]); _ = Wwise.unloadBankByID(self.bankID); self.bankID = try Wwise.loadBankByString("Human.bnk"); } if (is_selected) { ImGui.igSetItemDefaultFocus(); } } ImGui.igEndCombo(); } ImGui.igEnd(); } if (!self.isVisibleState) { Wwise.stopAllOnGameObject(DemoGameObjectID); } } pub fn isVisible(self: *Self) bool { return self.isVisibleState; } pub fn show(self: *Self) void { self.isVisibleState = true; } pub fn getInterface(self: *Self) DemoInterface { return DemoInterface{ .instance = @ptrCast(DemoInterface.InstanceType, self), .initFn = @ptrCast(DemoInterface.InitFn, init), .deinitFn = @ptrCast(DemoInterface.DeinitFn, deinit), .onUIFn = @ptrCast(DemoInterface.OnUIFn, onUI), .isVisibleFn = @ptrCast(DemoInterface.IsVisibleFn, isVisible), .showFn = @ptrCast(DemoInterface.ShowFn, show), }; } };
src/demos/localization_demo.zig
const std = @import("std"); const math = std.math; fn assertIsFloat(comptime T: type) void { if (@typeInfo(T) != .Float and @typeInfo(T) != .ComptimeFloat) @compileError("Expected float type, found `" ++ @typeName(T) ++ "`!"); } pub fn toRadians(degrees: anytype) @TypeOf(degrees) { comptime assertIsFloat(@TypeOf(degrees)); return degrees * math.pi / 180.0; } pub fn toDegrees(radians: anytype) @TypeOf(radians) { comptime assertIsFloat(@TypeOf(radians)); return radians * 180.0 / math.pi; } pub fn lerp(a: anytype, b: @TypeOf(a), t: @TypeOf(a)) @TypeOf(a) { comptime assertIsFloat(@TypeOf(a)); return a + ((b - a) * t); } fn pow(a: anytype, power_of: @TypeOf(a)) @TypeOf(a) { const T = @TypeOf(a); const _pow = std.math.pow; const pow_floor = @floor(power_of); if (power_of != pow_floor) { return lerp(_pow(T, a, pow_floor), _pow(T, a, @ceil(power_of)), power_of - pow_floor); } else { return _pow(T, a, power_of); } } pub fn easeStart(t: anytype, power_of: @TypeOf(t)) @TypeOf(t) { comptime assertIsFloat(@TypeOf(t)); return pow(t, power_of); } pub fn easeEnd(t: anytype, power_of: @TypeOf(t)) @TypeOf(t) { comptime assertIsFloat(@TypeOf(t)); return 1 - easeStart(1 - t, power_of); } pub fn easeStartEnd(t: anytype, power_of: @TypeOf(t)) @TypeOf(t) { comptime assertIsFloat(@TypeOf(t)); return lerp(easeStart(t, power_of), easeEnd(t, power_of), t); } pub fn easeStartElastic(t: anytype, amplitude: f32, period: @TypeOf(t)) @TypeOf(t) { var a: f32 = amplitude; var s: f32 = period / 4.0; if (a < 1.0) a = 1.0 else s = period * std.math.asin(1.0 / a) / std.math.tau; const _t = t - 1.0; return -(a * pow(@as(f32, 2.0), 10.0 * _t) * @sin((_t - s) * std.math.tau / period)); } pub fn easeEndElastic(t: anytype, amplitude: f32, period: @TypeOf(t)) @TypeOf(t) { var a: f32 = amplitude; var s: f32 = period / 4.0; if (a < 1.0) a = 1.0 else s = period * std.math.asin(1.0 / a) / std.math.tau; return a * pow(@as(f32, 2.0), -10.0 * t) * @sin((t - s) * std.math.tau / period) + 1.0; } pub fn easeStartEndElastic(t: anytype, amplitude: f32, period: @TypeOf(t)) @TypeOf(t) { var a: f32 = amplitude; var s: f32 = period / 4.0; if (a < 1.0) a = 1.0 else s = period * std.math.asin(1.0 / a) / std.math.tau; if (t * 2 < 1) { return -0.5 * (a * pow(@as(f32, 2.0), 10.0 * (t * 2.0 - 1.0)) * @sin(((t * 2.0 - 1.0) - s) * std.math.tau / period)); } else { return a * pow(@as(f32, 2.0), -10.0 * (t * 2.0 - 1.0)) * @sin(((t * 2.0) - s) * std.math.tau / period) * 0.5 + 1.0; } } pub fn easeStartBack(t: anytype, overshoot: @TypeOf(t)) @TypeOf(t) { return t * t * ((overshoot + 1) * t - overshoot); } pub fn easeEndBack(t: anytype, overshoot: @TypeOf(t)) @TypeOf(t) { const _t = t - 1; return _t * _t * ((overshoot + 1) * _t + overshoot) + 1; } pub fn easeStartEndBack(t: anytype, overshoot: @TypeOf(t)) @TypeOf(t) { const o: f32 = overshoot * 1.525; var _t = t * 2.0; if (_t < 1.0) { return 0.5 * (_t * _t * ((o + 1) * _t - o)); } else { _t -= 2.0; return 0.5 * (_t * _t * ((o + 1) * _t + o) + 2.0); } } // TODO: add tests
src/float.zig
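// Endpoint checks for the easing helpers above (a sketch toward the file's
// "TODO: add tests" note; assumes the file is importable as "float.zig").
const std = @import("std");
const float = @import("float.zig");

test "lerp and easing endpoints" {
    const eps: f32 = 0.0001;
    // lerp interpolates linearly: t = 0 gives a, t = 1 gives b.
    try std.testing.expect(std.math.approxEqAbs(f32, float.lerp(@as(f32, 0.0), 10.0, 0.0), 0.0, eps));
    try std.testing.expect(std.math.approxEqAbs(f32, float.lerp(@as(f32, 0.0), 10.0, 1.0), 10.0, eps));
    // easeStart(t, 2) reduces to t * t; easeEnd mirrors it around t = 0.5.
    try std.testing.expect(std.math.approxEqAbs(f32, float.easeStart(@as(f32, 0.5), 2.0), 0.25, eps));
    try std.testing.expect(std.math.approxEqAbs(f32, float.easeEnd(@as(f32, 0.5), 2.0), 0.75, eps));
}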
const zwl = @import("zwl/src/zwl.zig"); pub const WindowError = error { InitializationError }; const std = @import("std"); const zlm = @import("zlm"); const Vec2 = zlm.Vec2; // TODO: more inputs and a more efficient way to do them pub const Input = struct { nativeId: u32, lastMousePos: Vec2 = Vec2.zero, mouseDelta: Vec2 = Vec2.zero, firstFrame: bool = true, pub const KEY_A = 0; pub const KEY_D = 0; pub const KEY_S = 0; pub const KEY_W = 0; pub const KEY_ESCAPE = 0; pub const KEY_SPACE = 0; pub const KEY_UP = 0; pub const KEY_LEFT = 0; pub const KEY_RIGHT = 0; pub const KEY_DOWN = 0; pub const MouseInputMode = enum { Normal, Hidden, Grabbed }; pub const MouseButton = enum { Left, Middle, Right }; pub const Joystick = struct { id: u4, name: []const u8, /// This doesn't necessarily means the joystick *IS* a gamepad, this means it is registered in the DB. isGamepad: bool, pub const ButtonType = enum { A, B, X, Y, LeftBumper, RightBumper, Back, Start, Guide, LeftThumb, RightThumb, DPad_Up, DPad_Right, DPad_Down, DPad_Left }; pub fn getRawAxes(self: *const Joystick) []const f32 { var count: c_int = 0; const axes = c.glfwGetJoystickAxes(self.id, &count); return axes[0..@intCast(usize, count)]; } pub fn getRawButtons(self: *const Joystick) []bool { var count: c_int = 0; const cButtons = c.glfwGetJoystickButtons(self.id, &count); var cButtonsBool: [15]bool = undefined; var i: usize = 0; while (i < count) { cButtonsBool[i] = cButtons[i] == c.GLFW_PRESS; i += 1; } return cButtonsBool[0..@intCast(usize, count)]; } pub fn getAxes(self: *const Joystick) []const f32 { if (self.isGamepad) { var state: c.GLFWgamepadstate = undefined; _ = c.glfwGetGamepadState(self.id, &state); return state.axes[0..6]; } else { return self.getRawAxes(); } } pub fn isButtonDown(self: *const Joystick, btn: ButtonType) bool { const buttons = self.getButtons(); return buttons[@enumToInt(btn)]; } pub fn getButtons(self: *const Joystick) []bool { if (self.isGamepad) { var state: c.GLFWgamepadstate = undefined; _ = c.glfwGetGamepadState(self.id, &state); var buttons: [15]bool = undefined; for (state.buttons[0..15]) |value, i| { buttons[i] = value == c.GLFW_PRESS; } return buttons[0..]; } else { return self.getRawButtons(); } } }; fn init(self: *const Input) void { } /// Returns true if the key is currently being pressed. pub fn isKeyDown(self: *const Input, key: u32) bool { return false; } pub fn getJoystick(self: *const Input, id: u4) ?Joystick { return null; } pub fn isMouseButtonDown(self: *const Input, button: MouseButton) bool { return false; } pub fn getMousePosition(self: *const Input) Vec2 { return Vec2.new(0, 0); } /// Set the input mode of the mouse. /// This allows to grab, hide or reset to normal the cursor. pub fn setMouseInputMode(self: *const Input, mode: MouseInputMode) void { } pub fn getMouseInputMode(self: *const Input) MouseInputMode { return MouseInputMode.Normal; } pub fn update(self: *Input) void { } }; pub const Window = struct { const Platform = zwl.Platform(.{ .single_window = true, .backends_enabled = .{ .opengl = true }, .platforms_enabled = .{ .x11 = false, .xlib = true // temporary, for OpenGL support } }); /// The input context of the window input: Input, platform: *Platform, nativeId: *Platform.Window, /// Create a new window /// By default, the window will be resizable, with empty title and a size of 800x600. 
pub fn create() !Window { var platform = try Platform.init(std.heap.page_allocator, .{}); var window = try platform.createWindow(.{ .resizeable = true, .decorations = true, .track_keyboard = true, .track_mouse = true, .title = "", .backend = .{ .opengl = .{ .major = 3, .minor = 2 } } }); return Window { .nativeId = window, .platform = platform, .input = .{ .nativeId = 0 } }; } pub fn setSize(self: *const Window, width: u32, height: u32) void { var w = width; var h = height; if (w > std.math.maxInt(u16) or h > std.math.maxInt(u16)) { std.log.warn("Unable to set size to {d}x{d}: ZWL only supports up to a 65535x65535 window size, size will be clamped to 65535x65535", .{w, h}); w = std.math.maxInt(u16); h = std.math.maxInt(u16); } self.nativeId.configure(.{ .width = @intCast(u16, w), .height = @intCast(u16, h) }) catch unreachable; } pub fn setPosition(self: *const Window, x: i32, y: i32) void { } pub fn setTitle(self: *const Window, title: [:0]const u8) void { self.nativeId.configure(.{ .title = title }) catch unreachable; // TODO: handle error on title change? } pub fn getPosition(self: *const Window) Vec2 { return Vec2.new(0, 0); } pub fn getSize(self: *const Window) Vec2 { return self.getFramebufferSize(); } pub fn getFramebufferSize(self: *const Window) Vec2 { var size = self.nativeId.getSize(); return Vec2.new(@intToFloat(f32, size[0]), @intToFloat(f32, size[1])); } /// Poll events, swap buffers and update input. /// Returns false if the window should be closed and true otherwise. pub fn update(self: *Window) bool { self.input.update(); self.nativeId.present() catch unreachable; return true; } pub fn deinit(self: *Window) void { self.nativeId.deinit(); self.platform.deinit(); } }; comptime { std.testing.refAllDecls(Window); std.testing.refAllDecls(Input); }
didot-zwl/window.zig
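// Usage sketch for the Window wrapper above (not part of the original sources;
// assumes the file is importable as "window.zig" and an X11/Xlib platform is available).
const std = @import("std");
const Window = @import("window.zig").Window;

pub fn main() !void {
    var window = try Window.create();
    defer window.deinit();
    window.setTitle("didot window");
    // update() polls events and presents; per its doc comment it returns false
    // once the window should close, which ends the loop.
    while (window.update()) {}
}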
const std = @import("std"); /// Represents a single DNS domain-name, which is a slice of strings. /// /// The "www.google.com" friendly domain name can be represented in DNS as a /// sequence of labels: first "www", then "google", then "com", with a length /// prefix for all of them, ending in a null byte. /// /// Keep in mind Name's are not singularly deserializeable, as the names /// could be pointers to different bytes in the packet. /// (RFC1035, section 4.1.4 Message Compression) pub const Name = struct { /// The name's labels. labels: [][]const u8, /// Returns the total size in bytes of the DNS Name pub fn size(self: @This()) usize { // by default, add the null octet at the end of it var total_size: usize = 1; for (self.labels) |label| { // length octet + the actual label octets total_size += @sizeOf(u8); total_size += label.len * @sizeOf(u8); } return total_size; } /// Get a Name out of a domain name ("www.google.com", for example). pub fn fromString(domain: []const u8, buffer: [][]const u8) !@This() { if (domain.len > 255) return error.Overflow; var it = std.mem.split(domain, "."); var idx: usize = 0; while (it.next()) |label| { // Is there a better error for this? if (idx > (buffer.len - 1)) return error.Underflow; // buffer too small buffer[idx] = label; idx += 1; } return @This(){ .labels = buffer[0..idx] }; } pub fn serialize(self: @This(), serializer: anytype) !void { std.debug.warn("{}\n", .{self.labels.len}); for (self.labels) |label| { std.debug.assert(label.len < 255); try serializer.serialize(@intCast(u8, label.len)); for (label) |byte| { try serializer.serialize(byte); } } // null-octet for the end of labels for this name try serializer.serialize(@as(u8, 0)); } /// Format the given DNS name. pub fn format( self: @This(), comptime f: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) !void { if (f.len != 0) { @compileError("Unknown format character: '" ++ f ++ "'"); } for (self.labels) |label| { try std.fmt.format(writer, "{}.", .{label}); } } };
src/pkg2/names.zig
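// Worked example for Name above (sketch; assumes the file is importable as "names.zig").
// "www.google.com" splits into the labels {"www", "google", "com"}; on the wire that is
// (1+3) + (1+6) + (1+3) length-prefixed bytes plus the terminating null octet = 16 bytes.
const std = @import("std");
const Name = @import("names.zig").Name;

test "Name.fromString and size" {
    var labels: [8][]const u8 = undefined;
    const name = try Name.fromString("www.google.com", labels[0..]);
    try std.testing.expectEqual(@as(usize, 3), name.labels.len);
    try std.testing.expectEqual(@as(usize, 16), name.size());
}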
const std = @import("std"); const builtin = @import("builtin"); const expect = std.testing.expect; test "@byteSwap integers" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; const ByteSwapIntTest = struct { fn run() !void { try t(u0, 0, 0); try t(u8, 0x12, 0x12); try t(u16, 0x1234, 0x3412); try t(u24, 0x123456, 0x563412); try t(i24, @bitCast(i24, @as(u24, 0xf23456)), 0x5634f2); try t(i24, 0x1234f6, @bitCast(i24, @as(u24, 0xf63412))); try t(u32, 0x12345678, 0x78563412); try t(i32, @bitCast(i32, @as(u32, 0xf2345678)), 0x785634f2); try t(i32, 0x123456f8, @bitCast(i32, @as(u32, 0xf8563412))); try t(u40, 0x123456789a, 0x9a78563412); try t(i48, 0x123456789abc, @bitCast(i48, @as(u48, 0xbc9a78563412))); try t(u56, 0x123456789abcde, 0xdebc9a78563412); try t(u64, 0x123456789abcdef1, 0xf1debc9a78563412); try t(u88, 0x123456789abcdef1112131, 0x312111f1debc9a78563412); try t(u96, 0x123456789abcdef111213141, 0x41312111f1debc9a78563412); try t(u128, 0x123456789abcdef11121314151617181, 0x8171615141312111f1debc9a78563412); try t(u0, @as(u0, 0), 0); try t(i8, @as(i8, -50), -50); try t(i16, @bitCast(i16, @as(u16, 0x1234)), @bitCast(i16, @as(u16, 0x3412))); try t(i24, @bitCast(i24, @as(u24, 0x123456)), @bitCast(i24, @as(u24, 0x563412))); try t(i32, @bitCast(i32, @as(u32, 0x12345678)), @bitCast(i32, @as(u32, 0x78563412))); try t(u40, @bitCast(i40, @as(u40, 0x123456789a)), @as(u40, 0x9a78563412)); try t(i48, @bitCast(i48, @as(u48, 0x123456789abc)), @bitCast(i48, @as(u48, 0xbc9a78563412))); try t(i56, @bitCast(i56, @as(u56, 0x123456789abcde)), @bitCast(i56, @as(u56, 0xdebc9a78563412))); try t(i64, @bitCast(i64, @as(u64, 0x123456789abcdef1)), @bitCast(i64, @as(u64, 0xf1debc9a78563412))); try t(i88, @bitCast(i88, @as(u88, 0x123456789abcdef1112131)), @bitCast(i88, @as(u88, 0x312111f1debc9a78563412))); try t(i96, @bitCast(i96, @as(u96, 0x123456789abcdef111213141)), @bitCast(i96, @as(u96, 0x41312111f1debc9a78563412))); try t( i128, @bitCast(i128, @as(u128, 0x123456789abcdef11121314151617181)), @bitCast(i128, @as(u128, 0x8171615141312111f1debc9a78563412)), ); } fn t(comptime I: type, input: I, expected_output: I) !void { try std.testing.expect(expected_output == @byteSwap(I, input)); } }; comptime try ByteSwapIntTest.run(); try ByteSwapIntTest.run(); } fn vector8() !void { var v = @Vector(2, u8){ 0x12, 0x13 }; var result = @byteSwap(u8, v); try expect(result[0] == 0x12); try expect(result[1] == 0x13); } test "@byteSwap vectors u8" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; comptime try vector8(); try vector8(); } fn vector16() !void { var v = @Vector(2, u16){ 0x1234, 0x2345 }; var result = @byteSwap(u16, v); try expect(result[0] == 0x3412); try expect(result[1] == 0x4523); } test "@byteSwap vectors u16" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == 
.stage2_aarch64) return error.SkipZigTest; comptime try vector16(); try vector16(); } fn vector24() !void { var v = @Vector(2, u24){ 0x123456, 0x234567 }; var result = @byteSwap(u24, v); try expect(result[0] == 0x563412); try expect(result[1] == 0x674523); } test "@byteSwap vectors u24" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; comptime try vector24(); try vector24(); } fn vector0() !void { var v = @Vector(2, u0){ 0, 0 }; var result = @byteSwap(u0, v); try expect(result[0] == 0); try expect(result[1] == 0); } test "@byteSwap vectors u0" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; comptime try vector0(); try vector0(); }
test/behavior/byteswap.zig
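// Sketch of a typical @byteSwap use (assumption, not part of the test file):
// converting big-endian wire values to native order on little-endian hosts.
const std = @import("std");
const builtin = @import("builtin");

fn bigToNative(comptime T: type, x: T) T {
    // On a big-endian target the value is already in the right order.
    return if (builtin.target.cpu.arch.endian() == .Big) x else @byteSwap(T, x);
}

test "bigToNative sketch" {
    const expected: u16 = if (builtin.target.cpu.arch.endian() == .Big) 0x1234 else 0x3412;
    try std.testing.expect(bigToNative(u16, 0x1234) == expected);
}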
const std = @import("std"); const info = std.log.info; const warn = std.log.warn; const bus = @import("bus.zig"); const rdp = @import("rdp.zig"); const RDPStatus = rdp.RDPStatus; const mi = @import("mi.zig"); const InterruptSource = mi.InterruptSource; const rcpROM = @import("rsptable.zig").rcpROM; const rsqROM = @import("rsptable.zig").rsqROM; /// Clamp 32-bit signed data fn clamps32(data: i32) u16 { if (data < -32768) return @bitCast(u16, @intCast(i16, -32768)); if (data > 32767) return 32767; return @truncate(u16, @bitCast(u32, data)); } /// Clamp 32-bit unsigned data fn clampu32(data: i32) u16 { if (data < 0) return 0; if (data > 32767) return 65535; return @truncate(u16, @bitCast(u32, data)); } fn isSignExtended(hi: u16, lo: u16) bool { if (hi == 0) { return (lo & 0x8000) == 0; } else if (hi == 0xFFFF) { return (lo & 0x8000) == 0x8000; } else { return false; } } /// Sign extend 8-bit data fn exts8(data: u8) u32 { return @bitCast(u32, @intCast(i32, @bitCast(i8, data))); } /// Sign extend 16-bit data fn exts16(data: u16) u32 { return @bitCast(u32, @intCast(i32, @bitCast(i16, data))); } /// Sign extend 32-bit data fn exts32(data: u32) u48 { return @bitCast(u48, @intCast(i48, @bitCast(i32, data))); } /// Get VU lane index fn getIdxL(idx: u32) u32 { return 7 - idx; } /// Get Accumulator lane index fn getIdxA(idx: u32) u32 { return 2 - idx; } const RSPMemory = enum(u64) { DMEM = 0x00, IMEM = 0x01, IO = 0x40, PC = 0x80, }; const RSPReg = enum(u64) { RSPAddr = 0x00, RSPDRAMAddr = 0x04, RSPDMALenRD = 0x08, RSPDMALenWR = 0x0C, RSPStatus = 0x10, RSPDMABusy = 0x18, }; const RSPOpcode = enum(u32) { SPECIAL = 0x00, REGIMM = 0x01, J = 0x02, JAL = 0x03, BEQ = 0x04, BNE = 0x05, BLEZ = 0x06, BGTZ = 0x07, ADDI = 0x08, ADDIU = 0x09, ANDI = 0x0C, ORI = 0x0D, XORI = 0x0E, LUI = 0x0F, COP0 = 0x10, COP2 = 0x12, LB = 0x20, LH = 0x21, LW = 0x23, LBU = 0x24, LHU = 0x25, SB = 0x28, SH = 0x29, SW = 0x2B, LWC2 = 0x32, SWC2 = 0x3A, }; const RSPSpecial = enum(u32) { SLL = 0x00, SRL = 0x02, SRA = 0x03, SLLV = 0x04, SRLV = 0x06, JR = 0x08, BREAK = 0x0D, ADD = 0x20, ADDU = 0x21, SUB = 0x22, SUBU = 0x23, AND = 0x24, OR = 0x25, XOR = 0x26, SLT = 0x2A, }; const RSPRegimm = enum(u32) { BLTZ = 0x00, BGEZ = 0x01, }; const RSPCOPOpcode = enum(u32) { MF = 0x00, CF = 0x02, MT = 0x04, }; const RSPCP2Opcode = enum(u32) { COMPUTE = 0x10, }; const RSPVULoadOpcode = enum(u32) { LSV = 0x01, LLV = 0x02, LDV = 0x03, LQV = 0x04, LRV = 0x05, }; const RSPVUStoreOpcode = enum(u32) { SBV = 0x00, SSV = 0x01, SLV = 0x02, SDV = 0x03, SQV = 0x04, }; const RSPVUOpcode = enum(u32) { VMULF = 0x00, VMULU = 0x01, VRNDP = 0x02, VMUDL = 0x04, VMUDM = 0x05, VMUDN = 0x06, VMUDH = 0x07, VMACF = 0x08, VMACU = 0x09, VMADL = 0x0C, VMADM = 0x0D, VMADN = 0x0E, VMADH = 0x0F, VADD = 0x10, VSUB = 0x11, VADDC = 0x14, VSUBC = 0x15, VSAR = 0x1D, VLT = 0x20, VGE = 0x23, VCL = 0x24, VCH = 0x25, VMRG = 0x27, VAND = 0x28, VOR = 0x2A, VXOR = 0x2C, VRCPL = 0x31, VRCPH = 0x32, VMOV = 0x33, }; const RSPAddr = packed struct { addr : u12 = 0, isIMEM: bool = false, }; const RSPDMALen = packed struct { length: u12 = 0, count : u8 = 0, skip : u12 = 0, }; const RSPStatus = packed struct { h : bool = true, b : bool = false, db : bool = false, df : bool = false, iof: bool = false, ss : bool = false, ib : bool = false, s0 : bool = false, s1 : bool = false, s2 : bool = false, s3 : bool = false, s4 : bool = false, s5 : bool = false, s6 : bool = false, s7 : bool = false, }; const VectorReg = packed union { uLane: [8]u16, pub fn getSLane(self: VectorReg, idx: u32) i16 { return 
@bitCast(i16, self.uLane[getIdxL(idx)]); } pub fn getULane(self: VectorReg, idx: u32) u16 { return self.uLane[getIdxL(idx)]; } pub fn setSLane(self: *VectorReg, idx: u32, data: i16) void { self.uLane[getIdxL(idx)] = @bitCast(u16, data); } pub fn setULane(self: *VectorReg, idx: u32, data: u16) void { self.uLane[getIdxL(idx)] = data; } }; const Accumulator = struct { uLane: [8]u48, pub fn getSLane(self: Accumulator, idx: u32) i48 { return @bitCast(i48, self.uLane[getIdxL(idx)]); } pub fn getULane(self: Accumulator, idx: u32) u48 { return self.uLane[getIdxL(idx)]; } pub fn getLane(self: Accumulator, idx: u32, e: u32) u16 { return @truncate(u16, self.uLane[getIdxL(idx)] >> @truncate(u6, 16 * getIdxA(e))); } pub fn setSLane(self: *Accumulator, idx: u32, data: i48) void { self.uLane[getIdxL(idx)] = @bitCast(u48, data); } pub fn setULane(self: *Accumulator, idx: u32, data: u48) void { self.uLane[getIdxL(idx)] = data; } pub fn setLane(self: *Accumulator, idx: u32, e: u32, data: u16) void { const shift = @truncate(u6, 16 * getIdxA(e)); const mask = @intCast(u48, 0xFFFF) << shift; const uLane = self.uLane[getIdxL(idx)] & ~mask; self.uLane[getIdxL(idx)] = uLane | (@intCast(u48, data) << shift); } }; const VCC = struct { gte : [8]bool = undefined, lten: [8]bool = undefined, }; const VCO = struct { c : [8]bool = undefined, ne: [8]bool = undefined, }; const VCE = struct { n1: [8]bool = undefined, }; const RSPRegs = struct { rspAddr : RSPAddr = RSPAddr{}, rspDMALen: RSPDMALen = RSPDMALen{}, rspStatus: RSPStatus = RSPStatus{}, rspSema: bool = false, rspDRAMAddr: u24 = 0, gprs: [32]u32 = undefined, vprs: [32]VectorReg = undefined, acc: Accumulator = undefined, vcc: VCC = undefined, vco: VCO = undefined, vce: VCE = undefined, divIn : u16 = undefined, divOut: u16 = undefined, isDIVInLoaded: bool = false, pc : u12 = undefined, cpc: u12 = undefined, npc: u12 = undefined, pub fn get(self: RSPRegs, idx: u32) u32 { return self.gprs[idx]; } pub fn getVPR(self: RSPRegs, idx: u32) VectorReg { return self.vprs[idx]; } pub fn getByte(self: RSPRegs, idx: u32, element: u32) u8 { return @truncate(u8, self.vprs[idx].getULane(element >> 1) >> @truncate(u4, (8 * ((element ^ 1) & 1)))); } pub fn getLane(self: RSPRegs, idx: u32, element: u32) u16 { return self.vprs[idx].getULane(element); } pub fn broadcast(self: RSPRegs, idx: u32, e: u32) VectorReg { const v = self.vprs[idx]; var vBroadcast: VectorReg = undefined; var mask: u32 = undefined; switch (e) { 0,1 => mask = 0x76543210, 2 => mask = 0x66442200, 3 => mask = 0x77553311, 4 => mask = 0x44440000, 5 => mask = 0x55551111, 6 => mask = 0x66662222, 7 => mask = 0x77773333, 8 => mask = 0x00000000, 9 => mask = 0x11111111, 10 => mask = 0x22222222, 11 => mask = 0x33333333, 12 => mask = 0x44444444, 13 => mask = 0x55555555, 14 => mask = 0x66666666, 15 => mask = 0x77777777, else => @panic("invalid broadcast modifier"), } vBroadcast.setULane(0, v.getULane((mask >> 0) & 0xF)); vBroadcast.setULane(1, v.getULane((mask >> 4) & 0xF)); vBroadcast.setULane(2, v.getULane((mask >> 8) & 0xF)); vBroadcast.setULane(3, v.getULane((mask >> 12) & 0xF)); vBroadcast.setULane(4, v.getULane((mask >> 16) & 0xF)); vBroadcast.setULane(5, v.getULane((mask >> 20) & 0xF)); vBroadcast.setULane(6, v.getULane((mask >> 24) & 0xF)); vBroadcast.setULane(7, v.getULane((mask >> 28) & 0xF)); return vBroadcast; } pub fn getCP0(self: RSPRegs, idx: u32) u32 { var data: u32 = undefined; switch (idx) { 4 => data = @intCast(u32, @bitCast(u15, rspRegs.rspStatus)), 5 => data = @intCast(u32, @bitCast(u1, rspRegs.rspStatus.df)), 
6 => data = @intCast(u32, @bitCast(u1, rspRegs.rspStatus.db)), 7 => { data = @intCast(u32, @bitCast(u1, rspRegs.rspSema)); rspRegs.rspSema = true; }, 9 => { data = @intCast(u32, rdp.rdpRegs.rdpCMDEnd); }, 10 => { data = @intCast(u32, rdp.rdpRegs.rdpCMDCurr); }, 11 => { data = @intCast(u32, @bitCast(u11, rdp.rdpRegs.rdpStatus)); }, else => { warn("[RSP] Unhandled CP0 read @ ${}.", .{idx}); @panic("unhandled CP0 read"); } } return data; } pub fn set(self: *RSPRegs, idx: u32, data: u32) void { self.gprs[idx] = data; self.gprs[0] = 0; } pub fn setByte(self: *RSPRegs, idx: u32, element: u32, data: u8) void { const uLane = self.vprs[idx].getULane(element >> 1); const shift = @truncate(u4, 8 * ((element ^ 1) & 1)); const mask = @intCast(u16, 0xFF) << shift; self.vprs[idx].setULane(element >> 1, (uLane & ~mask) | (@intCast(u16, data) << shift)); } pub fn setLane(self: *RSPRegs, idx: u32, element: u32, data: u16) void { self.vprs[idx].setULane(element, data); } pub fn setSLane(self: *RSPRegs, idx: u32, element: u32, data: i16) void { self.vprs[idx].setSLane(element, data); } pub fn setVector(self: *RSPRegs, idx: u32, data: u128) void { const data_ = @byteSwap(u128, data); @memcpy(@ptrCast([*]u8, &vprs[idx].byte), @ptrCast([*]const u8, &data_), 16); } pub fn setCP0(self: RSPRegs, idx: u32, data: u32) void { info("[RSP] CP0 write @ ${}, data: {X}h.", .{idx, data}); switch (idx) { 0 => rspRegs.rspAddr = @bitCast(RSPAddr, @truncate(u13, data)), 1 => rspRegs.rspDRAMAddr = @truncate(u24, data), 2 => { rspRegs.rspDMALen = @bitCast(RSPDMALen, data); doDMAToRSP(); }, 3 => { rspRegs.rspDMALen = @bitCast(RSPDMALen, data); doDMAToRAM(); }, 4 => { if ((data & (1 << 0)) != 0) rspRegs.rspStatus.h = false; if ((data & (1 << 1)) != 0) rspRegs.rspStatus.h = true; if ((data & (1 << 2)) != 0) rspRegs.rspStatus.b = false; if ((data & (1 << 3)) != 0) rspIRQ = false; if ((data & (1 << 4)) != 0) rspIRQ = true; if ((data & (1 << 5)) != 0) rspRegs.rspStatus.ss = false; if ((data & (1 << 6)) != 0) rspRegs.rspStatus.ss = true; if ((data & (1 << 7)) != 0) rspRegs.rspStatus.ib = false; if ((data & (1 << 8)) != 0) rspRegs.rspStatus.ib = true; if ((data & (1 << 9)) != 0) rspRegs.rspStatus.s0 = false; if ((data & (1 << 10)) != 0) rspRegs.rspStatus.s0 = true; if ((data & (1 << 11)) != 0) rspRegs.rspStatus.s1 = false; if ((data & (1 << 12)) != 0) rspRegs.rspStatus.s1 = true; if ((data & (1 << 13)) != 0) rspRegs.rspStatus.s2 = false; if ((data & (1 << 14)) != 0) rspRegs.rspStatus.s2 = true; if ((data & (1 << 15)) != 0) rspRegs.rspStatus.s3 = false; if ((data & (1 << 16)) != 0) rspRegs.rspStatus.s3 = true; if ((data & (1 << 17)) != 0) rspRegs.rspStatus.s4 = false; if ((data & (1 << 18)) != 0) rspRegs.rspStatus.s4 = true; if ((data & (1 << 19)) != 0) rspRegs.rspStatus.s5 = false; if ((data & (1 << 20)) != 0) rspRegs.rspStatus.s5 = true; if ((data & (1 << 21)) != 0) rspRegs.rspStatus.s6 = false; if ((data & (1 << 22)) != 0) rspRegs.rspStatus.s6 = true; if ((data & (1 << 23)) != 0) rspRegs.rspStatus.s7 = false; if ((data & (1 << 24)) != 0) rspRegs.rspStatus.s7 = true; if (rspIRQ) { mi.setPending(InterruptSource.SP); } else { mi.clearPending(InterruptSource.SP); } }, 7 => { if (data == 0) rspRegs.rspSema = false; }, 8 => { rdp.rdpRegs.rdpCMDStart = @truncate(u24, data); rdp.rdpRegs.rdpCMDCurr = rdp.rdpRegs.rdpCMDStart; rdp.rdpRegs.rdpStatus.sv = true; }, 9 => { rdp.rdpRegs.rdpCMDEnd = @truncate(u24, data); rdp.rdpRegs.rdpStatus.ev = true; rdp.processDP(); }, 11 => { if ((data & (1 << 0)) != 0) rdp.rdpRegs.rdpStatus.x = false; if ((data & 
(1 << 1)) != 0) rdp.rdpRegs.rdpStatus.x = true; if ((data & (1 << 2)) != 0) rdp.rdpRegs.rdpStatus.f = false; if ((data & (1 << 0)) != 0) rdp.rdpRegs.rdpStatus.f = false; if ((data & (1 << 1)) != 0) rdp.rdpRegs.rdpStatus.fl = true; if ((data & (1 << 5)) != 0) rdp.rdpRegs.rdpStatus.fl = false; }, else => { warn("[RSP] Unhandled CP0 write @ ${}, data: {X}h.", .{idx, data}); @panic("unhandled CP0 write"); } } } pub fn setPC(self: *RSPRegs, data: u32) void { self.pc = @truncate(u12, data); self.npc = self.pc +% 4; } }; // RSP memory pub var spDMEM: [0x1000]u8 = undefined; pub var spIMEM: [0x1000]u8 = undefined; var rspRegs = RSPRegs{}; var rspIRQ = false; const isDisasm = true; fn getImm16(instr: u32) u16 { return @truncate(u16, instr); } fn getRd(instr: u32) u32 { return (instr >> 11) & 0x1F; } fn getRs(instr: u32) u32 { return (instr >> 21) & 0x1F; } fn getRt(instr: u32) u32 { return (instr >> 16) & 0x1F; } fn getSa(instr: u32) u32 { return (instr >> 6) & 0x1F; } fn getTarget(instr: u32) u32 { return (instr << 2) & 0xFFF_FFFF; } fn getFunct(instr: u32) u32 { return instr & 0x3F; } fn getElement(instr: u32) u32 { return (instr >> 7) & 0xF; } fn getDElement(instr: u32) u32 { return (instr >> 11) & 7; } fn getBroadcastMod(instr: u32) u32 { return (instr >> 21) & 0xF; } fn getOffset(instr: u32, comptime n: comptime_int) u32 { return @bitCast(u32, @intCast(i32, @bitCast(i7, @truncate(u7, instr)))) << n; } const getVd = getSa; const getVs = getRd; const getVt = getRt; pub fn read8(pAddr: u64) u8 { var data: u8 = undefined; switch ((pAddr >> 12) & 0xFF) { @enumToInt(RSPMemory.DMEM) => { data = spDMEM[pAddr & 0xFFF]; }, @enumToInt(RSPMemory.IMEM) => { data = spIMEM[pAddr & 0xFFF]; }, else => { warn("[RSP] Unhandled read8 @ pAddr {X}h.", .{pAddr}); @panic("unhandled RSP read"); } } return data; } pub fn read16(pAddr: u64) u16 { var data: u16 = undefined; switch ((pAddr >> 12) & 0xFF) { @enumToInt(RSPMemory.DMEM) => { @memcpy(@ptrCast([*]u8, &data), @ptrCast([*]u8, &spDMEM[pAddr & 0xFFF]), 2); data = @byteSwap(u16, data); }, @enumToInt(RSPMemory.IMEM) => { @memcpy(@ptrCast([*]u8, &data), @ptrCast([*]u8, &spIMEM[pAddr & 0xFFF]), 2); data = @byteSwap(u16, data); }, else => { warn("[RSP] Unhandled read16 @ pAddr {X}h.", .{pAddr}); @panic("unhandled RSP read"); } } return data; } pub fn read32(pAddr: u64) u32 { var data: u32 = undefined; switch ((pAddr >> 12) & 0xFF) { @enumToInt(RSPMemory.DMEM) => { @memcpy(@ptrCast([*]u8, &data), @ptrCast([*]u8, &spDMEM[pAddr & 0xFFF]), 4); data = @byteSwap(u32, data); }, @enumToInt(RSPMemory.IMEM) => { @memcpy(@ptrCast([*]u8, &data), @ptrCast([*]u8, &spIMEM[pAddr & 0xFFF]), 4); data = @byteSwap(u32, data); }, @enumToInt(RSPMemory.IO) => { switch (pAddr & 0xFF) { @enumToInt(RSPReg.RSPAddr) => { info("[RSP] Read32 @ pAddr {X}h (RSP Address).", .{pAddr}); data = @intCast(u32, @bitCast(u13, rspRegs.rspAddr)); }, @enumToInt(RSPReg.RSPDRAMAddr) => { info("[RSP] Read32 @ pAddr {X}h (RSP DRAM Address).", .{pAddr}); data = @intCast(u32, rspRegs.rspDRAMAddr); }, @enumToInt(RSPReg.RSPDMALenRD) => { info("[RSP] Read32 @ pAddr {X}h (RSP DMA Length RD).", .{pAddr}); data = @bitCast(u32, rspRegs.rspDMALen); }, @enumToInt(RSPReg.RSPDMALenWR) => { info("[RSP] Read32 @ pAddr {X}h (RSP DMA Length WR).", .{pAddr}); data = @bitCast(u32, rspRegs.rspDMALen); }, @enumToInt(RSPReg.RSPStatus) => { info("[RSP] Read32 @ pAddr {X}h (RSP Status).", .{pAddr}); data = @intCast(u32, @bitCast(u15, rspRegs.rspStatus)); }, @enumToInt(RSPReg.RSPDMABusy) => { info("[RSP] Read32 @ pAddr {X}h (RSP DMA 
Busy).", .{pAddr}); data = @intCast(u32, @bitCast(u1, rspRegs.rspStatus.db)); }, else => { warn("[RSP] Unhandled read32 @ pAddr {X}h.", .{pAddr}); @panic("unhandled RSP read"); } } }, @enumToInt(RSPMemory.PC) => { info("[RSP] Read32 @ pAddr {X}h (RSP PC).", .{pAddr}); data = @intCast(u32, rspRegs.pc); }, else => { warn("[RSP] Unhandled read32 @ pAddr {X}h.", .{pAddr}); @panic("unhandled RSP read"); } } return data; } fn readDMEM(comptime T: type, pAddr: u32) T { var data: T = undefined; @memcpy(@ptrCast([*]u8, &data), @ptrCast([*]u8, &spDMEM[pAddr & 0xFFF]), @sizeOf(T)); if (pAddr == 0x364) info("[RSP] Read audio buffer size, data: {X}h.", .{@byteSwap(T, data)}); return @byteSwap(T, data); } pub fn write8(pAddr: u64, data: u8) void { switch ((pAddr >> 12) & 0xFF) { @enumToInt(RSPMemory.DMEM) => { spDMEM[pAddr & 0xFFF] = data; }, @enumToInt(RSPMemory.IMEM) => { spIMEM[pAddr & 0xFFF] = data; }, else => { warn("[RSP] Unhandled write8 @ pAddr {X}h, data: {X}h.", .{pAddr, data}); @panic("unhandled RSP write"); } } } pub fn write32(pAddr: u64, data: u32) void { switch ((pAddr >> 12) & 0xFF) { @enumToInt(RSPMemory.DMEM) => { const data_ = @byteSwap(u32, data); @memcpy(@ptrCast([*]u8, &spDMEM[pAddr & 0xFFF]), @ptrCast([*]const u8, &data_), 4); }, @enumToInt(RSPMemory.IMEM) => { const data_ = @byteSwap(u32, data); @memcpy(@ptrCast([*]u8, &spIMEM[pAddr & 0xFFF]), @ptrCast([*]const u8, &data_), 4); }, @enumToInt(RSPMemory.IO) => { switch (pAddr & 0xFF) { @enumToInt(RSPReg.RSPAddr) => { info("[RSP] Write32 @ pAddr {X}h (RSP DMEM/IMEM Address), data: {X}h.", .{pAddr, data}); rspRegs.rspAddr = @bitCast(RSPAddr, @truncate(u13, data)); }, @enumToInt(RSPReg.RSPDRAMAddr) => { info("[RSP] Write32 @ pAddr {X}h (RSP DRAM Address), data: {X}h.", .{pAddr, data}); rspRegs.rspDRAMAddr = @truncate(u24, data); }, @enumToInt(RSPReg.RSPDMALenRD) => { info("[RSP] Write32 @ pAddr {X}h (RSP DMA Length RD), data: {X}h.", .{pAddr, data}); rspRegs.rspDMALen = @bitCast(RSPDMALen, data); doDMAToRSP(); }, @enumToInt(RSPReg.RSPDMALenWR) => { info("[RSP] Write32 @ pAddr {X}h (RSP DMA Length WR), data: {X}h.", .{pAddr, data}); rspRegs.rspDMALen = @bitCast(RSPDMALen, data); doDMAToRAM(); }, @enumToInt(RSPReg.RSPStatus) => { info("[RSP] Write32 @ pAddr {X}h (RSP Status), data: {X}h.", .{pAddr, data}); if ((data & (1 << 0)) != 0) rspRegs.rspStatus.h = false; if ((data & (1 << 1)) != 0) rspRegs.rspStatus.h = true; if ((data & (1 << 2)) != 0) rspRegs.rspStatus.b = false; if ((data & (1 << 3)) != 0) rspIRQ = false; if ((data & (1 << 4)) != 0) rspIRQ = true; if ((data & (1 << 5)) != 0) rspRegs.rspStatus.ss = false; if ((data & (1 << 6)) != 0) rspRegs.rspStatus.ss = true; if ((data & (1 << 7)) != 0) rspRegs.rspStatus.ib = false; if ((data & (1 << 8)) != 0) rspRegs.rspStatus.ib = true; if ((data & (1 << 9)) != 0) rspRegs.rspStatus.s0 = false; if ((data & (1 << 10)) != 0) rspRegs.rspStatus.s0 = true; if ((data & (1 << 11)) != 0) rspRegs.rspStatus.s1 = false; if ((data & (1 << 12)) != 0) rspRegs.rspStatus.s1 = true; if ((data & (1 << 13)) != 0) rspRegs.rspStatus.s2 = false; if ((data & (1 << 14)) != 0) rspRegs.rspStatus.s2 = true; if ((data & (1 << 15)) != 0) rspRegs.rspStatus.s3 = false; if ((data & (1 << 16)) != 0) rspRegs.rspStatus.s3 = true; if ((data & (1 << 17)) != 0) rspRegs.rspStatus.s4 = false; if ((data & (1 << 18)) != 0) rspRegs.rspStatus.s4 = true; if ((data & (1 << 19)) != 0) rspRegs.rspStatus.s5 = false; if ((data & (1 << 20)) != 0) rspRegs.rspStatus.s5 = true; if ((data & (1 << 21)) != 0) rspRegs.rspStatus.s6 = false; if 
((data & (1 << 22)) != 0) rspRegs.rspStatus.s6 = true; if ((data & (1 << 23)) != 0) rspRegs.rspStatus.s7 = false; if ((data & (1 << 24)) != 0) rspRegs.rspStatus.s7 = true; if (rspIRQ) { mi.setPending(InterruptSource.SP); } else { mi.clearPending(InterruptSource.SP); } }, else => { warn("[RSP] Unhandled write32 @ pAddr {X}h, data: {X}h.", .{pAddr, data}); @panic("unhandled RSP write"); } } }, @enumToInt(RSPMemory.PC) => { info("[RSP] Write32 @ pAddr {X}h (RSP Program Counter), data: {X}h.", .{pAddr, data}); rspRegs.setPC(data); }, else => { warn("[RSP] Unhandled write32 @ pAddr {X}h, data: {X}h.", .{pAddr, data}); @panic("unhandled RSP write"); } } } fn writeDMEM(comptime T: type, pAddr: u32, data: T) void { var data_: T = @byteSwap(T, data); if (pAddr == 0x364) info("[RSP] Set audio buffer size, data: {X}h.", .{data}); if (pAddr >= 0x1000) @panic("DMEM write out of bounds"); @memcpy(@ptrCast([*]u8, &spDMEM[pAddr & 0xFFF]), @ptrCast([*]u8, &data_), @sizeOf(T)); } fn fetchInstr() u32 { var data: u32 = undefined; rspRegs.cpc = rspRegs.pc; if ((rspRegs.cpc & 3) != 0) { @panic("unaligned program counter"); } @memcpy(@ptrCast([*]u8, &data), @ptrCast([*]u8, &spIMEM[rspRegs.pc & 0xFFF]), 4); rspRegs.pc = rspRegs.npc; rspRegs.npc +%= 4; return @byteSwap(u32, data); } fn doDMAToRAM() void { const ramAddr = @intCast(u64, rspRegs.rspDRAMAddr) & 0xFF_FFF8; const rspAddr = @intCast(u64, rspRegs.rspAddr.addr) & 0xFF8; const length = (@intCast(u64, rspRegs.rspDMALen.length) | 7) + 1; const count = @intCast(u64, rspRegs.rspDMALen.count ) + 1; const skip = @intCast(u64, rspRegs.rspDMALen.skip); var mem: ?*[0x1000]u8 = null; if (rspRegs.rspAddr.isIMEM) { info("[RSP] IMEM->RAM DMA, DRAM pAddr: {X}h, IMEM pAddr: {X}h, length: {X}h.", .{ramAddr, rspAddr, length}); mem = &spIMEM; } else { info("[RSP] DMEM->RAM DMA, DRAM pAddr: {X}h, DMEM pAddr: {X}h, length: {X}h.", .{ramAddr, rspAddr, length}); mem = &spDMEM; } var count_: u64 = 0; while (count_ < count) : (count_ += 1) { var length_: u64 = 0; while (length_ < length) : (length_ += 1) { bus.ram[ramAddr + count_ * ((length + skip) & 0xFFFFFFFF_FFFFFFF8) + length_] = mem.?[rspAddr + count_ * length + length_]; } } } fn doDMAToRSP() void { const ramAddr = @intCast(u64, rspRegs.rspDRAMAddr) & 0xFF_FFF8; const rspAddr = @intCast(u64, rspRegs.rspAddr.addr) & 0xFF8; const length = (@intCast(u64, rspRegs.rspDMALen.length) | 7) + 1; const count = @intCast(u64, rspRegs.rspDMALen.count ) + 1; const skip = @intCast(u64, rspRegs.rspDMALen.skip); var mem: ?*[0x1000]u8 = null; if (rspRegs.rspAddr.isIMEM) { info("[RSP] RAM->IMEM DMA, DRAM pAddr: {X}h, IMEM pAddr: {X}h, length: {X}h.", .{ramAddr, rspAddr, length}); mem = &spIMEM; } else { info("[RSP] RAM->DMEM DMA, DRAM pAddr: {X}h, DMEM pAddr: {X}h, length: {X}h.", .{ramAddr, rspAddr, length}); mem = &spDMEM; } var count_: u64 = 0; while (count_ < count) : (count_ += 1) { var length_: u64 = 0; while (length_ < length) : (length_ += 1) { mem.?[rspAddr + count_ * length + length_] = bus.ram[ramAddr + count_ * ((length + skip) & 0xFFFFFFFF_FFFFFFF8) + length_]; } } } fn decodeInstr(instr: u32) void { const opcode = instr >> 26; switch (opcode) { @enumToInt(RSPOpcode.SPECIAL) => { const funct = instr & 0x3F; switch (funct) { @enumToInt(RSPSpecial.SLL ) => iSLL (instr), @enumToInt(RSPSpecial.SRL ) => iSRL (instr), @enumToInt(RSPSpecial.SRA ) => iSRA (instr), @enumToInt(RSPSpecial.SLLV ) => iSLLV (instr), @enumToInt(RSPSpecial.SRLV ) => iSRLV (instr), @enumToInt(RSPSpecial.JR ) => iJR (instr), @enumToInt(RSPSpecial.BREAK) => 
iBREAK(instr), @enumToInt(RSPSpecial.ADD ) => iADDU (instr), @enumToInt(RSPSpecial.ADDU ) => iADDU (instr), @enumToInt(RSPSpecial.SUB ) => iSUBU (instr), @enumToInt(RSPSpecial.SUBU ) => iSUBU (instr), @enumToInt(RSPSpecial.AND ) => iAND (instr), @enumToInt(RSPSpecial.OR ) => iOR (instr), @enumToInt(RSPSpecial.XOR ) => iXOR (instr), @enumToInt(RSPSpecial.SLT ) => iSLT (instr), else => { warn("[RSP] Unhandled function {X}h ({X}h) @ {X}h.", .{funct, instr, rspRegs.cpc}); @panic("unhandled RSP instruction"); } } }, @enumToInt(RSPOpcode.REGIMM) => { const rt = getRt(instr); switch (rt) { @enumToInt(RSPRegimm.BLTZ) => iBLTZ(instr), @enumToInt(RSPRegimm.BGEZ) => iBGEZ(instr), else => { warn("[RSP] Unhandled REGIMM opcode {X}h ({X}h) @ {X}h.", .{rt, instr, rspRegs.cpc}); @panic("unhandled RSP instruction"); } } }, @enumToInt(RSPOpcode.J ) => iJ (instr), @enumToInt(RSPOpcode.JAL ) => iJAL (instr), @enumToInt(RSPOpcode.BEQ ) => iBEQ (instr), @enumToInt(RSPOpcode.BNE ) => iBNE (instr), @enumToInt(RSPOpcode.BLEZ ) => iBLEZ (instr), @enumToInt(RSPOpcode.BGTZ ) => iBGTZ (instr), @enumToInt(RSPOpcode.ADDI ) => iADDIU(instr), @enumToInt(RSPOpcode.ADDIU) => iADDIU(instr), @enumToInt(RSPOpcode.ANDI ) => iANDI (instr), @enumToInt(RSPOpcode.ORI ) => iORI (instr), @enumToInt(RSPOpcode.XORI ) => iXORI (instr), @enumToInt(RSPOpcode.LUI ) => iLUI (instr), @enumToInt(RSPOpcode.COP0 ) => { switch (getRs(instr)) { @enumToInt(RSPCOPOpcode.MF) => iMFC(instr, 0), @enumToInt(RSPCOPOpcode.MT) => iMTC(instr, 0), else => { warn("[RSP] Unhandled COP0 opcode {X}h ({X}h) @ {X}h.", .{getRs(instr), instr, rspRegs.cpc}); @panic("unhandled RSP instruction"); } } }, @enumToInt(RSPOpcode.COP2 ) => { switch (getRs(instr)) { @enumToInt(RSPCOPOpcode.MF) => iMFC(instr, 2), @enumToInt(RSPCOPOpcode.CF) => iCFC(instr, 2), @enumToInt(RSPCOPOpcode.MT) => iMTC(instr, 2), @enumToInt(RSPCP2Opcode.COMPUTE) ... 
@enumToInt(RSPCP2Opcode.COMPUTE) + 0xF => { switch (getFunct(instr)) { @enumToInt(RSPVUOpcode.VMULF) => iVMULF(instr), @enumToInt(RSPVUOpcode.VMULU) => iVMULU(instr), @enumToInt(RSPVUOpcode.VRNDP) => iVRNDP(instr), @enumToInt(RSPVUOpcode.VMUDL) => iVMUDL(instr), @enumToInt(RSPVUOpcode.VMUDM) => iVMUDM(instr), @enumToInt(RSPVUOpcode.VMUDN) => iVMUDN(instr), @enumToInt(RSPVUOpcode.VMUDH) => iVMUDH(instr), @enumToInt(RSPVUOpcode.VMACF) => iVMACF(instr), @enumToInt(RSPVUOpcode.VMACU) => iVMACU(instr), @enumToInt(RSPVUOpcode.VMADL) => iVMADL(instr), @enumToInt(RSPVUOpcode.VMADM) => iVMADM(instr), @enumToInt(RSPVUOpcode.VMADN) => iVMADN(instr), @enumToInt(RSPVUOpcode.VMADH) => iVMADH(instr), @enumToInt(RSPVUOpcode.VADD ) => iVADD (instr), @enumToInt(RSPVUOpcode.VSUB ) => iVSUB (instr), @enumToInt(RSPVUOpcode.VADDC) => iVADDC(instr), @enumToInt(RSPVUOpcode.VSUBC) => iVSUBC(instr), @enumToInt(RSPVUOpcode.VSAR ) => iVSAR (instr), @enumToInt(RSPVUOpcode.VLT ) => iVLT (instr), @enumToInt(RSPVUOpcode.VGE ) => iVGE (instr), @enumToInt(RSPVUOpcode.VCL ) => iVCL (instr), @enumToInt(RSPVUOpcode.VCH ) => iVCH (instr), @enumToInt(RSPVUOpcode.VMRG ) => iVMRG (instr), @enumToInt(RSPVUOpcode.VAND ) => iVAND (instr), @enumToInt(RSPVUOpcode.VOR ) => iVOR (instr), @enumToInt(RSPVUOpcode.VXOR ) => iVXOR (instr), @enumToInt(RSPVUOpcode.VRCPL) => iVRCPL(instr), @enumToInt(RSPVUOpcode.VRCPH) => iVRCPH(instr), @enumToInt(RSPVUOpcode.VMOV ) => iVMOV (instr), else => { warn("[RSP] Unhandled COP2 function {X}h ({X}h) @ {X}h.", .{getFunct(instr), instr, rspRegs.cpc}); @panic("unhandled RSP instruction"); } } }, else => { warn("[RSP] Unhandled COP2 opcode {X}h ({X}h) @ {X}h.", .{getRs(instr), instr, rspRegs.cpc}); @panic("unhandled RSP instruction"); } } }, @enumToInt(RSPOpcode.LB ) => iLB (instr), @enumToInt(RSPOpcode.LH ) => iLH (instr), @enumToInt(RSPOpcode.LW ) => iLW (instr), @enumToInt(RSPOpcode.LBU ) => iLBU (instr), @enumToInt(RSPOpcode.LHU ) => iLHU (instr), @enumToInt(RSPOpcode.SB ) => iSB (instr), @enumToInt(RSPOpcode.SH ) => iSH (instr), @enumToInt(RSPOpcode.SW ) => iSW (instr), @enumToInt(RSPOpcode.LWC2 ) => { switch (getRd(instr)) { @enumToInt(RSPVULoadOpcode.LSV) => iLSV(instr), @enumToInt(RSPVULoadOpcode.LLV) => iLLV(instr), @enumToInt(RSPVULoadOpcode.LDV) => iLDV(instr), @enumToInt(RSPVULoadOpcode.LQV) => iLQV(instr), @enumToInt(RSPVULoadOpcode.LRV) => iLRV(instr), else => { warn("[RSP] Unhandled VU load opcode {X}h ({X}h) @ {X}h.", .{getRd(instr), instr, rspRegs.cpc}); @panic("unhandled RSP instruction"); } } }, @enumToInt(RSPOpcode.SWC2 ) => { switch (getRd(instr)) { @enumToInt(RSPVUStoreOpcode.SBV) => iSBV(instr), @enumToInt(RSPVUStoreOpcode.SSV) => iSSV(instr), @enumToInt(RSPVUStoreOpcode.SLV) => iSLV(instr), @enumToInt(RSPVUStoreOpcode.SDV) => iSDV(instr), @enumToInt(RSPVUStoreOpcode.SQV) => iSQV(instr), else => { warn("[RSP] Unhandled VU store opcode {X}h ({X}h) @ {X}h.", .{getRd(instr), instr, rspRegs.cpc}); @panic("unhandled RSP instruction"); } } }, else => { warn("[RSP] Unhandled instruction {X}h ({X}h) @ {X}h.", .{opcode, instr, rspRegs.cpc}); @panic("unhandled RSP instruction"); } } } fn doBranch(target: u32, isCondition: bool, isLink: comptime bool) void { if (isLink) rspRegs.set(31, rspRegs.npc); if (isCondition) { rspRegs.npc = @truncate(u12, target); // isBranchDelay = true; } } fn doReciprocal(input: i32) u32 { if (input == 0) return 0x7FFFFFFF; var iAbs: u32 = undefined; if (input < 0) { iAbs = @bitCast(u32, -input); } else { iAbs = @bitCast(u32, input); } const idxShift = @truncate(u5, 
@clz(u32, iAbs) + 1); const idx = (iAbs << idxShift) >> 23; const resShift = @truncate(u5, 32 - @intCast(u6, idxShift)); const res = ((0x10000 | @intCast(u32, rcpROM[idx])) << 14) >> resShift; if (@bitCast(u32, input) != iAbs) { return ~res; } else { return res; } } /// ADDIU - ADD Immediate Unsigned fn iADDIU(instr: u32) void { const imm = exts16(getImm16(instr)); const rs = getRs(instr); const rt = getRt(instr); rspRegs.set(rt, rspRegs.get(rs) +% imm); if (isDisasm) info("[RSP] ADDIU ${}, ${}, {X}h; ${} = {X}h", .{rt, rs, imm, rt, rspRegs.get(rt)}); } /// ADDU - ADD Unsigned fn iADDU(instr: u32) void { const rd = getRd(instr); const rs = getRs(instr); const rt = getRt(instr); rspRegs.set(rd, rspRegs.get(rs) +% rspRegs.get(rt)); if (isDisasm) info("[RSP] ADDU ${}, ${}, ${}; ${} = {X}h", .{rd, rs, rt, rd, rspRegs.get(rd)}); } /// AND - AND fn iAND(instr: u32) void { const rd = getRd(instr); const rs = getRs(instr); const rt = getRt(instr); rspRegs.set(rd, rspRegs.get(rs) & rspRegs.get(rt)); if (isDisasm) info("[RSP] AND ${}, ${}, ${}; ${} = {X}h", .{rd, rs, rt, rd, rspRegs.get(rd)}); } /// ANDI - AND Immediate fn iANDI(instr: u32) void { const imm = @intCast(u32, getImm16(instr)); const rs = getRs(instr); const rt = getRt(instr); rspRegs.set(rt, rspRegs.get(rs) & imm); if (isDisasm) info("[RSP] ANDI ${}, ${}, {X}h; ${} = {X}h", .{rt, rs, imm, rt, rspRegs.get(rt)}); } /// BEQ - Branch on EQual fn iBEQ(instr: u32) void { const offset = exts16(getImm16(instr)) << 2; const rs = getRs(instr); const rt = getRt(instr); const target = rspRegs.pc +% offset; doBranch(target, rspRegs.get(rs) == rspRegs.get(rt), false); if (isDisasm) info("[RSP] BEQ ${}, ${}, {X}h; ${} = {X}h, ${} = {X}h", .{rs, rt, target, rs, rspRegs.get(rs), rt, rspRegs.get(rt)}); } /// BGEZ - Branch on Greater than or Equal Zero fn iBGEZ(instr: u32) void { const offset = exts16(getImm16(instr)) << 2; const rs = getRs(instr); const target = rspRegs.pc +% offset; doBranch(target, @bitCast(i32, rspRegs.get(rs)) >= 0, false); if (isDisasm) info("[RSP] BGEZ ${}, {X}h; ${} = {X}h", .{rs, target, rs, rspRegs.get(rs)}); } /// BGTZ - Branch on Greater Than Zero fn iBGTZ(instr: u32) void { const offset = exts16(getImm16(instr)) << 2; const rs = getRs(instr); const target = rspRegs.pc +% offset; doBranch(target, @bitCast(i32, rspRegs.get(rs)) > 0, false); if (isDisasm) info("[RSP] BGTZ ${}, {X}h; ${} = {X}h", .{rs, target, rs, rspRegs.get(rs)}); } /// BLEZ - Branch on Less than or Equal Zero fn iBLEZ(instr: u32) void { const offset = exts16(getImm16(instr)) << 2; const rs = getRs(instr); const target = rspRegs.pc +% offset; doBranch(target, @bitCast(i32, rspRegs.get(rs)) <= 0, false); if (isDisasm) info("[RSP] BLEZ ${}, {X}h; ${} = {X}h", .{rs, target, rs, rspRegs.get(rs)}); } /// BLTZ - Branch on Less Than Zero fn iBLTZ(instr: u32) void { const offset = exts16(getImm16(instr)) << 2; const rs = getRs(instr); const target = rspRegs.pc +% offset; doBranch(target, @bitCast(i32, rspRegs.get(rs)) < 0, false); if (isDisasm) info("[RSP] BLTZ ${}, {X}h; ${} = {X}h", .{rs, target, rs, rspRegs.get(rs)}); } /// BNE - Branch on Not Equal fn iBNE(instr: u32) void { const offset = exts16(getImm16(instr)) << 2; const rs = getRs(instr); const rt = getRt(instr); const target = rspRegs.pc +% offset; doBranch(target, rspRegs.get(rs) != rspRegs.get(rt), false); if (isDisasm) info("[RSP] BNE ${}, ${}, {X}h; ${} = {X}h, ${} = {X}h", .{rs, rt, target, rs, rspRegs.get(rs), rt, rspRegs.get(rt)}); } /// BREAK - Breakpoint fn iBREAK(instr: u32) void { info("BREAK", 
.{}); rspRegs.rspStatus.h = true; rspRegs.rspStatus.b = true; if (rspRegs.rspStatus.ib) { rspIRQ = true; mi.setPending(InterruptSource.SP); } } /// CFC - Move From Control fn iCFC(instr: u32, comptime copN: comptime_int) void { const rd = getRd(instr); const rt = getRt(instr); if (copN == 2) { rspRegs.set(rt, 0); } else { warn("[RSP] Unhandled Coprocessor {}.", .{copN}); @panic("unhandled coprocessor"); } if (isDisasm) info("[RSP] CFC{} ${}, ${}; ${} = {X}h", .{copN, rt, rd, rt, rspRegs.get(rt)}); } /// J - Jump fn iJ(instr: u32) void { const target = getTarget(instr); doBranch(target, true, false); if (isDisasm) info("[RSP] J {X}h", .{target}); } /// JAL - Jump And Link fn iJAL(instr: u32) void { const target = getTarget(instr); doBranch(target, true, true); if (isDisasm) info("[RSP] JAL {X}h", .{target}); } /// JR - Jump Register fn iJR(instr: u32) void { const rs = getRs(instr); const target = rspRegs.get(rs); doBranch(target, true, false); if (isDisasm) info("[RSP] JR ${}; PC = {X}h", .{rs, target}); } /// LB - Load Byte fn iLB(instr: u32) void { const imm = exts16(getImm16(instr)); const base = getRs(instr); const rt = getRt(instr); const addr = rspRegs.get(base) +% imm; rspRegs.set(rt, @intCast(u32, exts8(readDMEM(u8, addr)))); if (isDisasm) info("[RSP] LB ${}, ${}({}); ${} = ({X}h) = {X}h", .{rt, base, @bitCast(i32, imm), rt, addr, rspRegs.get(rt)}); } /// LBU - Load Byte Unsigned fn iLBU(instr: u32) void { const imm = exts16(getImm16(instr)); const base = getRs(instr); const rt = getRt(instr); const addr = rspRegs.get(base) +% imm; rspRegs.set(rt, @intCast(u32, readDMEM(u8, addr))); if (isDisasm) info("[RSP] LBU ${}, ${}({}); ${} = ({X}h) = {X}h", .{rt, base, @bitCast(i32, imm), rt, addr, rspRegs.get(rt)}); } /// LDV - Load Double into Vector register fn iLDV(instr: u32) void { const base = getRs(instr); const vt = getVt(instr); const offset = getOffset(instr, 3); var addr = rspRegs.get(base) +% offset; var element = getElement(instr); var end: u32 = undefined; if ((element + 8) < 16) { end = element + 8; } else { end = 16; } while (element < end) : (element += 1) { rspRegs.setByte(vt, element, readDMEM(u8, addr)); addr +%= 1; } if (isDisasm) info("[RSP] LDV ${}[{}], ${X}({})", .{vt, element, base, @bitCast(i32, offset)}); } /// LH - Load Halfword fn iLH(instr: u32) void { const imm = exts16(getImm16(instr)); const base = getRs(instr); const rt = getRt(instr); const addr = rspRegs.get(base) +% imm; rspRegs.set(rt, exts16(readDMEM(u16, addr))); if (isDisasm) info("[RSP] LH ${}, ${}({}); ${} = ({X}h) = {X}h", .{rt, base, @bitCast(i32, imm), rt, addr, rspRegs.get(rt)}); } /// LHU - Load Halfword Unsigned fn iLHU(instr: u32) void { const imm = exts16(getImm16(instr)); const base = getRs(instr); const rt = getRt(instr); const addr = rspRegs.get(base) +% imm; rspRegs.set(rt, @intCast(u32, readDMEM(u16, addr))); if (isDisasm) info("[RSP] LHU ${}, ${}({}); ${} = ({X}h) = {X}h", .{rt, base, @bitCast(i32, imm), rt, addr, rspRegs.get(rt)}); } /// LLV - Load Long into Vector register fn iLLV(instr: u32) void { const base = getRs(instr); const vt = getVt(instr); const offset = getOffset(instr, 2); const addr = rspRegs.get(base) +% offset; var i: u32 = 0; llvLoop: while (i < 4) : (i += 1) { const element = getElement(instr) + i; if (element > 15) break :llvLoop; rspRegs.setByte(vt, element, readDMEM(u8, addr + i)); } if (isDisasm) info("[RSP] LLV ${}[{}], ${X}({})", .{vt, getElement(instr), base, @bitCast(i32, offset)}); } /// LQV - Load Quad into Vector register fn iLQV(instr: u32) void { const 
base = getRs(instr); const vt = getVt(instr); const offset = getOffset(instr, 4); const addr = rspRegs.get(base) +% offset; const element = getElement(instr); var i: u32 = 0; while ((addr + i) <= ((addr & 0xFFFF_FFF0) + 15)) : (i += 1) { rspRegs.setByte(vt, (element + i) & 15, readDMEM(u8, addr + i)); } if (isDisasm) info("[RSP] LQV ${}[0], ${X}({})", .{vt, base, @bitCast(i32, offset)}); if (isDisasm) { i = 0; while (i < 8) : (i += 1) { info ("{X}h", .{rspRegs.getLane(vt, i)}); } } } /// LRV - Load quad Right into Vector register fn iLRV(instr: u32) void { const base = getRs(instr); const vt = getVt(instr); const offset = getOffset(instr, 4); var addr = rspRegs.get(base) +% offset; var i = 16 - ((addr & 0xF) - getElement(instr)); while (i < 16) : (i += 1) { rspRegs.setByte(vt, i & 15, readDMEM(u8, addr)); addr +%= 1; } if (isDisasm) info("[RSP] LRV ${}[0], ${X}({})", .{vt, base, @bitCast(i32, offset)}); } /// LSV - Load Short into Vector register fn iLSV(instr: u32) void { const base = getRs(instr); const vt = getVt(instr); const offset = getOffset(instr, 1); const addr = rspRegs.get(base) +% offset; var data = readDMEM(u16, addr); const element = getElement(instr); rspRegs.setByte(vt, element, @truncate(u8, data >> 8)); if (element < 15) { rspRegs.setByte(vt, element + 1, @truncate(u8, data)); } if (isDisasm) info("[RSP] LSV ${}[{}], ${X}({})", .{vt, element, base, @bitCast(i32, offset)}); } /// LUI - Load Upper Immediate fn iLUI(instr: u32) void { const imm = getImm16(instr); const rt = getRt(instr); rspRegs.set(rt, exts16(imm) << 16); if (isDisasm) info("[RSP] LUI ${}, {X}h; ${} = {X}h", .{rt, imm, rt, rspRegs.get(rt)}); } /// LW - Load Word fn iLW(instr: u32) void { const imm = exts16(getImm16(instr)); const base = getRs(instr); const rt = getRt(instr); const addr = rspRegs.get(base) +% imm; rspRegs.set(rt, readDMEM(u32, addr)); if (isDisasm) info("[RSP] LW ${}, ${}({}); ${} = ({X}h) = {X}h", .{rt, base, @bitCast(i32, imm), rt, addr, rspRegs.get(rt)}); } /// MFC - Move From Coprocessor fn iMFC(instr: u32, comptime copN: comptime_int) void { const rd = getRd(instr); const rt = getRt(instr); if (copN == 0) { rspRegs.set(rt, rspRegs.getCP0(rd)); } else if (copN == 2) { const lane = getElement(instr) >> 1; rspRegs.set(rt, rspRegs.getLane(rd, lane)); } else { warn("[RSP] Unhandled Coprocessor {}.", .{copN}); @panic("unhandled coprocessor"); } if (isDisasm) info("[RSP] MFC{} ${}, ${}; ${} = {X}h", .{copN, rt, rd, rt, rspRegs.get(rt)}); } /// MTC - Move To Coprocessor fn iMTC(instr: u32, comptime copN: comptime_int) void { const rd = getRd(instr); const rt = getRt(instr); const data = rspRegs.get(rt); if (copN == 0) { rspRegs.setCP0(rd, data); } else if (copN == 2) { const lane = getElement(instr) >> 1; rspRegs.setLane(rd, lane, @truncate(u16, data)); } else { warn("[RSP] Unhandled Coprocessor {}.", .{copN}); @panic("unhandled coprocessor"); } if (isDisasm) info("[RSP] MTC{} ${}, ${}; ${} = {X}h", .{copN, rt, rd, rd, data}); } /// OR - OR fn iOR(instr: u32) void { const rd = getRd(instr); const rs = getRs(instr); const rt = getRt(instr); rspRegs.set(rd, rspRegs.get(rs) | rspRegs.get(rt)); if (isDisasm) info("[RSP] OR ${}, ${}, ${}; ${} = {X}h", .{rd, rs, rt, rd, rspRegs.get(rd)}); } /// ORI - OR Immediate fn iORI(instr: u32) void { const imm = @intCast(u32, getImm16(instr)); const rs = getRs(instr); const rt = getRt(instr); rspRegs.set(rt, rspRegs.get(rs) | imm); if (isDisasm) info("[RSP] ORI ${}, ${}, {X}h; ${} = {X}h", .{rt, rs, imm, rt, rspRegs.get(rt)}); } /// SB - Store Byte fn 
iSB(instr: u32) void { const imm = exts16(getImm16(instr)); const base = getRs(instr); const rt = getRt(instr); const addr = rspRegs.get(base) +% imm; writeDMEM(u8, addr, @truncate(u8, rspRegs.get(rt))); if (isDisasm) info("[RSP] SB ${}, ${}({}); ({X}h) = {X}h", .{rt, base, @bitCast(i32, imm), addr, @truncate(u8, rspRegs.get(rt))}); } /// SBV - Store Byte from Vector register fn iSBV(instr: u32) void { const base = getRs(instr); const vt = getVt(instr); const offset = getOffset(instr, 0); const addr = rspRegs.get(base) +% offset; const element = getElement(instr); writeDMEM(u8, addr, rspRegs.getByte(vt, element)); if (isDisasm) info("[RSP] SBV ${}[{}], ${X}({})", .{vt, element, base, @bitCast(i32, offset)}); } /// SDV - Store Double from Vector register fn iSDV(instr: u32) void { const base = getRs(instr); const vt = getVt(instr); const offset = getOffset(instr, 3); const addr = rspRegs.get(base) +% offset; const element = getElement(instr); var i: u32 = 0; while (i < 8) : (i += 1) { writeDMEM(u8, addr + i, rspRegs.getByte(vt, (element + i) & 15)); } if (isDisasm) info("[RSP] SDV ${}[{}], ${X}({})", .{vt, element, base, @bitCast(i32, offset)}); } /// SH - Store Halfword fn iSH(instr: u32) void { const imm = exts16(getImm16(instr)); const base = getRs(instr); const rt = getRt(instr); const addr = rspRegs.get(base) +% imm; writeDMEM(u16, addr, @truncate(u16, rspRegs.get(rt))); if (isDisasm) info("[RSP] SH ${}, ${}({}); ({X}h) = {X}h", .{rt, base, @bitCast(i32, imm), addr, @truncate(u16, rspRegs.get(rt))}); } /// SLL - Shift Left Logical fn iSLL(instr: u32) void { const sa = getSa(instr); const rd = getRd(instr); const rt = getRt(instr); rspRegs.set(rd, rspRegs.get(rt) << @truncate(u5, sa)); if (rd == 0) { if (isDisasm) info("[RSP] NOP", .{}); } else { if (isDisasm) info("[RSP] SLL ${}, ${}, {}; ${} = {X}h", .{rd, rt, sa, rd, rspRegs.get(rd)}); } } /// SLLV - Shift Left Logical Variable fn iSLLV(instr: u32) void { const rd = getRd(instr); const rs = getRs(instr); const rt = getRt(instr); rspRegs.set(rd, rspRegs.get(rt) << @truncate(u5, rspRegs.get(rs))); if (isDisasm) info("[RSP] SLLV ${}, ${}, ${}; ${} = {X}h", .{rd, rt, rs, rd, rspRegs.get(rd)}); } /// SLT - Set on Less Than fn iSLT(instr: u32) void { const rd = getRd(instr); const rs = getRs(instr); const rt = getRt(instr); rspRegs.set(rd, @intCast(u32, @bitCast(u1, @bitCast(i32, rspRegs.get(rs)) < @bitCast(i32, rspRegs.get(rt))))); if (isDisasm) info("[RSP] SLT ${}, ${}, ${}; ${} = {X}h", .{rd, rs, rt, rd, rspRegs.get(rd)}); } /// SLV - Store Long from Vector register fn iSLV(instr: u32) void { const base = getRs(instr); const vt = getVt(instr); const offset = getOffset(instr, 2); const addr = rspRegs.get(base) +% offset; const element = getElement(instr); var i: u32 = 0; while (i < 4) : (i += 1) { writeDMEM(u8, addr + i, @truncate(u8, rspRegs.getByte(vt, (element + i) & 15))); } if (isDisasm) info("[RSP] SLV ${}[{}], ${X}({})", .{vt, element, base, @bitCast(i32, offset)}); } /// SQV - Store Quad from Vector register fn iSQV(instr: u32) void { const base = getRs(instr); const vt = getVt(instr); const offset = getOffset(instr, 4); const addr = rspRegs.get(base) +% offset; const element = getElement(instr); var i: u32 = 0; while ((addr + i) <= ((addr & 0xFFFF_FFF0) + 15)) : (i += 1) { writeDMEM(u8, addr + i, @truncate(u8, rspRegs.getByte(vt, (element + i) & 15))); } if (isDisasm) info("[RSP] SQV ${}[{}], ${X}({})", .{vt, element, base, @bitCast(i32, offset)}); } /// SRA - Shift Right Arithmetic fn iSRA(instr: u32) void { const sa = 
getSa(instr); const rd = getRd(instr); const rt = getRt(instr); rspRegs.set(rd, @bitCast(u32, @bitCast(i32, rspRegs.get(rt)) >> @truncate(u5, sa))); if (isDisasm) info("[RSP] SRA ${}, ${}, {}; ${} = {X}h", .{rd, rt, sa, rd, rspRegs.get(rd)}); } /// SRL - Shift Right Logical fn iSRL(instr: u32) void { const sa = getSa(instr); const rd = getRd(instr); const rt = getRt(instr); rspRegs.set(rd, rspRegs.get(rt) >> @truncate(u5, sa)); if (isDisasm) info("[RSP] SRL ${}, ${}, {}; ${} = {X}h", .{rd, rt, sa, rd, rspRegs.get(rd)}); } /// SRLV - Shift Right Logical Variable fn iSRLV(instr: u32) void { const rd = getRd(instr); const rs = getRs(instr); const rt = getRt(instr); rspRegs.set(rd, rspRegs.get(rt) >> @truncate(u5, rspRegs.get(rs))); if (isDisasm) info("[RSP] SRLV ${}, ${}, ${}; ${} = {X}h", .{rd, rt, rs, rd, rspRegs.get(rd)}); } /// SSV - Store Short from Vector register fn iSSV(instr: u32) void { const base = getRs(instr); const vt = getVt(instr); const offset = getOffset(instr, 1); const addr = rspRegs.get(base) +% offset; const element = getElement(instr); writeDMEM(u16, addr, rspRegs.getLane(vt, element >> 1)); if (isDisasm) info("[RSP] SSV ${}[{}], ${X}({})", .{vt, element, base, @bitCast(i32, offset)}); } /// SUBU - SUB Unsigned fn iSUBU(instr: u32) void { const rd = getRd(instr); const rs = getRs(instr); const rt = getRt(instr); rspRegs.set(rd, rspRegs.get(rs) -% rspRegs.get(rt)); if (isDisasm) info("[RSP] SUBU ${}, ${}, ${}; ${} = {X}h", .{rd, rs, rt, rd, rspRegs.get(rd)}); } /// SW - Store Word fn iSW(instr: u32) void { const imm = exts16(getImm16(instr)); const base = getRs(instr); const rt = getRt(instr); const addr = rspRegs.get(base) +% imm; writeDMEM(u32, addr, rspRegs.get(rt)); if (isDisasm) info("[RSP] SW ${}, ${}({}); ({X}h) = {X}h", .{rt, base, @bitCast(i32, imm), addr, rspRegs.get(rt)}); } //const VCC = packed struct { // gte : [8]bool = undefined, // lten: [8]bool = undefined, //}; //const VCO = packed struct { // c : [8]bool = undefined, // ne: [8]bool = undefined, //}; //const VCE = packed struct { // n1: [8]bool = undefined, //}; /// VADD - Vector ADD fn iVADD(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const res = @intCast(i32, s.getSLane(i)) + @intCast(i32, t.getSLane(i)) + @intCast(i32, @bitCast(u1, rspRegs.vco.c[i])); rspRegs.acc.setLane(i, 0, @truncate(u16, @bitCast(u32, res))); rspRegs.setLane(vd, i, clamps32(res)); rspRegs.vco.c [i] = false; rspRegs.vco.ne[i] = false; } if (isDisasm) info("[RSP] VADD ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VADDC - Vector ADD Carry fn iVADDC(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { var res: u16 = undefined; rspRegs.vco.c [i] = @subWithOverflow(u16, s.getULane(i), s.getULane(i), &res); rspRegs.vco.ne[i] = res != 0; rspRegs.acc.setLane(i, 0, res); rspRegs.setLane(vd, i, res); } if (isDisasm) info("[RSP] VADD ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VAND - Vector AND fn iVAND(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { rspRegs.acc.setLane(i, 0, 
s.getULane(i) & t.getULane(i)); rspRegs.setLane(vd, i, rspRegs.acc.getLane(i, 0)); } if (isDisasm) info("[RSP] VAND ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VCH - Vector Select Clip Test High fn iVCH(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const eS = s.getSLane(i); const eT = t.getSLane(i); rspRegs.vco.c[i] = ((eS >> 15) & 1) != ((eT >> 15) & 1); const c = rspRegs.vcc.gte[i]; var eAbs: i16 = undefined; if (c) { eAbs = -eT; } else { eAbs = eT; } rspRegs.vce.n1[i] = c and (eS == (-eT -% 1)); const n1 = rspRegs.vce.n1[i]; rspRegs.vco.ne[i] = !n1 and (eS != eAbs); rspRegs.vcc.lten[i] = eS <= -eT; rspRegs.vcc.gte [i] = eS >= eT; var clip: bool = undefined; if (c) { clip = rspRegs.vcc.lten[i]; } else { clip = rspRegs.vcc.gte[i]; } if (clip) { rspRegs.acc.setSLane(i, @intCast(i48, eAbs)); } else { rspRegs.acc.setSLane(i, @intCast(i48, eS)); } rspRegs.setSLane(vd, i, @truncate(i16, rspRegs.acc.getSLane(i))); } if (isDisasm) info("[RSP] VCH ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VCL - Vector Select Clip Test Low fn iVCL(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const eS = s.getULane(i); const eT = t.getULane(i); const c = rspRegs.vco.c [i]; const ne = rspRegs.vco.ne[i]; if (!c and !ne) { rspRegs.vcc.gte[i] = eS >= eT; } const nET = @bitCast(u16, -@bitCast(i16, eT)); if (c and !ne) { const lte = eS <= nET; const eql = eS == nET; rspRegs.vcc.lten[i] = lte and eql; } var clip: bool = undefined; if (c) { clip = rspRegs.vcc.lten[i]; } else { clip = rspRegs.vcc.gte[i]; } var eAbs: u16 = undefined; if (c) { eAbs = nET; } else { eAbs = eT; } if (clip) { rspRegs.acc.setSLane(i, @bitCast(i48, @intCast(u48, eAbs))); } else { rspRegs.acc.setSLane(i, @bitCast(i48, @intCast(u48, eS))); } rspRegs.setSLane(vd, i, @truncate(i16, rspRegs.acc.getSLane(i))); } if (isDisasm) info("[RSP] VCL ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VGE - Vector select Greater than or Equal fn iVGE(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const eql = s.getSLane(i) == t.getSLane(i); const neg = eql and !(rspRegs.vco.c[i] and rspRegs.vco.ne[i]); rspRegs.vcc.gte[i] = neg or (s.getSLane(i) > t.getSLane(i)); if (rspRegs.vcc.gte[i]) { rspRegs.acc.setLane(i, 0, s.getULane(i)); } else { rspRegs.acc.setLane(i, 0, t.getULane(i)); } rspRegs.setLane(vd, i, rspRegs.acc.getLane(i, 0)); rspRegs.vcc.lten[i] = false; rspRegs.vco.c [i] = false; rspRegs.vco.ne [i] = false; } if (isDisasm) info("[RSP] VGE ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VLT - Vector select Less Than fn iVLT(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const eql = s.getSLane(i) == t.getSLane(i); const neg = eql and rspRegs.vco.c[i] and rspRegs.vco.ne[i]; rspRegs.vcc.gte[i] = neg or (s.getSLane(i) < t.getSLane(i)); if (rspRegs.vcc.gte[i]) { rspRegs.acc.setLane(i, 0, s.getULane(i)); } else { 
rspRegs.acc.setLane(i, 0, t.getULane(i)); } rspRegs.setLane(vd, i, rspRegs.acc.getLane(i, 0)); rspRegs.vcc.lten[i] = false; rspRegs.vco.c [i] = false; rspRegs.vco.ne [i] = false; } if (isDisasm) info("[RSP] VLT ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VMACF - Vector Multiply and ACcumulate of signed Fractions fn iVMACF(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const prod = @intCast(i32, s.getSLane(i)) * @intCast(i32, t.getSLane(i)) * 2; rspRegs.acc.setSLane(i, rspRegs.acc.getSLane(i) + @intCast(i48, prod)); rspRegs.setLane(vd, i, clamps32(@truncate(i32, rspRegs.acc.getSLane(i) >> 16))); } if (isDisasm) info("[RSP] VMACF ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); if (isDisasm) { i = 0; while (i < 8) : (i += 1) { info ("{X}h", .{rspRegs.getLane(vd, i)}); } } } /// VMACU - Vector Multiply and ACcumulate of Unsigned fractions fn iVMACU(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const prod = @intCast(i32, s.getSLane(i)) * @intCast(i32, t.getSLane(i)) * 2; rspRegs.acc.setSLane(i, rspRegs.acc.getSLane(i) + @intCast(i48, prod)); rspRegs.setLane(vd, i, clampu32(@truncate(i32, rspRegs.acc.getSLane(i) >> 16))); } if (isDisasm) info("[RSP] VMACU ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); if (isDisasm) { i = 0; while (i < 8) : (i += 1) { info ("{X}h", .{rspRegs.getLane(vd, i)}); } } } /// VMADH - Vector Multiply of high partial products? fn iVMADH(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const prod = @intCast(i32, s.getSLane(i)) * @intCast(i32, t.getSLane(i)); rspRegs.acc.setSLane(i, rspRegs.acc.getSLane(i) + (@intCast(i48, prod) << 16)); rspRegs.setLane(vd, i, clamps32(@truncate(i32, rspRegs.acc.getSLane(i) >> 16))); } if (isDisasm) info("[RSP] VMADH ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VMADL - Vector Multiply of low partial products? fn iVMADL(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const prod = @intCast(u32, s.getULane(i)) * @intCast(u32, t.getULane(i)); rspRegs.acc.setSLane(i, rspRegs.acc.getSLane(i) + (@intCast(i48, prod) >> 16)); // Taken from Dillonb's N64 emulator (see iVMADN()) if (isSignExtended(rspRegs.acc.getLane(i, 0), rspRegs.acc.getLane(i, 1))) { rspRegs.setLane(vd, i, rspRegs.acc.getLane(i, 2)); } else if (@bitCast(i16, rspRegs.acc.getLane(i, 0)) < 0) { rspRegs.setLane(vd, i, 0); } else { rspRegs.setLane(vd, i, 0xFFFF); } } if (isDisasm) info("[RSP] VMADL ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VMADM - Vector Multiply of mid partial products? 
fn iVMADM(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const prod = @intCast(i32, s.getSLane(i)) * @intCast(i32, t.getULane(i)); rspRegs.acc.setSLane(i, rspRegs.acc.getSLane(i) + @intCast(i48, prod)); rspRegs.setLane(vd, i, clamps32(@truncate(i32, rspRegs.acc.getSLane(i) >> 16))); } if (isDisasm) info("[RSP] VMADM ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VMADN - Vector Multiply of mid partial products? fn iVMADN(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const prod = @intCast(i32, s.getULane(i)) * @intCast(i32, t.getSLane(i)); rspRegs.acc.setSLane(i, rspRegs.acc.getSLane(i) + @intCast(i48, prod)); // Taken from Dillonb's N64 emulator (https://github.com/Dillonb/n64/blob/68a0c186d29b3fa02ec038782313c5ae8e181c06/src/cpu/rsp_vector_instructions.c#L1058) if (isSignExtended(rspRegs.acc.getLane(i, 0), rspRegs.acc.getLane(i, 1))) { rspRegs.setLane(vd, i, rspRegs.acc.getLane(i, 2)); } else if (@bitCast(i16, rspRegs.acc.getLane(i, 0)) < 0) { rspRegs.setLane(vd, i, 0); } else { rspRegs.setLane(vd, i, 0xFFFF); } } if (isDisasm) info("[RSP] VMADN ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VMOV - Vector MOVe fn iVMOV(instr: u32) void { const vd = getVd(instr); const vt = getVt(instr); const eD = getDElement(instr); var eS: u32 = undefined; switch (getElement(instr)) { 0 ... 1 => eS = (getElement(instr) & 0) | (getVs(instr) & 7), 2 ... 3 => eS = (getElement(instr) & 1) | (getVs(instr) & 6), 4 ... 7 => eS = (getElement(instr) & 3) | (getVs(instr) & 4), 8 ... 15 => eS = (getElement(instr) & 7) | (getVs(instr) & 0), else => unreachable, } rspRegs.setLane(vd, eD, rspRegs.broadcast(vt, getBroadcastMod(instr)).getULane(eS)); var i: u32 = 0; while (i < 8) : (i += 1) { rspRegs.acc.setLane(i, 0, rspRegs.broadcast(vt, getBroadcastMod(instr)).getULane(i)); } if (isDisasm) info("[RSP] VMOV ${}[{}], ${}[{}]", .{vd, eD, vt, eS}); } /// VMRG - Vector MeRGe fn iVMRG(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { if (rspRegs.vcc.gte[i]) { rspRegs.acc.setLane(i, 0, s.getULane(i)); } else { rspRegs.acc.setLane(i, 0, t.getULane(i)); } rspRegs.setLane(vd, i, rspRegs.acc.getLane(i, 0)); rspRegs.vco.c [i] = false; rspRegs.vco.ne [i] = false; } if (isDisasm) info("[RSP] VMRG ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VMUDH - Vector Multiply of high partial products? fn iVMUDH(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const prod = @intCast(i32, s.getSLane(i) * @intCast(i32, t.getSLane(i))); rspRegs.acc.setSLane(i, @intCast(i48, prod) << 16); rspRegs.setLane(vd, i, clamps32(prod)); } if (isDisasm) info("[RSP] VMUDH ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VMUDL - Vector Multiply of low partial products? 
fn iVMUDL(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const prod = @intCast(u32, s.getULane(i)) * @intCast(u32, t.getULane(i)); rspRegs.acc.setULane(i, @intCast(u48, prod) >> 16); rspRegs.setLane(vd, i, clampu32(@truncate(i32, rspRegs.acc.getSLane(i)))); } if (isDisasm) info("[RSP] VMUDL ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VMUDM - Vector Multiply of mid partial products? fn iVMUDM(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const prod = @intCast(i32, s.getSLane(i)) * @intCast(i32, t.getSLane(i)); rspRegs.acc.setSLane(i, @intCast(i48, prod)); rspRegs.setLane(vd, i, clamps32(@truncate(i32, rspRegs.acc.getSLane(i)) >> 16)); } if (isDisasm) info("[RSP] VMUDM ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VMUDN - Vector Multiply of mid partial products? fn iVMUDN(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const prod = @intCast(i32, s.getULane(i) * @intCast(i32, t.getSLane(i))); rspRegs.acc.setSLane(i, @intCast(i48, prod)); // Taken from Dillonb's N64 emulator (see iVMADN()) if (isSignExtended(rspRegs.acc.getLane(i, 0), rspRegs.acc.getLane(i, 1))) { rspRegs.setLane(vd, i, rspRegs.acc.getLane(i, 2)); } else if (@bitCast(i16, rspRegs.acc.getLane(i, 0)) < 0) { rspRegs.setLane(vd, i, 0); } else { rspRegs.setLane(vd, i, 0xFFFF); } } if (isDisasm) info("[RSP] VMUDN ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VMULF - Vector MULtiply of signed Fractions fn iVMULF(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const prod = @intCast(i32, s.getSLane(i)) * @intCast(i32, t.getSLane(i)) * 2 + 0x8000; rspRegs.acc.setSLane(i, @intCast(i48, prod)); rspRegs.setLane(vd, i, clamps32(@truncate(i32, rspRegs.acc.getSLane(i) >> 16))); } if (isDisasm) info("[RSP] VMULF ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VMULU - Vector MULtiply of Unsigned fractions fn iVMULU(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const prod = @intCast(i32, s.getSLane(i)) * @intCast(i32, t.getSLane(i)) * 2 + 0x8000; rspRegs.acc.setSLane(i, @intCast(i48, prod)); rspRegs.setLane(vd, i, clampu32(@truncate(i32, rspRegs.acc.getSLane(i) >> 16))); } if (isDisasm) info("[RSP] VMULU ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VOR - Vector OR fn iVOR(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { rspRegs.acc.setLane(i, 0, s.getULane(i) | t.getULane(i)); rspRegs.setLane(vd, i, rspRegs.acc.getLane(i, 0)); } if (isDisasm) info("[RSP] VOR ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VRCPH - Vector ReCiProcal High 
fn iVRCPH(instr: u32) void { const vd = getVd(instr); const vt = getVt(instr); const eD = getDElement(instr); var eS: u32 = undefined; switch (getElement(instr)) { 0 ... 1 => eS = (getElement(instr) & 0) | (getVs(instr) & 7), 2 ... 3 => eS = (getElement(instr) & 1) | (getVs(instr) & 6), 4 ... 7 => eS = (getElement(instr) & 3) | (getVs(instr) & 4), 8 ... 15 => eS = (getElement(instr) & 7) | (getVs(instr) & 0), else => unreachable, } rspRegs.setLane(vd, eD, rspRegs.divOut); rspRegs.divIn = rspRegs.getLane(vt, eS); rspRegs.isDIVInLoaded = true; var i: u32 = 0; while (i < 8) : (i += 1) { rspRegs.acc.setLane(i, 0, rspRegs.broadcast(vt, getBroadcastMod(instr)).getULane(i)); } if (isDisasm) info("[RSP] VRCPH ${}[{}], ${}[{}]", .{vd, eD, vt, eS}); } /// VRCPL - Vector ReCiProcal Low fn iVRCPL(instr: u32) void { const vd = getVd(instr); const vt = getVt(instr); const eD = getDElement(instr); var eS: u32 = undefined; switch (getElement(instr)) { 0 ... 1 => eS = (getElement(instr) & 0) | (getVs(instr) & 7), 2 ... 3 => eS = (getElement(instr) & 1) | (getVs(instr) & 6), 4 ... 7 => eS = (getElement(instr) & 3) | (getVs(instr) & 4), 8 ... 15 => eS = (getElement(instr) & 7) | (getVs(instr) & 0), else => unreachable, } var input: i32 = undefined; if (rspRegs.isDIVInLoaded) { input = @bitCast(i32, (@intCast(u32, rspRegs.divIn) << 16) | rspRegs.getLane(vt, eS)); } else { input = @intCast(i32, @bitCast(i16, rspRegs.getLane(vt, eS))); } const res = doReciprocal(input); rspRegs.setLane(vd, eD, @truncate(u16, res)); rspRegs.divOut = @truncate(u16, res >> 16); rspRegs.divIn = 0; rspRegs.isDIVInLoaded = false; var i: u32 = 0; while (i < 8) : (i += 1) { rspRegs.acc.setLane(i, 0, rspRegs.broadcast(vt, getBroadcastMod(instr)).getULane(i)); } if (isDisasm) info("[RSP] VRCPL ${}[{}], ${}[{}]", .{vd, eD, vt, eS}); } /// VRNDP - Vector RouND Positive fn iVRNDP(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { var prod: u32 = undefined; if (vs == 1) { prod = exts16(t.getULane(i)) << 16; } else { prod = exts16(t.getULane(i)); } rspRegs.acc.setSLane(i, rspRegs.acc.getSLane(i) + @bitCast(i48, exts32(prod))); rspRegs.setLane(vd, i, clamps32(@truncate(i32, rspRegs.acc.getSLane(i) >> 16))); } if (isDisasm) info("[RSP] VRNDP ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VSAR - Vector Select Accumulator Read fn iVSAR(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); var i: u32 = 0; while (i < 8) : (i += 1) { switch (e) { 0x8 => rspRegs.setLane(vd, i, rspRegs.acc.getLane(i, 0)), 0x9 => rspRegs.setLane(vd, i, rspRegs.acc.getLane(i, 1)), 0xA => rspRegs.setLane(vd, i, rspRegs.acc.getLane(i, 2)), else => rspRegs.setLane(vd, i, 0), } } if (isDisasm) info("[RSP] VSAR ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VSUB - Vector SUBtract fn iVSUB(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { const res = @intCast(i32, s.getSLane(i)) - @intCast(i32, t.getSLane(i)) - @intCast(i32, @bitCast(u1, rspRegs.vco.c[i])); rspRegs.acc.setLane(i, 0, @truncate(u16, @bitCast(u32, res))); rspRegs.setLane(vd, i, clamps32(res)); rspRegs.vco.c [i] = false; rspRegs.vco.ne[i] = false; } if (isDisasm) info("[RSP] VSUB ${}, ${}, ${}[{}]", .{vd, 
vs, vt, e}); } /// VSUBC - Vector SUBtract Carry fn iVSUBC(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { var res: u16 = undefined; rspRegs.vco.c [i] = @subWithOverflow(u16, s.getULane(i), t.getULane(i), &res); rspRegs.vco.ne[i] = res != 0; rspRegs.acc.setLane(i, 0, res); rspRegs.setLane(vd, i, res); } if (isDisasm) info("[RSP] VSUBC ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// VXOR - Vector XOR fn iVXOR(instr: u32) void { const vd = getVd(instr); const vs = getVs(instr); const vt = getVt(instr); const e = getBroadcastMod(instr); const s = rspRegs.getVPR(vs); const t = rspRegs.broadcast(vt, e); var i: u32 = 0; while (i < 8) : (i += 1) { rspRegs.acc.setLane(i, 0, s.getULane(i) ^ t.getULane(i)); rspRegs.setLane(vd, i, rspRegs.acc.getLane(i, 0)); } if (isDisasm) info("[RSP] VXOR ${}, ${}, ${}[{}]", .{vd, vs, vt, e}); } /// XOR - XOR fn iXOR(instr: u32) void { const rd = getRd(instr); const rs = getRs(instr); const rt = getRt(instr); rspRegs.set(rd, rspRegs.get(rs) ^ rspRegs.get(rt)); if (isDisasm) info("[RSP] XOR ${}, ${}, ${}; ${} = {X}h", .{rd, rs, rt, rd, rspRegs.get(rd)}); } /// XORI - XOR Immediate fn iXORI(instr: u32) void { const imm = @intCast(u32, getImm16(instr)); const rs = getRs(instr); const rt = getRt(instr); rspRegs.set(rt, rspRegs.get(rs) ^ imm); if (isDisasm) info("[RSP] XORI ${}, ${}, {X}h; ${} = {X}h", .{rt, rs, imm, rt, rspRegs.get(rt)}); } pub fn step() void { if (rspRegs.rspStatus.h ) return; if (rspRegs.rspStatus.ss) @panic("rsp single step"); const instr = fetchInstr(); decodeInstr(instr); }
src/core/rsp.zig
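// Minimal sketch, not part of the original rsp.zig above: the RSP status write handlers
// (the CP0 register-4 case in setCP0 and the RSPStatus case in write32) treat the written
// word as pairs of "write 1 to clear" / "write 1 to set" bits, one pair per status flag.
// The helper below only restates that convention; the name `writeClearSet` is hypothetical
// and the snippet is an illustration under that assumption, not code from the file.
fn writeClearSet(current: bool, data: u32, comptime clearBit: u5, comptime setBit: u5) bool {
    var flag = current;
    if ((data & (@as(u32, 1) << clearBit)) != 0) flag = false; // the clear bit is tested first...
    if ((data & (@as(u32, 1) << setBit)) != 0) flag = true; // ...then the set bit, matching the order of the if-chains above
    return flag;
}

// Hypothetical usage mirroring one pair from the handler, e.g. the single-step flag (bits 5/6):
// rspRegs.rspStatus.ss = writeClearSet(rspRegs.rspStatus.ss, data, 5, 6);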
pub const GUID_DEVINTERFACE_SENSOR = Guid.initString("ba1bb692-9b7a-4833-9a1e-525ed134e7e2"); pub const SENSOR_EVENT_STATE_CHANGED = Guid.initString("bfd96016-6bd7-4560-ad34-f2f6607e8f81"); pub const SENSOR_EVENT_DATA_UPDATED = Guid.initString("2ed0f2a4-0087-41d3-87db-6773370b3c88"); pub const SENSOR_EVENT_PROPERTY_CHANGED = Guid.initString("2358f099-84c9-4d3d-90df-c2421e2b2045"); pub const SENSOR_EVENT_ACCELEROMETER_SHAKE = Guid.initString("825f5a94-0f48-4396-9ca0-6ecb5c99d915"); pub const SENSOR_EVENT_PARAMETER_COMMON_GUID = Guid.initString("64346e30-8728-4b34-bdf6-4f52442c5c28"); pub const SENSOR_ERROR_PARAMETER_COMMON_GUID = Guid.initString("77112bcd-fce1-4f43-b8b8-a88256adb4b3"); pub const SENSOR_PROPERTY_COMMON_GUID = Guid.initString("7f8383ec-d3ec-495c-a8cf-b8bbe85c2920"); pub const SENSOR_CATEGORY_ALL = Guid.initString("c317c286-c468-4288-9975-d4c4587c442c"); pub const SENSOR_CATEGORY_LOCATION = Guid.initString("bfa794e4-f964-4fdb-90f6-51056bfe4b44"); pub const SENSOR_CATEGORY_ENVIRONMENTAL = Guid.initString("323439aa-7f66-492b-ba0c-73e9aa0a65d5"); pub const SENSOR_CATEGORY_MOTION = Guid.initString("cd09daf1-3b2e-4c3d-b598-b5e5ff93fd46"); pub const SENSOR_CATEGORY_ORIENTATION = Guid.initString("9e6c04b6-96fe-4954-b726-68682a473f69"); pub const SENSOR_CATEGORY_MECHANICAL = Guid.initString("8d131d68-8ef7-4656-80b5-cccbd93791c5"); pub const SENSOR_CATEGORY_ELECTRICAL = Guid.initString("fb73fcd8-fc4a-483c-ac58-27b691c6beff"); pub const SENSOR_CATEGORY_BIOMETRIC = Guid.initString("ca19690f-a2c7-477d-a99e-99ec6e2b5648"); pub const SENSOR_CATEGORY_LIGHT = Guid.initString("17a665c0-9063-4216-b202-5c7a255e18ce"); pub const SENSOR_CATEGORY_SCANNER = Guid.initString("b000e77e-f5b5-420f-815d-0270a726f270"); pub const SENSOR_CATEGORY_OTHER = Guid.initString("2c90e7a9-f4c9-4fa2-af37-56d471fe5a3d"); pub const SENSOR_CATEGORY_UNSUPPORTED = Guid.initString("2beae7fa-19b0-48c5-a1f6-b5480dc206b0"); pub const SENSOR_TYPE_LOCATION_GPS = Guid.initString("ed4ca589-327a-4ff9-a560-91da4b48275e"); pub const SENSOR_TYPE_LOCATION_STATIC = Guid.initString("095f8184-0fa9-4445-8e6e-b70f320b6b4c"); pub const SENSOR_TYPE_LOCATION_LOOKUP = Guid.initString("3b2eae4a-72ce-436d-96d2-3c5b8570e987"); pub const SENSOR_TYPE_LOCATION_TRIANGULATION = Guid.initString("691c341a-5406-4fe1-942f-2246cbeb39e0"); pub const SENSOR_TYPE_LOCATION_OTHER = Guid.initString("9b2d0566-0368-4f71-b88d-533f132031de"); pub const SENSOR_TYPE_LOCATION_BROADCAST = Guid.initString("d26988cf-5162-4039-bb17-4c58b698e44a"); pub const SENSOR_TYPE_LOCATION_DEAD_RECKONING = Guid.initString("1a37d538-f28b-42da-9fce-a9d0a2a6d829"); pub const SENSOR_TYPE_ENVIRONMENTAL_TEMPERATURE = Guid.initString("04fd0ec4-d5da-45fa-95a9-5db38ee19306"); pub const SENSOR_TYPE_ENVIRONMENTAL_ATMOSPHERIC_PRESSURE = Guid.initString("0e903829-ff8a-4a93-97df-3dcbde402288"); pub const SENSOR_TYPE_ENVIRONMENTAL_HUMIDITY = Guid.initString("5c72bf67-bd7e-4257-990b-98a3ba3b400a"); pub const SENSOR_TYPE_ENVIRONMENTAL_WIND_SPEED = Guid.initString("dd50607b-a45f-42cd-8efd-ec61761c4226"); pub const SENSOR_TYPE_ENVIRONMENTAL_WIND_DIRECTION = Guid.initString("9ef57a35-9306-434d-af09-37fa5a9c00bd"); pub const SENSOR_TYPE_ACCELEROMETER_1D = Guid.initString("c04d2387-7340-4cc2-991e-3b18cb8ef2f4"); pub const SENSOR_TYPE_ACCELEROMETER_2D = Guid.initString("b2c517a8-f6b5-4ba6-a423-5df560b4cc07"); pub const SENSOR_TYPE_ACCELEROMETER_3D = Guid.initString("c2fb0f5f-e2d2-4c78-bcd0-352a9582819d"); pub const SENSOR_TYPE_MOTION_DETECTOR = Guid.initString("5c7c1a12-30a5-43b9-a4b2-cf09ec5b7be8"); pub 
const SENSOR_TYPE_GYROMETER_1D = Guid.initString("fa088734-f552-4584-8324-edfaf649652c"); pub const SENSOR_TYPE_GYROMETER_2D = Guid.initString("31ef4f83-919b-48bf-8de0-5d7a9d240556"); pub const SENSOR_TYPE_GYROMETER_3D = Guid.initString("09485f5a-759e-42c2-bd4b-a349b75c8643"); pub const SENSOR_TYPE_SPEEDOMETER = Guid.initString("6bd73c1f-0bb4-4310-81b2-dfc18a52bf94"); pub const SENSOR_TYPE_COMPASS_1D = Guid.initString("a415f6c5-cb50-49d0-8e62-a8270bd7a26c"); pub const SENSOR_TYPE_COMPASS_2D = Guid.initString("15655cc0-997a-4d30-84db-57caba3648bb"); pub const SENSOR_TYPE_COMPASS_3D = Guid.initString("76b5ce0d-17dd-414d-93a1-e127f40bdf6e"); pub const SENSOR_TYPE_INCLINOMETER_1D = Guid.initString("b96f98c5-7a75-4ba7-94e9-ac868c966dd8"); pub const SENSOR_TYPE_INCLINOMETER_2D = Guid.initString("ab140f6d-83eb-4264-b70b-b16a5b256a01"); pub const SENSOR_TYPE_INCLINOMETER_3D = Guid.initString("b84919fb-ea85-4976-8444-6f6f5c6d31db"); pub const SENSOR_TYPE_DISTANCE_1D = Guid.initString("5f14ab2f-1407-4306-a93f-b1dbabe4f9c0"); pub const SENSOR_TYPE_DISTANCE_2D = Guid.initString("5cf9a46c-a9a2-4e55-b6a1-a04aafa95a92"); pub const SENSOR_TYPE_DISTANCE_3D = Guid.initString("a20cae31-0e25-4772-9fe5-96608a1354b2"); pub const SENSOR_TYPE_AGGREGATED_QUADRANT_ORIENTATION = Guid.initString("9f81f1af-c4ab-4307-9904-c828bfb90829"); pub const SENSOR_TYPE_AGGREGATED_DEVICE_ORIENTATION = Guid.initString("cdb5d8f7-3cfd-41c8-8542-cce622cf5d6e"); pub const SENSOR_TYPE_AGGREGATED_SIMPLE_DEVICE_ORIENTATION = Guid.initString("86a19291-0482-402c-bf4c-addac52b1c39"); pub const SENSOR_TYPE_VOLTAGE = Guid.initString("c5484637-4fb7-4953-98b8-a56d8aa1fb1e"); pub const SENSOR_TYPE_CURRENT = Guid.initString("5adc9fce-15a0-4bbe-a1ad-2d38a9ae831c"); pub const SENSOR_TYPE_CAPACITANCE = Guid.initString("ca2ffb1c-2317-49c0-a0b4-b63ce63461a0"); pub const SENSOR_TYPE_RESISTANCE = Guid.initString("9993d2c8-c157-4a52-a7b5-195c76037231"); pub const SENSOR_TYPE_INDUCTANCE = Guid.initString("dc1d933f-c435-4c7d-a2fe-607192a524d3"); pub const SENSOR_TYPE_ELECTRICAL_POWER = Guid.initString("212f10f5-14ab-4376-9a43-a7794098c2fe"); pub const SENSOR_TYPE_POTENTIOMETER = Guid.initString("2b3681a9-cadc-45aa-a6ff-54957c8bb440"); pub const SENSOR_TYPE_FREQUENCY = Guid.initString("8cd2cbb6-73e6-4640-a709-72ae8fb60d7f"); pub const SENSOR_TYPE_BOOLEAN_SWITCH = Guid.initString("9c7e371f-1041-460b-8d5c-71e4752e350c"); pub const SENSOR_TYPE_MULTIVALUE_SWITCH = Guid.initString("b3ee4d76-37a4-4402-b25e-99c60a775fa1"); pub const SENSOR_TYPE_FORCE = Guid.initString("c2ab2b02-1a1c-4778-a81b-954a1788cc75"); pub const SENSOR_TYPE_SCALE = Guid.initString("c06dd92c-7feb-438e-9bf6-82207fff5bb8"); pub const SENSOR_TYPE_PRESSURE = Guid.initString("26d31f34-6352-41cf-b793-ea0713d53d77"); pub const SENSOR_TYPE_STRAIN = Guid.initString("c6d1ec0e-6803-4361-ad3d-85bcc58c6d29"); pub const SENSOR_TYPE_BOOLEAN_SWITCH_ARRAY = Guid.initString("545c8ba5-b143-4545-868f-ca7fd986b4f6"); pub const SENSOR_TYPE_HUMAN_PRESENCE = Guid.initString("c138c12b-ad52-451c-9375-87f518ff10c6"); pub const SENSOR_TYPE_HUMAN_PROXIMITY = Guid.initString("5220dae9-3179-4430-9f90-06266d2a34de"); pub const SENSOR_TYPE_TOUCH = Guid.initString("17db3018-06c4-4f7d-81af-9274b7599c27"); pub const SENSOR_TYPE_AMBIENT_LIGHT = Guid.initString("97f115c8-599a-4153-8894-d2d12899918a"); pub const SENSOR_TYPE_RFID_SCANNER = Guid.initString("44328ef5-02dd-4e8d-ad5d-9249832b2eca"); pub const SENSOR_TYPE_BARCODE_SCANNER = Guid.initString("990b3d8f-85bb-45ff-914d-998c04f372df"); pub const SENSOR_TYPE_CUSTOM = 
Guid.initString("e83af229-8640-4d18-a213-e22675ebb2c3"); pub const SENSOR_TYPE_UNKNOWN = Guid.initString("10ba83e3-ef4f-41ed-9885-a87d6435a8e1"); pub const SENSOR_DATA_TYPE_COMMON_GUID = Guid.initString("db5e0cf2-cf1f-4c18-b46c-d86011d62150"); pub const SENSOR_DATA_TYPE_LOCATION_GUID = Guid.initString("055c74d8-ca6f-47d6-95c6-1ed3637a0ff4"); pub const SENSOR_DATA_TYPE_ENVIRONMENTAL_GUID = Guid.initString("8b0aa2f1-2d57-42ee-8cc0-4d27622b46c4"); pub const SENSOR_DATA_TYPE_MOTION_GUID = Guid.initString("3f8a69a2-07c5-4e48-a965-cd797aab56d5"); pub const SENSOR_DATA_TYPE_ORIENTATION_GUID = Guid.initString("1637d8a2-4248-4275-865d-558de84aedfd"); pub const SENSOR_DATA_TYPE_GUID_MECHANICAL_GUID = Guid.initString("38564a7c-f2f2-49bb-9b2b-ba60f66a58df"); pub const SENSOR_DATA_TYPE_BIOMETRIC_GUID = Guid.initString("2299288a-6d9e-4b0b-b7ec-3528f89e40af"); pub const SENSOR_DATA_TYPE_LIGHT_GUID = Guid.initString("e4c77ce2-dcb7-46e9-8439-4fec548833a6"); pub const SENSOR_DATA_TYPE_SCANNER_GUID = Guid.initString("d7a59a3c-3421-44ab-8d3a-9de8ab6c4cae"); pub const SENSOR_DATA_TYPE_ELECTRICAL_GUID = Guid.initString("bbb246d1-e242-4780-a2d3-cded84f35842"); pub const SENSOR_DATA_TYPE_CUSTOM_GUID = Guid.initString("b14c764f-07cf-41e8-9d82-ebe3d0776a6f"); pub const SENSOR_PROPERTY_TEST_GUID = Guid.initString("e1e962f4-6e65-45f7-9c36-d487b7b1bd34"); pub const GNSS_CLEAR_ALL_ASSISTANCE_DATA = @as(u32, 1); //-------------------------------------------------------------------------------- // Section: Types (17) //-------------------------------------------------------------------------------- const CLSID_SensorManager_Value = @import("../zig.zig").Guid.initString("77a1c827-fcd2-4689-8915-9d613cc5fa3e"); pub const CLSID_SensorManager = &CLSID_SensorManager_Value; const CLSID_SensorCollection_Value = @import("../zig.zig").Guid.initString("79c43adb-a429-469f-aa39-2f2b74b75937"); pub const CLSID_SensorCollection = &CLSID_SensorCollection_Value; const CLSID_Sensor_Value = @import("../zig.zig").Guid.initString("e97ced00-523a-4133-bf6f-d3a2dae7f6ba"); pub const CLSID_Sensor = &CLSID_Sensor_Value; const CLSID_SensorDataReport_Value = @import("../zig.zig").Guid.initString("4ea9d6ef-694b-4218-8816-ccda8da74bba"); pub const CLSID_SensorDataReport = &CLSID_SensorDataReport_Value; pub const SensorState = enum(i32) { MIN = 0, // READY = 0, this enum value conflicts with MIN NOT_AVAILABLE = 1, NO_DATA = 2, INITIALIZING = 3, ACCESS_DENIED = 4, ERROR = 5, // MAX = 5, this enum value conflicts with ERROR }; pub const SENSOR_STATE_MIN = SensorState.MIN; pub const SENSOR_STATE_READY = SensorState.MIN; pub const SENSOR_STATE_NOT_AVAILABLE = SensorState.NOT_AVAILABLE; pub const SENSOR_STATE_NO_DATA = SensorState.NO_DATA; pub const SENSOR_STATE_INITIALIZING = SensorState.INITIALIZING; pub const SENSOR_STATE_ACCESS_DENIED = SensorState.ACCESS_DENIED; pub const SENSOR_STATE_ERROR = SensorState.ERROR; pub const SENSOR_STATE_MAX = SensorState.ERROR; pub const SensorConnectionType = enum(i32) { INTEGRATED = 0, ATTACHED = 1, EXTERNAL = 2, }; pub const SENSOR_CONNECTION_TYPE_PC_INTEGRATED = SensorConnectionType.INTEGRATED; pub const SENSOR_CONNECTION_TYPE_PC_ATTACHED = SensorConnectionType.ATTACHED; pub const SENSOR_CONNECTION_TYPE_PC_EXTERNAL = SensorConnectionType.EXTERNAL; pub const LOCATION_DESIRED_ACCURACY = enum(i32) { DEFAULT = 0, HIGH = 1, }; pub const LOCATION_DESIRED_ACCURACY_DEFAULT = LOCATION_DESIRED_ACCURACY.DEFAULT; pub const LOCATION_DESIRED_ACCURACY_HIGH = LOCATION_DESIRED_ACCURACY.HIGH; pub const LOCATION_POSITION_SOURCE = 
enum(i32) { CELLULAR = 0, SATELLITE = 1, WIFI = 2, IPADDRESS = 3, UNKNOWN = 4, }; pub const LOCATION_POSITION_SOURCE_CELLULAR = LOCATION_POSITION_SOURCE.CELLULAR; pub const LOCATION_POSITION_SOURCE_SATELLITE = LOCATION_POSITION_SOURCE.SATELLITE; pub const LOCATION_POSITION_SOURCE_WIFI = LOCATION_POSITION_SOURCE.WIFI; pub const LOCATION_POSITION_SOURCE_IPADDRESS = LOCATION_POSITION_SOURCE.IPADDRESS; pub const LOCATION_POSITION_SOURCE_UNKNOWN = LOCATION_POSITION_SOURCE.UNKNOWN; pub const SimpleDeviceOrientation = enum(i32) { NOT_ROTATED = 0, ROTATED_90 = 1, ROTATED_180 = 2, ROTATED_270 = 3, ROTATED_FACE_UP = 4, ROTATED_FACE_DOWN = 5, }; pub const SIMPLE_DEVICE_ORIENTATION_NOT_ROTATED = SimpleDeviceOrientation.NOT_ROTATED; pub const SIMPLE_DEVICE_ORIENTATION_ROTATED_90 = SimpleDeviceOrientation.ROTATED_90; pub const SIMPLE_DEVICE_ORIENTATION_ROTATED_180 = SimpleDeviceOrientation.ROTATED_180; pub const SIMPLE_DEVICE_ORIENTATION_ROTATED_270 = SimpleDeviceOrientation.ROTATED_270; pub const SIMPLE_DEVICE_ORIENTATION_ROTATED_FACE_UP = SimpleDeviceOrientation.ROTATED_FACE_UP; pub const SIMPLE_DEVICE_ORIENTATION_ROTATED_FACE_DOWN = SimpleDeviceOrientation.ROTATED_FACE_DOWN; pub const MagnetometerAccuracy = enum(i32) { UNKNOWN = 0, UNRELIABLE = 1, APPROXIMATE = 2, HIGH = 3, }; pub const MAGNETOMETER_ACCURACY_UNKNOWN = MagnetometerAccuracy.UNKNOWN; pub const MAGNETOMETER_ACCURACY_UNRELIABLE = MagnetometerAccuracy.UNRELIABLE; pub const MAGNETOMETER_ACCURACY_APPROXIMATE = MagnetometerAccuracy.APPROXIMATE; pub const MAGNETOMETER_ACCURACY_HIGH = MagnetometerAccuracy.HIGH; // TODO: this type is limited to platform 'windows6.1' const IID_ISensorManager_Value = @import("../zig.zig").Guid.initString("bd77db67-45a8-42dc-8d00-6dcf15f8377a"); pub const IID_ISensorManager = &IID_ISensorManager_Value; pub const ISensorManager = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetSensorsByCategory: fn( self: *const ISensorManager, sensorCategory: ?*Guid, ppSensorsFound: ?*?*ISensorCollection, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSensorsByType: fn( self: *const ISensorManager, sensorType: ?*Guid, ppSensorsFound: ?*?*ISensorCollection, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSensorByID: fn( self: *const ISensorManager, sensorID: ?*Guid, ppSensor: ?*?*ISensor, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetEventSink: fn( self: *const ISensorManager, pEvents: ?*ISensorManagerEvents, ) callconv(@import("std").os.windows.WINAPI) HRESULT, RequestPermissions: fn( self: *const ISensorManager, hParent: ?HWND, pSensors: ?*ISensorCollection, fModal: BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorManager_GetSensorsByCategory(self: *const T, sensorCategory: ?*Guid, ppSensorsFound: ?*?*ISensorCollection) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorManager.VTable, self.vtable).GetSensorsByCategory(@ptrCast(*const ISensorManager, self), sensorCategory, ppSensorsFound); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorManager_GetSensorsByType(self: *const T, sensorType: ?*Guid, ppSensorsFound: ?*?*ISensorCollection) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorManager.VTable, self.vtable).GetSensorsByType(@ptrCast(*const ISensorManager, self), sensorType, 
ppSensorsFound); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorManager_GetSensorByID(self: *const T, sensorID: ?*Guid, ppSensor: ?*?*ISensor) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorManager.VTable, self.vtable).GetSensorByID(@ptrCast(*const ISensorManager, self), sensorID, ppSensor); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorManager_SetEventSink(self: *const T, pEvents: ?*ISensorManagerEvents) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorManager.VTable, self.vtable).SetEventSink(@ptrCast(*const ISensorManager, self), pEvents); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorManager_RequestPermissions(self: *const T, hParent: ?HWND, pSensors: ?*ISensorCollection, fModal: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorManager.VTable, self.vtable).RequestPermissions(@ptrCast(*const ISensorManager, self), hParent, pSensors, fModal); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows8.0' const IID_ILocationPermissions_Value = @import("../zig.zig").Guid.initString("d5fb0a7f-e74e-44f5-8e02-4806863a274f"); pub const IID_ILocationPermissions = &IID_ILocationPermissions_Value; pub const ILocationPermissions = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetGlobalLocationPermission: fn( self: *const ILocationPermissions, pfEnabled: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CheckLocationCapability: fn( self: *const ILocationPermissions, dwClientThreadId: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ILocationPermissions_GetGlobalLocationPermission(self: *const T, pfEnabled: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ILocationPermissions.VTable, self.vtable).GetGlobalLocationPermission(@ptrCast(*const ILocationPermissions, self), pfEnabled); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ILocationPermissions_CheckLocationCapability(self: *const T, dwClientThreadId: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ILocationPermissions.VTable, self.vtable).CheckLocationCapability(@ptrCast(*const ILocationPermissions, self), dwClientThreadId); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISensorCollection_Value = @import("../zig.zig").Guid.initString("23571e11-e545-4dd8-a337-b89bf44b10df"); pub const IID_ISensorCollection = &IID_ISensorCollection_Value; pub const ISensorCollection = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetAt: fn( self: *const ISensorCollection, ulIndex: u32, ppSensor: ?*?*ISensor, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetCount: fn( self: *const ISensorCollection, pCount: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Add: fn( self: *const ISensorCollection, pSensor: ?*ISensor, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Remove: fn( self: *const ISensorCollection, pSensor: ?*ISensor, ) callconv(@import("std").os.windows.WINAPI) HRESULT, RemoveByID: fn( self: *const ISensorCollection, sensorID: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Clear: fn( self: *const 
ISensorCollection, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorCollection_GetAt(self: *const T, ulIndex: u32, ppSensor: ?*?*ISensor) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorCollection.VTable, self.vtable).GetAt(@ptrCast(*const ISensorCollection, self), ulIndex, ppSensor); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorCollection_GetCount(self: *const T, pCount: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorCollection.VTable, self.vtable).GetCount(@ptrCast(*const ISensorCollection, self), pCount); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorCollection_Add(self: *const T, pSensor: ?*ISensor) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorCollection.VTable, self.vtable).Add(@ptrCast(*const ISensorCollection, self), pSensor); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorCollection_Remove(self: *const T, pSensor: ?*ISensor) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorCollection.VTable, self.vtable).Remove(@ptrCast(*const ISensorCollection, self), pSensor); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorCollection_RemoveByID(self: *const T, sensorID: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorCollection.VTable, self.vtable).RemoveByID(@ptrCast(*const ISensorCollection, self), sensorID); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorCollection_Clear(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorCollection.VTable, self.vtable).Clear(@ptrCast(*const ISensorCollection, self)); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISensor_Value = @import("../zig.zig").Guid.initString("5fa08f80-2657-458e-af75-46f73fa6ac5c"); pub const IID_ISensor = &IID_ISensor_Value; pub const ISensor = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetID: fn( self: *const ISensor, pID: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetCategory: fn( self: *const ISensor, pSensorCategory: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetType: fn( self: *const ISensor, pSensorType: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetFriendlyName: fn( self: *const ISensor, pFriendlyName: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetProperty: fn( self: *const ISensor, key: ?*const PROPERTYKEY, pProperty: ?*PROPVARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetProperties: fn( self: *const ISensor, pKeys: ?*IPortableDeviceKeyCollection, ppProperties: ?*?*IPortableDeviceValues, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSupportedDataFields: fn( self: *const ISensor, ppDataFields: ?*?*IPortableDeviceKeyCollection, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetProperties: fn( self: *const ISensor, pProperties: ?*IPortableDeviceValues, ppResults: ?*?*IPortableDeviceValues, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SupportsDataField: fn( self: *const ISensor, key: ?*const PROPERTYKEY, pIsSupported: ?*i16, ) callconv(@import("std").os.windows.WINAPI) 
HRESULT, GetState: fn( self: *const ISensor, pState: ?*SensorState, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetData: fn( self: *const ISensor, ppDataReport: ?*?*ISensorDataReport, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SupportsEvent: fn( self: *const ISensor, eventGuid: ?*const Guid, pIsSupported: ?*i16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetEventInterest: fn( self: *const ISensor, ppValues: ?[*]?*Guid, pCount: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetEventInterest: fn( self: *const ISensor, pValues: ?[*]Guid, count: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetEventSink: fn( self: *const ISensor, pEvents: ?*ISensorEvents, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_GetID(self: *const T, pID: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).GetID(@ptrCast(*const ISensor, self), pID); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_GetCategory(self: *const T, pSensorCategory: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).GetCategory(@ptrCast(*const ISensor, self), pSensorCategory); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_GetType(self: *const T, pSensorType: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).GetType(@ptrCast(*const ISensor, self), pSensorType); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_GetFriendlyName(self: *const T, pFriendlyName: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).GetFriendlyName(@ptrCast(*const ISensor, self), pFriendlyName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_GetProperty(self: *const T, key: ?*const PROPERTYKEY, pProperty: ?*PROPVARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).GetProperty(@ptrCast(*const ISensor, self), key, pProperty); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_GetProperties(self: *const T, pKeys: ?*IPortableDeviceKeyCollection, ppProperties: ?*?*IPortableDeviceValues) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).GetProperties(@ptrCast(*const ISensor, self), pKeys, ppProperties); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_GetSupportedDataFields(self: *const T, ppDataFields: ?*?*IPortableDeviceKeyCollection) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).GetSupportedDataFields(@ptrCast(*const ISensor, self), ppDataFields); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_SetProperties(self: *const T, pProperties: ?*IPortableDeviceValues, ppResults: ?*?*IPortableDeviceValues) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).SetProperties(@ptrCast(*const ISensor, self), pProperties, ppResults); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_SupportsDataField(self: *const T, key: ?*const PROPERTYKEY, pIsSupported: ?*i16) callconv(.Inline) 
HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).SupportsDataField(@ptrCast(*const ISensor, self), key, pIsSupported); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_GetState(self: *const T, pState: ?*SensorState) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).GetState(@ptrCast(*const ISensor, self), pState); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_GetData(self: *const T, ppDataReport: ?*?*ISensorDataReport) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).GetData(@ptrCast(*const ISensor, self), ppDataReport); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_SupportsEvent(self: *const T, eventGuid: ?*const Guid, pIsSupported: ?*i16) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).SupportsEvent(@ptrCast(*const ISensor, self), eventGuid, pIsSupported); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_GetEventInterest(self: *const T, ppValues: ?[*]?*Guid, pCount: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).GetEventInterest(@ptrCast(*const ISensor, self), ppValues, pCount); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_SetEventInterest(self: *const T, pValues: ?[*]Guid, count: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).SetEventInterest(@ptrCast(*const ISensor, self), pValues, count); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensor_SetEventSink(self: *const T, pEvents: ?*ISensorEvents) callconv(.Inline) HRESULT { return @ptrCast(*const ISensor.VTable, self.vtable).SetEventSink(@ptrCast(*const ISensor, self), pEvents); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISensorDataReport_Value = @import("../zig.zig").Guid.initString("0ab9df9b-c4b5-4796-8898-0470706a2e1d"); pub const IID_ISensorDataReport = &IID_ISensorDataReport_Value; pub const ISensorDataReport = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetTimestamp: fn( self: *const ISensorDataReport, pTimeStamp: ?*SYSTEMTIME, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSensorValue: fn( self: *const ISensorDataReport, pKey: ?*const PROPERTYKEY, pValue: ?*PROPVARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSensorValues: fn( self: *const ISensorDataReport, pKeys: ?*IPortableDeviceKeyCollection, ppValues: ?*?*IPortableDeviceValues, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorDataReport_GetTimestamp(self: *const T, pTimeStamp: ?*SYSTEMTIME) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorDataReport.VTable, self.vtable).GetTimestamp(@ptrCast(*const ISensorDataReport, self), pTimeStamp); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorDataReport_GetSensorValue(self: *const T, pKey: ?*const PROPERTYKEY, pValue: ?*PROPVARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorDataReport.VTable, self.vtable).GetSensorValue(@ptrCast(*const ISensorDataReport, self), 
pKey, pValue); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorDataReport_GetSensorValues(self: *const T, pKeys: ?*IPortableDeviceKeyCollection, ppValues: ?*?*IPortableDeviceValues) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorDataReport.VTable, self.vtable).GetSensorValues(@ptrCast(*const ISensorDataReport, self), pKeys, ppValues); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISensorManagerEvents_Value = @import("../zig.zig").Guid.initString("9b3b0b86-266a-4aad-b21f-fde5501001b7"); pub const IID_ISensorManagerEvents = &IID_ISensorManagerEvents_Value; pub const ISensorManagerEvents = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, OnSensorEnter: fn( self: *const ISensorManagerEvents, pSensor: ?*ISensor, state: SensorState, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorManagerEvents_OnSensorEnter(self: *const T, pSensor: ?*ISensor, state: SensorState) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorManagerEvents.VTable, self.vtable).OnSensorEnter(@ptrCast(*const ISensorManagerEvents, self), pSensor, state); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows6.1' const IID_ISensorEvents_Value = @import("../zig.zig").Guid.initString("5d8dcc91-4641-47e7-b7c3-b74f48a6c391"); pub const IID_ISensorEvents = &IID_ISensorEvents_Value; pub const ISensorEvents = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, OnStateChanged: fn( self: *const ISensorEvents, pSensor: ?*ISensor, state: SensorState, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnDataUpdated: fn( self: *const ISensorEvents, pSensor: ?*ISensor, pNewData: ?*ISensorDataReport, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnEvent: fn( self: *const ISensorEvents, pSensor: ?*ISensor, eventID: ?*const Guid, pEventData: ?*IPortableDeviceValues, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnLeave: fn( self: *const ISensorEvents, ID: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorEvents_OnStateChanged(self: *const T, pSensor: ?*ISensor, state: SensorState) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorEvents.VTable, self.vtable).OnStateChanged(@ptrCast(*const ISensorEvents, self), pSensor, state); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorEvents_OnDataUpdated(self: *const T, pSensor: ?*ISensor, pNewData: ?*ISensorDataReport) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorEvents.VTable, self.vtable).OnDataUpdated(@ptrCast(*const ISensorEvents, self), pSensor, pNewData); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISensorEvents_OnEvent(self: *const T, pSensor: ?*ISensor, eventID: ?*const Guid, pEventData: ?*IPortableDeviceValues) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorEvents.VTable, self.vtable).OnEvent(@ptrCast(*const ISensorEvents, self), pSensor, eventID, pEventData); } // NOTE: method is 
namespaced with interface name to avoid conflicts for now pub fn ISensorEvents_OnLeave(self: *const T, ID: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const ISensorEvents.VTable, self.vtable).OnLeave(@ptrCast(*const ISensorEvents, self), ID); } };} pub usingnamespace MethodMixin(@This()); }; //-------------------------------------------------------------------------------- // Section: Functions (0) //-------------------------------------------------------------------------------- //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (11) //-------------------------------------------------------------------------------- const Guid = @import("../zig.zig").Guid; const BOOL = @import("../foundation.zig").BOOL; const BSTR = @import("../foundation.zig").BSTR; const HRESULT = @import("../foundation.zig").HRESULT; const HWND = @import("../foundation.zig").HWND; const IPortableDeviceKeyCollection = @import("../devices/portable_devices.zig").IPortableDeviceKeyCollection; const IPortableDeviceValues = @import("../devices/portable_devices.zig").IPortableDeviceValues; const IUnknown = @import("../system/com.zig").IUnknown; const PROPERTYKEY = @import("../system/properties_system.zig").PROPERTYKEY; const PROPVARIANT = @import("../storage/structured_storage.zig").PROPVARIANT; const SYSTEMTIME = @import("../foundation.zig").SYSTEMTIME; test { @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
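// Usage sketch (illustrative only; assumes an ISensorManager instance `manager`
// has already been obtained through COM activation with CLSID_SensorManager and
// IID_ISensorManager, which is outside the scope of these bindings). Calls go
// through the MethodMixin wrappers, which forward to the vtable:
//
//     var category = SENSOR_CATEGORY_ALL;
//     var sensors: ?*ISensorCollection = null;
//     const hr = manager.ISensorManager_GetSensorsByCategory(&category, &sensors);
//     if (hr < 0) return error.GetSensorsFailed; // negative HRESULT signals failure
//     var count: u32 = 0;
//     _ = sensors.?.ISensorCollection_GetCount(&count);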
deps/zigwin32/win32/devices/sensors.zig
const std = @import("std"); const utils = @import("utils.zig"); const epsilonEq = @import("utils.zig").epsilonEq; const vector = @import("vector.zig"); const Vec4 = vector.Vec4; pub const Mat2 = struct { const Self = @This(); pub const Size = 2; mat: [Size][Size]f64 = undefined, pub fn eql(self: Self, other: Self) bool { const a = self.mat; const b = other.mat; return epsilonEq(a[0][0], b[0][0]) and epsilonEq(a[0][1], b[0][1]) and epsilonEq(a[1][0], b[1][0]) and epsilonEq(a[1][1], b[1][1]); } pub fn determinant(self: Self) f64 { const m = self.mat; return m[0][0] * m[1][1] - m[0][1] * m[1][0]; } pub fn at(self: Self, row: usize, col: usize) f64 { return self.mat[row][col]; } }; pub const Mat3 = struct { const Self = @This(); pub const Size = 3; mat: [Size][Size]f64 = undefined, pub fn at(self: Self, row: usize, col: usize) f64 { return self.mat[row][col]; } pub fn eql(self: Self, other: Self) bool { for (self.mat) |row_values, row| { for (row_values) |val, col| { if (!epsilonEq(val, other.mat[row][col])) return false; } } return true; } pub fn submatrix(self: Self, row: usize, col: usize) Mat2 { var res = Mat2{}; var src_y: usize = 0; var dst_y: usize = 0; while (src_y < 3) : (src_y += 1) { if (src_y == row) continue; var src_x: usize = 0; var dst_x: usize = 0; while (src_x < 3) : (src_x += 1) { if (src_x == col) continue; res.mat[dst_y][dst_x] = self.mat[src_y][src_x]; dst_x += 1; } dst_y += 1; } return res; } pub fn minor(self: Self, row: usize, col: usize) f64 { const sub = self.submatrix(row, col); return sub.determinant(); } pub fn cofactor(self: Self, row: usize, col: usize) f64 { const det = self.minor(row, col); const is_odd = ((row + col) % 2 != 0); return if (is_odd) -det else det; } pub fn determinant(self: Self) f64 { var det: f64 = 0; for (self.mat[0]) |val, col| { det += val * self.cofactor(0, col); } return det; } }; pub const Mat4 = struct { const Self = @This(); pub const Size = 4; mat: [Size][Size]f64 = undefined, pub fn identity() Self { return .{ .mat = .{ .{ 1, 0, 0, 0 }, .{ 0, 1, 0, 0 }, .{ 0, 0, 1, 0 }, .{ 0, 0, 0, 1 }, }, }; } pub fn translate(self: Self, x: f64, y: f64, z: f64) Self { return (Self{ .mat = .{ .{ 1, 0, 0, x }, .{ 0, 1, 0, y }, .{ 0, 0, 1, z }, .{ 0, 0, 0, 1 }, }, }).mult(self); } pub fn scale(self: Self, x: f64, y: f64, z: f64) Self { return (Self{ .mat = .{ .{ x, 0, 0, 0 }, .{ 0, y, 0, 0 }, .{ 0, 0, z, 0 }, .{ 0, 0, 0, 1 }, }, }).mult(self); } pub fn rotateX(self: Self, rad: f64) Self { return (Self{ .mat = .{ .{ 1, 0, 0, 0 }, .{ 0, std.math.cos(rad), -std.math.sin(rad), 0 }, .{ 0, std.math.sin(rad), std.math.cos(rad), 0 }, .{ 0, 0, 0, 1 }, }, }).mult(self); } pub fn rotateY(self: Self, rad: f64) Self { return (Self{ .mat = .{ .{ std.math.cos(rad), 0, std.math.sin(rad), 0 }, .{ 0, 1, 0, 0 }, .{ -std.math.sin(rad), 0, std.math.cos(rad), 0 }, .{ 0, 0, 0, 1 }, }, }).mult(self); } pub fn rotateZ(self: Self, rad: f64) Self { return (Self{ .mat = .{ .{ std.math.cos(rad), -std.math.sin(rad), 0, 0 }, .{ std.math.sin(rad), std.math.cos(rad), 0, 0 }, .{ 0, 0, 1, 0 }, .{ 0, 0, 0, 1 }, }, }).mult(self); } pub fn shear(self: Self, xy: f64, xz: f64, yx: f64, yz: f64, zx: f64, zy: f64) Self { return (Self{ .mat = .{ .{ 1, xy, xz, 0 }, .{ yx, 1, yz, 0 }, .{ zx, zy, 1, 0 }, .{ 0, 0, 0, 1 }, }, }).mult(self); } pub fn at(self: Self, row: usize, col: usize) f64 { return self.mat[row][col]; } pub fn eql(self: Self, other: Self) bool { for (self.mat) |row_values, row| { for (row_values) |val, col| { if (!epsilonEq(val, other.mat[row][col])) return false; } } return 
true; } pub fn mult(self: Self, other: Self) Self { const a = self.mat; const b = other.mat; var res = Self{}; for (res.mat) |*row_values, row| { for (row_values) |*val, col| { val.* = a[row][0] * b[0][col] + a[row][1] * b[1][col] + a[row][2] * b[2][col] + a[row][3] * b[3][col]; } } return res; } pub fn multVec(self: Self, vec: Vec4) Vec4 { const m = self.mat; return Vec4.init( m[0][0] * vec.x + m[0][1] * vec.y + m[0][2] * vec.z + m[0][3] * vec.w, m[1][0] * vec.x + m[1][1] * vec.y + m[1][2] * vec.z + m[1][3] * vec.w, m[2][0] * vec.x + m[2][1] * vec.y + m[2][2] * vec.z + m[2][3] * vec.w, m[3][0] * vec.x + m[3][1] * vec.y + m[3][2] * vec.z + m[3][3] * vec.w, ); } pub fn transpose(self: Self) Self { var res = Self{}; for (res.mat) |*row_values, row| { for (row_values) |*val, col| { val.* = self.mat[col][row]; } } return res; } pub fn submatrix(self: Self, row: usize, col: usize) Mat3 { var res = Mat3{}; var src_y: usize = 0; var dst_y: usize = 0; while (src_y < 4) : (src_y += 1) { if (src_y == row) continue; var src_x: usize = 0; var dst_x: usize = 0; while (src_x < 4) : (src_x += 1) { if (src_x == col) continue; res.mat[dst_y][dst_x] = self.mat[src_y][src_x]; dst_x += 1; } dst_y += 1; } return res; } pub fn minor(self: Self, row: usize, col: usize) f64 { const sub = self.submatrix(row, col); return sub.determinant(); } pub fn cofactor(self: Self, row: usize, col: usize) f64 { const det = self.minor(row, col); const is_odd = ((row + col) % 2 != 0); return if (is_odd) -det else det; } pub fn determinant(self: Self) f64 { var det: f64 = 0; for (self.mat[0]) |val, col| { det += val * self.cofactor(0, col); } return det; } pub fn isInvertible(self: Self) bool { return !epsilonEq(self.determinant(), 0); } pub fn inverse(self: Self) Self { const det = self.determinant(); std.debug.assert(!epsilonEq(det, 0.0)); var res = Mat4{}; for (res.mat) |*row_values, row| { for (row_values) |*val, col| { const c = self.cofactor(col, row); // swapped to transpose end result val.* = c / det; } } return res; } }; test "a 2x2 matrix ought to be representable" { const m = Mat2{ .mat = .{ .{ -3, 5 }, .{ 1, -2 }, }, }; try utils.expectEpsilonEq(m.at(0, 0), -3); try utils.expectEpsilonEq(m.at(0, 1), 5); try utils.expectEpsilonEq(m.at(1, 0), 1); try utils.expectEpsilonEq(m.at(1, 1), -2); } test "a 3x3 matrix ought to be representable" { const m = Mat3{ .mat = .{ .{ -3, 5, 0 }, .{ 1, -2, -7 }, .{ 0, 1, 1 }, }, }; try utils.expectEpsilonEq(m.at(0, 0), -3); try utils.expectEpsilonEq(m.at(1, 1), -2); try utils.expectEpsilonEq(m.at(2, 2), 1); } test "constructing and inspecting a 4x4 matrix" { const m = Mat4{ .mat = .{ .{ 1, 2, 3, 4 }, .{ 5.5, 6.5, 7.5, 8.5 }, .{ 9, 10, 11, 12 }, .{ 13.5, 14.5, 15.5, 16.5 }, }, }; try utils.expectEpsilonEq(m.at(0, 0), 1); try utils.expectEpsilonEq(m.at(0, 3), 4); try utils.expectEpsilonEq(m.at(1, 0), 5.5); try utils.expectEpsilonEq(m.at(1, 2), 7.5); try utils.expectEpsilonEq(m.at(2, 2), 11); try utils.expectEpsilonEq(m.at(3, 0), 13.5); try utils.expectEpsilonEq(m.at(3, 2), 15.5); } test "matrix equality with identical matrices" { const a = Mat4{ .mat = .{ .{ 1, 2, 3, 4 }, .{ 5, 6, 7, 8 }, .{ 9, 8, 7, 6 }, .{ 5, 4, 3, 2 }, }, }; const b = Mat4{ .mat = .{ .{ 1, 2, 3, 4 }, .{ 5, 6, 7, 8 }, .{ 9, 8, 7, 6 }, .{ 5, 4, 3, 2 }, }, }; try std.testing.expect(a.eql(b) == true); } test "matrix equality with different matrices" { const a = Mat4{ .mat = .{ .{ 1, 2, 3, 4 }, .{ 5, 6, 7, 8 }, .{ 9, 8, 7, 6 }, .{ 5, 4, 3, 2 }, }, }; const b = Mat4{ .mat = .{ .{ 1, 2, 3, 4 }, .{ 5, 6, 7, 8 }, .{ 9, 8, 7, 5 }, 
.{ 5, 4, 3, 2 }, }, }; try std.testing.expect(a.eql(b) == false); } test "multiplying two matrices" { const a = Mat4{ .mat = .{ .{ 1, 2, 3, 4 }, .{ 5, 6, 7, 8 }, .{ 9, 8, 7, 6 }, .{ 5, 4, 3, 2 }, }, }; const b = Mat4{ .mat = .{ .{ -2, 1, 2, 3 }, .{ 3, 2, 1, -1 }, .{ 4, 3, 6, 5 }, .{ 1, 2, 7, 8 }, }, }; const result = a.mult(b); const expected = Mat4{ .mat = .{ .{ 20, 22, 50, 48 }, .{ 44, 54, 114, 108 }, .{ 40, 58, 110, 102 }, .{ 16, 26, 46, 42 }, }, }; try std.testing.expect(result.eql(expected)); } test "a matrix multiplied by a tuple" { const a = Mat4{ .mat = .{ .{ 1, 2, 3, 4 }, .{ 2, 4, 4, 2 }, .{ 8, 6, 4, 1 }, .{ 0, 0, 0, 1 }, }, }; const b = Vec4.init(1, 2, 3, 1); const result = a.multVec(b); try std.testing.expect(result.eql(Vec4.init(18, 24, 33, 1))); } test "multiplying a matrix by the identity matrix" { const a = Mat4{ .mat = .{ .{ 0, 1, 2, 4 }, .{ 1, 2, 4, 8 }, .{ 2, 4, 8, 16 }, .{ 4, 8, 16, 32 }, }, }; const identity_matrix = Mat4.identity(); try std.testing.expect(a.mult(identity_matrix).eql(a)); } test "transposing a matrix" { const a = Mat4{ .mat = .{ .{ 0, 9, 3, 0 }, .{ 9, 8, 0, 8 }, .{ 1, 8, 5, 3 }, .{ 0, 0, 5, 8 }, }, }; const expected = Mat4{ .mat = .{ .{ 0, 9, 1, 0 }, .{ 9, 8, 8, 0 }, .{ 3, 0, 5, 5 }, .{ 0, 8, 3, 8 }, }, }; try std.testing.expect(a.transpose().eql(expected)); } test "calculating the determinant of a 2x2 matrix" { const m = Mat2{ .mat = .{ .{ 1, 5 }, .{ -3, 2 }, }, }; try utils.expectEpsilonEq(m.determinant(), 17.0); } test "a submatrix of a 3x3 matrix is a 2x2 matrix" { const a = Mat3{ .mat = .{ .{ 1, 5, 0 }, .{ -3, 2, 7 }, .{ 0, 6, -3 }, }, }; const expected = Mat2{ .mat = .{ .{ -3, 2 }, .{ 0, 6 }, }, }; try std.testing.expect(a.submatrix(0, 2).eql(expected)); } test "a submatrix of a 4x4 matrix is a 3x3 matrix" { const a = Mat4{ .mat = .{ .{ -6, 1, 1, 6 }, .{ -8, 5, 8, 6 }, .{ -1, 0, 8, 2 }, .{ -7, 1, -1, 1 }, }, }; const expected = Mat3{ .mat = .{ .{ -6, 1, 6 }, .{ -8, 8, 6 }, .{ -7, -1, 1 }, }, }; try std.testing.expect(a.submatrix(2, 1).eql(expected)); } test "calculating a minor of a 3x3 matrix" { const a = Mat3{ .mat = .{ .{ 3, 5, 0 }, .{ 2, -1, -7 }, .{ 6, -1, 5 }, }, }; try utils.expectEpsilonEq(a.minor(1, 0), 25.0); } test "calculating a cofactor of a 3x3 matrix" { const a = Mat3{ .mat = .{ .{ 3, 5, 0 }, .{ 2, -1, -7 }, .{ 6, -1, 5 }, }, }; try utils.expectEpsilonEq(a.minor(0, 0), -12); try utils.expectEpsilonEq(a.cofactor(0, 0), -12); try utils.expectEpsilonEq(a.minor(1, 0), 25); try utils.expectEpsilonEq(a.cofactor(1, 0), -25); } test "calculating the determinant of a 3x3 matrix" { const a = Mat3{ .mat = .{ .{ 1, 2, 6 }, .{ -5, 8, -4 }, .{ 2, 6, 4 }, }, }; try utils.expectEpsilonEq(a.cofactor(0, 0), 56); try utils.expectEpsilonEq(a.cofactor(0, 1), 12); try utils.expectEpsilonEq(a.cofactor(0, 2), -46); try utils.expectEpsilonEq(a.determinant(), -196); } test "calculating the determinant of a 4x4 matrix" { const a = Mat4{ .mat = .{ .{ -2, -8, 3, 5 }, .{ -3, 1, 7, 3 }, .{ 1, 2, -9, 6 }, .{ -6, 7, 7, -9 }, }, }; try utils.expectEpsilonEq(a.cofactor(0, 0), 690); try utils.expectEpsilonEq(a.cofactor(0, 1), 447); try utils.expectEpsilonEq(a.cofactor(0, 2), 210); try utils.expectEpsilonEq(a.cofactor(0, 3), 51); try utils.expectEpsilonEq(a.determinant(), -4071); } test "testing an invertible matrix for invertibility" { const a = Mat4{ .mat = .{ .{ 6, 4, 4, 4 }, .{ 5, 5, 7, 6 }, .{ 4, -9, 3, -7 }, .{ 9, 1, 7, -6 }, }, }; try utils.expectEpsilonEq(a.determinant(), -2120); try std.testing.expect(a.isInvertible()); } test "testing a noninvertible matrix 
for invertibility" { const a = Mat4{ .mat = .{ .{ -4, 2, -2, -3 }, .{ 9, 6, 2, 6 }, .{ 0, -5, 1, -5 }, .{ 0, 0, 0, 0 }, }, }; try utils.expectEpsilonEq(a.determinant(), 0); try std.testing.expect(!a.isInvertible()); } test "calculating the inverse of a matrix" { const a = Mat4{ .mat = .{ .{ -5, 2, 6, -8 }, .{ 1, -5, 1, 8 }, .{ 7, 7, -6, -7 }, .{ 1, -3, 7, 4 }, }, }; const b = a.inverse(); const expected = Mat4{ .mat = .{ .{ 0.21805, 0.45113, 0.24060, -0.04511 }, .{ -0.80827, -1.45677, -0.44361, 0.52068 }, .{ -0.07895, -0.22368, -0.05263, 0.19737 }, .{ -0.52256, -0.81391, -0.30075, 0.30639 }, }, }; try utils.expectEpsilonEq(a.determinant(), 532); try utils.expectEpsilonEq(a.cofactor(2, 3), -160); try utils.expectEpsilonEq(b.at(3, 2), -160.0 / 532.0); try utils.expectEpsilonEq(a.cofactor(3, 2), 105); try utils.expectEpsilonEq(b.at(2, 3), 105.0 / 532.0); try utils.expectMatrixApproxEq(b, expected); } test "calculating the inverse of another matrix" { const a = Mat4{ .mat = .{ .{ 8, -5, 9, 2 }, .{ 7, 5, 6, 1 }, .{ -6, 0, 9, 6 }, .{ -3, 0, -9, -4 }, }, }; const expected = Mat4{ .mat = .{ .{ -0.15385, -0.15385, -0.28205, -0.53846 }, .{ -0.07692, 0.12308, 0.02564, 0.03077 }, .{ 0.35897, 0.35897, 0.43590, 0.92308 }, .{ -0.69231, -0.69231, -0.76923, -1.92308 }, }, }; try utils.expectMatrixApproxEq(a.inverse(), expected); } test "calculating the inverse of a third matrix" { const a = Mat4{ .mat = .{ .{ 9, 3, 0, 9 }, .{ -5, -2, -6, -3 }, .{ -4, 9, 6, 4 }, .{ -7, 6, 6, 2 }, }, }; const expected = Mat4{ .mat = .{ .{ -0.04074, -0.07778, 0.14444, -0.22222 }, .{ -0.07778, 0.03333, 0.36667, -0.33333 }, .{ -0.02901, -0.14630, -0.10926, 0.12963 }, .{ 0.17778, 0.06667, -0.26667, 0.33333 }, }, }; try utils.expectMatrixApproxEq(a.inverse(), expected); } test "multiplying a product by its inverse" { const a = Mat4{ .mat = .{ .{ 3, -9, 7, 3 }, .{ 3, -8, 2, -9 }, .{ -4, 4, 4, 1 }, .{ -6, 5, -1, 1 }, }, }; const b = Mat4{ .mat = .{ .{ 8, 2, 2, 2 }, .{ 3, -1, 7, 0 }, .{ 7, 0, 5, 4 }, .{ 6, -2, 0, 5 }, }, }; const c = a.mult(b); const result = c.mult(b.inverse()); try utils.expectMatrixApproxEq(a, result); } test "multiplying by a translation matrix" { const transform = Mat4.identity().translate(5, -3, 2); const p = vector.initPoint(-3, 4, 5); try std.testing.expect(transform.multVec(p).eql(vector.initPoint(2, 1, 7))); } test "multiplying by the inverse of a translation matrix" { const transform = Mat4.identity().translate(5, -3, 2); const inv = transform.inverse(); const p = vector.initPoint(-3, 4, 5); try std.testing.expect(inv.multVec(p).eql(vector.initPoint(-8, 7, 3))); } test "translation does not affect vectors" { const transform = Mat4.identity().translate(5, -3, 2); const v = vector.initVector(-3, 4, 5); try std.testing.expect(transform.multVec(v).eql(v)); } test "a scaling matrix applied to a point" { const transform = Mat4.identity().scale(2, 3, 4); const p = vector.initPoint(-4, 6, 8); try std.testing.expect(transform.multVec(p).eql(vector.initPoint(-8, 18, 32))); } test "a scaling matrix applied to a vector" { const transform = Mat4.identity().scale(2, 3, 4); const v = vector.initVector(-4, 6, 8); try std.testing.expect(transform.multVec(v).eql(vector.initVector(-8, 18, 32))); } test "multiplying by the inverse of a scaling matrix" { const transform = Mat4.identity().scale(2, 3, 4); const inv = transform.inverse(); const v = vector.initVector(-4, 6, 8); try std.testing.expect(inv.multVec(v).eql(vector.initVector(-2, 2, 2))); } test "reflection is scaling by a negative value" { const transform = 
Mat4.identity().scale(-1, 1, 1); const p = vector.initPoint(2, 3, 4); try std.testing.expect(transform.multVec(p).eql(vector.initPoint(-2, 3, 4))); } test "rotating a point around the x axis" { const p = vector.initPoint(0, 1, 0); const half_quarter = Mat4.identity().rotateX(std.math.pi / 4.0); const full_quarter = Mat4.identity().rotateX(std.math.pi / 2.0); try std.testing.expect(half_quarter.multVec(p).eql(vector.initPoint(0, std.math.sqrt(2.0) / 2.0, std.math.sqrt(2.0) / 2.0))); try std.testing.expect(full_quarter.multVec(p).eql(vector.initPoint(0, 0, 1))); } test "the inverse of an x-rotation rotates in the opposite direction" { const p = vector.initPoint(0, 1, 0); const half_quarter = Mat4.identity().rotateX(std.math.pi / 4.0); const inv = half_quarter.inverse(); try std.testing.expect(inv.multVec(p).eql(vector.initPoint(0, std.math.sqrt(2.0) / 2.0, -std.math.sqrt(2.0) / 2.0))); } test "rotating a point around the y axis" { const p = vector.initPoint(0, 0, 1); const half_quarter = Mat4.identity().rotateY(std.math.pi / 4.0); const full_quarter = Mat4.identity().rotateY(std.math.pi / 2.0); try std.testing.expect(half_quarter.multVec(p).eql(vector.initPoint(std.math.sqrt(2.0) / 2.0, 0, std.math.sqrt(2.0) / 2.0))); try std.testing.expect(full_quarter.multVec(p).eql(vector.initPoint(1, 0, 0))); } test "rotating a point around the z axis" { const p = vector.initPoint(0, 1, 0); const half_quarter = Mat4.identity().rotateZ(std.math.pi / 4.0); const full_quarter = Mat4.identity().rotateZ(std.math.pi / 2.0); try std.testing.expect(half_quarter.multVec(p).eql(vector.initPoint(-std.math.sqrt(2.0) / 2.0, std.math.sqrt(2.0) / 2.0, 0))); try std.testing.expect(full_quarter.multVec(p).eql(vector.initPoint(-1, 0, 0))); } test "shearing" { const p = vector.initPoint(2, 3, 4); { // x moves in proportion to y const transform = Mat4.identity().shear(1, 0, 0, 0, 0, 0); try std.testing.expect(transform.multVec(p).eql(vector.initPoint(5, 3, 4))); } { // x moves in proportion to z const transform = Mat4.identity().shear(0, 1, 0, 0, 0, 0); try std.testing.expect(transform.multVec(p).eql(vector.initPoint(6, 3, 4))); } { // y moves in proportion to x const transform = Mat4.identity().shear(0, 0, 1, 0, 0, 0); try std.testing.expect(transform.multVec(p).eql(vector.initPoint(2, 5, 4))); } { // y moves in proportion to z const transform = Mat4.identity().shear(0, 0, 0, 1, 0, 0); try std.testing.expect(transform.multVec(p).eql(vector.initPoint(2, 7, 4))); } { // z moves in proportion to x const transform = Mat4.identity().shear(0, 0, 0, 0, 1, 0); try std.testing.expect(transform.multVec(p).eql(vector.initPoint(2, 3, 6))); } { // z moves in proportion to y const transform = Mat4.identity().shear(0, 0, 0, 0, 0, 1); try std.testing.expect(transform.multVec(p).eql(vector.initPoint(2, 3, 7))); } } test "individual transformations are applied in sequence" { const p = vector.initPoint(1, 0, 1); const a = Mat4.identity().rotateX(std.math.pi / 2.0); const b = Mat4.identity().scale(5, 5, 5); const c = Mat4.identity().translate(10, 5, 7); const p2 = a.multVec(p); const p3 = b.multVec(p2); const p4 = c.multVec(p3); try std.testing.expect(p4.eql(vector.initPoint(15, 0, 7))); } test "chained transformations must be applied in reverse order" { const p = vector.initPoint(1, 0, 1); { const a = Mat4.identity().rotateX(std.math.pi / 2.0); const b = Mat4.identity().scale(5, 5, 5); const c = Mat4.identity().translate(10, 5, 7); const t = c.mult(b).mult(a); try std.testing.expect(t.multVec(p).eql(vector.initPoint(15, 0, 7))); } { // 
fluent API const t = Mat4.identity() .rotateX(std.math.pi / 2.0) .scale(5, 5, 5) .translate(10, 5, 7); try std.testing.expect(t.multVec(p).eql(vector.initPoint(15, 0, 7))); } }
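// Illustrative sketch: a minimal extra test exercising only the functions defined
// above (identity, determinant, inverse, eql); the identity matrix has determinant 1
// and is its own inverse.
test "the identity matrix is its own inverse" {
    const m = Mat4.identity();
    try utils.expectEpsilonEq(m.determinant(), 1.0);
    try std.testing.expect(m.inverse().eql(m));
}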
matrix.zig
const std = @import("std"); const interpreter = @import("interpreter.zig"); const intrinsics = @import("intrinsics.zig"); const mem = @import("gc.zig"); const sourcelocation = @import("sourcelocation.zig"); const SourceLocation = sourcelocation.SourceLocation; pub const ExprErrors = error{ AlreadyReported, MissingRightParen, UnexpectedRightParen, ExpectedNumber, InvalidArgumentType, InvalidArgumentCount, SyntaxError, Eof }; pub const ExprType = enum { sym, num, lst, map, lam, mac, fun, env, err, any }; pub const ExprValue = union(ExprType) { sym: []const u8, num: f64, lst: std.ArrayList(*Expr), map: std.ArrayHashMap(*Expr, *Expr, Expr.HashUtil, true), lam: std.ArrayList(*Expr), mac: std.ArrayList(*Expr), fun: fn (evaluator: *interpreter.Interpreter, env: *Env, []const *Expr) anyerror!*Expr, env: *Env, err: *Expr, any: usize, }; /// A Bio expression with a value and an optional environment (lambda expressions /// must know in which environment they were defined) pub const Expr = struct { /// Hashmap support for Expr pub const HashUtil = struct { pub fn hash(_: HashUtil, key: *Expr) u32 { if (key.val == ExprType.sym) { return @truncate(u32, std.hash.Wyhash.hash(0, key.val.sym)); } else if (key.val == ExprType.num) { return @truncate(u32, @floatToInt(u64, key.val.num)); } @panic("Invalid hash key type"); } pub fn eql(_: HashUtil, first: *Expr, second: *Expr) bool { if (first.val == ExprType.sym) { return std.mem.eql(u8, first.val.sym, second.val.sym); } else if (first.val == ExprType.num) { return first.val.num == second.val.num; } @panic("Invalid hash key type"); } }; val: ExprValue, env: ?*Env = null, src: SourceLocation = .{}, /// Create a new expression with an undefined value pub fn create(register_with_gc: bool) !*@This() { var self = try mem.allocator.create(Expr); if (register_with_gc) { mem.gc.inc(); try mem.gc.registered_expr.append(self); } self.* = Expr{ .val = undefined }; self.src.file = SourceLocation.current().file; self.src.line = SourceLocation.current().line; self.src.col = SourceLocation.current().col; return self; } /// Called by the GC to clean up expression resources. Note that symbols /// are deallocated by sweeping the internalized string map. 
pub fn deinit(self: *@This()) void { if (self.val == ExprType.lst) { self.val.lst.deinit(); } else if (self.val == ExprType.map) { self.val.map.deinit(); } else if (self.val == ExprType.lam) { self.val.lam.deinit(); } else if (self.val == ExprType.mac) { self.val.mac.deinit(); } } /// Returns an owned string representation of this expression pub fn toStringAlloc(self: *@This()) anyerror![]u8 { switch (self.val) { ExprValue.sym => return try std.fmt.allocPrint(mem.allocator, "{s}", .{self.val.sym}), ExprValue.num => return try std.fmt.allocPrint(mem.allocator, "{d}", .{self.val.num}), ExprValue.lam => return try std.fmt.allocPrint(mem.allocator, "<lambda>", .{}), ExprValue.mac => return try std.fmt.allocPrint(mem.allocator, "<macro>", .{}), ExprValue.fun => return try std.fmt.allocPrint(mem.allocator, "<function>", .{}), ExprValue.env => return try std.fmt.allocPrint(mem.allocator, "<env>", .{}), ExprValue.any => return try std.fmt.allocPrint(mem.allocator, "<any>", .{}), ExprValue.err => |err_expr| { const err_str = try err_expr.toStringAlloc(); defer mem.allocator.free(err_str); return try std.fmt.allocPrint(mem.allocator, "{s}", .{err_str}); }, ExprValue.lst => |lst| { var buf = std.ArrayList(u8).init(mem.allocator); defer buf.deinit(); var bufWriter = buf.writer(); try bufWriter.writeAll("("); for (lst.items) |item, index| { const itemBuf = try item.toStringAlloc(); defer mem.allocator.free(itemBuf); try bufWriter.writeAll(itemBuf); if (index + 1 < lst.items.len) { try bufWriter.writeAll(" "); } } try bufWriter.writeAll(")"); return buf.toOwnedSlice(); }, ExprValue.map => |map| { var buf = std.ArrayList(u8).init(mem.allocator); defer buf.deinit(); var bufWriter = buf.writer(); // Output is ((key val)(key val)(key val)) try bufWriter.writeAll("("); var it = map.iterator(); while (it.next()) |entry| { const key = try entry.key_ptr.*.toStringAlloc(); defer mem.allocator.free(key); const val = try entry.value_ptr.*.toStringAlloc(); defer mem.allocator.free(val); try bufWriter.writeAll("("); try bufWriter.writeAll(key); try bufWriter.writeAll(" "); try bufWriter.writeAll(val); try bufWriter.writeAll(")"); } try bufWriter.writeAll(")"); return buf.toOwnedSlice(); }, } } /// Prints the expression to stdout pub fn print(self: *@This()) anyerror!void { const str = try self.toStringAlloc(); defer mem.allocator.free(str); try std.io.getStdOut().writer().print("{s}", .{str}); } }; /// Environment for variable bindings. Instances are named to get friendly debugging output. 
pub const Env = struct { // This is the same as Expr.HashUtil, but is duplicated here to dodge a compiler bug pub const HashUtil = struct { pub fn hash(_: HashUtil, key: *Expr) u32 { if (key.val == ExprType.sym) { return @truncate(u32, std.hash.Wyhash.hash(0, key.val.sym)); } else if (key.val == ExprType.num) { return @truncate(u32, @floatToInt(u64, key.val.num)); } @panic("Invalid hash key type"); } pub fn eql(_: HashUtil, first: *Expr, second: *Expr) bool { if (first.val == ExprType.sym) { return std.mem.eql(u8, first.val.sym, second.val.sym); } else if (first.val == ExprType.num) { return first.val.num == second.val.num; } @panic("Invalid hash key type"); } }; map: std.ArrayHashMap(*Expr, *Expr, Env.HashUtil, true), parent: ?*@This() = null, name: []const u8, pub fn deinit(self: *@This()) void { self.map.deinit(); } /// Put symbol/value, duplicating the key pub fn put(self: *@This(), key: []const u8, val: *Expr) !void { const binding_expr = try makeAtomByDuplicating(key); try self.putWithSymbol(binding_expr, val); } /// Put symbol/value pub fn putWithSymbol(self: *@This(), variable_name: *Expr, val: *Expr) anyerror!void { try self.map.put(variable_name, val); } /// Look up a variable in this or a parent environment pub fn lookup(self: *@This(), sym: []const u8, recursive: bool) ?*Expr { var lookupSym = Expr{ .val = ExprValue{ .sym = sym } }; if (self.map.get(&lookupSym)) |val| { return val; } else if (self.parent) |parent| { return if (recursive) parent.lookup(sym, recursive) else null; } else { return null; } } /// Recursively search for the binding, replace it if found. /// If the new value is null, the binding is removed instead. pub fn replace(self: *Env, var_name: *Expr, val: ?*Expr) *Expr { if (self.map.get(var_name)) |_| { if (val) |value| { self.putWithSymbol(var_name, value) catch return &intrinsics.expr_atom_nil; return value; } else { _ = self.map.swapRemove(var_name); } } else if (self.parent) |parent| { return parent.replace(var_name, val); } return &intrinsics.expr_atom_nil; } }; /// Make an environment expression pub fn makeEnv(parent: ?*Env, name: []const u8) !*Env { var environment = try mem.allocator.create(Env); environment.parent = parent; environment.map = @TypeOf(environment.map).init(mem.allocator); environment.name = name; try mem.gc.registered_envs.append(environment); return environment; } /// Duplicates the input and return an atom pub fn makeAtomByDuplicating(literal: []const u8) anyerror!*Expr { return try makeAtomImplementation(literal, false); } /// Takes ownership of the input and returns an atom pub fn makeAtomAndTakeOwnership(literal: []const u8) anyerror!*Expr { return try makeAtomImplementation(literal, true); } /// Make and return a potentially interned atom (symbol or number) fn makeAtomImplementation(literal: []const u8, take_ownership: bool) anyerror!*Expr { const intrinsic_atoms: []const *Expr = &.{ &intrinsics.expr_atom_quasi_quote, &intrinsics.expr_atom_quote, &intrinsics.expr_atom_unquote, &intrinsics.expr_atom_unquote_splicing, &intrinsics.expr_atom_list, &intrinsics.expr_atom_if, &intrinsics.expr_atom_cond, &intrinsics.expr_atom_begin, &intrinsics.expr_atom_nil, &intrinsics.expr_atom_rest, &intrinsics.expr_atom_mut, &intrinsics.expr_atom_true, &intrinsics.expr_atom_false, &intrinsics.expr_atom_last_eval, &intrinsics.expr_atom_last_try_err, &intrinsics.expr_atom_last_try_value, &intrinsics.expr_atom_break, }; // Lazy initialization of the interned intrinsics map if (mem.interned_intrinsics.count() == 0) { for (intrinsic_atoms) |atom| { try 
mem.interned_intrinsics.put(mem.allocator, atom.val.sym, atom); } } return mem.interned_intrinsics.get(literal) orelse { // Zig's parseFloat is too lenient and accepts input like "." and "--" // For Bio, we require at least one digit. if (std.mem.indexOfAny(u8, literal, "0123456789")) |_| { if (std.fmt.parseFloat(f64, literal)) |num| { defer { if (take_ownership) { mem.allocator.free(literal); } } const internalizable = @floor(num) == num and !std.math.isInf(num) and num > -1024 and num < 1024; if (internalizable) { if (mem.interned_nums.get(@floatToInt(i16, num))) |expr| { return expr; } } var expr = try Expr.create(false); expr.val = ExprValue{ .num = num }; if (internalizable) { try mem.interned_nums.put(mem.allocator, @floatToInt(i16, num), expr); } else { try mem.gc.registered_expr.append(expr); } return expr; } else |_| {} } return try makeAtomLiteral(literal, take_ownership); }; } /// Make an interned literal atom pub fn makeAtomLiteral(literal: []const u8, take_ownership: bool) anyerror!*Expr { var sym = sym_blk: { const maybe_entry = mem.interned_syms.getEntry(literal); if (maybe_entry) |entry| { // There's already an entry, free the input if we're supposed to take ownership if (take_ownership) { mem.allocator.free(literal); } break :sym_blk entry.key_ptr.*; } else { const res = if (take_ownership) literal else try mem.allocator.dupe(u8, literal); try mem.interned_syms.put(mem.allocator, res, {}); break :sym_blk res; } }; var expr = try Expr.create(true); expr.val = ExprValue{ .sym = sym }; return expr; } /// Make a list expression /// If `initial_expressions` is not null, each item is added *without evaluation* pub fn makeListExpr(initial_expressions: ?[]const *Expr) !*Expr { var expr = try Expr.create(true); expr.val = ExprValue{ .lst = std.ArrayList(*Expr).init(mem.allocator) }; if (initial_expressions) |expressions| { for (expressions) |e| { try expr.val.lst.append(e); } } return expr; } /// Make a hashmap expression /// If `initial_expressions` is not null, each item is added *without evaluation* pub fn makeHashmapExpr(initial_expressions: ?[]const *Expr) !*Expr { var expr = try Expr.create(true); expr.val = ExprValue{ .map = std.ArrayHashMap(*Expr, *Expr, Expr.HashUtil, true).init(mem.allocator) }; if (initial_expressions) |expressions| { for (expressions) |e| { try expr.val.map.put(e.val.lst.items[0], e.val.lst.items[1]); } } return expr; } /// Make a lambda expression pub fn makeLambdaExpr(env: *Env) !*Expr { var expr = try Expr.create(true); // This is a crucial detail: we're recording the environment that existed at the // time of lambda definition. This will be the parent environment whenever we // are invoking the lambda in Interpreter#eval expr.env = env; expr.val = ExprValue{ .lam = std.ArrayList(*Expr).init(mem.allocator) }; return expr; } /// Make a macro expression pub fn makeMacroExpr() !*Expr { var expr = try Expr.create(true); expr.val = ExprValue{ .mac = std.ArrayList(*Expr).init(mem.allocator) }; return expr; } /// Make a numeric expression pub fn makeNumExpr(num: f64) !*Expr { var expr = try Expr.create(true); expr.val = ExprValue{ .num = num }; return expr; } /// Make an error expression pub fn makeError(expr: *Expr) !*Expr { var error_expr = try Expr.create(true); error_expr.val = ExprValue{ .err = expr }; return error_expr; }
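// Illustrative sketch (assumes the allocator and GC in `mem` have been initialized
// elsewhere): lookups walk the parent chain, so a child environment sees bindings
// created in its parent.
//
//     var global = try makeEnv(null, "global");
//     var local = try makeEnv(global, "local");
//     try global.put("x", try makeNumExpr(42));
//     const found = local.lookup("x", true); // recursive lookup follows `parent`
//     std.debug.assert(found != null);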
src/ast.zig
const std = @import("std"); const stdx = @import("stdx"); const gl = @import("gl"); const graphics = @import("../../graphics.zig"); const Color = graphics.Color; const log = stdx.log.scoped(.mesh); const StartVertexBufferSize = 10000; const StartIndexBufferSize = StartVertexBufferSize * 4; const MaxVertexBufferSize = 10000 * 3; const MaxIndexBufferSize = MaxVertexBufferSize * 4; /// Vertex and index buffer. pub const Mesh = struct { index_buffer_type: gl.GLenum = gl.GL_UNSIGNED_SHORT, alloc: std.mem.Allocator, index_buf: []u16, cur_index_buf_size: u32, vert_buf: []TexShaderVertex, cur_vert_buf_size: u32, const Self = @This(); pub fn init(alloc: std.mem.Allocator) Self { const vertex_buf = alloc.alloc(TexShaderVertex, StartVertexBufferSize) catch unreachable; const index_buf = alloc.alloc(u16, StartIndexBufferSize) catch unreachable; return Mesh{ .alloc = alloc, .index_buffer_type = gl.GL_UNSIGNED_SHORT, .vert_buf = vertex_buf, .index_buf = index_buf, .cur_vert_buf_size = 0, .cur_index_buf_size = 0, }; } pub fn deinit(self: Self) void { self.alloc.free(self.vert_buf); self.alloc.free(self.index_buf); } pub fn reset(self: *Self) void { self.cur_vert_buf_size = 0; self.cur_index_buf_size = 0; } pub fn addVertex(self: *Self, vert: *TexShaderVertex) void { self.vert_buf[self.cur_vert_buf_size] = vert.*; self.cur_vert_buf_size += 1; } // Assumes enough capacity. pub fn addVertexGetIndex(self: *Self, vert: *TexShaderVertex) u16 { const idx = self.cur_vert_buf_size; self.vert_buf[self.cur_vert_buf_size] = vert.*; self.cur_vert_buf_size += 1; return @intCast(u16, idx); } // Returns the id of the first vertex added. pub fn addVertices(self: *Self, verts: []const TexShaderVertex) u16 { const first_idx = self.cur_vert_buf_size; for (verts) |it| { self.vert_buf[self.cur_vert_buf_size] = it; self.cur_vert_buf_size += 1; } return @intCast(u16, first_idx); } pub fn getNextIndexId(self: *const Self) u16 { return @intCast(u16, self.cur_vert_buf_size); } pub fn addIndex(self: *Self, idx: u16) void { self.index_buf[self.cur_index_buf_size] = idx; self.cur_index_buf_size += 1; } pub fn addDeltaIndices(self: *Self, offset: u16, deltas: []const u16) void { for (deltas) |it| { self.addIndex(offset + it); } } /// Assumes triangle in cw order. Pushes as ccw triangle. pub fn addTriangle(self: *Self, v1: u16, v2: u16, v3: u16) void { self.index_buf[self.cur_index_buf_size] = v1; self.index_buf[self.cur_index_buf_size + 1] = v3; self.index_buf[self.cur_index_buf_size + 2] = v2; self.cur_index_buf_size += 3; } /// Assumes clockwise order of verts but pushes ccw triangles. pub fn addQuad(self: *Self, idx1: u16, idx2: u16, idx3: u16, idx4: u16) void { // First triangle. self.index_buf[self.cur_index_buf_size] = idx1; self.index_buf[self.cur_index_buf_size + 1] = idx4; self.index_buf[self.cur_index_buf_size + 2] = idx2; // Second triangle. self.index_buf[self.cur_index_buf_size + 3] = idx2; self.index_buf[self.cur_index_buf_size + 4] = idx4; self.index_buf[self.cur_index_buf_size + 5] = idx3; self.cur_index_buf_size += 6; } // Add vertex data that should be together. pub fn addVertexData(self: *Self, comptime num_verts: usize, comptime num_indices: usize, vdata: *VertexData(num_verts, num_indices)) void { const first_idx = self.addVertices(&vdata.verts); self.addDeltaIndices(first_idx, &vdata.indices); } pub fn ensureUnusedBuffer(self: *Self, vert_inc: usize, index_inc: usize) bool { if (self.cur_vert_buf_size + vert_inc > self.vert_buf.len) { // Grow buffer. 
var new_size = @floatToInt(u32, @intToFloat(f32, self.cur_vert_buf_size + vert_inc) * 1.5); if (new_size > MaxVertexBufferSize) { if (vert_inc > MaxVertexBufferSize) { stdx.panicFmt("requesting buffer size {} that exceeds max {}", .{ vert_inc, MaxVertexBufferSize }); } if (self.vert_buf.len < MaxVertexBufferSize) { self.vert_buf = self.alloc.realloc(self.vert_buf, MaxVertexBufferSize) catch unreachable; } if (self.cur_vert_buf_size + vert_inc > MaxVertexBufferSize) { return false; } } else { self.vert_buf = self.alloc.realloc(self.vert_buf, new_size) catch unreachable; } } if (self.cur_index_buf_size + index_inc > self.index_buf.len) { var new_size = @floatToInt(u32, @intToFloat(f32, self.cur_index_buf_size + index_inc) * 1.5); if (new_size > MaxIndexBufferSize) { if (index_inc > MaxIndexBufferSize) { stdx.panicFmt("requesting buffer size {} that exceeds max {}", .{ index_inc, MaxIndexBufferSize }); } if (self.index_buf.len < MaxIndexBufferSize) { self.index_buf = self.alloc.realloc(self.index_buf, MaxIndexBufferSize) catch unreachable; } if (self.cur_index_buf_size + index_inc > MaxIndexBufferSize) { return false; } } else { self.index_buf = self.alloc.realloc(self.index_buf, new_size) catch unreachable; } } return true; } }; // Used to set a bunch of data in one go, reducing the number of batcher capacity checks. pub fn VertexData(comptime num_verts: usize, comptime num_indices: usize) type { if (num_indices == 0 or num_indices % 3 != 0) { @panic("num_indices must be at least 3 and multiple of 3"); } return struct { verts: [num_verts]TexShaderVertex, // index value references vertex idx. indices: [num_indices]u16, pub fn setRect(self: *@This(), offset: u16, tl: u16, tr: u16, br: u16, bl: u16) void { // Assumes ccw front face order. self.indices[offset .. offset + 6][0..6].* = .{ // First triangle. tl, bl, br, // Second triangle. br, tr, tl, }; } }; } pub const TexShaderVertex = packed struct { const Self = @This(); pos_x: f32, pos_y: f32, pos_z: f32, pos_w: f32, uv_x: f32, uv_y: f32, color_r: f32, color_g: f32, color_b: f32, color_a: f32, pub fn setXY(self: *Self, x: f32, y: f32) void { self.pos_x = x; self.pos_y = y; self.pos_z = 0; self.pos_w = 1; } pub fn setXYZ(self: *Self, x: f32, y: f32, z: f32) void { self.pos_x = x; self.pos_y = y; self.pos_z = z; self.pos_w = 1; } pub fn setColor(self: *Self, color: Color) void { self.color_r = @intToFloat(f32, color.channels.r) / 255; self.color_g = @intToFloat(f32, color.channels.g) / 255; self.color_b = @intToFloat(f32, color.channels.b) / 255; self.color_a = @intToFloat(f32, color.channels.a) / 255; } pub fn setUV(self: *Self, u: f32, v: f32) void { self.uv_x = u; self.uv_y = v; } };
graphics/src/backend/gpu/mesh.zig
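A worked example of the winding convention in Mesh above: addQuad(0, 1, 2, 3), with the four corners given clockwise, writes the index stream 0, 3, 1, 1, 3, 2, i.e. two counter-clockwise triangles sharing the 1-3 diagonal, and addTriangle(v1, v2, v3) likewise swaps its last two indices to flip a clockwise input. Callers are expected to check ensureUnusedBuffer(vert_count, index_count) first, since the add* methods assume the capacity is already there.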
const std = @import("std"); const Random = std.rand.Random; const math = std.math; const Xoshiro256 = @This(); s: [4]u64, pub fn init(init_s: u64) Xoshiro256 { var x = Xoshiro256{ .s = undefined, }; x.seed(init_s); return x; } pub fn random(self: *Xoshiro256) Random { return Random.init(self, fill); } fn next(self: *Xoshiro256) u64 { const r = math.rotl(u64, self.s[0] +% self.s[3], 23) +% self.s[0]; const t = self.s[1] << 17; self.s[2] ^= self.s[0]; self.s[3] ^= self.s[1]; self.s[1] ^= self.s[2]; self.s[0] ^= self.s[3]; self.s[2] ^= t; self.s[3] = math.rotl(u64, self.s[3], 45); return r; } // Skip 2^128 places ahead in the sequence fn jump(self: *Xoshiro256) void { var s: u256 = 0; var table: u256 = 0x39abdc4529b1661ca9582618e03fc9aad5a61266f0c9392c180ec6d33cfd0aba; while (table != 0) : (table >>= 1) { if (@truncate(u1, table) != 0) { s ^= @bitCast(u256, self.s); } _ = self.next(); } self.s = @bitCast([4]u64, s); } pub fn seed(self: *Xoshiro256, init_s: u64) void { // Xoshiro requires 256-bits of seed. var gen = std.rand.SplitMix64.init(init_s); self.s[0] = gen.next(); self.s[1] = gen.next(); self.s[2] = gen.next(); self.s[3] = gen.next(); } pub fn fill(self: *Xoshiro256, buf: []u8) void { var i: usize = 0; const aligned_len = buf.len - (buf.len & 7); // Complete 8 byte segments. while (i < aligned_len) : (i += 8) { var n = self.next(); comptime var j: usize = 0; inline while (j < 8) : (j += 1) { buf[i + j] = @truncate(u8, n); n >>= 8; } } // Remaining. (cuts the stream) if (i != buf.len) { var n = self.next(); while (i < buf.len) : (i += 1) { buf[i] = @truncate(u8, n); n >>= 8; } } } test "xoroshiro sequence" { var r = Xoshiro256.init(0); const seq1 = [_]u64{ 0x53175d61490b23df, 0x61da6f3dc380d507, 0x5c0fdf91ec9a7bfc, 0x02eebf8c3bbe5e1a, 0x7eca04ebaf4a5eea, 0x0543c37757f08d9a, }; for (seq1) |s| { try std.testing.expect(s == r.next()); } r.jump(); const seq2 = [_]u64{ 0xae1db5c5e27807be, 0xb584c6a7fd8709fe, 0xc46a0ee9330fb6e, 0xdc0c9606f49ed76e, 0x1f5bb6540f6651fb, 0x72fa2ca734601488, }; for (seq2) |s| { try std.testing.expect(s == r.next()); } } test "xoroshiro fill" { var r = Xoshiro256.init(0); const seq = [_]u64{ 0x53175d61490b23df, 0x61da6f3dc380d507, 0x5c0fdf91ec9a7bfc, 0x02eebf8c3bbe5e1a, 0x7eca04ebaf4a5eea, 0x0543c37757f08d9a, }; for (seq) |s| { var buf0: [8]u8 = undefined; var buf1: [7]u8 = undefined; std.mem.writeIntLittle(u64, &buf0, s); r.fill(&buf1); try std.testing.expect(std.mem.eql(u8, buf0[0..7], buf1[0..])); } }
lib/std/rand/Xoshiro256.zig
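A minimal usage sketch for the generator above, assuming the 0.9-era std.rand layout it is written against (Random.init(self, fill)); the seed value and printed names are illustrative only:

const std = @import("std");

pub fn main() void {
    var prng = std.rand.Xoshiro256.init(0x853c49e6748fea9b); // any 64-bit seed
    const rand = prng.random();
    // Draws go through the generic Random front end, which calls fill() above.
    const die = rand.intRangeAtMost(u8, 1, 6);
    const unit = rand.float(f64); // uniform in [0, 1)
    std.debug.print("die={d} unit={d}\n", .{ die, unit });
}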
const Allocator = std.mem.Allocator; const ClientSM = @import("state_machines/client.zig").ClientSM; const Event = @import("events/events.zig").Event; const ServerSM = @import("state_machines/server.zig").ServerSM; const SMError = @import("state_machines/errors.zig").SMError; const std = @import("std"); pub fn Client(comptime Reader: type, comptime Writer: type) type { return struct { const Self = @This(); localState: ClientSM(Writer), remoteState: ServerSM(Reader), pub fn init(allocator: Allocator, reader: Reader, writer: Writer) Self { var localState = ClientSM(Writer).init(allocator, writer); var remoteState = ServerSM(Reader).init(allocator, reader); return Self{ .localState = localState, .remoteState = remoteState, }; } pub fn deinit(self: *Self) void { self.localState.deinit(); self.remoteState.deinit(); } pub fn send(self: *Self, event: Event) !void { try self.localState.send(event); self.remoteState.expectEvent(event); } pub fn nextEvent(self: *Self, options: anytype) !Event { return self.remoteState.nextEvent(options); } }; } const expect = std.testing.expect; const expectEqualStrings = std.testing.expectEqualStrings; const expectError = std.testing.expectError; const Request = @import("events/events.zig").Request; const TestClient = Client(std.io.FixedBufferStream([]const u8).Reader, std.io.FixedBufferStream([]u8).Writer); test "Send - Client can send an event" { var read_buffer = ""; var fixed_read_buffer = std.io.fixedBufferStream(read_buffer); var write_buffer: [100]u8 = undefined; var fixed_write_buffer = std.io.fixedBufferStream(&write_buffer); var client = TestClient.init(std.testing.allocator, fixed_read_buffer.reader(), fixed_write_buffer.writer()); defer client.deinit(); client.localState.state = .SendBody; try client.send(.EndOfMessage); try expect(std.mem.startsWith(u8, &write_buffer, "")); } test "Send - Remember the request method when sending a request event" { var read_buffer = ""; var fixed_read_buffer = std.io.fixedBufferStream(read_buffer); var write_buffer: [100]u8 = undefined; var fixed_write_buffer = std.io.fixedBufferStream(&write_buffer); var client = TestClient.init(std.testing.allocator, fixed_read_buffer.reader(), fixed_write_buffer.writer()); defer client.deinit(); var request = Request.default(std.testing.allocator); try client.send(Event{ .Request = request }); try expect(client.remoteState.expected_request.?.method == .Get); } test "NextEvent - A Response event with a content length muste be followed by a Data event and an EndOfMessage event." 
{ var content = "HTTP/1.1 200 OK\r\nContent-Length: 34\r\n\r\nAin't no sunshine when she's gone."; var fixed_read_buffer = std.io.fixedBufferStream(content); var write_buffer: [100]u8 = undefined; var fixed_write_buffer = std.io.fixedBufferStream(&write_buffer); var client = TestClient.init(std.testing.allocator, fixed_read_buffer.reader(), fixed_write_buffer.writer()); var request = Request.default(std.testing.allocator); try client.send(Event{ .Request = request }); var event = try client.nextEvent(.{}); try expect(event == .Response); var response = event.Response; defer response.deinit(); var buffer: [100]u8 = undefined; event = try client.nextEvent(.{ .buffer = &buffer }); try expect(event == .Data); var data = event.Data; event = try client.nextEvent(.{ .buffer = &buffer }); try expect(event == .EndOfMessage); client.deinit(); try expect(response.statusCode == .Ok); try expect(response.version == .Http11); try expect(response.headers.len() == 1); try expectEqualStrings(response.headers.items()[0].name.raw(), "Content-Length"); try expectEqualStrings(response.headers.items()[0].value, "34"); try expectEqualStrings(data.bytes, "Ain't no sunshine when she's gone."); }
src/connection.zig
const std = @import("std"); const draw = @import("pixel_draw.zig"); // === Windows definitions ==================================================== const WS_OVERLAPEDWINDOW: u64 = 0x00C00000 | 0x00080000 | 0x00040000 | 0x00020000 | 0x00010000; const WS_VISIBLE: u64 = 00200000; extern "user32" fn GetClientRect(hWnd: win.HWND, lpRect: *RECT) callconv(.Stdcall) c_int; extern "user32" fn BeginPaint(hWnd: win.HWND, lpPaint: *PAINTSTRUCT) callconv(.Stdcall) c_int; extern "user32" fn EndPaint(hWnd: win.HWND, lpPaint: *const PAINTSTRUCT) callconv(.Stdcall) c_int; extern "gdi32" fn CreateDIBSection(hdc: win.HDC, pbmi: *const BITMAPINFO, usage: c_uint, ppvBits: ?**c_void, hSection: win.HANDLE, offset: u32) callconv(.Stdcall) c_int; extern "gdi32" fn StretchDIBits(hdc: win.HDC, xDest: c_int, yDest: c_int, DestWidth: c_int, DestHeight: c_int, xSrc: c_int, ySrc: c_int, SrcWidth: c_int, SrcHeight: c_int, lpBits: ?*const c_void, lpbmi: ?*const c_void, iUsage: c_uint, rop: u32) callconv(.Stdcall) c_int; extern "gdi32" fn BitBlit(hdc: win.HDC, x: c_int, y: c_int, cx: c_int, cy: c_int, hdcSrc: win.HDC, x1: c_int, y1: c_int, rop: u32) c_int; const VirtualKeys = struct { pub const ABNT_C1 = 0xC1; pub const ABNT_C2 = 0xC2; pub const ADD = 0x6B; pub const ATTN = 0xF6; pub const BACK = 0x08; pub const CANCEL = 0x03; pub const CLEAR = 0x0C; pub const CRSEL = 0xF7; pub const DECIMAL = 0x6E; pub const DIVIDE = 0x6F; pub const EREOF = 0xF9; pub const ESCAPE = 0x1B; pub const EXECUTE = 0x2B; pub const EXSEL = 0xF8; pub const ICO_CLEAR = 0xE6; pub const ICO_HELP = 0xE3; pub const KEY_0 = 0x30; pub const KEY_1 = 0x31; pub const KEY_2 = 0x32; pub const KEY_3 = 0x33; pub const KEY_4 = 0x34; pub const KEY_5 = 0x35; pub const KEY_6 = 0x36; pub const KEY_7 = 0x37; pub const KEY_8 = 0x38; pub const KEY_9 = 0x39; pub const KEY_A = 0x41; pub const KEY_B = 0x42; pub const KEY_C = 0x43; pub const KEY_D = 0x44; pub const KEY_E = 0x45; pub const KEY_F = 0x46; pub const KEY_G = 0x47; pub const KEY_H = 0x48; pub const KEY_I = 0x49; pub const KEY_J = 0x4A; pub const KEY_K = 0x4B; pub const KEY_L = 0x4C; pub const KEY_M = 0x4D; pub const KEY_N = 0x4E; pub const KEY_O = 0x4F; pub const KEY_P = 0x50; pub const KEY_Q = 0x51; pub const KEY_R = 0x52; pub const KEY_S = 0x53; pub const KEY_T = 0x54; pub const KEY_U = 0x55; pub const KEY_V = 0x56; pub const KEY_W = 0x57; pub const KEY_X = 0x58; pub const KEY_Y = 0x59; pub const KEY_Z = 0x5A; pub const MULTIPLY = 0x6A; pub const NONAME = 0xFC; pub const NUMPAD0 = 0x60; pub const NUMPAD1 = 0x61; pub const NUMPAD2 = 0x62; pub const NUMPAD3 = 0x63; pub const NUMPAD4 = 0x64; pub const NUMPAD5 = 0x65; pub const NUMPAD6 = 0x66; pub const NUMPAD7 = 0x67; pub const NUMPAD8 = 0x68; pub const NUMPAD9 = 0x69; pub const OEM_1 = 0xBA; pub const OEM_102 = 0xE2; pub const OEM_2 = 0xBF; pub const OEM_3 = 0xC0; pub const OEM_4 = 0xDB; pub const OEM_5 = 0xDC; pub const OEM_6 = 0xDD; pub const OEM_7 = 0xDE; pub const OEM_8 = 0xDF; pub const OEM_ATTN = 0xF0; pub const OEM_AUTO = 0xF3; pub const OEM_AX = 0xE1; pub const OEM_BACKTAB = 0xF5; pub const OEM_CLEAR = 0xFE; pub const OEM_COMMA = 0xBC; pub const OEM_COPY = 0xF2; pub const OEM_CUSEL = 0xEF; pub const OEM_ENLW = 0xF4; pub const OEM_FINISH = 0xF1; pub const OEM_FJ_LOYA = 0x95; pub const OEM_FJ_MASSHOU = 0x93; pub const OEM_FJ_ROYA = 0x96; pub const OEM_FJ_TOUROKU = 0x94; pub const OEM_JUMP = 0xEA; pub const OEM_MINUS = 0xBD; pub const OEM_PA1 = 0xEB; pub const OEM_PA2 = 0xEC; pub const OEM_PA3 = 0xED; pub const OEM_PERIOD = 0xBE; pub const OEM_PLUS = 0xBB; 
pub const OEM_RESET = 0xE9; pub const OEM_WSCTRL = 0xEE; pub const PA1 = 0xFD; pub const PACKET = 0xE7; pub const PLAY = 0xFA; pub const PROCESSKEY = 0xE5; pub const RETURN = 0x0D; pub const SELECT = 0x29; pub const SEPARATOR = 0x6C; pub const SPACE = 0x20; pub const SUBTRACT = 0x6D; pub const TAB = 0x09; pub const ZOOM = 0xFB; //pub const DOWN = 0x25; //pub const LEFT = 0x26; //pub const UP = 0x27; //pub const RIGHT = 0x28; pub const LEFT = 0x25; pub const UP = 0x26; pub const RIGHT = 0x27; pub const DOWN = 0x28; }; const RECT = extern struct { left: c_long, top: c_long, right: c_long, bottom: c_long, }; const PAINTSTRUCT = extern struct { hdc: win.HDC, fErase: win.BOOL, rcPaint: RECT, fRestore: win.BOOL, fIncUpdate: win.BOOL, rgbReserved: [32]u8, }; const BITMAPINFOHEADER = extern struct { biSize: u32, biWidth: i32, biHeight: i32, biPlanes: u16, biBitCount: u16, biCompression: u32, biSizeImage: u32, biXPelsPerMeter: i32, biYPelsPerMeter: i32, biClrUsed: u32, biClrImportant: u32, }; const RGBQUAD = extern struct { rgbBlue: u8, rgbGreen: u8, rgbReed: u8, rgbReserved: u8, }; const BITMAPINFO = extern struct { bmiHeader: BITMAPINFOHEADER, bmiColors: [1]RGBQUAD, }; const win = std.os.windows; const usr32 = win.user32; const WNDCLASSEXA = win.user32.WNDCLASSEXA; // ============================================================================ pub var mouse_pos_x: i32 = 0; pub var mouse_pos_y: i32 = 0; pub var is_mouse_on_window: bool = false; pub var mouse_buttons_down = [_]bool{false} ** 8; pub var mouse_buttons_up = [_]bool{false} ** 8; pub var mouse_buttons_pressed = [_]bool{false} ** 8; const keymap = [_]u32{ VirtualKeys.KEY_Q, VirtualKeys.KEY_W, VirtualKeys.KEY_E, VirtualKeys.KEY_R, VirtualKeys.KEY_A, VirtualKeys.KEY_S, VirtualKeys.KEY_D, VirtualKeys.KEY_1, VirtualKeys.KEY_2, VirtualKeys.KEY_3, VirtualKeys.KEY_4, VirtualKeys.KEY_5, VirtualKeys.KEY_6, VirtualKeys.KEY_7, VirtualKeys.KEY_8, VirtualKeys.KEY_9, VirtualKeys.KEY_0, VirtualKeys.UP, VirtualKeys.DOWN, VirtualKeys.LEFT, VirtualKeys.RIGHT, }; pub var keys_down = [_]bool{false} ** keymap.len; pub var keys_up = [_]bool{false} ** keymap.len; pub var keys_pressed = [_]bool{false} ** keymap.len; fn mainWindowCallback(window: win.HWND, message: c_uint, w_param: usize, l_param: ?*c_void) callconv(.Stdcall) ?*c_void { var result: ?*c_void = null; switch (message) { usr32.WM_SIZE => { var client_rect: RECT = undefined; _ = GetClientRect(window, &client_rect); const width = @intCast(u32, client_rect.right - client_rect.left); const height = @intCast(u32, client_rect.bottom - client_rect.top); win32ResizeDibSection(width, height); }, usr32.WM_DESTROY => { usr32.PostQuitMessage(0); }, usr32.WM_CLOSE => { usr32.PostQuitMessage(0); }, else => { result = win.user32.DefWindowProcA(window, message, w_param, l_param); }, } return result; } var bitmap_info = BITMAPINFO{ .bmiHeader = .{ .biSize = @sizeOf(BITMAPINFOHEADER), .biWidth = 0, .biHeight = 0, .biPlanes = 1, .biBitCount = 32, .biCompression = 0, .biSizeImage = 0, .biXPelsPerMeter = 0, .biYPelsPerMeter = 0, .biClrUsed = 0, .biClrImportant = 0, }, .bmiColors = undefined, }; fn win32ResizeDibSection(width: u32, height: u32) void { draw.gb.width = width; draw.gb.height = height; bitmap_info.bmiHeader.biWidth = @intCast(i32, width); bitmap_info.bmiHeader.biHeight = @intCast(i32, height); bitmap_memory = main_allocator.realloc(bitmap_memory, width * height * 4) catch unreachable; draw.gb.depth = main_allocator.realloc(draw.gb.depth, draw.gb.width * draw.gb.height) catch unreachable; draw.gb.screen = 
@ptrCast(*[]u8, &bitmap_memory).*; } fn win32UpadateWindow(device_context: win.HDC) void { _ = StretchDIBits(device_context, 0, 0, @intCast(c_int, draw.gb.width), @intCast(c_int, draw.gb.height), 0, @intCast(c_int, draw.gb.height), @intCast(c_int, draw.gb.width), -@intCast(c_int, draw.gb.height), @ptrCast(*c_void, bitmap_memory.ptr), @ptrCast(*c_void, &bitmap_info), 0, 0xcc0020); } // === Globals ======================================= var bitmap_memory: []u32 = undefined; var main_allocator: *std.mem.Allocator = undefined; // =================================================== pub fn plataformInit(al: *std.mem.Allocator, w_width: u32, w_height: u32, start_fn: fn () void, update_fn: fn (f32) void) !void { main_allocator = al; const instance = @ptrCast(win.HINSTANCE, win.kernel32.GetModuleHandleW(null).?); var window_class = WNDCLASSEXA{ .style = usr32.CS_OWNDC | usr32.CS_HREDRAW | usr32.CS_VREDRAW, .lpfnWndProc = mainWindowCallback, .cbClsExtra = 0, .cbWndExtra = 0, .hInstance = instance, .hIcon = null, .hCursor = null, .hbrBackground = null, .lpszMenuName = null, .lpszClassName = "PixelDrawWindowClass", .hIconSm = null, }; if (usr32.RegisterClassExA(&window_class) == 0) { std.debug.panic("Win error {}\n", .{win.kernel32.GetLastError()}); } var window_handle_maybe_null = usr32.CreateWindowExA(0, window_class.lpszClassName, "PixelDraw", WS_OVERLAPEDWINDOW | WS_VISIBLE, 0, 0, @intCast(i32, w_width), @intCast(i32, w_height), null, null, instance, null); draw.gb.width = w_width; draw.gb.height = w_height; if (window_handle_maybe_null) |window_handle| { _ = usr32.ShowWindow(window_handle, 1); win32ResizeDibSection(w_width, w_height); //depth_buffer = try main_allocator.alloc(f32, draw.gb.width * draw.gb.height); start_fn(); var delta: f32 = 0.0; var initTime: i128 = 0; var msg: usr32.MSG = undefined; var running = true; while (running) { initTime = std.time.nanoTimestamp() - initTime; delta = @floatCast(f32, @intToFloat(f64, initTime) / 1000000000); initTime = std.time.nanoTimestamp(); for (draw.gb.depth) |*it| it.* = std.math.inf_f32; for (keys_up) |*it| it.* = false; for (keys_down) |*it| it.* = false; while (usr32.PeekMessageA(&msg, null, 0, 0, 0x0001)) { // 0x0001 = PM_REMOVE switch (msg.message) { usr32.WM_QUIT => running = false, usr32.WM_KEYDOWN => { const key = @intCast(u32, msg.wParam); for (keys_down) |*it, i| { if (keymap[i] == key) { it.* = true; keys_pressed[i] = true; } } }, usr32.WM_KEYUP => { const key = @intCast(u32, msg.wParam); for (keys_up) |*it, i| { if (keymap[i] == key) { it.* = true; keys_pressed[i] = false; } } }, else => {}, } _ = usr32.TranslateMessage(&msg); _ = usr32.DispatchMessageA(&msg); } const device_context = usr32.GetDC(window_handle).?; win32UpadateWindow(device_context); update_fn(delta); } } else { std.debug.panic("Unable to create Window - error: {}\n", .{win.kernel32.GetLastError()}); } main_allocator.free(bitmap_memory); main_allocator.free(draw.gb.depth); }
src/win32_plataform.zig
const std = @import("std"); const debug = std.log.debug; const testing = std.testing; //IDEAS: consider maybe json serialization but prefer binnary serialization //READ: read into Google Protocol Buffers ,Thrift and Avro in I want to use well established serialization tools //Serialization takes an in-memory data structure and converts it into a series of bytes that can be stored and transferred. //Deserialization takes a series of bytes and converts it to an in-memory data structure that can be consumed programmatically. //TODO: try to implement a way to convert slices into arrays for easy serialization //IDEAS: 🤔 maybe reify the slice type but with a .field_type of [N]u8 take inspiration from std.meta.Sentinel //REF: https://stackoverflow.com/questions/15707933/how-to-serialize-a-struct-in-c , serialization framework https://github.com/getty-zig/getty //REF: https://stackoverflow.com/questions/9778806/serializing-a-class-with-a-pointer-in-c https://www.boost.org/doc/libs/1_78_0/libs/serialization/doc/tutorial.html#pointers //REF: https://stackoverflow.com/questions/523872/how-do-you-serialize-an-object-in-c/ https://accu.org/journals/overload/24/136/ignatchenko_2317/ //REF: https://github.com/srwalter/dbus-serialize /// serialized a type in memory fn inMemSerialize(type_to_serialize: anytype, serialized_buf: *[@sizeOf(@TypeOf(type_to_serialize))]u8) void { @memcpy(serialized_buf, @ptrCast([*]const u8, &type_to_serialize), @sizeOf(@TypeOf(type_to_serialize))); } /// deserialize data from memory fn inMemDeserialize(comptime T: type, serialized_t: [@sizeOf(T)]u8) T { return @bitCast(T, serialized_t); } // test "simple serialization/deserialization with other data interleved " { // const Data = packed struct { // char: [21]u8 = "is my data still here".*, // int: u8 = 254, // ochar: [21]u8 = "is my data still here".*, // }; // const data = Data{}; // // var serialized_data: [@sizeOf(Data)]u8 = undefined; // // simpleSerialize(data, &serialized_data); // // const SerializedData = try std.fs.cwd().createFile("serialized-1.data", .{ .read = true }); // defer SerializedData.close(); // const writer = SerializedData.writer(); // try writer.writeStruct(data); // // try writer.writeAll(serialized_data[0..]); // try SerializedData.seekTo(0); // // // var deserialized_buf: [@sizeOf(Data)]u8 = undefined; // const reader = SerializedData.reader(); // const deserialized_data = try reader.readStruct(Data); // // const deserialized_data = simpleDeserialize(Data, serialized_data); // // try testing.expectEqualSlices(u8, serialized_data[0..], deserialized_buf[0..]); // std.debug.print("\ndata {}\ndserialized data {}\n", .{ data, deserialized_data }); // // // try testing.expectEqualSlices(u8, data.char[0..], deserialized_data.char[0..]); // // try testing.expectEqualSlices(u8, data.ochar[0..], deserialized_data.ochar[0..]); // // try testing.expect(std.mem.eql(u8, data.char[0..], deserialized_data.char[0..])); // // try testing.expect(std.mem.eql(u8, @ptrCast([*][]u8, &data)[0], @ptrCast([*][]u8, &deserialized_data))); // // std.debug.print("\nchar is {s}\n", .{deserialized_data.char[0..]}); // try testing.expect(data.int == deserialized_data.int); // } // //TODO: test output of serialized [N]T output with []T output // //when serializing slice don't forget to set the len field also // pub fn sliceSerialize(type_to_serialize: anytype, serialized_buf: *[@sizeOf(@TypeOf(type_to_serialize))]u8) void { // const @"type" = @TypeOf(type_to_serialize); // const fields = comptime std.meta.fields(@"type"); // 
debug("size of {} is {}", .{ @"type", @sizeOf(@"type") }); // var size: usize = 0; // const manyptr_to_serialize = @ptrCast([*]const u8, &type_to_serialize); // //TODO: use std.mem.alignInBytes for aligning fields in struct during deserialization // //take pointers to fields and ptrCast to bytes for modification // inline for (fields) |field| { // if (std.meta.trait.isSlice(field.field_type)) { // const size_of_slice = @sizeOf(field.field_type); // debug("The field {s} is a slice", .{field.name}); // debug("size of {s} slice is {}", .{ field.name, size_of_slice }); // debug("{s} has {s}", .{ field.name, field }); // const slice = @bitCast([]const u8, manyptr_to_serialize[size .. size + size_of_slice]); // debug("slice ptr contains {s}", .{slice}); // @memcpy(serialized_buf[size..].ptr, slice.ptr, slice.len); // size += size_of_slice; // } else { // const type_size = comptime blk: { // const size_of_type = @sizeOf(field.field_type); // if (size_of_type < 8) { // const new_size = @sizeOf(field.field_type) * 8; // //multiple size by 8 to properly align // // debug("size was initially {} but is now ", .{ size_of_type, new_size }); // break :blk new_size; // } else break :blk size_of_type; // }; // //since size might have been modified // const actual_size = @sizeOf(field.field_type); // debug("The field {s} is not a slice", .{field.name}); // debug("size of {s} is {}", .{ field.name, actual_size }); // debug("{s} has {s}", .{ field.name, field }); // @memcpy(serialized_buf[size..].ptr, manyptr_to_serialize[size .. size + actual_size].ptr, type_size); // size += type_size; // } // debug("{} bytes copied", .{size}); // } // // // @memcpy(serialized_buf, @ptrCast([*]const u8, &type_to_serialize), @sizeOf(@TypeOf(type_to_serialize))); // } // // pub fn deserialize(comptime T: type, serialized_t: [@sizeOf(T)]u8) T { // return @bitCast(T, serialized_t); // var des_type: T = undefined; // const fields = std.meta.fields(T); // var size: usize = 0; // inline for (fields) |field| { // if (std.meta.trait.isSlice(field.field_type)) { // const size_of_slice = @sizeOf(field.field_type); // @memcpy(cast([*]u8, des_type.character.ptr), serialized_t[size .. size + size_of_slice].ptr, size_of_slice); // size += size_of_slice; // } else { // const type_size = comptime blk: { // const size_of_type = @sizeOf(field.field_type); // if (size_of_type < 8) { // const new_size = @sizeOf(field.field_type) * 8; // break :blk new_size; // } else break :blk size_of_type; // }; // //since size might have been modified // const actual_size = @sizeOf(field.field_type); // @memcpy(des_type.integer, serialized_t[size .. size + actual_size].ptr, actual_size); // size += type_size; // } // } // return des_type; // } //
src/serializer.zig
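A minimal round-trip sketch for the two in-memory helpers above; since they are not pub it would have to sit in the same file, and the Pair type is illustrative:

test "in-memory round trip sketch" {
    const Pair = packed struct { a: u16, b: u16 };
    const original = Pair{ .a = 0xBEEF, .b = 0xCAFE };
    var bytes: [@sizeOf(Pair)]u8 = undefined;
    inMemSerialize(original, &bytes); // memcpy the struct's bytes out
    const copy = inMemDeserialize(Pair, bytes); // bitcast the bytes back
    try testing.expect(copy.a == original.a and copy.b == original.b);
}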
const std = @import("std"); const assert = std.debug.assert; const approxEq = std.math.approxEq; pub const MixedNumber = struct { whole: i32, fraction: Fraction, }; pub const Fraction = struct { numerator: i32, denominator: i32, }; pub fn mediantInPlace(result: *Fraction, a: Fraction, b: Fraction) void { result.numerator = a.numerator + b.numerator; result.denominator = a.denominator + b.denominator; } pub fn mediant(a: Fraction, b: Fraction) Fraction { return Fraction{ .numerator = a.numerator + b.numerator, .denominator = a.denominator + b.denominator, }; } pub fn floatToNumber(n: f32, limit: i32) MixedNumber { var whole = @floatToInt(i32, n); var whats_left = n - @intToFloat(f32, whole); var fraction = findFraction(whats_left, limit); return MixedNumber{ .whole = whole, .fraction = fraction, }; } pub fn findFraction(n: f32, limit: i32) Fraction { var left = Fraction{ .numerator = 0, .denominator = 1, }; var right = Fraction{ .numerator = 1, .denominator = 1, }; var result = mediant(left, right); var epsilon = 1.0 / @intToFloat(f32, limit); while (result.denominator <= limit) { var decimal = @intToFloat(f32, result.numerator) / @intToFloat(f32, result.denominator); if (approxEq(f32, decimal, n, epsilon)) break; if (decimal > n) { right.numerator = result.numerator; right.denominator = result.denominator; } else if (decimal < n) { left.numerator = result.numerator; left.denominator = result.denominator; } mediantInPlace(&result, left, right); } return result; } fn equalFractions(a: Fraction, b: Fraction) bool { return a.numerator == b.numerator and a.denominator == b.denominator; } fn equalNumbers(a: MixedNumber, b: MixedNumber) bool { return a.whole == b.whole and equalFractions(a.fraction, b.fraction); } test "float to number" { const TestCase = struct { input: f32, limit: i32, expected: MixedNumber, }; var testCases = [_]TestCase{ TestCase{ .input = 0.5, .limit = 100, .expected = MixedNumber{ .whole = 0, .fraction = Fraction{ .numerator = 1, .denominator = 2, }, }, }, TestCase{ .input = 0.33333, .limit = 100, .expected = MixedNumber{ .whole = 0, .fraction = Fraction{ .numerator = 1, .denominator = 3, }, }, }, TestCase{ .input = 2.33333, .limit = 100, .expected = MixedNumber{ .whole = 2, .fraction = Fraction{ .numerator = 1, .denominator = 3, }, }, }, TestCase{ .input = 1.9090909, .limit = 10000000, .expected = MixedNumber{ .whole = 1, .fraction = Fraction{ .numerator = 10, .denominator = 11, }, }, }, }; for (testCases) |testCase| { var actual = floatToNumber(testCase.input, testCase.limit); assert(equalNumbers(actual, testCase.expected)); } } test "find fraction" { const TestCase = struct { input: f32, limit: i32, expected: Fraction, }; var testCases = [_]TestCase{ TestCase{ .input = 0.5, .limit = 100, .expected = Fraction{ .numerator = 1, .denominator = 2, }, }, TestCase{ .input = 0.33333, .limit = 100, .expected = Fraction{ .numerator = 1, .denominator = 3, }, }, TestCase{ .input = 0.048896581566421, .limit = 10000000, .expected = Fraction{ .numerator = 113, .denominator = 2311, }, }, }; for (testCases) |testCase| { var actual = findFraction(testCase.input, testCase.limit); assert(equalFractions(actual, testCase.expected)); } } test "mediant" { var a = Fraction{ .numerator = 0, .denominator = 1, }; var b = Fraction{ .numerator = 1, .denominator = 1, }; var actual = mediant(a, b); var expected = Fraction{ .numerator = 1, .denominator = 2, }; assert(equalFractions(actual, expected)); }
farey.zig
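A worked trace of findFraction above: findFraction(0.33333, 100) starts with left = 0/1, right = 1/1 and epsilon = 1/100; the first mediant 1/2 = 0.5 is too big, so right becomes 1/2, and the next mediant (0+1)/(1+2) = 1/3 ≈ 0.3333 is within epsilon of the target and is returned. floatToNumber(2.33333, 100) just peels off the whole part 2 and runs the same search on the remaining 0.33333.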
const std = @import("std"); const helper = @import("helper.zig"); const Allocator = std.mem.Allocator; const input = @embedFile("../inputs/day07.txt"); pub fn run(alloc: Allocator, stdout_: anytype) !void { const parsed = try parseInput(alloc); defer alloc.free(parsed); const res1 = try part1(alloc, parsed); const res2 = try part2(parsed); if (stdout_) |stdout| { try stdout.print("Part 1: {}\n", .{res1}); try stdout.print("Part 2: {}\n", .{res2}); } } fn part1(alloc: Allocator, parsed: []i32) !i32 { var crab_copy = try alloc.dupe(i32, parsed); defer alloc.free(crab_copy); sort(i32, crab_copy, {}, comptime std.sort.asc(i32)); const median = crab_copy[crab_copy.len / 2]; const minima = blk: { // if the median is undisputed, it's also the minima if (crab_copy.len % 2 == 1) { break :blk median; } // otherwise, we need to see which possible median has more impact // when it's chosen: const candidate = crab_copy[(crab_copy.len + 1) / 2]; if (count(i32, crab_copy, &.{candidate}) > count(i32, crab_copy, &.{median})) { // i am aware this is inefficient break :blk candidate; } else { break :blk median; } }; var fuel: i32 = 0; for (crab_copy) |crab| { fuel += try std.math.absInt(crab - minima); } return fuel; } fn part2(parsed: []i32) !i64 { const mean = @divFloor(helper.sum(i32, parsed), @intCast(i32, parsed.len)); // guess // my guess ended up being correct: see <https://www.reddit.com/r/adventofcode/comments/rawxad/2021_day_7_part_2_i_wrote_a_paper_on_todays/> var fuel: i64 = 0; for (parsed) |crab| { var diff = try std.math.absInt(mean - crab); fuel += nsum(diff); } return fuel; } fn nsum(x: i32) i32 { return @divExact(x * (x + 1), 2); } fn parseInput(alloc: Allocator) ![]i32 { const num_crabs = count(u8, input, ",") + 1; var crab_arr = try alloc.alloc(i32, num_crabs); var tokens = tokenize(u8, input, ",\n"); for (crab_arr) |*crab| { crab.* = try parseInt(i32, tokens.next().?, 10); } return crab_arr; } const tokenize = std.mem.tokenize; const count = std.mem.count; const parseUnsigned = std.fmt.parseUnsigned; const parseInt = std.fmt.parseInt; const sort = std.sort.sort;
src/day07.zig
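The reasoning behind part2 above: a crab moving distance d burns 1 + 2 + ... + d = d(d + 1)/2 fuel, which is exactly what nsum computes (nsum(11) = 11 * 12 / 2 = 66), and the sum of those triangular costs is minimized very close to the mean position (per the write-up linked in the comment), so the code only evaluates the floored mean instead of searching every position.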
const std = @import("std"); const fs = std.fs; const mem = std.mem; const print = std.debug.print; const assert = std.debug.assert; pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const allocator = &gpa.allocator; const input = try fs.cwd().readFileAlloc(allocator, "data/input_22_1.txt", std.math.maxInt(usize)); var deck1 = std.ArrayList(u32).init(allocator); defer deck1.deinit(); var deck2 = std.ArrayList(u32).init(allocator); defer deck2.deinit(); { // Process input var lines = std.mem.tokenize(input, "\n"); var curr_deck = &deck1; while (lines.next()) |raw_line| { const line = std.mem.trim(u8, raw_line, " \r\n"); if (line.len == 0) continue; if (mem.eql(u8, line, "Player 1:")) { curr_deck = &deck1; } else if (mem.eql(u8, line, "Player 2:")) { curr_deck = &deck2; } else { try curr_deck.append(try std.fmt.parseInt(u32, line, 10)); } } } { // Solution 1 // Copy the decks because we'll need them again for part 2 var d1 = std.ArrayList(u32).init(allocator); defer d1.deinit(); try d1.resize(deck1.items.len); mem.copy(u32, d1.items, deck1.items); var d2 = std.ArrayList(u32).init(allocator); defer d2.deinit(); try d2.resize(deck2.items.len); mem.copy(u32, d2.items, deck2.items); var winning_deck: ?std.ArrayList(u32) = null; while (true) { if (d1.items.len == 0) { winning_deck = deck2; break; } else if (d2.items.len == 0) { winning_deck = d1; break; } // printDeck(u32, 1, d1.items); // printDeck(u32, 2, d2.items); const card1 = d1.orderedRemove(0); const card2 = d2.orderedRemove(0); if (card1 > card2) { try d1.append(card1); try d1.append(card2); } else { try d2.append(card2); try d2.append(card1); } } var accum: u32 = 0; var i: u32 = 0; while (i < winning_deck.?.items.len) : (i += 1) { var j = i + 1; accum += winning_deck.?.items[winning_deck.?.items.len - j] * j; } print("Day 22 - Solution 1: {}\n", .{accum}); } { // Solution 2 // Make u8 version of the cards, it will be useful to map them as string // for checking configs var d1 = std.ArrayList(u8).init(allocator); var d2 = std.ArrayList(u8).init(allocator); defer d1.deinit(); defer d2.deinit(); try d1.resize(deck1.items.len); try d2.resize(deck2.items.len); for (deck1.items) |v, vi| { d1.items[vi] = @intCast(u8, v); } for (deck2.items) |v, vi| { d2.items[vi] = @intCast(u8, v); } const winner = playRecursiveRound(allocator, &d1, &d2, 1); const winner_deck = if (winner == 1) d1 else d2; var accum: u32 = 0; var i: u32 = 0; while (i < winner_deck.items.len) : (i += 1) { var j = i + 1; accum += winner_deck.items[winner_deck.items.len - j] * j; } print("Day 22 - Solution 2: {}\n", .{accum}); } } fn printDeck(comptime T: type, n: u32, a: []const T) void { print("Deck {}: ", .{n}); for (a) |v, vi| { if (vi == 0) { print("{}", .{v}); } else { print(", {}", .{v}); } } print("\n", .{}); } const Config = struct { d1: std.ArrayList(u8), d2: std.ArrayList(u8), h1: u64, h2: u64, const Self = @This(); pub fn init(a: *mem.Allocator, d1: []u8, d2: []u8) !Self { var res = Self{ .d1 = std.ArrayList(u8).init(a), .d2 = std.ArrayList(u8).init(a), .h1 = std.hash_map.hashString(d1), .h2 = std.hash_map.hashString(d2) }; try res.d1.resize(d1.len); try res.d2.resize(d2.len); std.mem.copy(u8, res.d1.items, d1); std.mem.copy(u8, res.d2.items, d2); return res; } pub fn deinit(s: *Self) void { s.d2.deinit(); s.d1.deinit(); } pub fn eql(a: *const Self, b: *const Self) bool { if (a.d1.items.len != b.d1.items.len) return false; if (a.d2.items.len != b.d2.items.len) return false; if (a.h1 != b.h1) return false; if (a.h2 != b.h2) return false; return 
mem.eql(u8, a.d1.items, b.d1.items) and mem.eql(u8, a.d2.items, b.d2.items); } }; fn playRecursiveRound(a: *mem.Allocator, d1: *std.ArrayList(u8), d2: *std.ArrayList(u8), game: u32) u32 { // Memory for previous rounds var configs = std.ArrayList(Config).init(a); defer { for (configs.items) |_, ci| { configs.items[ci].deinit(); } configs.deinit(); } // print("Start game: {}\n", .{game}); // printDeck(u8, 1, d1.items); // printDeck(u8, 2, d2.items); // Start game while (true) { // Win by empting opponent deck if (d1.items.len == 0) return 2; if (d2.items.len == 0) return 1; // Check recursion. Exit if in a prev config var curr_config = Config.init(a, d1.items, d2.items) catch unreachable; for (configs.items) |other_config| { if (Config.eql(&other_config, &curr_config)) return 1; } configs.append(curr_config) catch unreachable; // Draw card const c1 = d1.orderedRemove(0); const c2 = d2.orderedRemove(0); var winner: u32 = 0; if (c1 <= d1.items.len and c2 <= d2.items.len) { // Play a subgame var d1rec = std.ArrayList(u8).init(a); var d2rec = std.ArrayList(u8).init(a); defer d1rec.deinit(); defer d2rec.deinit(); d1rec.resize(@as(usize, c1)) catch unreachable; d2rec.resize(@as(usize, c2)) catch unreachable; mem.copy(u8, d1rec.items, d1.items[0..d1rec.items.len]); mem.copy(u8, d2rec.items, d2.items[0..d2rec.items.len]); winner = playRecursiveRound(a, &d1rec, &d2rec, game + 1); } else { winner = if (c1 > c2) 1 else 2; } if (winner == 1) { d1.append(c1) catch unreachable; d1.append(c2) catch unreachable; } else if (winner == 2) { d2.append(c2) catch unreachable; d2.append(c1) catch unreachable; } else unreachable; } }
2020/src/day_22.zig
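A worked example of the scoring loop used in both solutions above: each card is multiplied by its position counted from the bottom of the winning deck, so a winning deck of 3, 2, 10, 6, 8, 5, 9, 4, 7, 1 (top to bottom) scores 3*10 + 2*9 + 10*8 + 6*7 + 8*6 + 5*5 + 9*4 + 4*3 + 7*2 + 1*1 = 306.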
const std = @import("std"); const assert = std.debug.assert; const warn = std.debug.warn; const Queue = std.atomic.Queue; /// ActorInterface is a member of all Actor's and /// every Actor must implement processMessage who's /// address is saved in this interface when an Actor /// is initialized by calling Actor(BodyType).init(). /// /// There must also be a BodyType.init(*Actor(BodyType)) pub const ActorInterface = packed struct { pub processMessage: fn (actorInterface: *ActorInterface, msg: *Message) void, }; /// Actor that can process messages. Actors implement /// processMessage in the BodyType passed to this Actor /// Type Constructor. pub fn Actor(comptime BodyType: type) type { return packed struct { const Self = @This(); pub interface: ActorInterface, pub body: BodyType, pub fn init() Self { var self: Self = undefined; //warn("Actor.init: aiPtr={*} self={*}\n", &self.interface, &self); self.interface.processMessage = BodyType.processMessage; BodyType.init(&self); return self; } /// Return a pointer to the Actor this interface is a member of. pub fn getActorPtr(aiPtr: *ActorInterface) *Self { return @fieldParentPtr(Self, "interface", aiPtr); } }; } /// Dispatches messages to actors pub fn ActorDispatcher(comptime maxActors: usize) type { return struct { const Self = @This(); pub queue: Queue(*Message), pub msg_count: u64, pub last_msg_cmd: u64, pub actor_processMessage_count: u64, // What type should ActorPtr be or how do I cast it // so I can call actor.processMessage pub actors: [maxActors]*ActorInterface, pub actors_count: u64, pub fn init() Self { return Self { .queue = Queue(*Message).init(), .msg_count = 0, .last_msg_cmd = 0, .actor_processMessage_count = 0, .actors_count = 0, .actors = undefined, }; } /// NOT thread safe pub fn add(self: *Self, actorInterface: *ActorInterface) !void { if (self.actors_count >= self.actors.len) return error.TooManyActors; self.actors[self.actors_count] = actorInterface; self.actors_count += 1; } pub fn broadcastLoop(self: *Self) void { while (true) { var pMsgNode = self.queue.get() orelse return; self.msg_count += 1; self.last_msg_cmd = pMsgNode.data.cmd; for (self.actors) |aiPtr| { self.actor_processMessage_count += 1; aiPtr.processMessage(aiPtr, pMsgNode.data); } } } }; } /// A message with a cmd and other fields in the future pub const Message = struct { pub cmd: u64, }; /// An ActorBody which must implement init and processMessage const MyActorBody = packed struct { const Self = @This(); pub count: usize, pub fn init(self: *Actor(MyActorBody)) void { self.body.count = 0; } pub fn processMessage(aiPtr: *ActorInterface, msg: *Message) void { var self = Actor(MyActorBody).getActorPtr(aiPtr); //warn("processMessage: aiPtr={*} self={*}\n", aiPtr, self); self.body.count += msg.cmd; } }; test "Actor" { // Create an actor var myActor = Actor(MyActorBody).init(); assert(myActor.body.count == 0); // Create a message var msg = Message { .cmd = 123 }; assert(msg.cmd == 123); // Test that the actor works myActor.interface.processMessage(&myActor.interface, &msg); assert(myActor.body.count == 1 * 123); myActor.interface.processMessage(&myActor.interface, &msg); assert(myActor.body.count == 2 * 123); // Create a dispatcher var dispatcher = ActorDispatcher(1).init(); assert(dispatcher.msg_count == 0); // Add the actor try dispatcher.add(&myActor.interface); // Create a node with a pointer to a message var node0 = @typeOf(dispatcher.queue).Node { .data = &msg, .next = undefined, .prev = undefined, }; // Place the node on the queue and broadcast to the actors 
dispatcher.queue.put(&node0); dispatcher.broadcastLoop(); assert(dispatcher.last_msg_cmd == 123); assert(dispatcher.msg_count == 1); assert(dispatcher.actor_processMessage_count == 1); assert(myActor.body.count == 3 * 123); }
actor.zig
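The Actor/ActorInterface code above relies on embedding an interface struct and recovering the parent with @fieldParentPtr; a stripped-down sketch of just that pattern (same-era Zig, illustrative names) looks like this:

const std = @import("std");
const assert = std.debug.assert;

const Interface = struct {
    countFn: fn (iface: *Interface) u32,
};

const Dog = struct {
    interface: Interface,
    legs: u32,

    fn init() Dog {
        return Dog{ .interface = Interface{ .countFn = countLegs }, .legs = 4 };
    }

    fn countLegs(iface: *Interface) u32 {
        // Recover the enclosing Dog from the pointer to its embedded interface field.
        const self = @fieldParentPtr(Dog, "interface", iface);
        return self.legs;
    }
};

test "fieldParentPtr sketch" {
    var dog = Dog.init();
    assert(dog.interface.countFn(&dog.interface) == 4);
}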
const std = @import("std"); const math = std.math; const expect = std.testing.expect; pub fn __trunch(x: f16) callconv(.C) f16 { // TODO: more efficient implementation return @floatCast(f16, truncf(x)); } pub fn truncf(x: f32) callconv(.C) f32 { const u = @bitCast(u32, x); var e = @intCast(i32, ((u >> 23) & 0xFF)) - 0x7F + 9; var m: u32 = undefined; if (e >= 23 + 9) { return x; } if (e < 9) { e = 1; } m = @as(u32, math.maxInt(u32)) >> @intCast(u5, e); if (u & m == 0) { return x; } else { math.doNotOptimizeAway(x + 0x1p120); return @bitCast(f32, u & ~m); } } pub fn trunc(x: f64) callconv(.C) f64 { const u = @bitCast(u64, x); var e = @intCast(i32, ((u >> 52) & 0x7FF)) - 0x3FF + 12; var m: u64 = undefined; if (e >= 52 + 12) { return x; } if (e < 12) { e = 1; } m = @as(u64, math.maxInt(u64)) >> @intCast(u6, e); if (u & m == 0) { return x; } else { math.doNotOptimizeAway(x + 0x1p120); return @bitCast(f64, u & ~m); } } pub fn __truncx(x: f80) callconv(.C) f80 { // TODO: more efficient implementation return @floatCast(f80, truncq(x)); } pub fn truncq(x: f128) callconv(.C) f128 { const u = @bitCast(u128, x); var e = @intCast(i32, ((u >> 112) & 0x7FFF)) - 0x3FFF + 16; var m: u128 = undefined; if (e >= 112 + 16) { return x; } if (e < 16) { e = 1; } m = @as(u128, math.maxInt(u128)) >> @intCast(u7, e); if (u & m == 0) { return x; } else { math.doNotOptimizeAway(x + 0x1p120); return @bitCast(f128, u & ~m); } } pub fn truncl(x: c_longdouble) callconv(.C) c_longdouble { switch (@typeInfo(c_longdouble).Float.bits) { 16 => return __trunch(x), 32 => return truncf(x), 64 => return trunc(x), 80 => return __truncx(x), 128 => return truncq(x), else => @compileError("unreachable"), } } test "trunc32" { try expect(truncf(1.3) == 1.0); try expect(truncf(-1.3) == -1.0); try expect(truncf(0.2) == 0.0); } test "trunc64" { try expect(trunc(1.3) == 1.0); try expect(trunc(-1.3) == -1.0); try expect(trunc(0.2) == 0.0); } test "trunc128" { try expect(truncq(1.3) == 1.0); try expect(truncq(-1.3) == -1.0); try expect(truncq(0.2) == 0.0); } test "trunc32.special" { try expect(truncf(0.0) == 0.0); // 0x3F800000 try expect(truncf(-0.0) == -0.0); try expect(math.isPositiveInf(truncf(math.inf(f32)))); try expect(math.isNegativeInf(truncf(-math.inf(f32)))); try expect(math.isNan(truncf(math.nan(f32)))); } test "trunc64.special" { try expect(trunc(0.0) == 0.0); try expect(trunc(-0.0) == -0.0); try expect(math.isPositiveInf(trunc(math.inf(f64)))); try expect(math.isNegativeInf(trunc(-math.inf(f64)))); try expect(math.isNan(trunc(math.nan(f64)))); } test "trunc128.special" { try expect(truncq(0.0) == 0.0); try expect(truncq(-0.0) == -0.0); try expect(math.isPositiveInf(truncq(math.inf(f128)))); try expect(math.isNegativeInf(truncq(-math.inf(f128)))); try expect(math.isNan(truncq(math.nan(f128)))); }
lib/compiler_rt/trunc.zig
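A short walk-through of the mask trick in truncf above, using the same two-argument @bitCast style as the file: for 1.5 the biased exponent is 0x7F, so e = 9 and maxInt(u32) >> 9 = 0x007FFFFF covers the whole 23-bit fraction; clearing those bits leaves 0x3F800000, which is exactly 1.0.

const std = @import("std");

test "truncf mask walk-through" {
    const u = @bitCast(u32, @as(f32, 1.5));
    try std.testing.expect(u == 0x3FC00000); // sign 0, exponent 0x7F, fraction 0x400000
    const m = @as(u32, std.math.maxInt(u32)) >> 9; // fraction mask for e = 9
    try std.testing.expect(@bitCast(f32, u & ~m) == 1.0);
}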
const std = @import("std"); const mem = std.mem; const AesBlock = std.crypto.core.aes.Block; pub const Ponteil = struct { const State = [8]AesBlock; pub const block_length: usize = 32; pub const key_length: usize = 32; pub const digest_length = 32; s: State, ctx_segments: u64 = 0, m_segments: u64 = 0, const rounds: usize = 12; inline fn aesround(in: AesBlock, rk: AesBlock) AesBlock { return in.encrypt(rk); } fn update(self: *Ponteil, m0: AesBlock, m1: AesBlock) void { const s = self.s; self.s = State{ aesround(s[7], s[0].xorBlocks(m0)), aesround(s[0], s[1]), aesround(s[1], s[2]), aesround(s[2], s[3]), aesround(s[3], s[4].xorBlocks(m1)), aesround(s[4], s[5]), aesround(s[5], s[6]), aesround(s[6], s[7]), }; } inline fn absorb_block(self: *Ponteil, xi: *const [32]u8) void { const t0 = AesBlock.fromBytes(xi[0..16]); const t1 = AesBlock.fromBytes(xi[16..32]); self.update(t0, t1); } fn init(k: [32]u8) Ponteil { const c0 = AesBlock.fromBytes(&[16]u8{ 0x0, 0x1, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d, 0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62 }); const c1 = AesBlock.fromBytes(&[16]u8{ 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1, 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd }); const zero = AesBlock.fromBytes(&[_]u8{0} ** 16); const k0 = AesBlock.fromBytes(k[0..16]); const k1 = AesBlock.fromBytes(k[16..32]); var self = Ponteil{ .s = State{ zero, k1, k0.xorBlocks(c1), k0.xorBlocks(c0), zero, k0, k1.xorBlocks(c0), k1.xorBlocks(c1), } }; var i: usize = 0; while (i < rounds) : (i += 1) { self.update(c0, c1); } return self; } fn absorb(self: *Ponteil, x: []const u8, up: u8) void { var i: usize = 0; while (i + 32 <= x.len) : (i += 32) { self.absorb_block(x[i..][0..32]); } if (x.len % 32 != 0) { var pad = [_]u8{0} ** 32; mem.copy(u8, pad[0 .. x.len % 32], x[i..]); self.absorb_block(&pad); } var len = [_]u8{0x00} ** 32; mem.writeIntLittle(u64, len[0..8], @intCast(u64, x.len) * 8); len[31] ^= up; self.absorb_block(&len); } pub fn push_context(self: *Ponteil, ctx: []const u8) void { self.absorb(ctx, 0x80); self.ctx_segments += 1; } pub fn push(self: *Ponteil, m: []const u8) void { self.absorb(m, 0x00); self.m_segments += 1; } pub fn finalize(self: *Ponteil, out: []u8) void { var b: [16]u8 = undefined; mem.writeIntLittle(u64, b[0..8], @intCast(u64, self.ctx_segments) * 8); mem.writeIntLittle(u64, b[8..16], @intCast(u64, self.m_segments) * 8); const t = self.s[2].xorBlocks(AesBlock.fromBytes(&b)); var i: usize = 0; while (i < rounds - 1) : (i += 1) { self.update(t, t); } const s = &self.s; i = 0; while (i + 32 <= out.len) : (i += 32) { self.update(t, t); mem.copy(u8, out[i..][0..16], &s[1].xorBlocks(s[6]).xorBlocks(s[2].andBlocks(s[3])).toBytes()); mem.copy(u8, out[i..][16..32], &s[2].xorBlocks(s[5]).xorBlocks(s[6].andBlocks(s[7])).toBytes()); } if (out.len % 32 != 0) { self.update(t, t); var pad = [_]u8{0} ** 32; mem.copy(u8, pad[0..16], &s[1].xorBlocks(s[6]).xorBlocks(s[2].andBlocks(s[3])).toBytes()); mem.copy(u8, pad[16..32], &s[2].xorBlocks(s[5]).xorBlocks(s[6].andBlocks(s[7])).toBytes()); mem.copy(u8, pad[0 .. 
out.len % 32], out[i..]); } } pub fn hash(ctx: ?[]const u8, m: []const u8) [32]u8 { const k = [_]u8{0} ** 32; var ponteil = Ponteil.init(k); if (ctx) |c| { ponteil.push_context(c); } ponteil.push(m); var out: [32]u8 = undefined; ponteil.finalize(&out); return out; } }; const testing = std.testing; const fmt = std.fmt; test "hash" { const len = 1_000_000 - 1; const alloc = testing.allocator; var m = try alloc.alloc(u8, len); defer alloc.free(m); mem.set(u8, m, 0); var h = Ponteil.hash(null, m); var expected_h: [32]u8 = undefined; _ = try fmt.hexToBytes(&expected_h, "527ab52703ed16f67920bee03e36e3255869a9ade88b5af3f5b459d21f7e4cc3"); try testing.expectEqualSlices(u8, &h, &expected_h); }
zig/src/main.zig
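A small usage sketch for the public API above, assuming it sits in the same file (or imports Ponteil); the context string is illustrative. Identical (context, message) pairs always produce the same 32-byte digest:

test "hash usage sketch" {
    const a = Ponteil.hash("com.example.checksum.v1", "some message bytes");
    const b = Ponteil.hash("com.example.checksum.v1", "some message bytes");
    try testing.expectEqualSlices(u8, &a, &b);
}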
const std = @import("std"); const wlr = @import("wlroots"); const server = &@import("../main.zig").server; const Direction = @import("../command.zig").Direction; const PhysicalDirectionDirection = @import("../command.zig").PhysicalDirection; const Error = @import("../command.zig").Error; const Output = @import("../Output.zig"); const Seat = @import("../Seat.zig"); pub fn focusOutput( allocator: *std.mem.Allocator, seat: *Seat, args: []const [:0]const u8, out: *?[]const u8, ) Error!void { if (args.len < 2) return Error.NotEnoughArguments; if (args.len > 2) return Error.TooManyArguments; // If the noop output is focused, there are no other outputs to switch to if (seat.focused_output == &server.root.noop_output) { std.debug.assert(server.root.outputs.len == 0); return; } seat.focusOutput((try getOutput(seat, args[1])) orelse return); seat.focus(null); server.root.startTransaction(); } pub fn sendToOutput( allocator: *std.mem.Allocator, seat: *Seat, args: []const [:0]const u8, out: *?[]const u8, ) Error!void { if (args.len < 2) return Error.NotEnoughArguments; if (args.len > 2) return Error.TooManyArguments; // If the noop output is focused, there is nowhere to send the view if (seat.focused_output == &server.root.noop_output) { std.debug.assert(server.root.outputs.len == 0); return; } if (seat.focused == .view) { const destination_output = (try getOutput(seat, args[1])) orelse return; seat.focused.view.sendToOutput(destination_output); // Handle the change and focus whatever's next in the focus stack seat.focus(null); seat.focused_output.arrangeViews(); destination_output.arrangeViews(); server.root.startTransaction(); } } /// Find an output adjacent to the currently focused based on either logical or /// spacial direction fn getOutput(seat: *Seat, str: []const u8) !?*Output { if (std.meta.stringToEnum(Direction, str)) |direction| { // Logical direction // Return the next/prev output in the list if there is one, else wrap const focused_node = @fieldParentPtr(std.TailQueue(Output).Node, "data", seat.focused_output); return switch (direction) { .next => if (focused_node.next) |node| &node.data else &server.root.outputs.first.?.data, .previous => if (focused_node.prev) |node| &node.data else &server.root.outputs.last.?.data, }; } else if (std.meta.stringToEnum(wlr.OutputLayout.Direction, str)) |direction| { // Spacial direction const focus_box = server.root.output_layout.getBox(seat.focused_output.wlr_output) orelse return null; const wlr_output = server.root.output_layout.adjacentOutput( direction, seat.focused_output.wlr_output, @intToFloat(f64, focus_box.x + @divFloor(focus_box.width, 2)), @intToFloat(f64, focus_box.y + @divFloor(focus_box.height, 2)), ) orelse return null; return @intToPtr(*Output, wlr_output.data); } else { return Error.InvalidDirection; } }
source/river-0.1.0/river/command/output.zig
const std = @import("std"); const os = @import("windows.zig"); const dxgi = @import("dxgi.zig"); const dcommon = @import("dcommon.zig"); pub const RESOURCE_BARRIER_ALL_SUBRESOURCES = 0xffffffff; pub const GPU_VIRTUAL_ADDRESS = u64; pub const HEAP_TYPE = extern enum { DEFAULT = 1, UPLOAD = 2, READBACK = 3, CUSTOM = 4, }; pub const CPU_PAGE_PROPERTY = extern enum { UNKNOWN = 0, NOT_AVAILABLE = 1, WRITE_COMBINE = 2, WRITE_BACK = 3, }; pub const MEMORY_POOL = extern enum { UNKNOWN = 0, L0 = 1, L1 = 2, }; pub const HEAP_PROPERTIES = extern struct { Type: HEAP_TYPE, CPUPageProperty: CPU_PAGE_PROPERTY = .UNKNOWN, MemoryPoolPreference: MEMORY_POOL = .UNKNOWN, CreationNodeMask: u32 = 0, VisibleNodeMask: u32 = 0, }; pub const HEAP_FLAGS = packed struct { SHARED: bool = false, DENY_BUFFERS: bool = false, ALLOW_DISPLAY: bool = false, SHARED_CROSS_ADAPTER: bool = false, DENY_RT_DS_TEXTURES: bool = false, DENY_NON_RT_DS_TEXTURES: bool = false, HARDWARE_PROTECTED: bool = false, padding: u25 = 0, }; pub const HEAP_DESC = extern struct { SizeInBytes: u64, Properties: HEAP_PROPERTIES, Alignment: u64, Flags: HEAP_FLAGS, }; pub const RANGE = extern struct { Begin: u64, End: u64, }; pub const RESOURCE_DIMENSION = extern enum { UNKNOWN = 0, BUFFER = 1, TEXTURE1D = 2, TEXTURE2D = 3, TEXTURE3D = 4, }; pub const TEXTURE_LAYOUT = extern enum { UNKNOWN = 0, ROW_MAJOR = 1, _64KB_UNDEFINED_SWIZZLE = 2, _64KB_STANDARD_SWIZZLE = 3, }; pub const RESOURCE_FLAGS = packed struct { ALLOW_RENDER_TARGET: bool = false, ALLOW_DEPTH_STENCIL: bool = false, ALLOW_UNORDERED_ACCESS: bool = false, DENY_SHADER_RESOURCE: bool = false, ALLOW_CROSS_ADAPTER: bool = false, ALLOW_SIMULTANEOUS_ACCESS: bool = false, padding: u26 = 0, }; pub const RESOURCE_DESC = extern struct { Dimension: RESOURCE_DIMENSION, Alignment: u64, Width: u64, Height: u32, DepthOrArraySize: u16, MipLevels: u16, Format: dxgi.FORMAT, SampleDesc: dxgi.SAMPLE_DESC, Layout: TEXTURE_LAYOUT, Flags: RESOURCE_FLAGS, pub fn buffer(width: u64) RESOURCE_DESC { return RESOURCE_DESC{ .Dimension = .BUFFER, .Alignment = 0, .Width = width, .Height = 1, .DepthOrArraySize = 1, .MipLevels = 1, .Format = .UNKNOWN, .SampleDesc = .{ .Count = 1, .Quality = 0 }, .Layout = .ROW_MAJOR, .Flags = .{}, }; } pub fn tex2d(format: dxgi.FORMAT, width: u64, height: u32) RESOURCE_DESC { return RESOURCE_DESC{ .Dimension = .TEXTURE2D, .Alignment = 0, .Width = width, .Height = height, .DepthOrArraySize = 1, .MipLevels = 1, .Format = format, .SampleDesc = .{ .Count = 1, .Quality = 0 }, .Layout = .UNKNOWN, .Flags = .{}, }; } }; pub const BOX = extern struct { left: u32, top: u32, front: u32, right: u32, bottom: u32, back: u32, }; pub const DESCRIPTOR_HEAP_TYPE = extern enum { CBV_SRV_UAV = 0, SAMPLER = 1, RTV = 2, DSV = 3, }; pub const DESCRIPTOR_HEAP_FLAGS = extern enum { NONE = 0, SHADER_VISIBLE = 1, }; pub const DESCRIPTOR_HEAP_DESC = extern struct { Type: DESCRIPTOR_HEAP_TYPE, NumDescriptors: u32, Flags: DESCRIPTOR_HEAP_FLAGS, NodeMask: u32, }; pub const CPU_DESCRIPTOR_HANDLE = extern struct { ptr: u64, }; pub const GPU_DESCRIPTOR_HANDLE = extern struct { ptr: u64, }; pub const RECT = os.RECT; pub const DISCARD_REGION = extern struct { NumRects: u32, pRects: *const RECT, FirstSubresource: u32, NumSubresources: u32, }; pub const COMMAND_LIST_TYPE = extern enum { DIRECT = 0, BUNDLE = 1, COMPUTE = 2, COPY = 3, }; pub const SUBRESOURCE_FOOTPRINT = extern struct { Format: dxgi.FORMAT, Width: u32, Height: u32, Depth: u32, RowPitch: u32, }; pub const COMMAND_QUEUE_FLAGS = extern enum { NONE = 0, 
DISABLE_GPU_TIMEOUT = 0x1, }; pub const COMMAND_QUEUE_PRIORITY = extern enum { NORMAL = 0, HIGH = 100, }; pub const COMMAND_QUEUE_DESC = extern struct { Type: COMMAND_LIST_TYPE, Priority: i32, Flags: COMMAND_QUEUE_FLAGS, NodeMask: u32, }; pub const TILED_RESOURCE_COORDINATE = extern struct { X: u32, Y: u32, Z: u32, Subresource: u32, }; pub const TILE_REGION_SIZE = extern struct { NumTiles: u32, UseBox: os.BOOL, Width: u32, Height: u16, Depth: u16, }; pub const TILE_RANGE_FLAGS = extern enum { NONE = 0, NULL = 1, SKIP = 2, REUSE_SINGLE_TILE = 4, }; pub const SUBRESOURCE_TILING = extern struct { WidthInTiles: u32, HeightInTiles: u16, DepthInTiles: u16, StartTileIndexInOverallResource: u32, }; pub const TILE_SHAPE = extern struct { WidthInTexels: u32, HeightInTexels: u32, DepthInTexels: u32, }; pub const TILE_MAPPING_FLAGS = extern enum { NONE = 0, NO_HAZARD = 0x1, }; pub const TILE_COPY_FLAGS = extern enum { NONE = 0, NO_HAZARD = 0x1, LINEAR_BUFFER_TO_SWIZZLED_TILED_RESOURCE = 0x2, SWIZZLED_TILED_RESOURCE_TO_LINEAR_BUFFER = 0x4, }; pub const SHADER_BYTECODE = extern struct { pShaderBytecode: ?*const c_void = null, BytecodeLength: u64 = 0, }; pub const SO_DECLARATION_ENTRY = extern struct { Stream: u32, SemanticName: os.LPCSTR, SemanticIndex: u32, StartComponent: u8, ComponentCount: u8, OutputSlot: u8, }; pub const STREAM_OUTPUT_DESC = extern struct { pSODeclaration: ?[*]const SO_DECLARATION_ENTRY = null, NumEntries: u32 = 0, pBufferStrides: ?[*]const u32 = null, NumStrides: u32 = 0, RasterizedStream: u32 = 0, }; pub const BLEND = extern enum { ZERO = 1, ONE = 2, SRC_COLOR = 3, INV_SRC_COLOR = 4, SRC_ALPHA = 5, INV_SRC_ALPHA = 6, DEST_ALPHA = 7, INV_DEST_ALPHA = 8, DEST_COLOR = 9, INV_DEST_COLOR = 10, SRC_ALPHA_SAT = 11, BLEND_FACTOR = 14, INV_BLEND_FACTOR = 15, SRC1_COLOR = 16, INV_SRC1_COLOR = 17, SRC1_ALPHA = 18, INV_SRC1_ALPHA = 19, }; pub const BLEND_OP = extern enum { ADD = 1, SUBTRACT = 2, REV_SUBTRACT = 3, MIN = 4, MAX = 5, }; pub const COLOR_WRITE_ENABLE = extern enum { RED = 1, GREEN = 2, BLUE = 4, ALPHA = 8, ALL = 1 | 2 | 4 | 8, // TODO: Fix this. 
}; pub const LOGIC_OP = extern enum { CLEAR = 0, SET = 1, COPY = 2, COPY_INVERTED = 3, NOOP = 4, INVERT = 5, AND = 6, NAND = 7, OR = 8, NOR = 9, XOR = 10, EQUIV = 11, AND_REVERSE = 12, AND_INVERTED = 13, OR_REVERSE = 14, OR_INVERTED = 15, }; pub const MESSAGE_CATEGORY = extern enum { APPLICATION_DEFINED = 0, MISCELLANEOUS = 1, INITIALIZATION = 2, CLEANUP = 3, COMPILATION = 4, STATE_CREATION = 5, STATE_SETTING = 6, STATE_GETTING = 7, RESOURCE_MANIPULATION = 8, EXECUTION = 9, SHADER = 10, }; pub const MESSAGE_SEVERITY = extern enum { CORRUPTION = 0, ERROR = 1, WARNING = 2, INFO = 3, MESSAGE = 4, }; pub const MESSAGE_ID = extern enum { CLEARRENDERTARGETVIEW_MISMATCHINGCLEARVALUE = 820, }; pub const INFO_QUEUE_FILTER_DESC = extern struct { NumCategories: u32, pCategoryList: ?[*]MESSAGE_CATEGORY, NumSeverities: u32, pSeverityList: ?[*]MESSAGE_SEVERITY, NumIDs: u32, pIDList: ?[*]MESSAGE_ID, }; pub const INFO_QUEUE_FILTER = extern struct { AllowList: INFO_QUEUE_FILTER_DESC, DenyList: INFO_QUEUE_FILTER_DESC, }; pub const RENDER_TARGET_BLEND_DESC = extern struct { BlendEnable: os.BOOL = os.FALSE, LogicOpEnable: os.BOOL = os.FALSE, SrcBlend: BLEND = .ONE, DestBlend: BLEND = .ZERO, BlendOp: BLEND_OP = .ADD, SrcBlendAlpha: BLEND = .ONE, DestBlendAlpha: BLEND = .ZERO, BlendOpAlpha: BLEND_OP = .ADD, LogicOp: LOGIC_OP = .NOOP, RenderTargetWriteMask: u8 = 0x0f, }; pub const BLEND_DESC = extern struct { AlphaToCoverageEnable: os.BOOL = os.FALSE, IndependentBlendEnable: os.BOOL = os.FALSE, RenderTarget: [8]RENDER_TARGET_BLEND_DESC = [_]RENDER_TARGET_BLEND_DESC{.{}} ** 8, }; pub const RASTERIZER_DESC = extern struct { FillMode: FILL_MODE = .SOLID, CullMode: CULL_MODE = .BACK, FrontCounterClockwise: os.BOOL = os.FALSE, DepthBias: i32 = 0, DepthBiasClamp: f32 = 0.0, SlopeScaledDepthBias: f32 = 0.0, DepthClipEnable: os.BOOL = os.TRUE, MultisampleEnable: os.BOOL = os.FALSE, AntialiasedLineEnable: os.BOOL = os.FALSE, ForcedSampleCount: u32 = 0, ConservativeRaster: CONSERVATIVE_RASTERIZATION_MODE = .OFF, }; pub const FILL_MODE = extern enum { WIREFRAME = 2, SOLID = 3, }; pub const CONSERVATIVE_RASTERIZATION_MODE = extern enum { OFF = 0, ON = 1, }; pub const COMPARISON_FUNC = extern enum { NEVER = 1, LESS = 2, EQUAL = 3, LESS_EQUAL = 4, GREATER = 5, NOT_EQUAL = 6, GREATER_EQUAL = 7, ALWAYS = 8, }; pub const DEPTH_WRITE_MASK = extern enum { ZERO = 0, ALL = 1, }; pub const STENCIL_OP = extern enum { KEEP = 1, ZERO = 2, REPLACE = 3, INCR_SAT = 4, DECR_SAT = 5, INVERT = 6, INCR = 7, DECR = 8, }; pub const DEPTH_STENCILOP_DESC = extern struct { StencilFailOp: STENCIL_OP = .KEEP, StencilDepthFailOp: STENCIL_OP = .KEEP, StencilPassOp: STENCIL_OP = .KEEP, StencilFunc: COMPARISON_FUNC = .ALWAYS, }; pub const DEPTH_STENCIL_DESC = extern struct { DepthEnable: os.BOOL = os.TRUE, DepthWriteMask: DEPTH_WRITE_MASK = .ALL, DepthFunc: COMPARISON_FUNC = .LESS, StencilEnable: os.BOOL = os.FALSE, StencilReadMask: u8 = 0xff, StencilWriteMask: u8 = 0xff, FrontFace: DEPTH_STENCILOP_DESC = .{}, BackFace: DEPTH_STENCILOP_DESC = .{}, }; pub const INPUT_LAYOUT_DESC = extern struct { pInputElementDescs: ?[*]const INPUT_ELEMENT_DESC = null, NumElements: u32 = 0, }; pub const INPUT_CLASSIFICATION = extern enum { PER_VERTEX_DATA = 0, PER_INSTANCE_DATA = 1, }; pub const INPUT_ELEMENT_DESC = extern struct { SemanticName: os.LPCSTR, SemanticIndex: u32, Format: dxgi.FORMAT, InputSlot: u32, AlignedByteOffset: u32, InputSlotClass: INPUT_CLASSIFICATION, InstanceDataStepRate: u32, }; pub const INDEX_BUFFER_STRIP_CUT_VALUE = extern enum { DISABLED = 0, 
_0xFFFF = 1, _0xFFFFFFFF = 2, }; pub const VERTEX_BUFFER_VIEW = extern struct { BufferLocation: GPU_VIRTUAL_ADDRESS, SizeInBytes: u32, StrideInBytes: u32, }; pub const INDEX_BUFFER_VIEW = extern struct { BufferLocation: GPU_VIRTUAL_ADDRESS, SizeInBytes: u32, Format: dxgi.FORMAT, }; pub const STREAM_OUTPUT_BUFFER_VIEW = extern struct { BufferLocation: GPU_VIRTUAL_ADDRESS, SizeInBytes: u64, BufferFilledSizeLocation: GPU_VIRTUAL_ADDRESS, }; pub const CACHED_PIPELINE_STATE = extern struct { pCachedBlob: ?*const c_void = null, CachedBlobSizeInBytes: u64 = 0, }; pub const CLEAR_FLAGS = packed struct { DEPTH: bool = false, STENCIL: bool = false, padding: u30 = 0, }; pub const PIPELINE_STATE_FLAGS = extern enum { NONE = 0, TOOL_DEBUG = 0x1, }; pub const PRIMITIVE_TOPOLOGY = extern enum { UNDEFINED = 0, POINTLIST = 1, LINELIST = 2, LINESTRIP = 3, TRIANGLELIST = 4, TRIANGLESTRIP = 5, LINELIST_ADJ = 10, LINESTRIP_ADJ = 11, TRIANGLELIST_ADJ = 12, TRIANGLESTRIP_ADJ = 13, _1_CONTROL_POINT_PATCHLIST = 33, _2_CONTROL_POINT_PATCHLIST = 34, _3_CONTROL_POINT_PATCHLIST = 35, _4_CONTROL_POINT_PATCHLIST = 36, _5_CONTROL_POINT_PATCHLIST = 37, _6_CONTROL_POINT_PATCHLIST = 38, _7_CONTROL_POINT_PATCHLIST = 39, _8_CONTROL_POINT_PATCHLIST = 40, _9_CONTROL_POINT_PATCHLIST = 41, _10_CONTROL_POINT_PATCHLIST = 42, _11_CONTROL_POINT_PATCHLIST = 43, _12_CONTROL_POINT_PATCHLIST = 44, _13_CONTROL_POINT_PATCHLIST = 45, _14_CONTROL_POINT_PATCHLIST = 46, _15_CONTROL_POINT_PATCHLIST = 47, _16_CONTROL_POINT_PATCHLIST = 48, _17_CONTROL_POINT_PATCHLIST = 49, _18_CONTROL_POINT_PATCHLIST = 50, _19_CONTROL_POINT_PATCHLIST = 51, _20_CONTROL_POINT_PATCHLIST = 52, _21_CONTROL_POINT_PATCHLIST = 53, _22_CONTROL_POINT_PATCHLIST = 54, _23_CONTROL_POINT_PATCHLIST = 55, _24_CONTROL_POINT_PATCHLIST = 56, _25_CONTROL_POINT_PATCHLIST = 57, _26_CONTROL_POINT_PATCHLIST = 58, _27_CONTROL_POINT_PATCHLIST = 59, _28_CONTROL_POINT_PATCHLIST = 60, _29_CONTROL_POINT_PATCHLIST = 61, _30_CONTROL_POINT_PATCHLIST = 62, _31_CONTROL_POINT_PATCHLIST = 63, _32_CONTROL_POINT_PATCHLIST = 64, }; pub const SHADER_COMPONENT_MAPPING = extern enum { FROM_MEMORY_COMPONENT_0 = 0, FROM_MEMORY_COMPONENT_1 = 1, FROM_MEMORY_COMPONENT_2 = 2, FROM_MEMORY_COMPONENT_3 = 3, FORCE_VALUE_0 = 4, FORCE_VALUE_1 = 5, }; pub inline fn ENCODE_SHADER_4_COMPONENT_MAPPING(src0: u32, src1: u32, src2: u32, src3: u32) u32 { return (src0 & 0x7) | ((src1 & 0x7) << 3) | ((src2 & 0x7) << (3 * 2)) | ((src3 & 0x7) << (3 * 3)) | (1 << (3 * 4)); } pub const DEFAULT_SHADER_4_COMPONENT_MAPPING = ENCODE_SHADER_4_COMPONENT_MAPPING(0, 1, 2, 3); pub const BUFFER_SRV_FLAGS = extern enum { NONE = 0, RAW = 0x1, }; pub const BUFFER_SRV = extern struct { FirstElement: u64, NumElements: u32, StructureByteStride: u32, Flags: BUFFER_SRV_FLAGS = .NONE, }; pub const TEX1D_SRV = extern struct { MostDetailedMip: u32, MipLevels: u32, ResourceMinLODClamp: f32, }; pub const TEX1D_ARRAY_SRV = extern struct { MostDetailedMip: u32, MipLevels: u32, FirstArraySlice: u32, ArraySize: u32, ResourceMinLODClamp: f32, }; pub const TEX2D_SRV = extern struct { MostDetailedMip: u32, MipLevels: u32, PlaneSlice: u32, ResourceMinLODClamp: f32, }; pub const TEX2D_ARRAY_SRV = extern struct { MostDetailedMip: u32, MipLevels: u32, FirstArraySlice: u32, ArraySize: u32, PlaneSlice: u32, ResourceMinLODClamp: f32, }; pub const TEX3D_SRV = extern struct { MostDetailedMip: u32, MipLevels: u32, ResourceMinLODClamp: f32, }; pub const TEXCUBE_SRV = extern struct { MostDetailedMip: u32, MipLevels: u32, ResourceMinLODClamp: f32, }; pub const 
TEXCUBE_ARRAY_SRV = extern struct { MostDetailedMip: u32, MipLevels: u32, First2DArrayFace: u32, NumCubes: u32, ResourceMinLODClamp: f32, }; pub const TEX2DMS_SRV = extern struct { UnusedField_NothingToDefine: u32, }; pub const TEX2DMS_ARRAY_SRV = extern struct { FirstArraySlice: u32, ArraySize: u32, }; pub const SRV_DIMENSION = extern enum { UNKNOWN = 0, BUFFER = 1, TEXTURE1D = 2, TEXTURE1DARRAY = 3, TEXTURE2D = 4, TEXTURE2DARRAY = 5, TEXTURE2DMS = 6, TEXTURE2DMSARRAY = 7, TEXTURE3D = 8, TEXTURECUBE = 9, TEXTURECUBEARRAY = 10, }; pub const SHADER_RESOURCE_VIEW_DESC = extern struct { Format: dxgi.FORMAT = dxgi.FORMAT.UNKNOWN, ViewDimension: SRV_DIMENSION, Shader4ComponentMapping: u32 = DEFAULT_SHADER_4_COMPONENT_MAPPING, u: extern union { Buffer: BUFFER_SRV, Texture1D: TEX1D_SRV, Texture1DArray: TEX1D_ARRAY_SRV, Texture2D: TEX2D_SRV, Texture2DArray: TEX2D_ARRAY_SRV, Texture2DMS: TEX2DMS_SRV, Texture2DMSArray: TEX2DMS_ARRAY_SRV, Texture3D: TEX3D_SRV, TextureCube: TEXCUBE_SRV, TextureCubeArray: TEXCUBE_ARRAY_SRV, }, pub fn typedBuffer( format: dxgi.FORMAT, first_element: u64, num_elements: u32, ) SHADER_RESOURCE_VIEW_DESC { return SHADER_RESOURCE_VIEW_DESC{ .Format = format, .ViewDimension = .BUFFER, .u = .{ .Buffer = BUFFER_SRV{ .FirstElement = first_element, .NumElements = num_elements, .StructureByteStride = 0, }, }, }; } pub fn structuredBuffer( first_element: u64, num_elements: u32, stride: u32, ) SHADER_RESOURCE_VIEW_DESC { return SHADER_RESOURCE_VIEW_DESC{ .ViewDimension = .BUFFER, .u = .{ .Buffer = BUFFER_SRV{ .FirstElement = first_element, .NumElements = num_elements, .StructureByteStride = stride, }, }, }; } }; pub const FILTER = extern enum { MIN_MAG_MIP_POINT = 0, MIN_MAG_POINT_MIP_LINEAR = 0x1, MIN_POINT_MAG_LINEAR_MIP_POINT = 0x4, MIN_POINT_MAG_MIP_LINEAR = 0x5, MIN_LINEAR_MAG_MIP_POINT = 0x10, MIN_LINEAR_MAG_POINT_MIP_LINEAR = 0x11, MIN_MAG_LINEAR_MIP_POINT = 0x14, MIN_MAG_MIP_LINEAR = 0x15, ANISOTROPIC = 0x55, COMPARISON_MIN_MAG_MIP_POINT = 0x80, COMPARISON_MIN_MAG_POINT_MIP_LINEAR = 0x81, COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT = 0x84, COMPARISON_MIN_POINT_MAG_MIP_LINEAR = 0x85, COMPARISON_MIN_LINEAR_MAG_MIP_POINT = 0x90, COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR = 0x91, COMPARISON_MIN_MAG_LINEAR_MIP_POINT = 0x94, COMPARISON_MIN_MAG_MIP_LINEAR = 0x95, COMPARISON_ANISOTROPIC = 0xd5, MINIMUM_MIN_MAG_MIP_POINT = 0x100, MINIMUM_MIN_MAG_POINT_MIP_LINEAR = 0x101, MINIMUM_MIN_POINT_MAG_LINEAR_MIP_POINT = 0x104, MINIMUM_MIN_POINT_MAG_MIP_LINEAR = 0x105, MINIMUM_MIN_LINEAR_MAG_MIP_POINT = 0x110, MINIMUM_MIN_LINEAR_MAG_POINT_MIP_LINEAR = 0x111, MINIMUM_MIN_MAG_LINEAR_MIP_POINT = 0x114, MINIMUM_MIN_MAG_MIP_LINEAR = 0x115, MINIMUM_ANISOTROPIC = 0x155, MAXIMUM_MIN_MAG_MIP_POINT = 0x180, MAXIMUM_MIN_MAG_POINT_MIP_LINEAR = 0x181, MAXIMUM_MIN_POINT_MAG_LINEAR_MIP_POINT = 0x184, MAXIMUM_MIN_POINT_MAG_MIP_LINEAR = 0x185, MAXIMUM_MIN_LINEAR_MAG_MIP_POINT = 0x190, MAXIMUM_MIN_LINEAR_MAG_POINT_MIP_LINEAR = 0x191, MAXIMUM_MIN_MAG_LINEAR_MIP_POINT = 0x194, MAXIMUM_MIN_MAG_MIP_LINEAR = 0x195, MAXIMUM_ANISOTROPIC = 0x1d5, }; pub const FILTER_TYPE = extern enum { POINT = 0, LINEAR = 1, }; pub const FILTER_REDUCTION_TYPE = extern enum { STANDARD = 0, COMPARISON = 1, MINIMUM = 2, MAXIMUM = 3, }; pub const TEXTURE_ADDRESS_MODE = extern enum { WRAP = 1, MIRROR = 2, CLAMP = 3, BORDER = 4, MIRROR_ONCE = 5, }; pub const SAMPLER_DESC = extern struct { Filter: FILTER, AddressU: TEXTURE_ADDRESS_MODE, AddressV: TEXTURE_ADDRESS_MODE, AddressW: TEXTURE_ADDRESS_MODE, MipLODBias: f32, MaxAnisotropy: u32, 
ComparisonFunc: COMPARISON_FUNC, BorderColor: [4]f32, MinLOD: f32, MaxLOD: f32, }; pub const CONSTANT_BUFFER_VIEW_DESC = extern struct { BufferLocation: GPU_VIRTUAL_ADDRESS, SizeInBytes: u32, }; pub const BUFFER_UAV_FLAGS = extern enum { NONE = 0, RAW = 0x1, }; pub const BUFFER_UAV = extern struct { FirstElement: u64, NumElements: u32, StructureByteStride: u32, CounterOffsetInBytes: u64, Flags: BUFFER_UAV_FLAGS, }; pub const TEX1D_UAV = extern struct { MipSlice: u32, }; pub const TEX1D_ARRAY_UAV = extern struct { MipSlice: u32, FirstArraySlice: u32, ArraySize: u32, }; pub const TEX2D_UAV = extern struct { MipSlice: u32, PlaneSlice: u32, }; pub const TEX2D_ARRAY_UAV = extern struct { MipSlice: u32, FirstArraySlice: u32, ArraySize: u32, PlaneSlice: u32, }; pub const TEX3D_UAV = extern struct { MipSlice: u32, FirstWSlice: u32, WSize: u32, }; pub const UAV_DIMENSION = extern enum { UNKNOWN = 0, BUFFER = 1, TEXTURE1D = 2, TEXTURE1DARRAY = 3, TEXTURE2D = 4, TEXTURE2DARRAY = 5, TEXTURE3D = 8, }; pub const UNORDERED_ACCESS_VIEW_DESC = extern struct { Format: dxgi.FORMAT, ViewDimension: UAV_DIMENSION, u: extern union { Buffer: BUFFER_UAV, Texture1D: TEX1D_UAV, Texture1DArray: TEX1D_ARRAY_UAV, Texture2D: TEX2D_UAV, Texture2DArray: TEX2D_ARRAY_UAV, Texture3D: TEX3D_UAV, }, }; pub const BUFFER_RTV = extern struct { FirstElement: u64, NumElements: u32, }; pub const TEX1D_RTV = extern struct { MipSlice: u32, }; pub const TEX1D_ARRAY_RTV = extern struct { MipSlice: u32, FirstArraySlice: u32, ArraySize: u32, }; pub const TEX2D_RTV = extern struct { MipSlice: u32, PlaneSlice: u32, }; pub const TEX2DMS_RTV = extern struct { UnusedField_NothingToDefine: u32, }; pub const TEX2D_ARRAY_RTV = extern struct { MipSlice: u32, FirstArraySlice: u32, ArraySize: u32, PlaneSlice: u32, }; pub const TEX2DMS_ARRAY_RTV = extern struct { FirstArraySlice: u32, ArraySize: u32, }; pub const TEX3D_RTV = extern struct { MipSlice: u32, FirstWSlice: u32, WSize: u32, }; pub const RTV_DIMENSION = extern enum { UNKNOWN = 0, BUFFER = 1, TEXTURE1D = 2, TEXTURE1DARRAY = 3, TEXTURE2D = 4, TEXTURE2DARRAY = 5, TEXTURE2DMS = 6, TEXTURE2DMSARRAY = 7, TEXTURE3D = 8, }; pub const RENDER_TARGET_VIEW_DESC = extern struct { Format: dxgi.FORMAT, ViewDimension: RTV_DIMENSION, u: extern union { Buffer: BUFFER_RTV, Texture1D: TEX1D_RTV, Texture1DArray: TEX1D_ARRAY_RTV, Texture2D: TEX2D_RTV, Texture2DArray: TEX2D_ARRAY_RTV, Texture2DMS: TEX2DMS_RTV, Texture2DMSArray: TEX2DMS_ARRAY_RTV, Texture3D: TEX3D_RTV, }, }; pub const TEX1D_DSV = extern struct { MipSlice: u32, }; pub const TEX1D_ARRAY_DSV = extern struct { MipSlice: u32, FirstArraySlice: u32, ArraySize: u32, }; pub const TEX2D_DSV = extern struct { MipSlice: u32, }; pub const TEX2D_ARRAY_DSV = extern struct { MipSlice: u32, FirstArraySlice: u32, ArraySize: u32, }; pub const TEX2DMS_DSV = extern struct { UnusedField_NothingToDefine: u32, }; pub const TEX2DMS_ARRAY_DSV = extern struct { FirstArraySlice: u32, ArraySize: u32, }; pub const DSV_FLAGS = extern enum { NONE = 0, READ_ONLY_DEPTH = 0x1, READ_ONLY_STENCIL = 0x2, }; pub const DSV_DIMENSION = extern enum { UNKNOWN = 0, TEXTURE1D = 1, TEXTURE1DARRAY = 2, TEXTURE2D = 3, TEXTURE2DARRAY = 4, TEXTURE2DMS = 5, TEXTURE2DMSARRAY = 6, }; pub const DEPTH_STENCIL_VIEW_DESC = extern struct { Format: dxgi.FORMAT, ViewDimension: DSV_DIMENSION, Flags: DSV_FLAGS, u: extern union { Texture1D: TEX1D_DSV, Texture1DArray: TEX1D_ARRAY_DSV, Texture2D: TEX2D_DSV, Texture2DArray: TEX2D_ARRAY_DSV, Texture2DMS: TEX2DMS_DSV, Texture2DMSArray: TEX2DMS_ARRAY_DSV, }, }; pub 
const DEPTH_STENCIL_VALUE = extern struct { Depth: f32, Stencil: u8, }; pub const CLEAR_VALUE = extern struct { Format: dxgi.FORMAT, u: extern union { Color: [4]f32, DepthStencil: DEPTH_STENCIL_VALUE, }, pub fn color(format: dxgi.FORMAT, in_color: [4]f32) CLEAR_VALUE { return CLEAR_VALUE{ .Format = format, .u = .{ .Color = in_color }, }; } pub fn depthStencil(format: dxgi.FORMAT, depth: f32, stencil: u8) CLEAR_VALUE { return CLEAR_VALUE{ .Format = format, .u = .{ .DepthStencil = .{ .Depth = depth, .Stencil = stencil } }, }; } }; pub const FENCE_FLAGS = extern enum { NONE = 0, SHARED = 0x1, SHARED_CROSS_ADAPTER = 0x2, }; // TODO: This has alignment of 1. How to force alignment of 4? //'pub const RESOURCE_STATES align(4) = packed struct {' causes compiler error. pub const RESOURCE_STATES = packed struct { VERTEX_AND_CONSTANT_BUFFER: bool = false, INDEX_BUFFER: bool = false, RENDER_TARGET: bool = false, UNORDERED_ACCESS: bool = false, DEPTH_WRITE: bool = false, DEPTH_READ: bool = false, NON_PIXEL_SHADER_RESOURCE: bool = false, PIXEL_SHADER_RESOURCE: bool = false, STREAM_OUT: bool = false, INDIRECT_ARGUMENT_AND_PREDICATION: bool = false, COPY_DEST: bool = false, COPY_SOURCE: bool = false, RESOLVE_DEST: bool = false, RESOLVE_SOURCE: bool = false, // TODO: Clean this up. // 'padding: u17 = 0,' // causes @sizeOf(RESOURCE_STATES) == 5 with current compiler version. RESERVED0: bool = false, RESERVED1: bool = false, RESERVED2: bool = false, RESERVED3: bool = false, RESERVED4: bool = false, RESERVED5: bool = false, RESERVED6: bool = false, RESERVED7: bool = false, RESERVED8: bool = false, RESERVED9: bool = false, RESERVED10: bool = false, RESERVED11: bool = false, RESERVED12: bool = false, RESERVED13: bool = false, RESERVED14: bool = false, RESERVED15: bool = false, RESERVED16: bool = false, RESERVED17: bool = false, pub fn genericRead() RESOURCE_STATES { return RESOURCE_STATES{ .VERTEX_AND_CONSTANT_BUFFER = true, .INDEX_BUFFER = true, .NON_PIXEL_SHADER_RESOURCE = true, .PIXEL_SHADER_RESOURCE = true, .INDIRECT_ARGUMENT_AND_PREDICATION = true, .COPY_SOURCE = true, }; } }; pub const RESOURCE_BARRIER_TYPE = extern enum { TRANSITION = 0, ALIASING = 1, UAV = 2, }; pub const RESOURCE_TRANSITION_BARRIER = extern struct { pResource: *IResource, Subresource: u32, StateBefore: RESOURCE_STATES, StateAfter: RESOURCE_STATES, }; pub const RESOURCE_ALIASING_BARRIER = extern struct { pResourceBefore: *IResource, pResourceAfter: *IResource, }; pub const RESOURCE_UAV_BARRIER = extern struct { pResource: *IResource, }; pub const RESOURCE_BARRIER_FLAGS = extern enum { NONE = 0, BEGIN_ONLY = 0x1, END_ONLY = 0x2, }; pub const RESOURCE_BARRIER = extern struct { Type: RESOURCE_BARRIER_TYPE, Flags: RESOURCE_BARRIER_FLAGS, u: extern union { Transition: RESOURCE_TRANSITION_BARRIER, Aliasing: RESOURCE_ALIASING_BARRIER, UAV: RESOURCE_UAV_BARRIER, }, }; pub const PLACED_SUBRESOURCE_FOOTPRINT = extern struct { Offset: u64, Footprint: SUBRESOURCE_FOOTPRINT, }; pub const TEXTURE_COPY_TYPE = extern enum { SUBRESOURCE_INDEX = 0, PLACED_FOOTPRINT = 1, }; pub const TEXTURE_COPY_LOCATION = extern struct { pResource: *IResource, Type: TEXTURE_COPY_TYPE, u: extern union { PlacedFootprint: PLACED_SUBRESOURCE_FOOTPRINT, SubresourceIndex: u32, }, }; pub const QUERY_HEAP_TYPE = extern enum { OCCLUSION = 0, TIMESTAMP = 1, PIPELINE_STATISTICS = 2, SO_STATISTICS = 3, }; pub const QUERY_HEAP_DESC = extern struct { Type: QUERY_HEAP_TYPE, Count: u32, NodeMask: u32, }; pub const QUERY_TYPE = extern enum { OCCLUSION = 0, BINARY_OCCLUSION = 1, 
TIMESTAMP = 2, PIPELINE_STATISTICS = 3, SO_STATISTICS_STREAM0 = 4, SO_STATISTICS_STREAM1 = 5, SO_STATISTICS_STREAM2 = 6, SO_STATISTICS_STREAM3 = 7, }; pub const PREDICATION_OP = extern enum { EQUAL_ZERO = 0, NOT_EQUAL_ZERO = 1, }; pub const INDIRECT_ARGUMENT_TYPE = extern enum { DRAW = 0, DRAW_INDEXED = 1, DISPATCH = 2, VERTEX_BUFFER_VIEW = 3, INDEX_BUFFER_VIEW = 4, CONSTANT = 5, CONSTANT_BUFFER_VIEW = 6, SHADER_RESOURCE_VIEW = 7, UNORDERED_ACCESS_VIEW = 8, }; pub const INDIRECT_ARGUMENT_DESC = extern struct { Type: INDIRECT_ARGUMENT_TYPE, u: extern union { VertexBuffer: extern struct { Slot: u32, }, Constant: extern struct { RootParameterIndex: u32, DestOffsetIn32BitValues: u32, Num32BitValuesToSet: u32, }, ConstantBufferView: extern struct { RootParameterIndex: u32, }, ShaderResourceView: extern struct { RootParameterIndex: u32, }, UnorderedAccessView: extern struct { RootParameterIndex: u32, }, }, }; pub const COMMAND_SIGNATURE_DESC = extern struct { ByteStride: u32, NumArgumentDescs: u32, pArgumentDescs: *const INDIRECT_ARGUMENT_DESC, NodeMask: u32, }; pub const PACKED_MIP_INFO = extern struct { NumStandardMips: u8, NumPackedMips: u8, NumTilesForPackedMips: u32, StartTileIndexInOverallResource: u32, }; pub const PRIMITIVE_TOPOLOGY_TYPE = extern enum { UNDEFINED = 0, POINT = 1, LINE = 2, TRIANGLE = 3, PATCH = 4, }; pub const CULL_MODE = extern enum { NONE = 1, FRONT = 2, BACK = 3, }; pub const FEATURE = extern enum { D3D12_OPTIONS = 0, ARCHITECTURE = 1, FEATURE_LEVELS = 2, FORMAT_SUPPORT = 3, MULTISAMPLE_QUALITY_LEVELS = 4, FORMAT_INFO = 5, GPU_VIRTUAL_ADDRESS_SUPPORT = 6, SHADER_MODEL = 7, D3D12_OPTIONS1 = 8, ROOT_SIGNATURE = 12, }; pub const RESOURCE_ALLOCATION_INFO = extern struct { SizeInBytes: u64, Alignment: u64, }; pub const GRAPHICS_PIPELINE_STATE_DESC = extern struct { pRootSignature: ?*IRootSignature = null, VS: SHADER_BYTECODE = .{}, PS: SHADER_BYTECODE = .{}, DS: SHADER_BYTECODE = .{}, HS: SHADER_BYTECODE = .{}, GS: SHADER_BYTECODE = .{}, StreamOutput: STREAM_OUTPUT_DESC = .{}, BlendState: BLEND_DESC = .{}, SampleMask: u32 = 0xffff_ffff, RasterizerState: RASTERIZER_DESC = .{}, DepthStencilState: DEPTH_STENCIL_DESC = .{}, InputLayout: INPUT_LAYOUT_DESC = .{}, IBStripCutValue: INDEX_BUFFER_STRIP_CUT_VALUE = .DISABLED, PrimitiveTopologyType: PRIMITIVE_TOPOLOGY_TYPE, NumRenderTargets: u32, RTVFormats: [8]dxgi.FORMAT, DSVFormat: dxgi.FORMAT = .UNKNOWN, SampleDesc: dxgi.SAMPLE_DESC = .{}, NodeMask: u32 = 0, CachedPSO: CACHED_PIPELINE_STATE = .{}, Flags: PIPELINE_STATE_FLAGS = .NONE, }; pub const COMPUTE_PIPELINE_STATE_DESC = extern struct { pRootSignature: ?*IRootSignature = null, CS: SHADER_BYTECODE, NodeMask: u32 = 0, CachedPSO: CACHED_PIPELINE_STATE = .{}, Flags: PIPELINE_STATE_FLAGS = .NONE, }; pub const VIEWPORT = extern struct { TopLeftX: f32, TopLeftY: f32, Width: f32, Height: f32, MinDepth: f32, MaxDepth: f32, }; const HRESULT = os.HRESULT; pub const IBlob = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3DBlob GetBufferPointer: fn (*Self) callconv(.C) *c_void, GetBufferSize: fn (*Self) callconv(.C) usize, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IBlob.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn GetBufferPointer(self: *T) *c_void { return self.vtbl.GetBufferPointer(self); } pub inline fn GetBufferSize(self: *T) usize { return 
self.vtbl.GetBufferSize(self); } }; } }; pub const IDebug1 = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Debug1 EnableDebugLayer: fn (*Self) callconv(.C) void, SetEnableGPUBasedValidation: fn (*Self, os.BOOL) callconv(.C) void, SetEnableSynchronizedCommandQueueValidation: fn (*Self, os.BOOL) callconv(.C) void, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IDebug1.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn EnableDebugLayer(self: *T) void { self.vtbl.EnableDebugLayer(self); } pub inline fn SetEnableGPUBasedValidation(self: *T, enable: os.BOOL) void { self.vtbl.SetEnableGPUBasedValidation(self, enable); } pub inline fn SetEnableSynchronizedCommandQueueValidation(self: *T, enable: os.BOOL) void { self.vtbl.SetEnableSynchronizedCommandQueueValidation(self, enable); } }; } }; pub const IInfoQueue = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12InfoQueue SetMessageCountLimit: *c_void, ClearStoredMessages: *c_void, GetMessage: *c_void, GetNumMessagesAllowedByStorageFilter: *c_void, GetNumMessagesDeniedByStorageFilter: *c_void, GetNumStoredMessages: *c_void, GetNumStoredMessagesAllowedByRetrievalFilter: *c_void, GetNumMessagesDiscardedByMessageCountLimit: *c_void, GetMessageCountLimit: *c_void, AddStorageFilterEntries: fn (*Self, *INFO_QUEUE_FILTER) callconv(.C) HRESULT, GetStorageFilter: *c_void, ClearStorageFilter: *c_void, PushEmptyStorageFilter: *c_void, PushCopyOfStorageFilter: *c_void, PushStorageFilter: *c_void, PopStorageFilter: *c_void, GetStorageFilterStackSize: *c_void, AddRetrievalFilterEntries: *c_void, GetRetrievalFilter: *c_void, ClearRetrievalFilter: *c_void, PushEmptyRetrievalFilter: *c_void, PushCopyOfRetrievalFilter: *c_void, PushRetrievalFilter: *c_void, PopRetrievalFilter: *c_void, GetRetrievalFilterStackSize: *c_void, AddMessage: *c_void, AddApplicationMessage: *c_void, SetBreakOnCategory: *c_void, SetBreakOnSeverity: *c_void, SetBreakOnID: *c_void, GetBreakOnCategory: *c_void, GetBreakOnSeverity: *c_void, GetBreakOnID: *c_void, SetMuteDebugOutput: *c_void, GetMuteDebugOutput: *c_void, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IInfoQueue.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn AddStorageFilterEntries(self: *T, filter: *INFO_QUEUE_FILTER) HRESULT { return self.vtbl.AddStorageFilterEntries(self, filter); } }; } }; pub const IObject = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn GetPrivateData( self: *T, guid: 
*const os.GUID, data_size: *u32, data: ?*c_void, ) HRESULT { return self.vtbl.GetPrivateData(self, guid, data_size, data); } pub inline fn SetPrivateData( self: *T, guid: *const os.GUID, data_size: u32, data: ?*const c_void, ) HRESULT { return self.vtbl.SetPrivateData(self, guid, data_size, data); } pub inline fn SetPrivateDataInterface( self: *T, guid: *const os.GUID, data: ?*const os.IUnknown, ) HRESULT { return self.vtbl.SetPrivateDataInterface(self, guid, data); } pub inline fn SetName(self: *T, name: ?os.LPCWSTR) HRESULT { return self.vtbl.SetName(self, name); } }; } }; pub const IDeviceChild = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn GetDevice( self: *T, guid: *const os.GUID, device: **c_void, ) HRESULT { return self.vtbl.GetDevice(self, guid, device); } }; } }; pub const IRootSignature = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); }; pub const IQueryHeap = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); }; pub const ICommandSignature = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) 
callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); }; pub const IPageable = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); }; pub const IHeap = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, // ID3D12Heap GetDesc: fn (*Self, *HEAP_DESC) callconv(.C) *HEAP_DESC, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); usingnamespace IHeap.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn GetDesc(self: *T) HEAP_DESC { var desc: HEAP_DESC = undefined; _ = self.vtbl.GetDesc(self, &desc); return desc; } }; } }; pub const IResource = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, // ID3D12Resource Map: fn (*Self, u32, *const RANGE, **c_void) callconv(.C) HRESULT, Unmap: fn (*Self, u32, *const RANGE) callconv(.C) void, GetDesc: fn (*Self, *RESOURCE_DESC) callconv(.C) *RESOURCE_DESC, GetGPUVirtualAddress: fn (*Self) callconv(.C) GPU_VIRTUAL_ADDRESS, WriteToSubresource: fn ( *Self, u32, *const BOX, *const c_void, u32, u32, ) callconv(.C) HRESULT, ReadFromSubresource: fn (*Self, *c_void, u32, u32, u32, *const BOX) callconv(.C) HRESULT, GetHeapProperties: fn (*Self,
*HEAP_PROPERTIES, *HEAP_FLAGS) callconv(.C) HRESULT, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); usingnamespace IResource.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn Map( self: *Self, subresource: u32, read_range: *const RANGE, data: **c_void, ) HRESULT { return self.vtbl.Map(self, subresource, read_range, data); } pub inline fn Unmap(self: *Self, subresource: u32, written_range: *const RANGE) void { self.vtbl.Unmap(self, subresource, written_range); } pub inline fn GetDesc(self: *Self) RESOURCE_DESC { var desc: RESOURCE_DESC = undefined; _ = self.vtbl.GetDesc(self, &desc); return desc; } pub inline fn GetGPUVirtualAddress(self: *Self) GPU_VIRTUAL_ADDRESS { return self.vtbl.GetGPUVirtualAddress(self); } pub inline fn WriteToSubresource( self: *Self, dst_subresource: u32, dst_box: *const BOX, src_data: *const c_void, src_row_pitch: u32, src_depth_pitch: u32, ) HRESULT { return self.vtbl.WriteToSubresource( self, dst_subresource, dst_box, src_data, src_row_pitch, src_depth_pitch, ); } pub inline fn ReadFromSubresource( self: *Self, dst_data: *c_void, dst_row_pitch: u32, dst_depth_pitch: u32, src_subresource: u32, src_box: *const BOX, ) HRESULT { return self.vtbl.ReadFromSubresource( self, dst_data, dst_row_pitch, dst_depth_pitch, src_subresource, src_box, ); } pub inline fn GetHeapProperties( self: *Self, properties: *HEAP_PROPERTIES, flags: *HEAP_FLAGS, ) HRESULT { return self.vtbl.GetHeapProperties(self, properties, flags); } }; } }; pub const ICommandAllocator = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, // ID3D12CommandAllocator Reset: fn (*Self) callconv(.C) HRESULT, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); usingnamespace ICommandAllocator.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn Reset(self: *T) HRESULT { return self.vtbl.Reset(self); } }; } }; pub const IFence = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, // ID3D12Fence GetCompletedValue: fn (*Self) callconv(.C) u64, SetEventOnCompletion: fn (*Self, u64, os.HANDLE) callconv(.C) HRESULT, Signal: fn (*Self, u64) callconv(.C) HRESULT, }, usingnamespace os.IUnknown.Methods(Self); 
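// NOTE: minimal CPU-side wait sketch using the fence methods wrapped below
// (assumes a `fence: *IFence`, a Win32 event `event: os.HANDLE`, and a `value: u64`
// previously signaled on a command queue; the actual event wait, e.g. WaitForSingleObject,
// is outside these bindings):
//   if (fence.GetCompletedValue() < value) {
//       _ = fence.SetEventOnCompletion(value, event);
//       // ...then block on `event` until the GPU reaches `value`.
//   }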
usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); usingnamespace IFence.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn GetCompletedValue(self: *T) u64 { return self.vtbl.GetCompletedValue(self); } pub inline fn SetEventOnCompletion(self: *T, value: u64, event: os.HANDLE) HRESULT { return self.vtbl.SetEventOnCompletion(self, value, event); } pub inline fn Signal(self: *T, value: u64) HRESULT { return self.vtbl.Signal(self, value); } }; } }; pub const IPipelineState = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, // ID3D12PipelineState GetCachedBlob: fn (*Self, **IBlob) callconv(.C) HRESULT, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); usingnamespace IPipelineState.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn GetCachedBlob(self: *T, blob: **IBlob) HRESULT { return self.vtbl.GetCachedBlob(self, blob); } }; } }; pub const IDescriptorHeap = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, // ID3D12DescriptorHeap GetDesc: fn (*Self, *DESCRIPTOR_HEAP_DESC) callconv(.C) *DESCRIPTOR_HEAP_DESC, GetCPUDescriptorHandleForHeapStart: fn ( *Self, *CPU_DESCRIPTOR_HANDLE, ) callconv(.C) *CPU_DESCRIPTOR_HANDLE, GetGPUDescriptorHandleForHeapStart: fn ( *Self, *GPU_DESCRIPTOR_HANDLE, ) callconv(.C) *GPU_DESCRIPTOR_HANDLE, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); usingnamespace IDescriptorHeap.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn GetDesc(self: *T) DESCRIPTOR_HEAP_DESC { var desc: DESCRIPTOR_HEAP_DESC = undefined; _ = self.vtbl.GetDesc(self, &desc); return desc; } pub inline fn GetCPUDescriptorHandleForHeapStart(self: *T) CPU_DESCRIPTOR_HANDLE { var handle: CPU_DESCRIPTOR_HANDLE = undefined; _ = self.vtbl.GetCPUDescriptorHandleForHeapStart(self, &handle); return handle; } pub inline fn GetGPUDescriptorHandleForHeapStart(self: *T) GPU_DESCRIPTOR_HANDLE { var handle: GPU_DESCRIPTOR_HANDLE = undefined; _ = self.vtbl.GetGPUDescriptorHandleForHeapStart(self, &handle); return handle; } }; } }; pub const ICommandList = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown
QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, // ID3D12CommandList GetType: fn (*Self) callconv(.C) COMMAND_LIST_TYPE, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); usingnamespace ICommandList.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn GetType(self: *T) COMMAND_LIST_TYPE { return self.vtbl.GetType(self); } }; } }; pub const IGraphicsCommandList = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, // ID3D12CommandList GetType: fn (*Self) callconv(.C) COMMAND_LIST_TYPE, // ID3D12GraphicsCommandList Close: fn (*Self) callconv(.C) HRESULT, Reset: fn (*Self, *ICommandAllocator, ?*IPipelineState) callconv(.C) HRESULT, ClearState: fn (*Self, *IPipelineState) callconv(.C) void, DrawInstanced: fn (*Self, u32, u32, u32, u32) callconv(.C) void, DrawIndexedInstanced: fn (*Self, u32, u32, u32, i32, u32) callconv(.C) void, Dispatch: fn (*Self, u32, u32, u32) callconv(.C) void, CopyBufferRegion: fn (*Self, *IResource, u64, *IResource, u64, u64) callconv(.C) void, CopyTextureRegion: fn ( *Self, *const TEXTURE_COPY_LOCATION, u32, u32, u32, *const TEXTURE_COPY_LOCATION, ?*const BOX, ) callconv(.C) void, CopyResource: fn (*Self, *IResource, *IResource) callconv(.C) void, CopyTiles: fn ( *Self, *IResource, *const TILED_RESOURCE_COORDINATE, *const TILE_REGION_SIZE, *IResource, buffer_start_offset_in_bytes: u64, TILE_COPY_FLAGS, ) callconv(.C) void, ResolveSubresource: fn ( *Self, *IResource, u32, *IResource, u32, dxgi.FORMAT, ) callconv(.C) void, IASetPrimitiveTopology: fn (*Self, PRIMITIVE_TOPOLOGY) callconv(.C) void, RSSetViewports: fn (*Self, u32, [*]const VIEWPORT) callconv(.C) void, RSSetScissorRects: fn (*Self, u32, [*]const RECT) callconv(.C) void, OMSetBlendFactor: fn (*Self, *const [4]f32) callconv(.C) void, OMSetStencilRef: fn (*Self, u32) callconv(.C) void, SetPipelineState: fn (*Self, *IPipelineState) callconv(.C) void, ResourceBarrier: fn (*Self, u32, [*]const RESOURCE_BARRIER) callconv(.C) void, ExecuteBundle: fn (*Self, *IGraphicsCommandList) callconv(.C) void, SetDescriptorHeaps: fn (*Self, u32, [*]const *IDescriptorHeap) callconv(.C) void, SetComputeRootSignature: fn (*Self, *IRootSignature) callconv(.C) void, SetGraphicsRootSignature: fn (*Self, *IRootSignature) callconv(.C) void, SetComputeRootDescriptorTable: fn (*Self, u32,
GPU_DESCRIPTOR_HANDLE) callconv(.C) void, SetGraphicsRootDescriptorTable: fn (*Self, u32, GPU_DESCRIPTOR_HANDLE) callconv(.C) void, SetComputeRoot32BitConstant: fn (*Self, u32, u32, u32) callconv(.C) void, SetGraphicsRoot32BitConstant: fn (*Self, u32, u32, u32) callconv(.C) void, SetComputeRoot32BitConstants: fn (*Self, u32, u32, *const c_void, u32) callconv(.C) void, SetGraphicsRoot32BitConstants: fn (*Self, u32, u32, *const c_void, u32) callconv(.C) void, SetComputeRootConstantBufferView: fn (*Self, u32, GPU_VIRTUAL_ADDRESS) callconv(.C) void, SetGraphicsRootConstantBufferView: fn (*Self, u32, GPU_VIRTUAL_ADDRESS) callconv(.C) void, SetComputeRootShaderResourceView: fn (*Self, u32, GPU_VIRTUAL_ADDRESS) callconv(.C) void, SetGraphicsRootShaderResourceView: fn (*Self, u32, GPU_VIRTUAL_ADDRESS) callconv(.C) void, SetComputeRootUnorderedAccessView: fn (*Self, u32, GPU_VIRTUAL_ADDRESS) callconv(.C) void, SetGraphicsRootUnorderedAccessView: fn (*Self, u32, GPU_VIRTUAL_ADDRESS) callconv(.C) void, IASetIndexBuffer: fn (*Self, *const INDEX_BUFFER_VIEW) callconv(.C) void, IASetVertexBuffers: fn (*Self, u32, u32, [*]const VERTEX_BUFFER_VIEW) callconv(.C) void, SOSetTargets: fn (*Self, u32, u32, [*]const STREAM_OUTPUT_BUFFER_VIEW) callconv(.C) void, OMSetRenderTargets: fn ( *Self, u32, ?[*]const CPU_DESCRIPTOR_HANDLE, os.BOOL, ?*const CPU_DESCRIPTOR_HANDLE, ) callconv(.C) void, ClearDepthStencilView: fn ( *Self, CPU_DESCRIPTOR_HANDLE, CLEAR_FLAGS, f32, u8, u32, ?[*]const RECT, ) callconv(.C) void, ClearRenderTargetView: fn ( *Self, CPU_DESCRIPTOR_HANDLE, *const [4]f32, u32, ?[*]const RECT, ) callconv(.C) void, ClearUnorderedAccessViewUint: fn ( *Self, GPU_DESCRIPTOR_HANDLE, CPU_DESCRIPTOR_HANDLE, *IResource, *const [4]u32, u32, ?[*]const RECT, ) callconv(.C) void, ClearUnorderedAccessViewFloat: fn ( *Self, GPU_DESCRIPTOR_HANDLE, CPU_DESCRIPTOR_HANDLE, *IResource, *const [4]f32, u32, ?[*]const RECT, ) callconv(.C) void, DiscardResource: fn (*Self, *IResource, *const DISCARD_REGION) callconv(.C) void, BeginQuery: fn (*Self, *IQueryHeap, QUERY_TYPE, u32) callconv(.C) void, EndQuery: fn (*Self, *IQueryHeap, QUERY_TYPE, u32) callconv(.C) void, ResolveQueryData: fn ( *Self, *IQueryHeap, QUERY_TYPE, u32, u32, *IResource, u64, ) callconv(.C) void, SetPredication: fn (*Self, *IResource, u64, PREDICATION_OP) callconv(.C) void, SetMarker: fn (*Self, u32, *const c_void, u32) callconv(.C) void, BeginEvent: fn (*Self, u32, *const c_void, u32) callconv(.C) void, EndEvent: fn (*Self) callconv(.C) void, ExecuteIndirect: fn ( *Self, *ICommandSignature, u32, *IResource, u64, *IResource, u64, ) callconv(.C) void, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); usingnamespace ICommandList.Methods(Self); usingnamespace IGraphicsCommandList.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn Close(self: *T) HRESULT { return self.vtbl.Close(self); } pub inline fn Reset( self: *T, allocator: *ICommandAllocator, initial_state: ?*IPipelineState, ) HRESULT { return self.vtbl.Reset(self, allocator, initial_state); } pub inline fn ClearState(self: *T, pso: *IPipelineState) void { self.vtbl.ClearState(self, pso); } pub inline fn DrawInstanced( self: *T, vertex_count_per_instance: u32, instance_count: u32, start_vertex_location: u32, start_instance_location: u32, ) void { self.vtbl.DrawInstanced( self, vertex_count_per_instance, instance_count, start_vertex_location, start_instance_location, ); } pub inline fn 
DrawIndexedInstanced( self: *T, index_count_per_instance: u32, instance_count: u32, start_index_location: u32, base_vertex_location: i32, start_instance_location: u32, ) void { self.vtbl.DrawIndexedInstanced( self, index_count_per_instance, instance_count, start_index_location, base_vertex_location, start_instance_location, ); } pub inline fn Dispatch(self: *T, count_x: u32, count_y: u32, count_z: u32) void { self.vtbl.Dispatch(self, count_x, count_y, count_z); } pub inline fn CopyBufferRegion( self: *T, dst_buffer: *IResource, dst_offset: u64, src_buffer: *IResource, src_offset: u64, num_bytes: u64, ) void { self.vtbl.CopyBufferRegion( self, dst_buffer, dst_offset, src_buffer, src_offset, num_bytes, ); } pub inline fn CopyTextureRegion( self: *T, dst: *const TEXTURE_COPY_LOCATION, dst_x: u32, dst_y: u32, dst_z: u32, src: *const TEXTURE_COPY_LOCATION, src_box: ?*const BOX, ) void { self.vtbl.CopyTextureRegion(self, dst, dst_x, dst_y, dst_z, src, src_box); } pub inline fn CopyResource(self: *T, dst: *IResource, src: *IResource) void { self.vtbl.CopyResource(self, dst, src); } pub inline fn CopyTiles( self: *T, tiled_resource: *IResource, tile_region_start_coordinate: *const TILED_RESOURCE_COORDINATE, tile_region_size: *const TILE_REGION_SIZE, buffer: *IResource, buffer_start_offset_in_bytes: u64, flags: TILE_COPY_FLAGS, ) void { self.vtbl.CopyTiles( self, tiled_resource, tile_region_start_coordinate, tile_region_size, buffer, buffer_start_offset_in_bytes, flags, ); } pub inline fn ResolveSubresource( self: *T, dst_resource: *IResource, dst_subresource: u32, src_resource: *IResource, src_subresource: u32, format: dxgi.FORMAT, ) void { self.vtbl.ResolveSubresource( self, dst_resource, dst_subresource, src_resource, src_subresource, format, ); } pub inline fn IASetPrimitiveTopology(self: *T, topology: PRIMITIVE_TOPOLOGY) void { self.vtbl.IASetPrimitiveTopology(self, topology); } pub inline fn RSSetViewports(self: *T, num: u32, viewports: [*]const VIEWPORT) void { self.vtbl.RSSetViewports(self, num, viewports); } pub inline fn RSSetScissorRects(self: *T, num: u32, rects: [*]const RECT) void { self.vtbl.RSSetScissorRects(self, num, rects); } pub inline fn OMSetBlendFactor(self: *T, blend_factor: *const [4]f32) void { self.vtbl.OMSetBlendFactor(self, blend_factor); } pub inline fn OMSetStencilRef(self: *T, stencil_ref: u32) void { self.vtbl.OMSetStencilRef(self, stencil_ref); } pub inline fn SetPipelineState(self: *T, pso: *IPipelineState) void { self.vtbl.SetPipelineState(self, pso); } pub inline fn ResourceBarrier( self: *T, num: u32, barriers: [*]const RESOURCE_BARRIER, ) void { self.vtbl.ResourceBarrier(self, num, barriers); } pub inline fn ExecuteBundle(self: *T, cmdlist: *IGraphicsCommandList) void { self.vtbl.ExecuteBundle(self, cmdlist); } pub inline fn SetDescriptorHeaps(self: *T, num: u32, heaps: [*]const *IDescriptorHeap) void { self.vtbl.SetDescriptorHeaps(self, num, heaps); } pub inline fn SetComputeRootSignature(self: *T, root_signature: *IRootSignature) void { self.vtbl.SetComputeRootSignature(self, root_signature); } pub inline fn SetGraphicsRootSignature(self: *T, root_signature: *IRootSignature) void { self.vtbl.SetGraphicsRootSignature(self, root_signature); } pub inline fn SetComputeRootDescriptorTable( self: *T, root_index: u32, base_descriptor: GPU_DESCRIPTOR_HANDLE, ) void { self.vtbl.SetComputeRootDescriptorTable(self, root_index, base_descriptor); } pub inline fn SetGraphicsRootDescriptorTable( self: *T, root_index: u32, base_descriptor: GPU_DESCRIPTOR_HANDLE, ) void
{ self.vtbl.SetGraphicsRootDescriptorTable(self, root_index, base_descriptor); } pub inline fn SetComputeRoot32BitConstant(self: *T, index: u32, data: u32, off: u32) void { self.vtbl.SetComputeRoot32BitConstant(self, index, data, off); } pub inline fn SetGraphicsRoot32BitConstant(self: *T, index: u32, data: u32, off: u32) void { self.vtbl.SetGraphicsRoot32BitConstant(self, index, data, off); } pub inline fn SetComputeRoot32BitConstants( self: *T, root_index: u32, num: u32, data: *const c_void, offset: u32, ) void { self.vtbl.SetComputeRoot32BitConstants(self, root_index, num, data, offset); } pub inline fn SetGraphicsRoot32BitConstants( self: *T, root_index: u32, num: u32, data: *const c_void, offset: u32, ) void { self.vtbl.SetGraphicsRoot32BitConstants(self, root_index, num, data, offset); } pub inline fn SetComputeRootConstantBufferView( self: *T, index: u32, buffer_location: GPU_VIRTUAL_ADDRESS, ) void { self.vtbl.SetComputeRootConstantBufferView(self, index, buffer_location); } pub inline fn SetGraphicsRootConstantBufferView( self: *T, index: u32, buffer_location: GPU_VIRTUAL_ADDRESS, ) void { self.vtbl.SetGraphicsRootConstantBufferView(self, index, buffer_location); } pub inline fn SetComputeRootShaderResourceView( self: *T, index: u32, buffer_location: GPU_VIRTUAL_ADDRESS, ) void { self.vtbl.SetComputeRootShaderResourceView(self, index, buffer_location); } pub inline fn SetGraphicsRootShaderResourceView( self: *T, index: u32, buffer_location: GPU_VIRTUAL_ADDRESS, ) void { self.vtbl.SetGraphicsRootShaderResourceView(self, index, buffer_location); } pub inline fn SetComputeRootUnorderedAccessView( self: *T, index: u32, buffer_location: GPU_VIRTUAL_ADDRESS, ) void { self.vtbl.SetComputeRootUnorderedAccessView(self, index, buffer_location); } pub inline fn SetGraphicsRootUnorderedAccessView( self: *T, index: u32, buffer_location: GPU_VIRTUAL_ADDRESS, ) void { self.vtbl.SetGraphicsRootUnorderedAccessView(self, index, buffer_location); } pub inline fn IASetIndexBuffer(self: *T, view: *const INDEX_BUFFER_VIEW) void { self.vtbl.IASetIndexBuffer(self, view); } pub inline fn IASetVertexBuffers( self: *T, start_slot: u32, num_views: u32, views: [*]const VERTEX_BUFFER_VIEW, ) void { self.vtbl.IASetVertexBuffers(self, start_slot, num_views, views); } pub inline fn SOSetTargets( self: *T, start_slot: u32, num_views: u32, views: [*]const STREAM_OUTPUT_BUFFER_VIEW, ) void { self.vtbl.SOSetTargets(self, start_slot, num_views, views); } pub inline fn OMSetRenderTargets( self: *T, num_rt_descriptors: u32, rt_descriptors: ?[*]const CPU_DESCRIPTOR_HANDLE, single_handle: os.BOOL, ds_descriptors: ?*const CPU_DESCRIPTOR_HANDLE, ) void { self.vtbl.OMSetRenderTargets( self, num_rt_descriptors, rt_descriptors, single_handle, ds_descriptors, ); } pub inline fn ClearDepthStencilView( self: *T, ds_view: CPU_DESCRIPTOR_HANDLE, clear_flags: CLEAR_FLAGS, depth: f32, stencil: u8, num_rects: u32, rects: ?[*]const RECT, ) void { self.vtbl.ClearDepthStencilView( self, ds_view, clear_flags, depth, stencil, num_rects, rects, ); } pub inline fn ClearRenderTargetView( self: *T, rt_view: CPU_DESCRIPTOR_HANDLE, rgba: *const [4]f32, num_rects: u32, rects: ?[*]const RECT, ) void { self.vtbl.ClearRenderTargetView(self, rt_view, rgba, num_rects, rects); } pub inline fn ClearUnorderedAccessViewUint( self: *T, gpu_view: GPU_DESCRIPTOR_HANDLE, cpu_view: CPU_DESCRIPTOR_HANDLE, resource: *IResource, values: *const [4]u32, num_rects: u32, rects: ?[*]const RECT, ) void { self.vtbl.ClearUnorderedAccessViewUint( self, gpu_view, cpu_view, 
resource, values, num_rects, rects, ); } pub inline fn ClearUnorderedAccessViewFloat( self: *T, gpu_view: GPU_DESCRIPTOR_HANDLE, cpu_view: CPU_DESCRIPTOR_HANDLE, resource: *IResource, values: *const [4]f32, num_rects: u32, rects: ?[*]const RECT, ) void { self.vtbl.ClearUnorderedAccessViewFloat( self, gpu_view, cpu_view, resource, values, num_rects, rects, ); } pub inline fn DiscardResource( self: *T, resource: *IResource, region: *const DISCARD_REGION, ) void { self.vtbl.DiscardResource(self, resource, region); } pub inline fn BeginQuery( self: *T, query: *IQueryHeap, query_type: QUERY_TYPE, index: u32, ) void { self.vtbl.BeginQuery(self, query, query_type, index); } pub inline fn EndQuery( self: *T, query: *IQueryHeap, query_type: QUERY_TYPE, index: u32, ) void { self.vtbl.EndQuery(self, query, query_type, index); } pub inline fn ResolveQueryData( self: *T, query: *IQueryHeap, query_type: QUERY_TYPE, start_index: u32, num_queries: u32, dst_resource: *IResource, buffer_offset: u64, ) void { self.vtbl.ResolveQueryData( self, query, query_type, start_index, num_queries, dst_resource, buffer_offset, ); } pub inline fn SetPredication( self: *T, buffer: *IResource, buffer_offset: u64, operation: PREDICATION_OP, ) void { self.vtbl.SetPredication(self, buffer, buffer_offset, operation); } pub inline fn SetMarker(self: *T, metadata: u32, data: *const c_void, size: u32) void { self.vtbl.SetMarker(self, metadata, data, size); } pub inline fn BeginEvent(self: *T, metadata: u32, data: *const c_void, size: u32) void { self.vtbl.BeginEvent(self, metadata, data, size); } pub inline fn EndEvent(self: *T) void { self.vtbl.EndEvent(self); } pub inline fn ExecuteIndirect( self: *T, command_signature: *ICommandSignature, max_command_count: u32, arg_buffer: *IResource, arg_buffer_offset: u64, count_buffer: *IResource, count_buffer_offset: u64, ) void { self.vtbl.ExecuteIndirect( self, command_signature, max_command_count, arg_buffer, arg_buffer_offset, count_buffer, count_buffer_offset, ); } }; } }; pub const ICommandQueue = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12DeviceChild GetDevice: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, // ID3D12CommandQueue UpdateTileMappings: fn ( *Self, *IResource, u32, [*]const TILED_RESOURCE_COORDINATE, [*]const TILE_REGION_SIZE, *IHeap, u32, [*]const TILE_RANGE_FLAGS, [*]const u32, [*]const u32, flags: TILE_MAPPING_FLAGS, ) callconv(.C) void, CopyTileMappings: fn ( *Self, *IResource, *const TILED_RESOURCE_COORDINATE, *IResource, *const TILED_RESOURCE_COORDINATE, *const TILE_REGION_SIZE, TILE_MAPPING_FLAGS, ) callconv(.C) void, ExecuteCommandLists: fn (*Self, u32, [*]const *ICommandList) callconv(.C) void, SetMarker: fn (*Self, u32, *const c_void, u32) callconv(.C) void, BeginEvent: fn (*Self, u32, *const c_void, u32) callconv(.C) void, EndEvent: fn (*Self) callconv(.C) void, Signal: fn (*Self, *IFence, u64) callconv(.C) HRESULT, Wait: fn (*Self, *IFence, u64) callconv(.C) HRESULT, GetTimestampFrequency: fn (*Self, *u64) callconv(.C) 
HRESULT, GetClockCalibration: fn (*Self, *u64, *u64) callconv(.C) HRESULT, GetDesc: fn (*Self, *COMMAND_QUEUE_DESC) callconv(.C) *COMMAND_QUEUE_DESC, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDeviceChild.Methods(Self); usingnamespace ICommandQueue.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn UpdateTileMappings( self: *T, resource: *IResource, num_resource_regions: u32, resource_region_start_coordinates: [*]const TILED_RESOURCE_COORDINATE, resource_region_sizes: [*]const TILE_REGION_SIZE, heap: *IHeap, num_ranges: u32, range_flags: [*]const TILE_RANGE_FLAGS, heap_range_start_offsets: [*]const u32, range_tile_counts: [*]const u32, flags: TILE_MAPPING_FLAGS, ) void { self.vtbl.UpdateTileMappings( self, resource, num_resource_regions, resource_region_start_coordinates, resource_region_sizes, heap, num_ranges, range_flags, heap_range_start_offsets, range_tile_counts, flags, ); } pub inline fn CopyTileMappings( self: *T, dst_resource: *IResource, dst_region_start_coordinate: *const TILED_RESOURCE_COORDINATE, src_resource: *IResource, src_region_start_coordinate: *const TILED_RESOURCE_COORDINATE, region_size: *const TILE_REGION_SIZE, flags: TILE_MAPPING_FLAGS, ) void { self.vtbl.CopyTileMappings( self, dst_resource, dst_region_start_coordinate, src_resource, src_region_start_coordinate, region_size, flags, ); } pub inline fn ExecuteCommandLists( self: *T, num: u32, cmdlists: [*]const *ICommandList, ) void { self.vtbl.ExecuteCommandLists(self, num, cmdlists); } pub inline fn SetMarker(self: *T, metadata: u32, data: *const c_void, size: u32) void { self.vtbl.SetMarker(self, metadata, data, size); } pub inline fn BeginEvent(self: *T, metadata: u32, data: *const c_void, size: u32) void { self.vtbl.BeginEvent(self, metadata, data, size); } pub inline fn EndEvent(self: *T) void { self.vtbl.EndEvent(self); } pub inline fn Signal(self: *T, fence: *IFence, value: u64) HRESULT { return self.vtbl.Signal(self, fence, value); } pub inline fn Wait(self: *T, fence: *IFence, value: u64) HRESULT { return self.vtbl.Wait(self, fence, value); } pub inline fn GetTimestampFrequency(self: *T, frequency: *u64) HRESULT { return self.vtbl.GetTimestampFrequency(self, frequency); } pub inline fn GetClockCalibration( self: *T, gpu_timestamp: *u64, cpu_timestamp: *u64, ) HRESULT { return self.vtbl.GetClockCalibration(self, gpu_timestamp, cpu_timestamp); } pub inline fn GetDesc(self: *T) COMMAND_QUEUE_DESC { var desc: COMMAND_QUEUE_DESC = undefined; _ = self.vtbl.GetDesc(self, &desc); return desc; } }; } }; pub const IDevice = extern struct { const Self = @This(); vtbl: *const extern struct { // IUnknown QueryInterface: fn (*Self, *const os.GUID, **c_void) callconv(.C) HRESULT, AddRef: fn (*Self) callconv(.C) u32, Release: fn (*Self) callconv(.C) u32, // ID3D12Object GetPrivateData: fn (*Self, *const os.GUID, *u32, ?*c_void) callconv(.C) HRESULT, SetPrivateData: fn (*Self, *const os.GUID, u32, ?*const c_void) callconv(.C) HRESULT, SetPrivateDataInterface: fn ( *Self, *const os.GUID, ?*const os.IUnknown, ) callconv(.C) HRESULT, SetName: fn (*Self, ?os.LPCWSTR) callconv(.C) HRESULT, // ID3D12Device GetNodeCount: fn (*Self) callconv(.C) u32, CreateCommandQueue: fn ( *Self, *const COMMAND_QUEUE_DESC, *const os.GUID, **c_void, ) callconv(.C) HRESULT, CreateCommandAllocator: fn ( *Self, COMMAND_LIST_TYPE, *const os.GUID, **c_void, ) callconv(.C) HRESULT, CreateGraphicsPipelineState: fn ( *Self, *const GRAPHICS_PIPELINE_STATE_DESC, *const
os.GUID, **c_void, ) callconv(.C) HRESULT, CreateComputePipelineState: fn ( *Self, *const COMPUTE_PIPELINE_STATE_DESC, *const os.GUID, **c_void, ) callconv(.C) HRESULT, CreateCommandList: fn ( *Self, u32, COMMAND_LIST_TYPE, *ICommandAllocator, ?*IPipelineState, *const os.GUID, **c_void, ) callconv(.C) HRESULT, CheckFeatureSupport: fn (*Self, FEATURE, *c_void, u32) callconv(.C) HRESULT, CreateDescriptorHeap: fn ( *Self, *const DESCRIPTOR_HEAP_DESC, *const os.GUID, **c_void, ) callconv(.C) HRESULT, GetDescriptorHandleIncrementSize: fn (*Self, DESCRIPTOR_HEAP_TYPE) callconv(.C) u32, CreateRootSignature: fn ( *Self, u32, *const c_void, u64, *const os.GUID, **c_void, ) callconv(.C) HRESULT, CreateConstantBufferView: fn ( *Self, *const CONSTANT_BUFFER_VIEW_DESC, CPU_DESCRIPTOR_HANDLE, ) callconv(.C) void, CreateShaderResourceView: fn ( *Self, ?*IResource, ?*const SHADER_RESOURCE_VIEW_DESC, CPU_DESCRIPTOR_HANDLE, ) callconv(.C) void, CreateUnorderedAccessView: fn ( *Self, ?*IResource, ?*IResource, ?*const UNORDERED_ACCESS_VIEW_DESC, CPU_DESCRIPTOR_HANDLE, ) callconv(.C) void, CreateRenderTargetView: fn ( *Self, ?*IResource, ?*const RENDER_TARGET_VIEW_DESC, CPU_DESCRIPTOR_HANDLE, ) callconv(.C) void, CreateDepthStencilView: fn ( *Self, ?*IResource, ?*const DEPTH_STENCIL_VIEW_DESC, CPU_DESCRIPTOR_HANDLE, ) callconv(.C) void, CreateSampler: fn (*Self, *const SAMPLER_DESC, CPU_DESCRIPTOR_HANDLE) callconv(.C) void, CopyDescriptors: fn ( *Self, u32, [*]const CPU_DESCRIPTOR_HANDLE, [*]const u32, u32, [*]const CPU_DESCRIPTOR_HANDLE, [*]const u32, DESCRIPTOR_HEAP_TYPE, ) callconv(.C) void, CopyDescriptorsSimple: fn ( *Self, u32, CPU_DESCRIPTOR_HANDLE, CPU_DESCRIPTOR_HANDLE, DESCRIPTOR_HEAP_TYPE, ) callconv(.C) void, GetResourceAllocationInfo: fn ( *Self, u32, u32, [*]const RESOURCE_DESC, *RESOURCE_ALLOCATION_INFO, ) callconv(.C) *RESOURCE_ALLOCATION_INFO, GetCustomHeapProperties: fn ( *Self, u32, HEAP_TYPE, *HEAP_PROPERTIES, ) callconv(.C) *HEAP_PROPERTIES, CreateCommittedResource: fn ( *Self, *const HEAP_PROPERTIES, HEAP_FLAGS, *const RESOURCE_DESC, RESOURCE_STATES, ?*const CLEAR_VALUE, *const os.GUID, **c_void, ) callconv(.C) HRESULT, CreateHeap: fn (*Self, *const HEAP_DESC, *const os.GUID, **c_void) callconv(.C) HRESULT, CreatePlacedResource: fn ( *Self, *IHeap, u64, *const RESOURCE_DESC, RESOURCE_STATES, *const CLEAR_VALUE, *const os.GUID, **c_void, ) callconv(.C) HRESULT, CreateReservedResource: fn ( *Self, *const RESOURCE_DESC, RESOURCE_STATES, *const CLEAR_VALUE, *const os.GUID, **c_void, ) callconv(.C) HRESULT, CreateSharedHandle: fn ( *Self, *IDeviceChild, ?*const os.SECURITY_ATTRIBUTES, os.DWORD, ?os.LPCWSTR, *os.HANDLE, ) callconv(.C) HRESULT, OpenSharedHandle: fn (*Self, os.HANDLE, *const os.GUID, **c_void) callconv(.C) HRESULT, OpenSharedHandleByName: fn (*Self, os.LPCWSTR, os.DWORD, *os.HANDLE) callconv(.C) HRESULT, MakeResident: fn (*Self, u32, [*]const *IPageable) callconv(.C) HRESULT, Evict: fn (*Self, u32, [*]const *IPageable) callconv(.C) HRESULT, CreateFence: fn (*Self, u64, FENCE_FLAGS, *const os.GUID, **c_void) callconv(.C) HRESULT, GetDeviceRemovedReason: fn (*Self) callconv(.C) HRESULT, GetCopyableFootprints: fn ( *Self, *const RESOURCE_DESC, u32, u32, u64, ?*PLACED_SUBRESOURCE_FOOTPRINT, ?*u32, ?*u64, ?*u64, ) callconv(.C) void, CreateQueryHeap: fn ( *Self, *const QUERY_HEAP_DESC, *const os.GUID, **c_void, ) callconv(.C) HRESULT, SetStablePowerState: fn (*Self, os.BOOL) callconv(.C) HRESULT, CreateCommandSignature: fn ( *Self, *const COMMAND_SIGNATURE_DESC, *IRootSignature, *const 
os.GUID, **c_void, ) callconv(.C) HRESULT, GetResourceTiling: fn ( *Self, *IResource, *u32, *PACKED_MIP_INFO, *TILE_SHAPE, *u32, u32, *SUBRESOURCE_TILING, ) callconv(.C) void, GetAdapterLuid: fn (*Self) callconv(.C) i64, }, usingnamespace os.IUnknown.Methods(Self); usingnamespace IObject.Methods(Self); usingnamespace IDevice.Methods(Self); fn Methods(comptime T: type) type { return extern struct { pub inline fn GetNodeCount(self: *T) u32 { return self.vtbl.GetNodeCount(self); } pub inline fn CreateCommandQueue( self: *T, desc: *const COMMAND_QUEUE_DESC, guid: *const os.GUID, obj: **c_void, ) HRESULT { return self.vtbl.CreateCommandQueue(self, desc, guid, obj); } pub inline fn CreateCommandAllocator( self: *T, cmdlist_type: COMMAND_LIST_TYPE, guid: *const os.GUID, obj: **c_void, ) HRESULT { return self.vtbl.CreateCommandAllocator(self, cmdlist_type, guid, obj); } pub inline fn CreateGraphicsPipelineState( self: *T, desc: *const GRAPHICS_PIPELINE_STATE_DESC, guid: *const os.GUID, pso: **c_void, ) HRESULT { return self.vtbl.CreateGraphicsPipelineState(self, desc, guid, pso); } pub inline fn CreateComputePipelineState( self: *T, desc: *const COMPUTE_PIPELINE_STATE_DESC, guid: *const os.GUID, pso: **c_void, ) HRESULT { return self.vtbl.CreateComputePipelineState(self, desc, guid, pso); } pub inline fn CreateCommandList( self: *T, node_mask: u32, cmdlist_type: COMMAND_LIST_TYPE, cmdalloc: *ICommandAllocator, initial_state: ?*IPipelineState, guid: *const os.GUID, cmdlist: **c_void, ) HRESULT { return self.vtbl.CreateCommandList( self, node_mask, cmdlist_type, cmdalloc, initial_state, guid, cmdlist, ); } pub inline fn CheckFeatureSupport( self: *T, feature: FEATURE, data: *c_void, data_size: u32, ) HRESULT { return self.vtbl.CheckFeatureSupport(self, feature, data, data_size); } pub inline fn CreateDescriptorHeap( self: *T, desc: *const DESCRIPTOR_HEAP_DESC, guid: *const os.GUID, heap: **c_void, ) HRESULT { return self.vtbl.CreateDescriptorHeap(self, desc, guid, heap); } pub inline fn GetDescriptorHandleIncrementSize( self: *T, heap_type: DESCRIPTOR_HEAP_TYPE, ) u32 { return self.vtbl.GetDescriptorHandleIncrementSize(self, heap_type); } pub inline fn CreateRootSignature( self: *T, node_mask: u32, blob: *const c_void, blob_size: u64, guid: *const os.GUID, signature: **c_void, ) HRESULT { return self.vtbl.CreateRootSignature(self, node_mask, blob, blob_size, guid, signature); } pub inline fn CreateConstantBufferView( self: *T, desc: *const CONSTANT_BUFFER_VIEW_DESC, dst_descriptor: CPU_DESCRIPTOR_HANDLE, ) void { self.vtbl.CreateConstantBufferView(self, desc, dst_descriptor); } pub inline fn CreateShaderResourceView( self: *T, resource: ?*IResource, desc: ?*const SHADER_RESOURCE_VIEW_DESC, dst_descriptor: CPU_DESCRIPTOR_HANDLE, ) void { self.vtbl.CreateShaderResourceView(self, resource, desc, dst_descriptor); } pub inline fn CreateUnorderedAccessView( self: *T, resource: ?*IResource, counter_resource: ?*IResource, desc: ?*const UNORDERED_ACCESS_VIEW_DESC, dst_descriptor: CPU_DESCRIPTOR_HANDLE, ) void { self.vtbl.CreateUnorderedAccessView( self, resource, counter_resource, desc, dst_descriptor, ); } pub inline fn CreateRenderTargetView( self: *T, resource: ?*IResource, desc: ?*const RENDER_TARGET_VIEW_DESC, dst_descriptor: CPU_DESCRIPTOR_HANDLE, ) void { self.vtbl.CreateRenderTargetView(self, resource, desc, dst_descriptor); } pub inline fn CreateDepthStencilView( self: *T, resource: ?*IResource, desc: ?*const DEPTH_STENCIL_VIEW_DESC, dst_descriptor: CPU_DESCRIPTOR_HANDLE, ) void { 
self.vtbl.CreateDepthStencilView(self, resource, desc, dst_descriptor); } pub inline fn CreateSampler( self: *T, desc: *const SAMPLER_DESC, dst_descriptor: CPU_DESCRIPTOR_HANDLE, ) void { self.vtbl.CreateSampler(self, desc, dst_descriptor); } pub inline fn CopyDescriptors( self: *T, num_dst_ranges: u32, dst_range_starts: [*]const CPU_DESCRIPTOR_HANDLE, dst_range_sizes: [*]const u32, num_src_ranges: u32, src_range_starts: [*]const CPU_DESCRIPTOR_HANDLE, src_range_sizes: [*]const u32, heap_type: DESCRIPTOR_HEAP_TYPE, ) void { self.vtbl.CopyDescriptors( self, num_dst_ranges, dst_range_starts, dst_range_sizes, num_src_ranges, src_range_starts, src_range_sizes, heap_type, ); } pub inline fn CopyDescriptorsSimple( self: *T, num: u32, dst_range_start: CPU_DESCRIPTOR_HANDLE, src_range_start: CPU_DESCRIPTOR_HANDLE, heap_type: DESCRIPTOR_HEAP_TYPE, ) void { self.vtbl.CopyDescriptorsSimple(self, num, dst_range_start, src_range_start, heap_type); } pub inline fn GetResourceAllocationInfo( self: *T, visible_mask: u32, num_descs: u32, descs: [*]const RESOURCE_DESC, ) RESOURCE_ALLOCATION_INFO { var info: RESOURCE_ALLOCATION_INFO = undefined; self.vtbl.GetResourceAllocationInfo(self, visible_mask, num_descs, descs, &info); return info; } pub inline fn GetCustomHeapProperties( self: *T, node_mask: u32, heap_type: HEAP_TYPE, ) HEAP_PROPERTIES { var props: HEAP_PROPERTIES = undefined; self.vtbl.GetCustomHeapProperties(self, node_mask, heap_type, &props); return props; } pub inline fn CreateCommittedResource( self: *T, heap_props: *const HEAP_PROPERTIES, heap_flags: HEAP_FLAGS, desc: *const RESOURCE_DESC, state: RESOURCE_STATES, clear_value: ?*const CLEAR_VALUE, guid: *const os.GUID, resource: **c_void, ) HRESULT { return self.vtbl.CreateCommittedResource( self, heap_props, heap_flags, desc, state, clear_value, guid, resource, ); } pub inline fn CreateHeap( self: *T, desc: *const HEAP_DESC, guid: *const os.GUID, heap: **c_void, ) HRESULT { return self.vtbl.CreateHeap(self, desc, guid, heap); } pub inline fn CreatePlacedResource( self: *T, heap: *IHeap, heap_offset: u64, desc: *const RESOURCE_DESC, state: RESOURCE_STATES, clear_value: *const CLEAR_VALUE, guid: *const os.GUID, resource: **c_void, ) HRESULT { return self.vtbl.CreatePlacedResource( self, heap, heap_offset, desc, state, clear_value, guid, resource, ); } pub inline fn CreateReservedResource( self: *T, desc: *const RESOURCE_DESC, state: RESOURCE_STATES, clear_value: *const CLEAR_VALUE, guid: *const os.GUID, resource: **c_void, ) HRESULT { return self.vtbl.CreateReservedResource(self, desc, state, clear_value, guid, resource); } pub inline fn CreateSharedHandle( self: *T, object: *IDeviceChild, attributes: ?*const os.SECURITY_ATTRIBUTES, access: os.DWORD, name: ?os.LPCWSTR, handle: *os.HANDLE, ) HRESULT { return self.vtbl.CreateSharedHandle(self, object, attributes, access, name, handle); } pub inline fn OpenSharedHandle( self: *T, handle: os.HANDLE, guid: *const os.GUID, object: **c_void, ) HRESULT { return self.vtbl.OpenSharedHandle(self, handle, guid, object); } pub inline fn OpenSharedHandleByName( self: *T, name: os.LPCWSTR, access: os.DWORD, handle: *os.HANDLE, ) HRESULT { return self.vtbl.OpenSharedHandleByName(self, name, access, handle); } pub inline fn MakeResident(self: *T, num: u32, objects: [*]const *IPageable) HRESULT { return self.vtbl.MakeResident(self, num, objects); } pub inline fn Evict(self: *T, num: u32, objects: [*]const *IPageable) HRESULT { return self.vtbl.Evict(self, num, objects); } pub inline fn CreateFence( self: *T, 
initial_value: u64, flags: FENCE_FLAGS, guid: *const os.GUID, fence: **c_void, ) HRESULT { return self.vtbl.CreateFence(self, initial_value, flags, guid, fence); } pub inline fn GetDeviceRemovedReason(self: *T) HRESULT { return self.vtbl.GetDeviceRemovedReason(self); } pub inline fn GetCopyableFootprints( self: *T, desc: *const RESOURCE_DESC, first_subresource: u32, num_subresources: u32, base_offset: u64, layouts: ?*PLACED_SUBRESOURCE_FOOTPRINT, num_rows: ?*u32, row_size: ?*u64, total_sizie: ?*u64, ) void { self.vtbl.GetCopyableFootprints( self, desc, first_subresource, num_subresources, base_offset, layouts, num_rows, row_size, total_sizie, ); } pub inline fn CreateQueryHeap( self: *T, desc: *const QUERY_HEAP_DESC, guid: *const os.GUID, query_heap: **c_void, ) HRESULT { return self.vtbl.CreateQueryHeap(self, desc, guid, query_heap); } pub inline fn SetStablePowerState(self: *T, enable: os.BOOL) HRESULT { return self.vtbl.SetStablePowerState(self, enable); } pub inline fn CreateCommandSignature( self: *T, desc: *const COMMAND_SIGNATURE_DESC, root_signature: *IRootSignature, guid: *const os.GUID, cmd_signature: **c_void, ) HRESULT { return self.vtbl.CreateCommandSignature(self, desc, root_signature, guid, cmd_signature); } pub inline fn GetResourceTiling( self: *T, resource: *IResource, num_resource_tiles: *u32, packed_mip_desc: *PACKED_MIP_INFO, std_tile_shape_non_packed_mips: *TILE_SHAPE, num_subresource_tilings: *u32, first_subresource: u32, subresource_tiling_for_non_packed_mips: *SUBRESOURCE_TILING, ) void { self.vtbl.GetResourceTiling( self, resource, num_resource_tiles, packed_mip_desc, std_tile_shape_non_packed_mips, num_subresource_tilings, first_subresource, subresource_tiling_for_non_packed_mips, ); } pub inline fn GetAdapterLuid(self: *T) i64 { return self.vtbl.GetAdapterLuid(self); } }; } }; pub const IID_IDebug1 = os.GUID{ .Data1 = 0xaffaa4ca, .Data2 = 0x63fe, .Data3 = 0x4d8e, .Data4 = .{ 0xb8, 0xad, 0x15, 0x90, 0x00, 0xaf, 0x43, 0x04 }, }; pub const IID_IGraphicsCommandList = os.GUID{ .Data1 = 0x5b160d0f, .Data2 = 0xac1b, .Data3 = 0x4185, .Data4 = .{ 0x8b, 0xa8, 0xb3, 0xae, 0x42, 0xa5, 0xa4, 0x55 }, }; pub const IID_ICommandQueue = os.GUID{ .Data1 = 0x0ec870a6, .Data2 = 0x5d7e, .Data3 = 0x4c22, .Data4 = .{ 0x8c, 0xfc, 0x5b, 0xaa, 0xe0, 0x76, 0x16, 0xed }, }; pub const IID_IDevice = os.GUID{ .Data1 = 0x189819f1, .Data2 = 0x1db6, .Data3 = 0x4b57, .Data4 = .{ 0xbe, 0x54, 0x18, 0x21, 0x33, 0x9b, 0x85, 0xf7 }, }; pub const IID_IDescriptorHeap = os.GUID{ .Data1 = 0x8efb471d, .Data2 = 0x616c, .Data3 = 0x4f49, .Data4 = .{ 0x90, 0xf7, 0x12, 0x7b, 0xb7, 0x63, 0xfa, 0x51 }, }; pub const IID_IResource = os.GUID{ .Data1 = 0x696442be, .Data2 = 0xa72e, .Data3 = 0x4059, .Data4 = .{ 0xbc, 0x79, 0x5b, 0x5c, 0x98, 0x04, 0x0f, 0xad }, }; pub const IID_IRootSignature = os.GUID{ .Data1 = 0xc54a6b66, .Data2 = 0x72df, .Data3 = 0x4ee8, .Data4 = .{ 0x8b, 0xe5, 0xa9, 0x46, 0xa1, 0x42, 0x92, 0x14 }, }; pub const IID_ICommandAllocator = os.GUID{ .Data1 = 0x6102dee4, .Data2 = 0xaf59, .Data3 = 0x4b09, .Data4 = .{ 0xb9, 0x99, 0xb4, 0x4d, 0x73, 0xf0, 0x9b, 0x24 }, }; pub const IID_IFence = os.GUID{ .Data1 = 0x0a753dcf, .Data2 = 0xc4d8, .Data3 = 0x4b91, .Data4 = .{ 0xad, 0xf6, 0xbe, 0x5a, 0x60, 0xd9, 0x5a, 0x76 }, }; pub const IID_IPipelineState = os.GUID{ .Data1 = 0x765a30f3, .Data2 = 0xf624, .Data3 = 0x4c6f, .Data4 = .{ 0xa8, 0x28, 0xac, 0xe9, 0x48, 0x62, 0x24, 0x45 }, }; pub const IID_IInfoQueue = os.GUID{ .Data1 = 0x0742a90b, .Data2 = 0xc387, .Data3 = 0x483f, .Data4 = .{ 0xb9, 0x46, 0x30, 0xa7, 0xe4, 
0xe6, 0x14, 0x58 }, }; pub var GetDebugInterface: fn (*const os.GUID, **c_void) callconv(.C) HRESULT = undefined; pub var CreateDevice: fn ( ?*os.IUnknown, dcommon.FEATURE_LEVEL, *const os.GUID, **c_void, ) callconv(.C) HRESULT = undefined; pub fn init() void { // TODO: Handle error. var d3d12_dll = os.LoadLibraryA("d3d12.dll").?; GetDebugInterface = @ptrCast( @TypeOf(GetDebugInterface), os.kernel32.GetProcAddress(d3d12_dll, "D3D12GetDebugInterface").?, ); CreateDevice = @ptrCast( @TypeOf(CreateDevice), os.kernel32.GetProcAddress(d3d12_dll, "D3D12CreateDevice").?, ); }
src/windows/d3d12.zig
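// Illustrative usage sketch for the bindings in src/windows/d3d12.zig above; it is an
// assumption, not part of the file. It presumes the file is imported relative to src/ as
// `d3d12`, that HRESULT is a signed integer where a negative value means failure, and the
// helper name `enableDebugLayer` is ours.
const d3d12 = @import("windows/d3d12.zig");

fn enableDebugLayer() !*c_void {
    // Loads d3d12.dll and resolves D3D12GetDebugInterface / D3D12CreateDevice.
    d3d12.init();
    var debug: *c_void = undefined;
    // IID_IDebug1 is declared in the file above; GetDebugInterface fills `debug` on success.
    if (d3d12.GetDebugInterface(&d3d12.IID_IDebug1, &debug) < 0) return error.NoDebugInterface;
    return debug;
}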
const builtin = @import("builtin"); const TypeId = builtin.TypeId; const std = @import("std"); const math = std.math; const assert = std.debug.assert; const warn = std.debug.warn; const matrix = @import("matrix.zig"); const Matrix = matrix.Matrix; const M44f32 = matrix.M44f32; const mulM44f32 = matrix.mulM44f32; const vec = @import("vec.zig"); const V3f32 = vec.V3f32; const V2f32 = vec.V2f32; const degrad = @import("degrad.zig"); const degToRad = degrad.degToRad; const radToDeg = degrad.radToDeg; const ae = @import("modules/zig-approxeql/approxeql.zig"); const DBG = false; /// Builds a Pitch Yaw Roll Rotation matrix from point with x, y, z angles in radians. pub fn rotateCwPitchYawRollV3f32(point: V3f32) M44f32 { if (DBG) warn("rotateCwPitchYawRollV3f32: point={}\n", &point); return rotateCwPitchYawRoll(point.x(), point.y(), point.z()); } /// Builds a Pitch Yaw Roll Rotation matrix from y, x, z angles in radians. pub fn rotateCwPitchYawRoll(x: f32, y: f32, z: f32) M44f32 { const rx = RotateCwX(x); const ry = RotateCwY(y); const rz = RotateCwZ(z); var m = mulM44f32(&rz, &mulM44f32(&ry, &rx)); if (DBG) warn("rotateCwPitchYawRoll x={.5} y={.5} z={.5} m:\n{}\n", x, y, z, &m); return m; } /// Builds a Pitch Yaw Roll Rotation matrix from x, y, z angles in radians. pub fn rotateCwPitchYawRollNeg(x: f32, y: f32, z: f32) M44f32 { const rx = RotateCwX(x); const ry = RotateCwY(y); const rz = RotateCwZ(z); var m = mulM44f32(&rx, &mulM44f32(&ry, &rz)); if (DBG) warn("rotateCwPitchYawRollNeg x={.5} y={.5} z={.5} m:\n{}\n", x, y, z, &m); return m; } // Return a `M44f32` for x axis Clockwise // Bug: This rotates Monkey Counter Clockwise fn RotateCwX(x: f32) M44f32 { const rx = M44f32{ .data = [][4]f32{ []f32{ 1.0, 0.0, 0.0, 0.0 }, []f32{ 0.0, math.cos(x), math.sin(x), 0.0 }, []f32{ 0.0, -math.sin(x), math.cos(x), 0.0 }, []f32{ 0.0, 0.0, 0.0, 1.0 }, } }; if (DBG) warn("rotateCwX x={.5} rx:\n{}\n", x, &rx); return rx; } // Return a `M44f32` for y axis that rotates Clockwise // Bug: This rotates Monkey Counter Clockwise fn RotateCwY(y: f32) M44f32 { const ry = M44f32{ .data = [][4]f32{ []f32{ math.cos(y), 0.0, -math.sin(y), 0.0 }, []f32{ 0.0, 1.0, 0.0, 0.0 }, []f32{ math.sin(y), 0.0, math.cos(y), 0.0 }, []f32{ 0.0, 0.0, 0.0, 1.0 }, } }; if (DBG) warn("rotateCwY y={.5} ry:\n{}\n", y, &ry); return ry; } // Return a `M44f32` for z axis Clockwise. 
fn RotateCwZ(z: f32) M44f32 { const rz = M44f32{ .data = [][4]f32{ []f32{ math.cos(z), math.sin(z), 0.0, 0.0 }, []f32{ -math.sin(z), math.cos(z), 0.0, 0.0 }, []f32{ 0.0, 0.0, 1.0, 0.0 }, []f32{ 0.0, 0.0, 0.0, 1.0 }, } }; if (DBG) warn("rotateCwZ z={.5} rz:\n{}\n", z, &rz); return rz; } test "rotation.rotateCwX" { if (DBG) warn("\n"); const radians: f32 = degToRad(10.0); var m = RotateCwX(radians); if (DBG) warn("m:\n{}\n", &m); var c: f32 = math.cos(radians); var s: f32 = math.sin(radians); assert(m.data[1][1] == c); assert(m.data[2][2] == c); assert(m.data[1][2] == s); assert(m.data[2][1] == -s); var point = V3f32.init(0, 1, 0); if (DBG) warn("point: {}\n", &point); var rot_point = point.transform(&m); if (DBG) warn("rot_point: {}\n", &rot_point); assert(rot_point.x() == 0); assert(ae.approxEql(rot_point.y(), c, 7)); assert(ae.approxEql(rot_point.z(), s, 7)); } test "rotation.rotateCwY" { if (DBG) warn("\n"); const radians: f32 = degToRad(10.0); var m = RotateCwY(radians); if (DBG) warn("m:\n{}\n", &m); var c: f32 = math.cos(radians); var s: f32 = math.sin(radians); assert(m.data[0][0] == c); assert(m.data[2][2] == c); assert(m.data[0][2] == -s); assert(m.data[2][0] == s); var point = V3f32.init(0, 0, 1); if (DBG) warn("point: {}\n", &point); var rot_point = point.transform(&m); if (DBG) warn("rot_point: {}\n", &rot_point); assert(ae.approxEql(rot_point.x(), s, 7)); assert(rot_point.y() == 0); assert(ae.approxEql(rot_point.z(), c, 7)); } test "rotation.rotateCwZ" { if (DBG) warn("\n"); const radians: f32 = degToRad(10.0); var m = RotateCwZ(radians); if (DBG) warn("m:\n{}\n", &m); var c: f32 = math.cos(radians); var s: f32 = math.sin(radians); assert(m.data[0][0] == c); assert(m.data[1][1] == c); assert(m.data[0][1] == s); assert(m.data[1][0] == -s); var point = V3f32.init(0, 1, 0); if (DBG) warn("point: {}\n", &point); var rot_point = point.transform(&m); if (DBG) warn("rot_point: {}\n", &rot_point); assert(ae.approxEql(rot_point.x(), -s, 7)); assert(ae.approxEql(rot_point.y(), c, 7)); assert(rot_point.z() == 0); } test "rotation.rotateCwPitchYawRoll" { if (DBG) warn("\n"); const radians: f32 = degToRad(10.0); var m_zero = rotateCwPitchYawRoll(0, 0, 0); var m_x_pos_ten_deg = rotateCwPitchYawRoll(radians, 0, 0); var m_x_neg_ten_deg = rotateCwPitchYawRoll(-radians, 0, 0); var x = mulM44f32(&m_x_pos_ten_deg, &m_x_neg_ten_deg); if (DBG) warn("m_x_pos_ten_deg:\n{}\n", &m_x_pos_ten_deg); if (DBG) warn("m_x_neg_ten_deg:\n{}\n", &m_x_neg_ten_deg); if (DBG) warn("x = pos * neg:\n{}\n", &x); assert(matrix.approxEql(&m_zero, &x, 7)); var m_y_pos_ten_deg = rotateCwPitchYawRoll(0, radians, 0); var m_y_neg_ten_deg = rotateCwPitchYawRoll(0, -radians, 0); var y = mulM44f32(&m_y_pos_ten_deg, &m_y_neg_ten_deg); if (DBG) warn("m_y_pos_ten_deg:\n{}\n", m_y_pos_ten_deg); if (DBG) warn("m_y_neg_ten_deg:\n{}\n", m_y_neg_ten_deg); if (DBG) warn("y = pos * neg:\n{}\n", &y); assert(matrix.approxEql(&m_zero, &y, 7)); var m_z_pos_ten_deg = rotateCwPitchYawRoll(0, 0, radians); var m_z_neg_ten_deg = rotateCwPitchYawRoll(0, 0, -radians); var z = mulM44f32(&m_z_pos_ten_deg, &m_z_neg_ten_deg); if (DBG) warn("m_z_neg_ten_deg:\n{}\n", m_z_neg_ten_deg); if (DBG) warn("m_z_pos_ten_deg:\n{}\n", m_z_pos_ten_deg); if (DBG) warn("z = pos * neg:\n{}\n", &z); assert(matrix.approxEql(&m_zero, &z, 7)); var xy_pos = mulM44f32(&m_x_pos_ten_deg, &m_y_pos_ten_deg); var a = mulM44f32(&xy_pos, &m_y_neg_ten_deg); var b = mulM44f32(&a, &m_x_neg_ten_deg); if (DBG) warn("xy_pos = x_pos_ten * y_pos_ten:\n{}\n", &xy_pos); if (DBG) warn("a = 
xy_pos * y_pos_ten\n{}\n", &a); if (DBG) warn("b = a * x_pos_ten\n{}\n", &b); assert(matrix.approxEql(&m_zero, &b, 7)); // To undo a rotateCwPitchYawRoll the multiplication in rotateCwPitchYawRoll // must be applied in reverse order. mulM44f32(&rz, &mulM44f32(&ry, &rx)) // 1) r1 = ry * rx // 2) r2 = rz * ry // must be applied: // 1) r3 = -rz * r2 // 2) r4 = -ry * r3 // 3) r5 = -rx * r4 var r2 = rotateCwPitchYawRoll(radians, radians, radians); if (DBG) warn("r2:\n{}\n", &r2); var r3 = mulM44f32(&m_z_neg_ten_deg, &r2); var r4 = mulM44f32(&m_y_neg_ten_deg, &r3); var r5 = mulM44f32(&m_x_neg_ten_deg, &r4); if (DBG) warn("r5:\n{}\n", &r5); assert(matrix.approxEql(&m_zero, &r5, 7)); // Here is the above as a single line both are equal to m_zero r5 = mulM44f32(&m_x_neg_ten_deg, &mulM44f32(&m_y_neg_ten_deg, &mulM44f32(&m_z_neg_ten_deg, &r2))); if (DBG) warn("r5 one line:\n{}\n", &r5); assert(matrix.approxEql(&m_zero, &r5, 7)); // Or you can use rotateCwPitchYawRollNeg var rneg = rotateCwPitchYawRollNeg(-radians, -radians, -radians); if (DBG) warn("rneg:\n{}\n", &rneg); var r6 = mulM44f32(&rneg, &r2); if (DBG) warn("r6:\n{}\n", &r6); assert(matrix.approxEql(&m_zero, &r6, 7)); }
rotation.zig
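// Illustrative sketch for rotation.zig above, not part of the file: it assumes rotation.zig
// and matrix.zig are importable from the same directory, and the helper name `roundTrip` is
// ours. It restates the property the tests verify: a rotateCwPitchYawRoll is undone by
// rotateCwPitchYawRollNeg with the negated angles, because the axis rotations must be
// re-applied in reverse order.
const rotation = @import("rotation.zig");
const mat = @import("matrix.zig");

fn roundTrip(x: f32, y: f32, z: f32) mat.M44f32 {
    const fwd = rotation.rotateCwPitchYawRoll(x, y, z);
    const inv = rotation.rotateCwPitchYawRollNeg(-x, -y, -z);
    // The product should be approximately the identity, i.e. rotateCwPitchYawRoll(0, 0, 0).
    return mat.mulM44f32(&inv, &fwd);
}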
const std = @import("std"); const util = @import("util.zig"); const data = @embedFile("../data/day03.txt"); fn parseInput(input_text: []const u8) std.ArrayList([]const u8) { var list = std.ArrayList([]const u8).init(std.testing.allocator); var lines = std.mem.tokenize(u8, input_text, "\r\n"); while (lines.next()) |line| { list.append(line) catch unreachable; } return list; } fn bitDeltaAtPosition(input: std.ArrayList([]const u8), skip: ?std.ArrayList(bool), bit_index: usize) i64 { var delta: i64 = 0; for (input.items) |bits, i| { if (skip == null or !skip.?.items[i]) delta = if (bits[bit_index] == '0') delta - 1 else delta + 1; } return delta; } fn part1(input: std.ArrayList([]const u8)) i64 { var gamma_rate: i64 = 0; var epsilon_rate: i64 = 0; var i: usize = 0; const bit_count = input.items[0].len; while (i < bit_count) : (i += 1) { const delta = bitDeltaAtPosition(input, null, i); gamma_rate *= 2; gamma_rate += if (delta > 0) @as(i64, 1) else @as(i64, 0); epsilon_rate *= 2; epsilon_rate += if (delta > 0) @as(i64, 0) else @as(i64, 1); } return gamma_rate * epsilon_rate; } fn part2(input: std.ArrayList([]const u8)) i64 { var i: usize = 0; var skip = std.ArrayList(bool).init(std.testing.allocator); skip.ensureTotalCapacity(input.items.len) catch unreachable; defer skip.deinit(); while (i < input.items.len) : (i += 1) { skip.append(false) catch unreachable; } const bit_count = input.items[0].len; var winner: usize = input.items.len; var candidates_remaining = input.items.len; i = 0; var oxygen_rating = while (i < bit_count) : (i += 1) { const delta = bitDeltaAtPosition(input, skip, i); const target: u8 = if (delta >= 0) '1' else '0'; for (input.items) |bits, j| { if (skip.items[j]) { continue; } if (bits[i] != target) { skip.items[j] = true; candidates_remaining -= 1; continue; } winner = j; } if (candidates_remaining == 1) { break parseInt(i64, input.items[winner], 2) catch unreachable; } } else -1; winner = input.items.len; candidates_remaining = input.items.len; std.mem.set(bool, skip.items, false); i = 0; var co2_rating = while (i < bit_count) : (i += 1) { const delta = bitDeltaAtPosition(input, skip, i); const target: u8 = if (delta >= 0) '0' else '1'; // flip target for CO2 scrubber for (input.items) |bits, j| { if (skip.items[j]) { continue; } if (bits[i] != target) { skip.items[j] = true; candidates_remaining -= 1; continue; } winner = j; } if (candidates_remaining == 1) { break parseInt(i64, input.items[winner], 2) catch unreachable; } } else -1; return oxygen_rating * co2_rating; } pub fn main() !void {} const test_data = \\00100 \\11110 \\10110 \\10111 \\10101 \\01111 \\00111 \\11100 \\10000 \\11001 \\00010 \\01010 ; test "part1" { const test_input = parseInput(test_data); defer test_input.deinit(); try std.testing.expectEqual(@as(i64, 198), part1(test_input)); const input = parseInput(data); defer input.deinit(); try std.testing.expectEqual(@as(i64, 4001724), part1(input)); } test "part2" { const test_input = parseInput(test_data); defer test_input.deinit(); try std.testing.expectEqual(@as(i64, 230), part2(test_input)); const input = parseInput(data); defer input.deinit(); try std.testing.expectEqual(@as(i64, 587895), part2(input)); } // Useful stdlib functions const tokenize = std.mem.tokenize; const split = std.mem.split; const parseInt = std.fmt.parseInt; const min = std.math.min; const max = std.math.max; const print = std.debug.print;
src/day03.zig
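// Worked example for part1 in src/day03.zig above (illustrative only): for the 12 sample
// lines in test_data the majority bit per column is 1,0,1,1,0, so gamma_rate = 0b10110 = 22,
// epsilon_rate = 0b01001 = 9 (its complement over the 5-bit width), and part1 returns
// 22 * 9 = 198, which is what the "part1" test expects. The helper below only restates the
// complement relationship; its name and signature are ours.
fn epsilonFromGamma(gamma: i64, bit_count: u6) i64 {
    // epsilon flips every bit of gamma within the report's bit width.
    const mask = (@as(i64, 1) << bit_count) - 1;
    return gamma ^ mask;
}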
const std = @import("std"); const assert = std.debug.assert; const ArrayList = std.ArrayList; const Mutex = std.Thread.Mutex; const page_alloc = std.heap.page_allocator; const AdmaAllocator = @import("adma").AdmaAllocator; const debug_print = false; const print = if (debug_print) std.debug.print else fakeprint; fn fakeprint(_: []const u8, __: anytype) void {} threadlocal var adma_alloc: ?*AdmaAllocator = null; var lib_init = false; var allocations: ArrayList([]u8) = undefined; var mutex = Mutex{}; export fn malloc(size: usize) ?[*]u8 { const adma = adma_alloc orelse blk: { adma_alloc = AdmaAllocator.init(); assert(adma_alloc != null); allocations = ArrayList([]u8).init(&adma_alloc.?.allocator); std.debug.print("Initialized Adma Allocator\n", .{}); break :blk adma_alloc.?; }; var lock = mutex.acquire(); defer lock.release(); var data = adma.allocator.alloc(u8, size) catch @panic("Failed to allocate"); //var data = page_alloc.alloc(u8, size) catch @panic("Failed to allocate"); allocations.append(data) catch @panic("Failed to track allocation"); print("Malloced {} ask size: {} | given size: {}\n", .{ data.ptr, size, data.len }); return data.ptr; } export fn free(raw_ptr: ?[*]u8) void { const ptr = raw_ptr orelse return; const adma = adma_alloc orelse @panic("Attempt to free without initing adma"); print("Freeing pointer: {}\n", .{ptr}); var lock = mutex.acquire(); defer lock.release(); for (allocations.items) |item, i| { if (ptr == item.ptr) { print("Found and freeing: {}\n", .{ptr}); adma.allocator.free(item); //page_alloc.free(item); _ = allocations.swapRemove(i); return; } } @panic("Attempt to free unknown pointer"); } export fn calloc(num: usize, size: usize) ?[*]u8 { const tsize = num * size; if (tsize == 0) return null; const may_ptr = malloc(tsize); print("Calloced {} bytes: {}\n", .{ tsize, may_ptr }); if (may_ptr) |ptr| { @memset(ptr, 0, tsize); } return may_ptr; } export fn realloc(rptr: ?[*]u8, size: usize) ?[*]u8 { const ptr = rptr orelse return null; const adma = adma_alloc orelse return null; var lock = mutex.acquire(); defer lock.release(); for (allocations.items) |item, i| { if (ptr == item.ptr) { print("Reallocing ptr: {} | oldsize: {} | newsize: {}\n", .{ ptr, item.len, size }); const newdata = adma.allocator.realloc(item, size) catch //const newdata = page_alloc.realloc(item, size) catch @panic("Failed to realloc"); allocations.items[i] = newdata; assert(newdata.len == size); return newdata.ptr; } } @panic("Failed to realloc"); } export fn memalign(alignment: usize, size: usize) ?[*]u8 { std.debug.print("Memalign called.\n", .{}); return null; } export fn exit(code: u8) void { std.debug.print("\nGoodbye from exit!\n", .{}); cleanup(); std.os.exit(code); } export fn abort() void { std.debug.print("\nGoodbye from abort!\n", .{}); cleanup(); std.os.abort(); } fn cleanup() void { const adma = adma_alloc orelse return; for (allocations.items) |item| { adma.allocator.free(item); } print("Cleaned up allocations\n", .{}); allocations.deinit(); print("Deinit allocations\n", .{}); adma.deinit(); std.debug.print("Deinit adma\n", .{}); }
src/externs.zig
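// Design note on src/externs.zig above: Zig allocators free by slice (pointer plus length),
// while C's free() and realloc() only hand the pointer back, so the shim appends every
// allocation to `allocations` and does a linear scan to recover the slice. The sketch below
// is only an illustration of that bookkeeping (the type and names are ours; the real file
// additionally guards the list with a mutex and allocates through AdmaAllocator).
const std = @import("std");

const PtrTracker = struct {
    list: std.ArrayList([]u8),

    fn findByPtr(self: *PtrTracker, ptr: [*]u8) ?usize {
        // O(n) per free/realloc: the trade-off the shim accepts for simplicity.
        for (self.list.items) |item, i| {
            if (item.ptr == ptr) return i;
        }
        return null;
    }
};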
const std = @import("std"); const mem = std.mem; usingnamespace @import("vector_types.zig"); usingnamespace @import("llvm_intrinsics.zig"); const main = @import("main.zig"); const CharUtils = @import("string_parsing.zig").CharUtils; const dom = @import("dom.zig"); const Iterator = dom.Iterator; const TapeBuilder = dom.TapeBuilder; const TapeType = dom.TapeType; const common = @import("common.zig"); const Error = common.Error; const println = common.println; fn INVALID_NUMBER(src: [*]const u8) Error { _ = src; return error.NUMBER_ERROR; } fn WRITE_DOUBLE(VALUE: f64, _: [*]const u8, WRITER: *TapeBuilder) void { (WRITER).append_double(VALUE); } fn WRITE_INTEGER(VALUE: u64, _: [*]const u8, WRITER: *TapeBuilder) void { (WRITER).append_i64(VALUE); } fn WRITE_UNSIGNED(VALUE: u64, _: [*]const u8, WRITER: *TapeBuilder) void { (WRITER).append_u64(VALUE); } pub fn parse_number( src: [*]const u8, tb: *TapeBuilder, ) Error!void { const negative = src[0] == '-'; var p = src + @boolToInt(negative); // // Parse the integer part. // // PERF NOTE: we don't use is_made_of_eight_digits_fast because large integers like 123456789 are rare const start_digits = p; var i: u64 = 0; while (parse_digit(u64, p[0], &i)) p += 1; var digit_count = try common.ptr_diff(u32, p, start_digits); // println("parse_digit i {} digit_count {}", .{ i, digit_count }); if (digit_count == 0 or ('0' == start_digits[0] and digit_count > 1)) { return INVALID_NUMBER(src); } // // Handle floats if there is a . or e (or both) // var exponent: i64 = 0; var is_float = false; if ('.' == p[0]) { is_float = true; p += 1; // std.log.debug("calling parse_decimal p[0] {c} i {} ", .{ p[0], i }); try parse_decimal(src, &p, &i, &exponent); digit_count = try common.ptr_diff(u32, p, start_digits); // used later to guard against overflows } if ('e' == p[0] or 'E' == p[0]) { is_float = true; p += 1; try parse_exponent(src, &p, &exponent); } if (is_float) { // main.println("is_float {c}", .{p[0]}); const dirty_end = CharUtils.is_not_structural_or_whitespace(p[0]); try write_float(src, negative, i, start_digits, digit_count, exponent, tb); if (dirty_end) { return INVALID_NUMBER(src); } return; } // The longest negative 64-bit number is 19 digits. // The longest positive 64-bit number is 20 digits. // We do it this way so we don't trigger this branch unless we must. const longest_digit_count: usize = if (negative) 19 else 20; if (digit_count > longest_digit_count) { return INVALID_NUMBER(src); } if (digit_count == longest_digit_count) { if (negative) { // Anything negative above INT64_MAX+1 is invalid if (i > @as(u64, std.math.maxInt(i64)) + 1) { return INVALID_NUMBER(src); } WRITE_INTEGER(~i + 1, src, tb); if (CharUtils.is_not_structural_or_whitespace(p[0])) { return INVALID_NUMBER(src); } return; // Positive overflow check: // - A 20 digit number starting with 2-9 is overflow, because 18,446,744,073,709,551,615 is the // biggest uint64_t. // - A 20 digit number starting with 1 is overflow if it is less than INT64_MAX. // If we got here, it's a 20 digit number starting with the digit "1". // - If a 20 digit number starting with 1 overflowed (i*10+digit), the result will be smaller // than 1,553,255,926,290,448,384. // - That is smaller than the smallest possible 20-digit number the user could write: // 10,000,000,000,000,000,000. // - Therefore, if the number is positive and lower than that, it's overflow. // - The value we are looking at is less than or equal to 9,223,372,036,854,775,808 (INT64_MAX). 
// } else if (src[0] != '1' or i <= @as(u64, std.math.maxInt(i64))) { return INVALID_NUMBER(src); } } // Write unsigned if it doesn't fit in a signed integer. if (i > @as(u64, std.math.maxInt(i64))) { WRITE_UNSIGNED(i, src, tb); // try tb.append2(iter, 0, i, .INT64); } else { WRITE_INTEGER(if (negative) (~i +% 1) else i, src, tb); // try tb.append2(iter, 0, if (negative) (~i +% 1) else i, .INT64); } // std.log.debug("parse number last '{c}'(0x{x}:{})", .{ p[0], p[0], p[0] }); if (CharUtils.is_not_structural_or_whitespace(p[0])) return INVALID_NUMBER(src); } // We deliberately allow overflow here and check later fn parse_digit(comptime I: type, char: u8, i: *I) bool { const digit = char -% '0'; if (digit > 9) return false; // PERF NOTE: multiplication by 10 is cheaper than arbitrary integer multiplication i.* = 10 *% i.* + digit; // might overflow, we will handle the overflow later return true; } fn parse_decimal(src: [*]const u8, p: *[*]const u8, i: *u64, exponent: *i64) !void { // we continue with the fiction that we have an integer. If the // floating point number is representable as x * 10^z for some integer // z that fits in 53 bits, then we will be able to convert back the // the integer into a float in a lossless manner. const first_after_period = p.*; // TODO // #ifdef SWAR_NUMBER_PARSING // // this helps if we have lots of decimals! // // this turns out to be frequent enough. // if (is_made_of_eight_digits_fast(p)) { // i = i * 100000000 + parse_eight_digits_unrolled(p); // p += 8; // } // #endif // Unrolling the first digit makes a small difference on some implementations (e.g. westmere) // if (parse_digit(u64, p.*[0], i)) // p.* += 1; while (parse_digit(u64, p.*[0], i)) p.* += 1; exponent.* = (try std.math.cast(i64, @ptrToInt(first_after_period))) - (try std.math.cast(i64, @ptrToInt(p.*))); // std.log.debug("exponent {} firstap {*} p {*}", .{ exponent.*, first_after_period, p }); // Decimal without digits (123.) is illegal if (exponent.* == 0) { return INVALID_NUMBER(src); } } inline fn parse_exponent(src: [*]const u8, p: *[*]const u8, exponent: *i64) !void { // Exp Sign: -123.456e[-]78 const neg_exp = ('-' == p.*[0]); if (neg_exp or '+' == p.*[0]) { p.* += 1; } // Skip + as well // Exponent: -123.456e-[78] var start_exp = p.*; var exp_number: i64 = 0; while (parse_digit(i64, p.*[0], &exp_number)) p.* += 1; // It is possible for parse_digit to overflow. // In particular, it could overflow to INT64_MIN, and we cannot do - INT64_MIN. // Thus we *must* check for possible overflow before we negate exp_number. // Performance notes: it may seem like combining the two "simdjson_unlikely checks" below into // a single simdjson_unlikely path would be faster. The reasoning is sound, but the compiler may // not oblige and may, in fact, generate two distinct paths in any case. It might be // possible to do uint64_t(p - start_exp - 1) >= 18 but it could end up trading off // instructions for a simdjson_likely branch, an unconclusive gain. // If there were no digits, it's an error. if (p.* == start_exp) { return INVALID_NUMBER(src); } // We have a valid positive exponent in exp_number at this point, except that // it may have overflowed. // If there were more than 18 digits, we may have overflowed the integer. We have to do // something!!!! if (@ptrToInt(p.*) > @ptrToInt(start_exp + 18)) { // Skip leading zeroes: 1e000000000000000000001 is technically valid and doesn't overflow while (start_exp[0] == '0') start_exp += 1; // 19 digits could overflow int64_t and is kind of absurd anyway. 
We don't // support exponents smaller than -999,999,999,999,999,999 and bigger // than 999,999,999,999,999,999. // We can truncate. // Note that 999999999999999999 is assuredly too large. The maximal ieee64 value before // infinity is ~1.8e308. The smallest subnormal is ~5e-324. So, actually, we could // truncate at 324. // Note that there is no reason to fail per se at this point in time. // E.g., 0e999999999999999999999 is a fine number. if (@ptrToInt(p.*) > @ptrToInt(start_exp + 18)) { exp_number = 999999999999999999; } } // At this point, we know that exp_number is a sane, positive, signed integer. // It is <= 999,999,999,999,999,999. As long as 'exponent' is in // [-8223372036854775808, 8223372036854775808], we won't overflow. Because 'exponent' // is bounded in magnitude by the size of the JSON input, we are fine in this universe. // To sum it up: the next line should never overflow. exponent.* += if (neg_exp) -exp_number else exp_number; } fn significant_digits(start_digits: [*]const u8, digit_count: usize) usize { // It is possible that the integer had an overflow. // We have to handle the case where we have 0.0000somenumber. var start = start_digits; while ((start[0] == '0') or (start[0] == '.')) start += 1; // we over-decrement by one when there is a '.' return digit_count -% @ptrToInt(start) - @ptrToInt(start_digits); } fn slow_float_parsing(src: [*]const u8, writer: *TapeBuilder) !void { var d: f64 = undefined; if (parse_float_fallback(src, &d)) { writer.append_double(d); return; } return INVALID_NUMBER(src); } const AdjustedMantissa = struct { mantissa: u64 = 0, power2: i32 = 0, }; const max_digits = 768; const decimal_point_range = 2047; const Decimal = struct { num_digits: u32, decimal_point: i32, negative: bool, truncated: bool, digits: [max_digits]u8, }; fn is_integer(c: u8) bool { return (c >= '0' and c <= '9'); } // This should always succeed since it follows a call to parse_number. 
fn parse_decimal2(p_: [*]const u8) Decimal { var p = p_; var answer: Decimal = undefined; answer.num_digits = 0; answer.decimal_point = 0; answer.truncated = false; answer.negative = (p[0] == '-'); if (p[0] == '-' or p[0] == '+') p += 1; while (p[0] == '0') p += 1; while (is_integer(p[0])) { if (answer.num_digits < max_digits) answer.digits[answer.num_digits] = p[0] - '0'; answer.num_digits += 1; p += 1; } if (p[0] == '.') { p += 1; const first_after_period = p; // if we have not yet encountered a zero, we have to skip it as well if (answer.num_digits == 0) { // skip zeros while (p[0] == '0') { p += 1; } } while (is_integer(p[0])) { if (answer.num_digits < max_digits) { answer.digits[answer.num_digits] = p[0] - '0'; } answer.num_digits += 1; p += 1; } answer.decimal_point = @intCast(i32, @ptrToInt(first_after_period) -% @ptrToInt(p)); } if (answer.num_digits > 0) { var preverse = p - 1; var trailing_zeros: i32 = 0; while ((preverse[0] == '0') or (preverse[0] == '.')) { if (preverse[0] == '0') trailing_zeros += 1; preverse -= 1; } answer.decimal_point += @intCast(i32, answer.num_digits); answer.num_digits -= @intCast(u32, trailing_zeros); } if (answer.num_digits > max_digits) { answer.num_digits = max_digits; answer.truncated = true; } if (('e' == p[0]) or ('E' == p[0])) { p += 1; var neg_exp = false; if ('-' == p[0]) { neg_exp = true; p += 1; } else if ('+' == p[0]) { p += 1; } var exp_number: i32 = 0; // exponential part while (is_integer(p[0])) { var digit = p[0] - '0'; if (exp_number < 0x10000) exp_number = 10 * exp_number + digit; p += 1; } answer.decimal_point += if (neg_exp) -exp_number else exp_number; } return answer; } // remove all final zeroes inline fn trim(h: *Decimal) void { while ((h.num_digits > 0) and (h.digits[h.num_digits - 1] == 0)) { h.num_digits -= 1; } } // computes h * 2^shift fn decimal_right_shift(h: *Decimal, shift: u6) void { var read_index: u32 = 0; var write_index: u32 = 0; var n: u64 = 0; while ((n >> shift) == 0) { if (read_index < h.num_digits) { n = (10 * n) + h.digits[read_index]; read_index += 1; } else if (n == 0) { return; } else { while ((n >> shift) == 0) { n = 10 * n; read_index += 1; } break; } } h.decimal_point -= @intCast(i32, read_index - 1); if (h.decimal_point < -decimal_point_range) { // it is zero h.num_digits = 0; h.decimal_point = 0; h.negative = false; h.truncated = false; return; } const mask = (@as(u64, 1) << shift) - 1; while (read_index < h.num_digits) { const new_digit = @intCast(u8, n >> shift); n = (10 * (n & mask)) + h.digits[read_index]; read_index += 1; h.digits[write_index] = new_digit; write_index += 1; } while (n > 0) { const new_digit = @intCast(u8, n >> shift); n = 10 * (n & mask); if (write_index < max_digits) { h.digits[write_index] = new_digit; write_index += 1; } else if (new_digit > 0) { h.truncated = true; } } h.num_digits = write_index; trim(h); } fn number_of_digits_decimal_left_shift(h: *Decimal, shift_: u32) u32 { var shift = shift_ & 63; const number_of_digits_decimal_left_shift_table = [65]u16{ 0x0000, 0x0800, 0x0801, 0x0803, 0x1006, 0x1009, 0x100D, 0x1812, 0x1817, 0x181D, 0x2024, 0x202B, 0x2033, 0x203C, 0x2846, 0x2850, 0x285B, 0x3067, 0x3073, 0x3080, 0x388E, 0x389C, 0x38AB, 0x38BB, 0x40CC, 0x40DD, 0x40EF, 0x4902, 0x4915, 0x4929, 0x513E, 0x5153, 0x5169, 0x5180, 0x5998, 0x59B0, 0x59C9, 0x61E3, 0x61FD, 0x6218, 0x6A34, 0x6A50, 0x6A6D, 0x6A8B, 0x72AA, 0x72C9, 0x72E9, 0x7B0A, 0x7B2B, 0x7B4D, 0x8370, 0x8393, 0x83B7, 0x83DC, 0x8C02, 0x8C28, 0x8C4F, 0x9477, 0x949F, 0x94C8, 0x9CF2, 0x051C, 0x051C, 0x051C, 0x051C, }; 
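// Each u16 entry above packs two fields: the top 5 bits (x >> 11) are the candidate number
// of new decimal digits produced by a left shift of `shift` bits, and the low 11 bits
// (x & 0x7FF) are an offset into the concatenated decimal digits of successive powers of
// five stored below, so pow5_b - pow5_a is the digit count of 5^shift. Comparing h's
// leading digits against those of 5^shift then decides whether the shift really adds
// num_new_digits digits or one fewer.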
const x_a: u32 = number_of_digits_decimal_left_shift_table[shift]; const x_b: u32 = number_of_digits_decimal_left_shift_table[shift + 1]; const num_new_digits: u32 = x_a >> 11; const pow5_a: u32 = 0x7FF & x_a; const pow5_b: u32 = 0x7FF & x_b; const number_of_digits_decimal_left_shift_table_powers_of_5 = [0x051C]u8{ 5, 2, 5, 1, 2, 5, 6, 2, 5, 3, 1, 2, 5, 1, 5, 6, 2, 5, 7, 8, 1, 2, 5, 3, 9, 0, 6, 2, 5, 1, 9, 5, 3, 1, 2, 5, 9, 7, 6, 5, 6, 2, 5, 4, 8, 8, 2, 8, 1, 2, 5, 2, 4, 4, 1, 4, 0, 6, 2, 5, 1, 2, 2, 0, 7, 0, 3, 1, 2, 5, 6, 1, 0, 3, 5, 1, 5, 6, 2, 5, 3, 0, 5, 1, 7, 5, 7, 8, 1, 2, 5, 1, 5, 2, 5, 8, 7, 8, 9, 0, 6, 2, 5, 7, 6, 2, 9, 3, 9, 4, 5, 3, 1, 2, 5, 3, 8, 1, 4, 6, 9, 7, 2, 6, 5, 6, 2, 5, 1, 9, 0, 7, 3, 4, 8, 6, 3, 2, 8, 1, 2, 5, 9, 5, 3, 6, 7, 4, 3, 1, 6, 4, 0, 6, 2, 5, 4, 7, 6, 8, 3, 7, 1, 5, 8, 2, 0, 3, 1, 2, 5, 2, 3, 8, 4, 1, 8, 5, 7, 9, 1, 0, 1, 5, 6, 2, 5, 1, 1, 9, 2, 0, 9, 2, 8, 9, 5, 5, 0, 7, 8, 1, 2, 5, 5, 9, 6, 0, 4, 6, 4, 4, 7, 7, 5, 3, 9, 0, 6, 2, 5, 2, 9, 8, 0, 2, 3, 2, 2, 3, 8, 7, 6, 9, 5, 3, 1, 2, 5, 1, 4, 9, 0, 1, 1, 6, 1, 1, 9, 3, 8, 4, 7, 6, 5, 6, 2, 5, 7, 4, 5, 0, 5, 8, 0, 5, 9, 6, 9, 2, 3, 8, 2, 8, 1, 2, 5, 3, 7, 2, 5, 2, 9, 0, 2, 9, 8, 4, 6, 1, 9, 1, 4, 0, 6, 2, 5, 1, 8, 6, 2, 6, 4, 5, 1, 4, 9, 2, 3, 0, 9, 5, 7, 0, 3, 1, 2, 5, 9, 3, 1, 3, 2, 2, 5, 7, 4, 6, 1, 5, 4, 7, 8, 5, 1, 5, 6, 2, 5, 4, 6, 5, 6, 6, 1, 2, 8, 7, 3, 0, 7, 7, 3, 9, 2, 5, 7, 8, 1, 2, 5, 2, 3, 2, 8, 3, 0, 6, 4, 3, 6, 5, 3, 8, 6, 9, 6, 2, 8, 9, 0, 6, 2, 5, 1, 1, 6, 4, 1, 5, 3, 2, 1, 8, 2, 6, 9, 3, 4, 8, 1, 4, 4, 5, 3, 1, 2, 5, 5, 8, 2, 0, 7, 6, 6, 0, 9, 1, 3, 4, 6, 7, 4, 0, 7, 2, 2, 6, 5, 6, 2, 5, 2, 9, 1, 0, 3, 8, 3, 0, 4, 5, 6, 7, 3, 3, 7, 0, 3, 6, 1, 3, 2, 8, 1, 2, 5, 1, 4, 5, 5, 1, 9, 1, 5, 2, 2, 8, 3, 6, 6, 8, 5, 1, 8, 0, 6, 6, 4, 0, 6, 2, 5, 7, 2, 7, 5, 9, 5, 7, 6, 1, 4, 1, 8, 3, 4, 2, 5, 9, 0, 3, 3, 2, 0, 3, 1, 2, 5, 3, 6, 3, 7, 9, 7, 8, 8, 0, 7, 0, 9, 1, 7, 1, 2, 9, 5, 1, 6, 6, 0, 1, 5, 6, 2, 5, 1, 8, 1, 8, 9, 8, 9, 4, 0, 3, 5, 4, 5, 8, 5, 6, 4, 7, 5, 8, 3, 0, 0, 7, 8, 1, 2, 5, 9, 0, 9, 4, 9, 4, 7, 0, 1, 7, 7, 2, 9, 2, 8, 2, 3, 7, 9, 1, 5, 0, 3, 9, 0, 6, 2, 5, 4, 5, 4, 7, 4, 7, 3, 5, 0, 8, 8, 6, 4, 6, 4, 1, 1, 8, 9, 5, 7, 5, 1, 9, 5, 3, 1, 2, 5, 2, 2, 7, 3, 7, 3, 6, 7, 5, 4, 4, 3, 2, 3, 2, 0, 5, 9, 4, 7, 8, 7, 5, 9, 7, 6, 5, 6, 2, 5, 1, 1, 3, 6, 8, 6, 8, 3, 7, 7, 2, 1, 6, 1, 6, 0, 2, 9, 7, 3, 9, 3, 7, 9, 8, 8, 2, 8, 1, 2, 5, 5, 6, 8, 4, 3, 4, 1, 8, 8, 6, 0, 8, 0, 8, 0, 1, 4, 8, 6, 9, 6, 8, 9, 9, 4, 1, 4, 0, 6, 2, 5, 2, 8, 4, 2, 1, 7, 0, 9, 4, 3, 0, 4, 0, 4, 0, 0, 7, 4, 3, 4, 8, 4, 4, 9, 7, 0, 7, 0, 3, 1, 2, 5, 1, 4, 2, 1, 0, 8, 5, 4, 7, 1, 5, 2, 0, 2, 0, 0, 3, 7, 1, 7, 4, 2, 2, 4, 8, 5, 3, 5, 1, 5, 6, 2, 5, 7, 1, 0, 5, 4, 2, 7, 3, 5, 7, 6, 0, 1, 0, 0, 1, 8, 5, 8, 7, 1, 1, 2, 4, 2, 6, 7, 5, 7, 8, 1, 2, 5, 3, 5, 5, 2, 7, 1, 3, 6, 7, 8, 8, 0, 0, 5, 0, 0, 9, 2, 9, 3, 5, 5, 6, 2, 1, 3, 3, 7, 8, 9, 0, 6, 2, 5, 1, 7, 7, 6, 3, 5, 6, 8, 3, 9, 4, 0, 0, 2, 5, 0, 4, 6, 4, 6, 7, 7, 8, 1, 0, 6, 6, 8, 9, 4, 5, 3, 1, 2, 5, 8, 8, 8, 1, 7, 8, 4, 1, 9, 7, 0, 0, 1, 2, 5, 2, 3, 2, 3, 3, 8, 9, 0, 5, 3, 3, 4, 4, 7, 2, 6, 5, 6, 2, 5, 4, 4, 4, 0, 8, 9, 2, 0, 9, 8, 5, 0, 0, 6, 2, 6, 1, 6, 1, 6, 9, 4, 5, 2, 6, 6, 7, 2, 3, 6, 3, 2, 8, 1, 2, 5, 2, 2, 2, 0, 4, 4, 6, 0, 4, 9, 2, 5, 0, 3, 1, 3, 0, 8, 0, 8, 4, 7, 2, 6, 3, 3, 3, 6, 1, 8, 1, 6, 4, 0, 6, 2, 5, 1, 1, 1, 0, 2, 2, 3, 0, 2, 4, 6, 2, 5, 1, 5, 6, 5, 4, 0, 4, 2, 3, 6, 3, 1, 6, 6, 8, 0, 9, 0, 8, 2, 0, 3, 1, 2, 5, 5, 5, 5, 1, 1, 1, 5, 1, 2, 3, 1, 2, 5, 7, 8, 2, 7, 0, 2, 1, 1, 8, 1, 5, 8, 3, 4, 0, 4, 5, 4, 1, 0, 1, 5, 6, 2, 5, 2, 7, 7, 5, 5, 5, 7, 5, 6, 1, 5, 6, 2, 8, 9, 
1, 3, 5, 1, 0, 5, 9, 0, 7, 9, 1, 7, 0, 2, 2, 7, 0, 5, 0, 7, 8, 1, 2, 5, 1, 3, 8, 7, 7, 7, 8, 7, 8, 0, 7, 8, 1, 4, 4, 5, 6, 7, 5, 5, 2, 9, 5, 3, 9, 5, 8, 5, 1, 1, 3, 5, 2, 5, 3, 9, 0, 6, 2, 5, 6, 9, 3, 8, 8, 9, 3, 9, 0, 3, 9, 0, 7, 2, 2, 8, 3, 7, 7, 6, 4, 7, 6, 9, 7, 9, 2, 5, 5, 6, 7, 6, 2, 6, 9, 5, 3, 1, 2, 5, 3, 4, 6, 9, 4, 4, 6, 9, 5, 1, 9, 5, 3, 6, 1, 4, 1, 8, 8, 8, 2, 3, 8, 4, 8, 9, 6, 2, 7, 8, 3, 8, 1, 3, 4, 7, 6, 5, 6, 2, 5, 1, 7, 3, 4, 7, 2, 3, 4, 7, 5, 9, 7, 6, 8, 0, 7, 0, 9, 4, 4, 1, 1, 9, 2, 4, 4, 8, 1, 3, 9, 1, 9, 0, 6, 7, 3, 8, 2, 8, 1, 2, 5, 8, 6, 7, 3, 6, 1, 7, 3, 7, 9, 8, 8, 4, 0, 3, 5, 4, 7, 2, 0, 5, 9, 6, 2, 2, 4, 0, 6, 9, 5, 9, 5, 3, 3, 6, 9, 1, 4, 0, 6, 2, 5, }; const pow5 = @ptrCast([*]const u8, &number_of_digits_decimal_left_shift_table_powers_of_5[pow5_a]); var i: u32 = 0; var n: u32 = pow5_b - pow5_a; while (i < n) : (i += 1) { if (i >= h.num_digits) { return num_new_digits - 1; } else if (h.digits[i] == pow5[i]) { continue; } else if (h.digits[i] < pow5[i]) { return num_new_digits - 1; } else { return num_new_digits; } } return num_new_digits; } fn round(h: *Decimal) u64 { if ((h.num_digits == 0) or (h.decimal_point < 0)) { return 0; } else if (h.decimal_point > 18) { return std.math.maxInt(u64); } // at this point, we know that h.decimal_point >= 0 const dp: u64 = @intCast(u32, h.decimal_point); var n: u64 = 0; var i: u32 = 0; while (i < dp) : (i += 1) { n = (10 * n) + if (i < h.num_digits) h.digits[i] else 0; } var round_up = false; if (dp < h.num_digits) { round_up = h.digits[dp] >= 5; // normally, we round up // but we may need to round to even! if ((h.digits[dp] == 5) and (dp + 1 == h.num_digits)) { round_up = h.truncated or ((dp > 0) and (1 & h.digits[dp - 1] != 0)); } } if (round_up) { n += 1; } return n; } // computes h * 2^-shift fn decimal_left_shift(h: *Decimal, shift: u6) void { if (h.num_digits == 0) return; const num_new_digits = number_of_digits_decimal_left_shift(h, shift); var read_index = @intCast(i32, h.num_digits - 1); var write_index = h.num_digits - 1 + num_new_digits; var n: u64 = 0; while (read_index >= 0) { n += @as(u64, h.digits[@intCast(u32, read_index)]) << shift; const quotient = n / 10; const remainder = n - (10 * quotient); if (write_index < max_digits) { h.digits[write_index] = @intCast(u8, remainder); } else if (remainder > 0) { h.truncated = true; } n = quotient; write_index -%= 1; read_index -= 1; } while (n > 0) { const quotient = n / 10; const remainder = n - (10 * quotient); if (write_index < max_digits) { h.digits[write_index] = @intCast(u8, remainder); } else if (remainder > 0) { h.truncated = true; } n = quotient; write_index -%= 1; } h.num_digits += num_new_digits; if (h.num_digits > max_digits) { h.num_digits = max_digits; } h.decimal_point += @intCast(i32, num_new_digits); trim(h); } fn compute_float(d: *Decimal) AdjustedMantissa { var answer: AdjustedMantissa = undefined; if (d.num_digits == 0) { // should be zero answer.power2 = 0; answer.mantissa = 0; return answer; } // At this point, going further, we can assume that d.num_digits > 0. // We want to guard against excessive decimal point values because // they can result in long running times. Indeed, we do // shifts by at most 60 bits. We have that log(10**400)/log(2**60) ~= 22 // which is fine, but log(10**299995)/log(2**60) ~= 16609 which is not // fine (runs for a long time). // if (d.decimal_point < -324) { // We have something smaller than 1e-324 which is always zero // in binary64 and binary32. // It should be zero. 
answer.power2 = 0; answer.mantissa = 0; return answer; } else if (d.decimal_point >= 310) { // We have something at least as large as 0.1e310 which is // always infinite. answer.power2 = BinaryFormat.infinite_power; answer.mantissa = 0; return answer; } const max_shift = 60; const num_powers = 19; const powers: [19]u6 = .{ 0, 3, 6, 9, 13, 16, 19, 23, 26, 29, // 33, 36, 39, 43, 46, 49, 53, 56, 59, // }; var exp2: i32 = 0; while (d.decimal_point > 0) { const n = @intCast(u32, d.decimal_point); const shift = if (n < num_powers) powers[n] else max_shift; decimal_right_shift(d, @intCast(u6, shift)); if (d.decimal_point < -decimal_point_range) { // should be zero answer.power2 = 0; answer.mantissa = 0; return answer; } exp2 += @as(i32, shift); } // We shift left toward [1/2 ... 1]. while (d.decimal_point <= 0) { var shift: u6 = undefined; if (d.decimal_point == 0) { if (d.digits[0] >= 5) { break; } shift = if (d.digits[0] < 2) 2 else 1; } else { var n = @intCast(u32, -d.decimal_point); shift = if (n < num_powers) powers[n] else max_shift; } decimal_left_shift(d, shift); if (d.decimal_point > decimal_point_range) { // we want to get infinity: answer.power2 = 0xFF; answer.mantissa = 0; return answer; } exp2 -= @intCast(i32, shift); } // We are now in the range [1/2 ... 1] but the binary format uses [1 ... 2]. exp2 -= 1; var minimum_exponent: i32 = BinaryFormat.minimum_exponent; while ((minimum_exponent + 1) > exp2) { var n = @intCast(u6, (minimum_exponent + 1) - exp2); if (n > max_shift) { n = max_shift; } decimal_right_shift(d, n); exp2 += @as(i32, n); } if ((exp2 - minimum_exponent) >= BinaryFormat.infinite_power) { answer.power2 = BinaryFormat.infinite_power; answer.mantissa = 0; return answer; } const mantissa_size_in_bits = BinaryFormat.mantissa_explicit_bits + 1; decimal_left_shift(d, mantissa_size_in_bits); var mantissa = round(d); // It is possible that we have an overflow, in which case we need // to shift back. if (mantissa >= (@as(u64, 1) << mantissa_size_in_bits)) { decimal_right_shift(d, 1); exp2 += 1; mantissa = round(d); if ((exp2 - minimum_exponent) >= BinaryFormat.infinite_power) { answer.power2 = BinaryFormat.infinite_power; answer.mantissa = 0; return answer; } } answer.power2 = exp2 - BinaryFormat.minimum_exponent; if (mantissa < (@as(u64, 1) << BinaryFormat.mantissa_explicit_bits)) { answer.power2 -= 1; } answer.mantissa = mantissa & ((@as(u64, 1) << BinaryFormat.mantissa_explicit_bits) - 1); return answer; } fn parse_long_mantissa(comptime F: type, first: [*]const u8) AdjustedMantissa { _ = F; var d = parse_decimal2(first); return compute_float(&d); } const BinaryFormat = struct { const mantissa_explicit_bits = 52; const minimum_exponent = -1023; const infinite_power = 0x7FF; const sign_index = 63; }; fn from_chars(first_: [*]const u8) f64 { var first = first_; const negative = first[0] == '-'; if (negative) { first += 1; } const am = parse_long_mantissa(f64, first); var word = am.mantissa; word |= @intCast(u64, @as(i64, am.power2)) << BinaryFormat.mantissa_explicit_bits; word = if (negative) word | (@as(u64, 1) << BinaryFormat.sign_index) else word; var value: f64 = undefined; // std::memcpy(&value, &word, sizeof(double)); @memcpy(@ptrCast([*]u8, &value), @ptrCast([*]const u8, &word), @sizeOf(f64)); // return @bitCast(f64, word); return value; } // We call a fallback floating-point parser that might be slow. Note // it will accept JSON numbers, but the JSON spec. 
is more restrictive so // before you call parse_float_fallback, you need to have validated the input // string with the JSON grammar. // It will return an error (false) if the parsed number is infinite. // The string parsing itself always succeeds. We know that there is at least // one digit. fn parse_float_fallback(ptr: [*]const u8, outDouble: *f64) bool { // @setFloatMode(.Optimized); outDouble.* = from_chars(ptr); // We do not accept infinite values. // Detecting finite values in a portable manner is ridiculously hard, ideally // we would want to do: // return !std::isfinite(*outDouble); // but that mysteriously fails under legacy/old libc++ libraries, see // https://github.com/simdjson/simdjson/issues/1286 // // Therefore, fall back to this solution (the extra parens are there // to handle that max may be a macro on windows). // println("parse_float_fallback {} std.math.f64_min {} std.math.f64_max {} {} {}", .{ outDouble.*, std.math.f64_min, std.math.f64_max, std.math.f64_min < outDouble.*, outDouble.* < std.math.f64_max }); return !(outDouble.* > std.math.f64_max or outDouble.* < -std.math.f64_max); } fn write_float( src: [*]const u8, negative: bool, i: u64, start_digits: [*]const u8, digit_count: usize, exponent: i64, writer: *TapeBuilder, ) !void { // If we frequently had to deal with long strings of digits, // we could extend our code by using a 128-bit integer instead // of a 64-bit integer. However, this is uncommon in practice. // // 9999999999999999999 < 2**64 so we can accommodate 19 digits. // If we have a decimal separator, then digit_count - 1 is the number of digits, but we // may not have a decimal separator! if (digit_count > 19 and significant_digits(start_digits, digit_count) > 19) { // Ok, chances are good that we had an overflow! // this is almost never going to get called!!! // we start anew, going slowly!!! // This will happen in the following examples: // 10000000000000000000000000000000000000000000e+308 // 3.1415926535897932384626433832795028841971693993751 // // NOTE: This makes a *copy* of the writer and passes it to slow_float_parsing. This happens // because slow_float_parsing is a non-inlined function. If we passed our writer reference to // it, it would force it to be stored in memory, preventing the compiler from picking it apart // and putting into registers. i.e. if we pass it as reference, it gets slow. // This is what forces the skip_double, as well. return slow_float_parsing(src, writer); // TLS: don't need to call skip_double(). maybe in the future i'll discover that it would be // faster to pass a copy somewhere and decide to put it back. } // NOTE: it's weird that the simdjson_unlikely() only wraps half the if, but it seems to get slower any other // way we've tried: https://github.com/simdjson/simdjson/pull/990#discussion_r448497331 // To future reader: we'd love if someone found a better way, or at least could explain this result! if (exponent < smallest_power or exponent > largest_power) { // // Important: smallest_power is such that it leads to a zero value. // Observe that 18446744073709551615e-343 == 0, i.e. (2**64 - 1) e -343 is zero // so something x 10^-343 goes to zero, but not so with something x 10^-342. if (smallest_power > -342) @compileError("smallest_power is not small enough"); // if (exponent < smallest_power or i == 0) { WRITE_DOUBLE(0, src, writer); return; } else { // (exponent > largest_power) and (i != 0) // We have, for sure, an infinite value and simdjson refuses to parse infinite values. 
return INVALID_NUMBER(src); } } // std.log.debug("exponent {}, i {}, negative {}", .{ exponent, i, negative }); var d: f64 = undefined; if (!compute_float_64(exponent, i, negative, &d)) { // we are almost never going to get here. if (!parse_float_fallback(src, &d)) { return INVALID_NUMBER(src); } } WRITE_DOUBLE(d, src, writer); } const smallest_power = -342; const largest_power = 308; const power_of_ten = [_]f64{ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, 1e20, 1e21, 1e22, }; fn to_double(_mantissa: u64, real_exponent: u64, negative: u1) f64 { var d: f64 = undefined; var mantissa = _mantissa; mantissa &= ~(@as(u64, 1) << 52); mantissa |= real_exponent << 52; mantissa |= (@as(u64, negative) << 63); // std::memcpy(&d, &mantissa, sizeof(d)); @memcpy(@ptrCast([*]u8, &d), @ptrCast([*]const u8, &mantissa), @sizeOf(f64)); return d; } inline fn compute_float_64(power: i64, _i: u64, negative: bool, d: *f64) bool { var i = _i; // we start with a fast path // It was described in // Clinger WD. How to read floating point numbers accurately. // ACM SIGPLAN Notices. 1990 // TODO // #ifndef FLT_EVAL_METHOD // #error "FLT_EVAL_METHOD should be defined, please include cfloat." // #endif // #if (FLT_EVAL_METHOD != 1) && (FLT_EVAL_METHOD != 0) // // We cannot be certain that x/y is rounded to nearest. if (0 <= power and power <= 22 and i <= 9007199254740991) { // #else // if (-22 <= power and power <= 22 and i <= 9007199254740991) { // #endif // convert the integer into a double. This is lossless since // 0 <= i <= 2^53 - 1. // d = double(i); d.* = @intToFloat(f64, i); // // The general idea is as follows. // If 0 <= s < 2^53 and if 10^0 <= p <= 10^22 then // 1) Both s and p can be represented exactly as 64-bit floating-point // values // (binary64). // 2) Because s and p can be represented exactly as floating-point values, // then s * p // and s / p will produce correctly rounded values. // if (power < 0) { d.* /= power_of_ten[@intCast(u64, -power)]; } else { d.* *= power_of_ten[@intCast(u64, power)]; } if (negative) { d.* = -d.*; } return true; } // When 22 < power && power < 22 + 16, we could // hope for another, secondary fast path. It was // described by <NAME> in "Correctly rounded // binary-decimal and decimal-binary conversions." (1990) // If you need to compute i * 10^(22 + x) for x < 16, // first compute i * 10^x, if you know that result is exact // (e.g., when i * 10^x < 2^53), // then you can still proceed and do (i * 10^x) * 10^22. // Is this worth your time? // You need 22 < power *and* power < 22 + 16 *and* (i * 10^(x-22) < 2^53) // for this second fast path to work. // If you you have 22 < power *and* power < 22 + 16, and then you // optimistically compute "i * 10^(x-22)", there is still a chance that you // have wasted your time if i * 10^(x-22) >= 2^53. It makes the use cases of // this optimization maybe less common than we would like. Source: // http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/ // also used in RapidJSON: https://rapidjson.org/strtod_8h_source.html // The fast path has now failed, so we are failing back on the slower path. // In the slow path, we need to adjust i so that it is > 1<<63 which is always // possible, except if i == 0, so we handle i == 0 separately. if (i == 0) { d.* = 0.0; return true; } // The exponent is 1024 + 63 + power // + floor(log(5**power)/log(2)). // The 1024 comes from the ieee64 standard. // The 63 comes from the fact that we use a 64-bit word. 
// // Computing floor(log(5**power)/log(2)) could be // slow. Instead we use a fast function. // // For power in (-400,350), we have that // (((152170 + 65536) * power ) >> 16); // is equal to // floor(log(5**power)/log(2)) + power when power >= 0 // and it is equal to // ceil(log(5**-power)/log(2)) + power when power < 0 // // The 65536 is (1<<16) and corresponds to // (65536 * power) >> 16 ---> power // // ((152170 * power ) >> 16) is equal to // floor(log(5**power)/log(2)) // // Note that this is not magic: 152170/(1<<16) is // approximatively equal to log(5)/log(2). // The 1<<16 value is a power of two; we could use a // larger power of 2 if we wanted to. // const exponent: i64 = (((152170 + 65536) * power) >> 16) + 1024 + 63; // We want the most significant bit of i to be 1. Shift if needed. var lz = @intCast(u6, @clz(u64, i)); i <<= lz; // We are going to need to do some 64-bit arithmetic to get a precise product. // We use a table lookup approach. // It is safe because // power >= smallest_power // and power <= largest_power // We recover the mantissa of the power, it has a leading 1. It is always // rounded down. // // We want the most significant 64 bits of the product. We know // this will be non-zero because the most significant bit of i is // 1. const index: u32 = 2 * @intCast(u32, power - smallest_power); // Optimization: It may be that materializing the index as a variable might confuse some compilers and prevent effective complex-addressing loads. (Done for code clarity.) // // The full_multiplication function computes the 128-bit product of two 64-bit words // with a returned value of type value128 with a "low component" corresponding to the // 64-bit least significant bits of the product and with a "high component" corresponding // to the 64-bit most significant bits of the product. var firstproduct = CharUtils.full_multiplication(i, power_of_five_128[index]); // Both i and power_of_five_128[index] have their most significant bit set to 1 which // implies that the either the most or the second most significant bit of the product // is 1. We pack values in this manner for efficiency reasons: it maximizes the use // we make of the product. It also makes it easy to reason about the product: there // is 0 or 1 leading zero in the product. // Unless the least significant 9 bits of the high (64-bit) part of the full // product are all 1s, then we know that the most significant 55 bits are // exact and no further work is needed. Having 55 bits is necessary because // we need 53 bits for the mantissa but we have to have one rounding bit and // we can waste a bit if the most significant bit of the product is zero. if ((firstproduct.high & 0x1FF) == 0x1FF) { // We want to compute i * 5^q, but only care about the top 55 bits at most. // Consider the scenario where q>=0. Then 5^q may not fit in 64-bits. Doing // the full computation is wasteful. So we do what is called a "truncated // multiplication". // We take the most significant 64-bits, and we put them in // power_of_five_128[index]. Usually, that's good enough to approximate i * 5^q // to the desired approximation using one multiplication. Sometimes it does not suffice. // Then we store the next most significant 64 bits in power_of_five_128[index + 1], and // then we get a better approximation to i * 5^q. In very rare cases, even that // will not suffice, though it is seemingly very hard to find such a scenario. // // That's for when q>=0. The logic for q<0 is somewhat similar but it is somewhat // more complicated. 
// // There is an extra layer of complexity in that we need more than 55 bits of // accuracy in the round-to-even scenario. // // The full_multiplication function computes the 128-bit product of two 64-bit words // with a returned value of type value128 with a "low component" corresponding to the // 64-bit least significant bits of the product and with a "high component" corresponding // to the 64-bit most significant bits of the product. var secondproduct = CharUtils.full_multiplication(i, power_of_five_128[index + 1]); firstproduct.low +%= secondproduct.high; if (secondproduct.high > firstproduct.low) { firstproduct.high += 1; } // At this point, we might need to add at most one to firstproduct, but this // can only change the value of firstproduct.high if firstproduct.low is maximal. if (firstproduct.low == 0xFFFFFFFFFFFFFFFF) { // This is very unlikely, but if so, we need to do much more work! return false; } } const lower: u64 = firstproduct.low; const upper: u64 = firstproduct.high; // The final mantissa should be 53 bits with a leading 1. // We shift it so that it occupies 54 bits with a leading 1. /////// const upperbit = @intCast(u6, upper >> 63); var mantissa: u64 = upper >> (upperbit + 9); lz +%= @intCast(u6, 1 ^ upperbit); // Here we have mantissa < (1<<54). var real_exponent: i64 = exponent - lz; if (real_exponent <= 0) { // we have a subnormal? // Here have that real_exponent <= 0 so -real_exponent >= 0 if (-real_exponent + 1 >= 64) { // if we have more than 64 bits below the minimum exponent, you have a zero for sure. d.* = 0.0; return true; } // next line is safe because -real_exponent + 1 < 0 mantissa >>= @bitCast(u6, @intCast(i6, -real_exponent + 1)); // Thankfully, we can't have both "round-to-even" and subnormals because // "round-to-even" only occurs for powers close to 0. mantissa += (mantissa & 1); // round up mantissa >>= 1; // There is a weird scenario where we don't have a subnormal but just. // Suppose we start with 2.2250738585072013e-308, we end up // with 0x3fffffffffffff x 2^-1023-53 which is technically subnormal // whereas 0x40000000000000 x 2^-1023-53 is normal. Now, we need to round // up 0x3fffffffffffff x 2^-1023-53 and once we do, we are no longer // subnormal, but we can only know this after rounding. // So we only declare a subnormal if we are smaller than the threshold. real_exponent = if (mantissa < (@as(u64, 1) << 52)) 0 else 1; d.* = to_double(mantissa, @bitCast(u64, real_exponent), @boolToInt(negative)); return true; } // We have to round to even. The "to even" part // is only a problem when we are right in between two floats // which we guard against. // If we have lots of trailing zeros, we may fall right between two // floating-point values. // // The round-to-even cases take the form of a number 2m+1 which is in (2^53,2^54] // times a power of two. That is, it is right between a number with binary significand // m and another number with binary significand m+1; and it must be the case // that it cannot be represented by a float itself. // // We must have that w * 10 ^q == (2m+1) * 2^p for some power of two 2^p. // Recall that 10^q = 5^q * 2^q. // When q >= 0, we must have that (2m+1) is divible by 5^q, so 5^q <= 2^54. We have that // 5^23 <= 2^54 and it is the last power of five to qualify, so q <= 23. // When q<0, we have w >= (2m+1) x 5^{-q}. We must have that w<2^{64} so // (2m+1) x 5^{-q} < 2^{64}. We have that 2m+1>2^{53}. Hence, we must have // 2^{53} x 5^{-q} < 2^{64}. // Hence we have 5^{-q} < 2^{11}$ or q>= -4. 
// // We require lower <= 1 and not lower == 0 because we could not prove that // that lower == 0 is implied; but we could prove that lower <= 1 is a necessary and sufficient test. if ((lower <= 1) and (power >= -4) and (power <= 23) and ((mantissa & 3) == 1)) { if ((mantissa << @intCast(u6, upperbit + (64 - 53 - 2))) == upper) { mantissa &= ~@as(u64, 1); // flip it so that we do not round up } } mantissa += mantissa & 1; mantissa >>= 1; // Here we have mantissa < (1<<53), unless there was an overflow if (mantissa >= (1 << 53)) { ////////// // This will happen when parsing values such as 7.2057594037927933e+16 //////// mantissa = (1 << 52); real_exponent += 1; } mantissa &= ~@as(u64, 1 << 52); // we have to check that real_exponent is in range, otherwise we bail out if (real_exponent > 2046) { // We have an infinite value!!! We could actually throw an error here if we could. return false; } d.* = to_double(mantissa, @bitCast(u64, real_exponent), @boolToInt(negative)); return true; } test "shr neg i32" { var x: u64 = 0xffffffffffffffff; x >>= @bitCast(u6, @as(i6, -3)); try std.testing.expectEqual(@as(u64, 7), x); } pub const power_of_five_128 = [_]u64{ 0xeef453d6923bd65a, 0x113faa2906a13b3f, 0x9558b4661b6565f8, 0x4ac7ca59a424c507, 0xbaaee17fa23ebf76, 0x5d79bcf00d2df649, 0xe95a99df8ace6f53, 0xf4d82c2c107973dc, 0x91d8a02bb6c10594, 0x79071b9b8a4be869, 0xb64ec836a47146f9, 0x9748e2826cdee284, 0xe3e27a444d8d98b7, 0xfd1b1b2308169b25, 0x8e6d8c6ab0787f72, 0xfe30f0f5e50e20f7, 0xb208ef855c969f4f, 0xbdbd2d335e51a935, 0xde8b2b66b3bc4723, 0xad2c788035e61382, 0x8b16fb203055ac76, 0x4c3bcb5021afcc31, 0xaddcb9e83c6b1793, 0xdf4abe242a1bbf3d, 0xd953e8624b85dd78, 0xd71d6dad34a2af0d, 0x87d4713d6f33aa6b, 0x8672648c40e5ad68, 0xa9c98d8ccb009506, 0x680efdaf511f18c2, 0xd43bf0effdc0ba48, 0x212bd1b2566def2, 0x84a57695fe98746d, 0x14bb630f7604b57, 0xa5ced43b7e3e9188, 0x419ea3bd35385e2d, 0xcf42894a5dce35ea, 0x52064cac828675b9, 0x818995ce7aa0e1b2, 0x7343efebd1940993, 0xa1ebfb4219491a1f, 0x1014ebe6c5f90bf8, 0xca66fa129f9b60a6, 0xd41a26e077774ef6, 0xfd00b897478238d0, 0x8920b098955522b4, 0x9e20735e8cb16382, 0x55b46e5f5d5535b0, 0xc5a890362fddbc62, 0xeb2189f734aa831d, 0xf712b443bbd52b7b, 0xa5e9ec7501d523e4, 0x9a6bb0aa55653b2d, 0x47b233c92125366e, 0xc1069cd4eabe89f8, 0x999ec0bb696e840a, 0xf148440a256e2c76, 0xc00670ea43ca250d, 0x96cd2a865764dbca, 0x380406926a5e5728, 0xbc807527ed3e12bc, 0xc605083704f5ecf2, 0xeba09271e88d976b, 0xf7864a44c633682e, 0x93445b8731587ea3, 0x7ab3ee6afbe0211d, 0xb8157268fdae9e4c, 0x5960ea05bad82964, 0xe61acf033d1a45df, 0x6fb92487298e33bd, 0x8fd0c16206306bab, 0xa5d3b6d479f8e056, 0xb3c4f1ba87bc8696, 0x8f48a4899877186c, 0xe0b62e2929aba83c, 0x331acdabfe94de87, 0x8c71dcd9ba0b4925, 0x9ff0c08b7f1d0b14, 0xaf8e5410288e1b6f, 0x7ecf0ae5ee44dd9, 0xdb71e91432b1a24a, 0xc9e82cd9f69d6150, 0x892731ac9faf056e, 0xbe311c083a225cd2, 0xab70fe17c79ac6ca, 0x6dbd630a48aaf406, 0xd64d3d9db981787d, 0x92cbbccdad5b108, 0x85f0468293f0eb4e, 0x25bbf56008c58ea5, 0xa76c582338ed2621, 0xaf2af2b80af6f24e, 0xd1476e2c07286faa, 0x1af5af660db4aee1, 0x82cca4db847945ca, 0x50d98d9fc890ed4d, 0xa37fce126597973c, 0xe50ff107bab528a0, 0xcc5fc196fefd7d0c, 0x1e53ed49a96272c8, 0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7a, 0x9faacf3df73609b1, 0x77b191618c54e9ac, 0xc795830d75038c1d, 0xd59df5b9ef6a2417, 0xf97ae3d0d2446f25, 0x4b0573286b44ad1d, 0x9becce62836ac577, 0x4ee367f9430aec32, 0xc2e801fb244576d5, 0x229c41f793cda73f, 0xf3a20279ed56d48a, 0x6b43527578c1110f, 0x9845418c345644d6, 0x830a13896b78aaa9, 0xbe5691ef416bd60c, 0x23cc986bc656d553, 0xedec366b11c6cb8f, 
0x2cbfbe86b7ec8aa8, 0x94b3a202eb1c3f39, 0x7bf7d71432f3d6a9, 0xb9e08a83a5e34f07, 0xdaf5ccd93fb0cc53, 0xe858ad248f5c22c9, 0xd1b3400f8f9cff68, 0x91376c36d99995be, 0x23100809b9c21fa1, 0xb58547448ffffb2d, 0xabd40a0c2832a78a, 0xe2e69915b3fff9f9, 0x16c90c8f323f516c, 0x8dd01fad907ffc3b, 0xae3da7d97f6792e3, 0xb1442798f49ffb4a, 0x99cd11cfdf41779c, 0xdd95317f31c7fa1d, 0x40405643d711d583, 0x8a7d3eef7f1cfc52, 0x482835ea666b2572, 0xad1c8eab5ee43b66, 0xda3243650005eecf, 0xd863b256369d4a40, 0x90bed43e40076a82, 0x873e4f75e2224e68, 0x5a7744a6e804a291, 0xa90de3535aaae202, 0x711515d0a205cb36, 0xd3515c2831559a83, 0xd5a5b44ca873e03, 0x8412d9991ed58091, 0xe858790afe9486c2, 0xa5178fff668ae0b6, 0x626e974dbe39a872, 0xce5d73ff402d98e3, 0xfb0a3d212dc8128f, 0x80fa687f881c7f8e, 0x7ce66634bc9d0b99, 0xa139029f6a239f72, 0x1c1fffc1ebc44e80, 0xc987434744ac874e, 0xa327ffb266b56220, 0xfbe9141915d7a922, 0x4bf1ff9f0062baa8, 0x9d71ac8fada6c9b5, 0x6f773fc3603db4a9, 0xc4ce17b399107c22, 0xcb550fb4384d21d3, 0xf6019da07f549b2b, 0x7e2a53a146606a48, 0x99c102844f94e0fb, 0x2eda7444cbfc426d, 0xc0314325637a1939, 0xfa911155fefb5308, 0xf03d93eebc589f88, 0x793555ab7eba27ca, 0x96267c7535b763b5, 0x4bc1558b2f3458de, 0xbbb01b9283253ca2, 0x9eb1aaedfb016f16, 0xea9c227723ee8bcb, 0x465e15a979c1cadc, 0x92a1958a7675175f, 0xbfacd89ec191ec9, 0xb749faed14125d36, 0xcef980ec671f667b, 0xe51c79a85916f484, 0x82b7e12780e7401a, 0x8f31cc0937ae58d2, 0xd1b2ecb8b0908810, 0xb2fe3f0b8599ef07, 0x861fa7e6dcb4aa15, 0xdfbdcece67006ac9, 0x67a791e093e1d49a, 0x8bd6a141006042bd, 0xe0c8bb2c5c6d24e0, 0xaecc49914078536d, 0x58fae9f773886e18, 0xda7f5bf590966848, 0xaf39a475506a899e, 0x888f99797a5e012d, 0x6d8406c952429603, 0xaab37fd7d8f58178, 0xc8e5087ba6d33b83, 0xd5605fcdcf32e1d6, 0xfb1e4a9a90880a64, 0x855c3be0a17fcd26, 0x5cf2eea09a55067f, 0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481e, 0xd0601d8efc57b08b, 0xf13b94daf124da26, 0x823c12795db6ce57, 0x76c53d08d6b70858, 0xa2cb1717b52481ed, 0x54768c4b0c64ca6e, 0xcb7ddcdda26da268, 0xa9942f5dcf7dfd09, 0xfe5d54150b090b02, 0xd3f93b35435d7c4c, 0x9efa548d26e5a6e1, 0xc47bc5014a1a6daf, 0xc6b8e9b0709f109a, 0x359ab6419ca1091b, 0xf867241c8cc6d4c0, 0xc30163d203c94b62, 0x9b407691d7fc44f8, 0x79e0de63425dcf1d, 0xc21094364dfb5636, 0x985915fc12f542e4, 0xf294b943e17a2bc4, 0x3e6f5b7b17b2939d, 0x979cf3ca6cec5b5a, 0xa705992ceecf9c42, 0xbd8430bd08277231, 0x50c6ff782a838353, 0xece53cec4a314ebd, 0xa4f8bf5635246428, 0x940f4613ae5ed136, 0x871b7795e136be99, 0xb913179899f68584, 0x28e2557b59846e3f, 0xe757dd7ec07426e5, 0x331aeada2fe589cf, 0x9096ea6f3848984f, 0x3ff0d2c85def7621, 0xb4bca50b065abe63, 0xfed077a756b53a9, 0xe1ebce4dc7f16dfb, 0xd3e8495912c62894, 0x8d3360f09cf6e4bd, 0x64712dd7abbbd95c, 0xb080392cc4349dec, 0xbd8d794d96aacfb3, 0xdca04777f541c567, 0xecf0d7a0fc5583a0, 0x89e42caaf9491b60, 0xf41686c49db57244, 0xac5d37d5b79b6239, 0x311c2875c522ced5, 0xd77485cb25823ac7, 0x7d633293366b828b, 0x86a8d39ef77164bc, 0xae5dff9c02033197, 0xa8530886b54dbdeb, 0xd9f57f830283fdfc, 0xd267caa862a12d66, 0xd072df63c324fd7b, 0x8380dea93da4bc60, 0x4247cb9e59f71e6d, 0xa46116538d0deb78, 0x52d9be85f074e608, 0xcd795be870516656, 0x67902e276c921f8b, 0x806bd9714632dff6, 0xba1cd8a3db53b6, 0xa086cfcd97bf97f3, 0x80e8a40eccd228a4, 0xc8a883c0fdaf7df0, 0x6122cd128006b2cd, 0xfad2a4b13d1b5d6c, 0x796b805720085f81, 0x9cc3a6eec6311a63, 0xcbe3303674053bb0, 0xc3f490aa77bd60fc, 0xbedbfc4411068a9c, 0xf4f1b4d515acb93b, 0xee92fb5515482d44, 0x991711052d8bf3c5, 0x751bdd152d4d1c4a, 0xbf5cd54678eef0b6, 0xd262d45a78a0635d, 0xef340a98172aace4, 0x86fb897116c87c34, 0x9580869f0e7aac0e, 0xd45d35e6ae3d4da0, 0xbae0a846d2195712, 
0x8974836059cca109, 0xe998d258869facd7, 0x2bd1a438703fc94b, 0x91ff83775423cc06, 0x7b6306a34627ddcf, 0xb67f6455292cbf08, 0x1a3bc84c17b1d542, 0xe41f3d6a7377eeca, 0x20caba5f1d9e4a93, 0x8e938662882af53e, 0x547eb47b7282ee9c, 0xb23867fb2a35b28d, 0xe99e619a4f23aa43, 0xdec681f9f4c31f31, 0x6405fa00e2ec94d4, 0x8b3c113c38f9f37e, 0xde83bc408dd3dd04, 0xae0b158b4738705e, 0x9624ab50b148d445, 0xd98ddaee19068c76, 0x3badd624dd9b0957, 0x87f8a8d4cfa417c9, 0xe54ca5d70a80e5d6, 0xa9f6d30a038d1dbc, 0x5e9fcf4ccd211f4c, 0xd47487cc8470652b, 0x7647c3200069671f, 0x84c8d4dfd2c63f3b, 0x29ecd9f40041e073, 0xa5fb0a17c777cf09, 0xf468107100525890, 0xcf79cc9db955c2cc, 0x7182148d4066eeb4, 0x81ac1fe293d599bf, 0xc6f14cd848405530, 0xa21727db38cb002f, 0xb8ada00e5a506a7c, 0xca9cf1d206fdc03b, 0xa6d90811f0e4851c, 0xfd442e4688bd304a, 0x908f4a166d1da663, 0x9e4a9cec15763e2e, 0x9a598e4e043287fe, 0xc5dd44271ad3cdba, 0x40eff1e1853f29fd, 0xf7549530e188c128, 0xd12bee59e68ef47c, 0x9a94dd3e8cf578b9, 0x82bb74f8301958ce, 0xc13a148e3032d6e7, 0xe36a52363c1faf01, 0xf18899b1bc3f8ca1, 0xdc44e6c3cb279ac1, 0x96f5600f15a7b7e5, 0x29ab103a5ef8c0b9, 0xbcb2b812db11a5de, 0x7415d448f6b6f0e7, 0xebdf661791d60f56, 0x111b495b3464ad21, 0x936b9fcebb25c995, 0xcab10dd900beec34, 0xb84687c269ef3bfb, 0x3d5d514f40eea742, 0xe65829b3046b0afa, 0xcb4a5a3112a5112, 0x8ff71a0fe2c2e6dc, 0x47f0e785eaba72ab, 0xb3f4e093db73a093, 0x59ed216765690f56, 0xe0f218b8d25088b8, 0x306869c13ec3532c, 0x8c974f7383725573, 0x1e414218c73a13fb, 0xafbd2350644eeacf, 0xe5d1929ef90898fa, 0xdbac6c247d62a583, 0xdf45f746b74abf39, 0x894bc396ce5da772, 0x6b8bba8c328eb783, 0xab9eb47c81f5114f, 0x66ea92f3f326564, 0xd686619ba27255a2, 0xc80a537b0efefebd, 0x8613fd0145877585, 0xbd06742ce95f5f36, 0xa798fc4196e952e7, 0x2c48113823b73704, 0xd17f3b51fca3a7a0, 0xf75a15862ca504c5, 0x82ef85133de648c4, 0x9a984d73dbe722fb, 0xa3ab66580d5fdaf5, 0xc13e60d0d2e0ebba, 0xcc963fee10b7d1b3, 0x318df905079926a8, 0xffbbcfe994e5c61f, 0xfdf17746497f7052, 0x9fd561f1fd0f9bd3, 0xfeb6ea8bedefa633, 0xc7caba6e7c5382c8, 0xfe64a52ee96b8fc0, 0xf9bd690a1b68637b, 0x3dfdce7aa3c673b0, 0x9c1661a651213e2d, 0x6bea10ca65c084e, 0xc31bfa0fe5698db8, 0x486e494fcff30a62, 0xf3e2f893dec3f126, 0x5a89dba3c3efccfa, 0x986ddb5c6b3a76b7, 0xf89629465a75e01c, 0xbe89523386091465, 0xf6bbb397f1135823, 0xee2ba6c0678b597f, 0x746aa07ded582e2c, 0x94db483840b717ef, 0xa8c2a44eb4571cdc, 0xba121a4650e4ddeb, 0x92f34d62616ce413, 0xe896a0d7e51e1566, 0x77b020baf9c81d17, 0x915e2486ef32cd60, 0xace1474dc1d122e, 0xb5b5ada8aaff80b8, 0xd819992132456ba, 0xe3231912d5bf60e6, 0x10e1fff697ed6c69, 0x8df5efabc5979c8f, 0xca8d3ffa1ef463c1, 0xb1736b96b6fd83b3, 0xbd308ff8a6b17cb2, 0xddd0467c64bce4a0, 0xac7cb3f6d05ddbde, 0x8aa22c0dbef60ee4, 0x6bcdf07a423aa96b, 0xad4ab7112eb3929d, 0x86c16c98d2c953c6, 0xd89d64d57a607744, 0xe871c7bf077ba8b7, 0x87625f056c7c4a8b, 0x11471cd764ad4972, 0xa93af6c6c79b5d2d, 0xd598e40d3dd89bcf, 0xd389b47879823479, 0x4aff1d108d4ec2c3, 0x843610cb4bf160cb, 0xcedf722a585139ba, 0xa54394fe1eedb8fe, 0xc2974eb4ee658828, 0xce947a3da6a9273e, 0x733d226229feea32, 0x811ccc668829b887, 0x806357d5a3f525f, 0xa163ff802a3426a8, 0xca07c2dcb0cf26f7, 0xc9bcff6034c13052, 0xfc89b393dd02f0b5, 0xfc2c3f3841f17c67, 0xbbac2078d443ace2, 0x9d9ba7832936edc0, 0xd54b944b84aa4c0d, 0xc5029163f384a931, 0xa9e795e65d4df11, 0xf64335bcf065d37d, 0x4d4617b5ff4a16d5, 0x99ea0196163fa42e, 0x504bced1bf8e4e45, 0xc06481fb9bcf8d39, 0xe45ec2862f71e1d6, 0xf07da27a82c37088, 0x5d767327bb4e5a4c, 0x964e858c91ba2655, 0x3a6a07f8d510f86f, 0xbbe226efb628afea, 0x890489f70a55368b, 0xeadab0aba3b2dbe5, 0x2b45ac74ccea842e, 0x92c8ae6b464fc96f, 
0x3b0b8bc90012929d, 0xb77ada0617e3bbcb, 0x9ce6ebb40173744, 0xe55990879ddcaabd, 0xcc420a6a101d0515, 0x8f57fa54c2a9eab6, 0x9fa946824a12232d, 0xb32df8e9f3546564, 0x47939822dc96abf9, 0xdff9772470297ebd, 0x59787e2b93bc56f7, 0x8bfbea76c619ef36, 0x57eb4edb3c55b65a, 0xaefae51477a06b03, 0xede622920b6b23f1, 0xdab99e59958885c4, 0xe95fab368e45eced, 0x88b402f7fd75539b, 0x11dbcb0218ebb414, 0xaae103b5fcd2a881, 0xd652bdc29f26a119, 0xd59944a37c0752a2, 0x4be76d3346f0495f, 0x857fcae62d8493a5, 0x6f70a4400c562ddb, 0xa6dfbd9fb8e5b88e, 0xcb4ccd500f6bb952, 0xd097ad07a71f26b2, 0x7e2000a41346a7a7, 0x825ecc24c873782f, 0x8ed400668c0c28c8, 0xa2f67f2dfa90563b, 0x728900802f0f32fa, 0xcbb41ef979346bca, 0x4f2b40a03ad2ffb9, 0xfea126b7d78186bc, 0xe2f610c84987bfa8, 0x9f24b832e6b0f436, 0xdd9ca7d2df4d7c9, 0xc6ede63fa05d3143, 0x91503d1c79720dbb, 0xf8a95fcf88747d94, 0x75a44c6397ce912a, 0x9b69dbe1b548ce7c, 0xc986afbe3ee11aba, 0xc24452da229b021b, 0xfbe85badce996168, 0xf2d56790ab41c2a2, 0xfae27299423fb9c3, 0x97c560ba6b0919a5, 0xdccd879fc967d41a, 0xbdb6b8e905cb600f, 0x5400e987bbc1c920, 0xed246723473e3813, 0x290123e9aab23b68, 0x9436c0760c86e30b, 0xf9a0b6720aaf6521, 0xb94470938fa89bce, 0xf808e40e8d5b3e69, 0xe7958cb87392c2c2, 0xb60b1d1230b20e04, 0x90bd77f3483bb9b9, 0xb1c6f22b5e6f48c2, 0xb4ecd5f01a4aa828, 0x1e38aeb6360b1af3, 0xe2280b6c20dd5232, 0x25c6da63c38de1b0, 0x8d590723948a535f, 0x579c487e5a38ad0e, 0xb0af48ec79ace837, 0x2d835a9df0c6d851, 0xdcdb1b2798182244, 0xf8e431456cf88e65, 0x8a08f0f8bf0f156b, 0x1b8e9ecb641b58ff, 0xac8b2d36eed2dac5, 0xe272467e3d222f3f, 0xd7adf884aa879177, 0x5b0ed81dcc6abb0f, 0x86ccbb52ea94baea, 0x98e947129fc2b4e9, 0xa87fea27a539e9a5, 0x3f2398d747b36224, 0xd29fe4b18e88640e, 0x8eec7f0d19a03aad, 0x83a3eeeef9153e89, 0x1953cf68300424ac, 0xa48ceaaab75a8e2b, 0x5fa8c3423c052dd7, 0xcdb02555653131b6, 0x3792f412cb06794d, 0x808e17555f3ebf11, 0xe2bbd88bbee40bd0, 0xa0b19d2ab70e6ed6, 0x5b6aceaeae9d0ec4, 0xc8de047564d20a8b, 0xf245825a5a445275, 0xfb158592be068d2e, 0xeed6e2f0f0d56712, 0x9ced737bb6c4183d, 0x55464dd69685606b, 0xc428d05aa4751e4c, 0xaa97e14c3c26b886, 0xf53304714d9265df, 0xd53dd99f4b3066a8, 0x993fe2c6d07b7fab, 0xe546a8038efe4029, 0xbf8fdb78849a5f96, 0xde98520472bdd033, 0xef73d256a5c0f77c, 0x963e66858f6d4440, 0x95a8637627989aad, 0xdde7001379a44aa8, 0xbb127c53b17ec159, 0x5560c018580d5d52, 0xe9d71b689dde71af, 0xaab8f01e6e10b4a6, 0x9226712162ab070d, 0xcab3961304ca70e8, 0xb6b00d69bb55c8d1, 0x3d607b97c5fd0d22, 0xe45c10c42a2b3b05, 0x8cb89a7db77c506a, 0x8eb98a7a9a5b04e3, 0x77f3608e92adb242, 0xb267ed1940f1c61c, 0x55f038b237591ed3, 0xdf01e85f912e37a3, 0x6b6c46dec52f6688, 0x8b61313bbabce2c6, 0x2323ac4b3b3da015, 0xae397d8aa96c1b77, 0xabec975e0a0d081a, 0xd9c7dced53c72255, 0x96e7bd358c904a21, 0x881cea14545c7575, 0x7e50d64177da2e54, 0xaa242499697392d2, 0xdde50bd1d5d0b9e9, 0xd4ad2dbfc3d07787, 0x955e4ec64b44e864, 0x84ec3c97da624ab4, 0xbd5af13bef0b113e, 0xa6274bbdd0fadd61, 0xecb1ad8aeacdd58e, 0xcfb11ead453994ba, 0x67de18eda5814af2, 0x81ceb32c4b43fcf4, 0x80eacf948770ced7, 0xa2425ff75e14fc31, 0xa1258379a94d028d, 0xcad2f7f5359a3b3e, 0x96ee45813a04330, 0xfd87b5f28300ca0d, 0x8bca9d6e188853fc, 0x9e74d1b791e07e48, 0x775ea264cf55347e, 0xc612062576589dda, 0x95364afe032a81a0, 0xf79687aed3eec551, 0x3a83ddbd83f52210, 0x9abe14cd44753b52, 0xc4926a9672793580, 0xc16d9a0095928a27, 0x75b7053c0f178400, 0xf1c90080baf72cb1, 0x5324c68b12dd6800, 0x971da05074da7bee, 0xd3f6fc16ebca8000, 0xbce5086492111aea, 0x88f4bb1ca6bd0000, 0xec1e4a7db69561a5, 0x2b31e9e3d0700000, 0x9392ee8e921d5d07, 0x3aff322e62600000, 0xb877aa3236a4b449, 0x9befeb9fad487c3, 
0xe69594bec44de15b, 0x4c2ebe687989a9b4, 0x901d7cf73ab0acd9, 0xf9d37014bf60a11, 0xb424dc35095cd80f, 0x538484c19ef38c95, 0xe12e13424bb40e13, 0x2865a5f206b06fba, 0x8cbccc096f5088cb, 0xf93f87b7442e45d4, 0xafebff0bcb24aafe, 0xf78f69a51539d749, 0xdbe6fecebdedd5be, 0xb573440e5a884d1c, 0x89705f4136b4a597, 0x31680a88f8953031, 0xabcc77118461cefc, 0xfdc20d2b36ba7c3e, 0xd6bf94d5e57a42bc, 0x3d32907604691b4d, 0x8637bd05af6c69b5, 0xa63f9a49c2c1b110, 0xa7c5ac471b478423, 0xfcf80dc33721d54, 0xd1b71758e219652b, 0xd3c36113404ea4a9, 0x83126e978d4fdf3b, 0x645a1cac083126ea, 0xa3d70a3d70a3d70a, 0x3d70a3d70a3d70a4, 0xcccccccccccccccc, 0xcccccccccccccccd, 0x8000000000000000, 0x0, 0xa000000000000000, 0x0, 0xc800000000000000, 0x0, 0xfa00000000000000, 0x0, 0x9c40000000000000, 0x0, 0xc350000000000000, 0x0, 0xf424000000000000, 0x0, 0x9896800000000000, 0x0, 0xbebc200000000000, 0x0, 0xee6b280000000000, 0x0, 0x9502f90000000000, 0x0, 0xba43b74000000000, 0x0, 0xe8d4a51000000000, 0x0, 0x9184e72a00000000, 0x0, 0xb5e620f480000000, 0x0, 0xe35fa931a0000000, 0x0, 0x8e1bc9bf04000000, 0x0, 0xb1a2bc2ec5000000, 0x0, 0xde0b6b3a76400000, 0x0, 0x8ac7230489e80000, 0x0, 0xad78ebc5ac620000, 0x0, 0xd8d726b7177a8000, 0x0, 0x878678326eac9000, 0x0, 0xa968163f0a57b400, 0x0, 0xd3c21bcecceda100, 0x0, 0x84595161401484a0, 0x0, 0xa56fa5b99019a5c8, 0x0, 0xcecb8f27f4200f3a, 0x0, 0x813f3978f8940984, 0x4000000000000000, 0xa18f07d736b90be5, 0x5000000000000000, 0xc9f2c9cd04674ede, 0xa400000000000000, 0xfc6f7c4045812296, 0x4d00000000000000, 0x9dc5ada82b70b59d, 0xf020000000000000, 0xc5371912364ce305, 0x6c28000000000000, 0xf684df56c3e01bc6, 0xc732000000000000, 0x9a130b963a6c115c, 0x3c7f400000000000, 0xc097ce7bc90715b3, 0x4b9f100000000000, 0xf0bdc21abb48db20, 0x1e86d40000000000, 0x96769950b50d88f4, 0x1314448000000000, 0xbc143fa4e250eb31, 0x17d955a000000000, 0xeb194f8e1ae525fd, 0x5dcfab0800000000, 0x92efd1b8d0cf37be, 0x5aa1cae500000000, 0xb7abc627050305ad, 0xf14a3d9e40000000, 0xe596b7b0c643c719, 0x6d9ccd05d0000000, 0x8f7e32ce7bea5c6f, 0xe4820023a2000000, 0xb35dbf821ae4f38b, 0xdda2802c8a800000, 0xe0352f62a19e306e, 0xd50b2037ad200000, 0x8c213d9da502de45, 0x4526f422cc340000, 0xaf298d050e4395d6, 0x9670b12b7f410000, 0xdaf3f04651d47b4c, 0x3c0cdd765f114000, 0x88d8762bf324cd0f, 0xa5880a69fb6ac800, 0xab0e93b6efee0053, 0x8eea0d047a457a00, 0xd5d238a4abe98068, 0x72a4904598d6d880, 0x85a36366eb71f041, 0x47a6da2b7f864750, 0xa70c3c40a64e6c51, 0x999090b65f67d924, 0xd0cf4b50cfe20765, 0xfff4b4e3f741cf6d, 0x82818f1281ed449f, 0xbff8f10e7a8921a4, 0xa321f2d7226895c7, 0xaff72d52192b6a0d, 0xcbea6f8ceb02bb39, 0x9bf4f8a69f764490, 0xfee50b7025c36a08, 0x2f236d04753d5b4, 0x9f4f2726179a2245, 0x1d762422c946590, 0xc722f0ef9d80aad6, 0x424d3ad2b7b97ef5, 0xf8ebad2b84e0d58b, 0xd2e0898765a7deb2, 0x9b934c3b330c8577, 0x63cc55f49f88eb2f, 0xc2781f49ffcfa6d5, 0x3cbf6b71c76b25fb, 0xf316271c7fc3908a, 0x8bef464e3945ef7a, 0x97edd871cfda3a56, 0x97758bf0e3cbb5ac, 0xbde94e8e43d0c8ec, 0x3d52eeed1cbea317, 0xed63a231d4c4fb27, 0x4ca7aaa863ee4bdd, 0x945e455f24fb1cf8, 0x8fe8caa93e74ef6a, 0xb975d6b6ee39e436, 0xb3e2fd538e122b44, 0xe7d34c64a9c85d44, 0x60dbbca87196b616, 0x90e40fbeea1d3a4a, 0xbc8955e946fe31cd, 0xb51d13aea4a488dd, 0x6babab6398bdbe41, 0xe264589a4dcdab14, 0xc696963c7eed2dd1, 0x8d7eb76070a08aec, 0xfc1e1de5cf543ca2, 0xb0de65388cc8ada8, 0x3b25a55f43294bcb, 0xdd15fe86affad912, 0x49ef0eb713f39ebe, 0x8a2dbf142dfcc7ab, 0x6e3569326c784337, 0xacb92ed9397bf996, 0x49c2c37f07965404, 0xd7e77a8f87daf7fb, 0xdc33745ec97be906, 0x86f0ac99b4e8dafd, 0x69a028bb3ded71a3, 0xa8acd7c0222311bc, 0xc40832ea0d68ce0c, 
0xd2d80db02aabd62b, 0xf50a3fa490c30190, 0x83c7088e1aab65db, 0x792667c6da79e0fa, 0xa4b8cab1a1563f52, 0x577001b891185938, 0xcde6fd5e09abcf26, 0xed4c0226b55e6f86, 0x80b05e5ac60b6178, 0x544f8158315b05b4, 0xa0dc75f1778e39d6, 0x696361ae3db1c721, 0xc913936dd571c84c, 0x3bc3a19cd1e38e9, 0xfb5878494ace3a5f, 0x4ab48a04065c723, 0x9d174b2dcec0e47b, 0x62eb0d64283f9c76, 0xc45d1df942711d9a, 0x3ba5d0bd324f8394, 0xf5746577930d6500, 0xca8f44ec7ee36479, 0x9968bf6abbe85f20, 0x7e998b13cf4e1ecb, 0xbfc2ef456ae276e8, 0x9e3fedd8c321a67e, 0xefb3ab16c59b14a2, 0xc5cfe94ef3ea101e, 0x95d04aee3b80ece5, 0xbba1f1d158724a12, 0xbb445da9ca61281f, 0x2a8a6e45ae8edc97, 0xea1575143cf97226, 0xf52d09d71a3293bd, 0x924d692ca61be758, 0x593c2626705f9c56, 0xb6e0c377cfa2e12e, 0x6f8b2fb00c77836c, 0xe498f455c38b997a, 0xb6dfb9c0f956447, 0x8edf98b59a373fec, 0x4724bd4189bd5eac, 0xb2977ee300c50fe7, 0x58edec91ec2cb657, 0xdf3d5e9bc0f653e1, 0x2f2967b66737e3ed, 0x8b865b215899f46c, 0xbd79e0d20082ee74, 0xae67f1e9aec07187, 0xecd8590680a3aa11, 0xda01ee641a708de9, 0xe80e6f4820cc9495, 0x884134fe908658b2, 0x3109058d147fdcdd, 0xaa51823e34a7eede, 0xbd4b46f0599fd415, 0xd4e5e2cdc1d1ea96, 0x6c9e18ac7007c91a, 0x850fadc09923329e, 0x3e2cf6bc604ddb0, 0xa6539930bf6bff45, 0x84db8346b786151c, 0xcfe87f7cef46ff16, 0xe612641865679a63, 0x81f14fae158c5f6e, 0x4fcb7e8f3f60c07e, 0xa26da3999aef7749, 0xe3be5e330f38f09d, 0xcb090c8001ab551c, 0x5cadf5bfd3072cc5, 0xfdcb4fa002162a63, 0x73d9732fc7c8f7f6, 0x9e9f11c4014dda7e, 0x2867e7fddcdd9afa, 0xc646d63501a1511d, 0xb281e1fd541501b8, 0xf7d88bc24209a565, 0x1f225a7ca91a4226, 0x9ae757596946075f, 0x3375788de9b06958, 0xc1a12d2fc3978937, 0x52d6b1641c83ae, 0xf209787bb47d6b84, 0xc0678c5dbd23a49a, 0x9745eb4d50ce6332, 0xf840b7ba963646e0, 0xbd176620a501fbff, 0xb650e5a93bc3d898, 0xec5d3fa8ce427aff, 0xa3e51f138ab4cebe, 0x93ba47c980e98cdf, 0xc66f336c36b10137, 0xb8a8d9bbe123f017, 0xb80b0047445d4184, 0xe6d3102ad96cec1d, 0xa60dc059157491e5, 0x9043ea1ac7e41392, 0x87c89837ad68db2f, 0xb454e4a179dd1877, 0x29babe4598c311fb, 0xe16a1dc9d8545e94, 0xf4296dd6fef3d67a, 0x8ce2529e2734bb1d, 0x1899e4a65f58660c, 0xb01ae745b101e9e4, 0x5ec05dcff72e7f8f, 0xdc21a1171d42645d, 0x76707543f4fa1f73, 0x899504ae72497eba, 0x6a06494a791c53a8, 0xabfa45da0edbde69, 0x487db9d17636892, 0xd6f8d7509292d603, 0x45a9d2845d3c42b6, 0x865b86925b9bc5c2, 0xb8a2392ba45a9b2, 0xa7f26836f282b732, 0x8e6cac7768d7141e, 0xd1ef0244af2364ff, 0x3207d795430cd926, 0x8335616aed761f1f, 0x7f44e6bd49e807b8, 0xa402b9c5a8d3a6e7, 0x5f16206c9c6209a6, 0xcd036837130890a1, 0x36dba887c37a8c0f, 0x802221226be55a64, 0xc2494954da2c9789, 0xa02aa96b06deb0fd, 0xf2db9baa10b7bd6c, 0xc83553c5c8965d3d, 0x6f92829494e5acc7, 0xfa42a8b73abbf48c, 0xcb772339ba1f17f9, 0x9c69a97284b578d7, 0xff2a760414536efb, 0xc38413cf25e2d70d, 0xfef5138519684aba, 0xf46518c2ef5b8cd1, 0x7eb258665fc25d69, 0x98bf2f79d5993802, 0xef2f773ffbd97a61, 0xbeeefb584aff8603, 0xaafb550ffacfd8fa, 0xeeaaba2e5dbf6784, 0x95ba2a53f983cf38, 0x952ab45cfa97a0b2, 0xdd945a747bf26183, 0xba756174393d88df, 0x94f971119aeef9e4, 0xe912b9d1478ceb17, 0x7a37cd5601aab85d, 0x91abb422ccb812ee, 0xac62e055c10ab33a, 0xb616a12b7fe617aa, 0x577b986b314d6009, 0xe39c49765fdf9d94, 0xed5a7e85fda0b80b, 0x8e41ade9fbebc27d, 0x14588f13be847307, 0xb1d219647ae6b31c, 0x596eb2d8ae258fc8, 0xde469fbd99a05fe3, 0x6fca5f8ed9aef3bb, 0x8aec23d680043bee, 0x25de7bb9480d5854, 0xada72ccc20054ae9, 0xaf561aa79a10ae6a, 0xd910f7ff28069da4, 0x1b2ba1518094da04, 0x87aa9aff79042286, 0x90fb44d2f05d0842, 0xa99541bf57452b28, 0x353a1607ac744a53, 0xd3fa922f2d1675f2, 0x42889b8997915ce8, 0x847c9b5d7c2e09b7, 0x69956135febada11, 
0xa59bc234db398c25, 0x43fab9837e699095, 0xcf02b2c21207ef2e, 0x94f967e45e03f4bb, 0x8161afb94b44f57d, 0x1d1be0eebac278f5, 0xa1ba1ba79e1632dc, 0x6462d92a69731732, 0xca28a291859bbf93, 0x7d7b8f7503cfdcfe, 0xfcb2cb35e702af78, 0x5cda735244c3d43e, 0x9defbf01b061adab, 0x3a0888136afa64a7, 0xc56baec21c7a1916, 0x88aaa1845b8fdd0, 0xf6c69a72a3989f5b, 0x8aad549e57273d45, 0x9a3c2087a63f6399, 0x36ac54e2f678864b, 0xc0cb28a98fcf3c7f, 0x84576a1bb416a7dd, 0xf0fdf2d3f3c30b9f, 0x656d44a2a11c51d5, 0x969eb7c47859e743, 0x9f644ae5a4b1b325, 0xbc4665b596706114, 0x873d5d9f0dde1fee, 0xeb57ff22fc0c7959, 0xa90cb506d155a7ea, 0x9316ff75dd87cbd8, 0x9a7f12442d588f2, 0xb7dcbf5354e9bece, 0xc11ed6d538aeb2f, 0xe5d3ef282a242e81, 0x8f1668c8a86da5fa, 0x8fa475791a569d10, 0xf96e017d694487bc, 0xb38d92d760ec4455, 0x37c981dcc395a9ac, 0xe070f78d3927556a, 0x85bbe253f47b1417, 0x8c469ab843b89562, 0x93956d7478ccec8e, 0xaf58416654a6babb, 0x387ac8d1970027b2, 0xdb2e51bfe9d0696a, 0x6997b05fcc0319e, 0x88fcf317f22241e2, 0x441fece3bdf81f03, 0xab3c2fddeeaad25a, 0xd527e81cad7626c3, 0xd60b3bd56a5586f1, 0x8a71e223d8d3b074, 0x85c7056562757456, 0xf6872d5667844e49, 0xa738c6bebb12d16c, 0xb428f8ac016561db, 0xd106f86e69d785c7, 0xe13336d701beba52, 0x82a45b450226b39c, 0xecc0024661173473, 0xa34d721642b06084, 0x27f002d7f95d0190, 0xcc20ce9bd35c78a5, 0x31ec038df7b441f4, 0xff290242c83396ce, 0x7e67047175a15271, 0x9f79a169bd203e41, 0xf0062c6e984d386, 0xc75809c42c684dd1, 0x52c07b78a3e60868, 0xf92e0c3537826145, 0xa7709a56ccdf8a82, 0x9bbcc7a142b17ccb, 0x88a66076400bb691, 0xc2abf989935ddbfe, 0x6acff893d00ea435, 0xf356f7ebf83552fe, 0x583f6b8c4124d43, 0x98165af37b2153de, 0xc3727a337a8b704a, 0xbe1bf1b059e9a8d6, 0x744f18c0592e4c5c, 0xeda2ee1c7064130c, 0x1162def06f79df73, 0x9485d4d1c63e8be7, 0x8addcb5645ac2ba8, 0xb9a74a0637ce2ee1, 0x6d953e2bd7173692, 0xe8111c87c5c1ba99, 0xc8fa8db6ccdd0437, 0x910ab1d4db9914a0, 0x1d9c9892400a22a2, 0xb54d5e4a127f59c8, 0x2503beb6d00cab4b, 0xe2a0b5dc971f303a, 0x2e44ae64840fd61d, 0x8da471a9de737e24, 0x5ceaecfed289e5d2, 0xb10d8e1456105dad, 0x7425a83e872c5f47, 0xdd50f1996b947518, 0xd12f124e28f77719, 0x8a5296ffe33cc92f, 0x82bd6b70d99aaa6f, 0xace73cbfdc0bfb7b, 0x636cc64d1001550b, 0xd8210befd30efa5a, 0x3c47f7e05401aa4e, 0x8714a775e3e95c78, 0x65acfaec34810a71, 0xa8d9d1535ce3b396, 0x7f1839a741a14d0d, 0xd31045a8341ca07c, 0x1ede48111209a050, 0x83ea2b892091e44d, 0x934aed0aab460432, 0xa4e4b66b68b65d60, 0xf81da84d5617853f, 0xce1de40642e3f4b9, 0x36251260ab9d668e, 0x80d2ae83e9ce78f3, 0xc1d72b7c6b426019, 0xa1075a24e4421730, 0xb24cf65b8612f81f, 0xc94930ae1d529cfc, 0xdee033f26797b627, 0xfb9b7cd9a4a7443c, 0x169840ef017da3b1, 0x9d412e0806e88aa5, 0x8e1f289560ee864e, 0xc491798a08a2ad4e, 0xf1a6f2bab92a27e2, 0xf5b5d7ec8acb58a2, 0xae10af696774b1db, 0x9991a6f3d6bf1765, 0xacca6da1e0a8ef29, 0xbff610b0cc6edd3f, 0x17fd090a58d32af3, 0xeff394dcff8a948e, 0xddfc4b4cef07f5b0, 0x95f83d0a1fb69cd9, 0x4abdaf101564f98e, 0xbb764c4ca7a4440f, 0x9d6d1ad41abe37f1, 0xea53df5fd18d5513, 0x84c86189216dc5ed, 0x92746b9be2f8552c, 0x32fd3cf5b4e49bb4, 0xb7118682dbb66a77, 0x3fbc8c33221dc2a1, 0xe4d5e82392a40515, 0xfabaf3feaa5334a, 0x8f05b1163ba6832d, 0x29cb4d87f2a7400e, 0xb2c71d5bca9023f8, 0x743e20e9ef511012, 0xdf78e4b2bd342cf6, 0x914da9246b255416, 0x8bab8eefb6409c1a, 0x1ad089b6c2f7548e, 0xae9672aba3d0c320, 0xa184ac2473b529b1, 0xda3c0f568cc4f3e8, 0xc9e5d72d90a2741e, 0x8865899617fb1871, 0x7e2fa67c7a658892, 0xaa7eebfb9df9de8d, 0xddbb901b98feeab7, 0xd51ea6fa85785631, 0x552a74227f3ea565, 0x8533285c936b35de, 0xd53a88958f87275f, 0xa67ff273b8460356, 0x8a892abaf368f137, 0xd01fef10a657842c, 0x2d2b7569b0432d85, 
0x8213f56a67f6b29b, 0x9c3b29620e29fc73, 0xa298f2c501f45f42, 0x8349f3ba91b47b8f, 0xcb3f2f7642717713, 0x241c70a936219a73, 0xfe0efb53d30dd4d7, 0xed238cd383aa0110, 0x9ec95d1463e8a506, 0xf4363804324a40aa, 0xc67bb4597ce2ce48, 0xb143c6053edcd0d5, 0xf81aa16fdc1b81da, 0xdd94b7868e94050a, 0x9b10a4e5e9913128, 0xca7cf2b4191c8326, 0xc1d4ce1f63f57d72, 0xfd1c2f611f63a3f0, 0xf24a01a73cf2dccf, 0xbc633b39673c8cec, 0x976e41088617ca01, 0xd5be0503e085d813, 0xbd49d14aa79dbc82, 0x4b2d8644d8a74e18, 0xec9c459d51852ba2, 0xddf8e7d60ed1219e, 0x93e1ab8252f33b45, 0xcabb90e5c942b503, 0xb8da1662e7b00a17, 0x3d6a751f3b936243, 0xe7109bfba19c0c9d, 0xcc512670a783ad4, 0x906a617d450187e2, 0x27fb2b80668b24c5, 0xb484f9dc9641e9da, 0xb1f9f660802dedf6, 0xe1a63853bbd26451, 0x5e7873f8a0396973, 0x8d07e33455637eb2, 0xdb0b487b6423e1e8, 0xb049dc016abc5e5f, 0x91ce1a9a3d2cda62, 0xdc5c5301c56b75f7, 0x7641a140cc7810fb, 0x89b9b3e11b6329ba, 0xa9e904c87fcb0a9d, 0xac2820d9623bf429, 0x546345fa9fbdcd44, 0xd732290fbacaf133, 0xa97c177947ad4095, 0x867f59a9d4bed6c0, 0x49ed8eabcccc485d, 0xa81f301449ee8c70, 0x5c68f256bfff5a74, 0xd226fc195c6a2f8c, 0x73832eec6fff3111, 0x83585d8fd9c25db7, 0xc831fd53c5ff7eab, 0xa42e74f3d032f525, 0xba3e7ca8b77f5e55, 0xcd3a1230c43fb26f, 0x28ce1bd2e55f35eb, 0x80444b5e7aa7cf85, 0x7980d163cf5b81b3, 0xa0555e361951c366, 0xd7e105bcc332621f, 0xc86ab5c39fa63440, 0x8dd9472bf3fefaa7, 0xfa856334878fc150, 0xb14f98f6f0feb951, 0x9c935e00d4b9d8d2, 0x6ed1bf9a569f33d3, 0xc3b8358109e84f07, 0xa862f80ec4700c8, 0xf4a642e14c6262c8, 0xcd27bb612758c0fa, 0x98e7e9cccfbd7dbd, 0x8038d51cb897789c, 0xbf21e44003acdd2c, 0xe0470a63e6bd56c3, 0xeeea5d5004981478, 0x1858ccfce06cac74, 0x95527a5202df0ccb, 0xf37801e0c43ebc8, 0xbaa718e68396cffd, 0xd30560258f54e6ba, 0xe950df20247c83fd, 0x47c6b82ef32a2069, 0x91d28b7416cdd27e, 0x4cdc331d57fa5441, 0xb6472e511c81471d, 0xe0133fe4adf8e952, 0xe3d8f9e563a198e5, 0x58180fddd97723a6, 0x8e679c2f5e44ff8f, 0x570f09eaa7ea7648, }; const integer_string_finisher = [256]Error!void{ error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, {}, {}, error.NUMBER_ERROR, error.NUMBER_ERROR, {}, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, {}, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, {}, error.NUMBER_ERROR, error.INCORRECT_TYPE, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, {}, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.INCORRECT_TYPE, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, 
error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, {}, error.NUMBER_ERROR, {}, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.INCORRECT_TYPE, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, {}, error.NUMBER_ERROR, {}, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, error.NUMBER_ERROR, }; // Parse any number from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807 pub fn parse_integer(src: [*]const u8) !u64 { // // Check for minus sign 
// const negative = (src[0] == '-'); var p = src + @boolToInt(negative); // // Parse the integer part. // // PERF NOTE: we don't use is_made_of_eight_digits_fast because large integers like 123456789 are rare const start_digits = p; var i: u64 = 0; while (parse_digit(u64, p[0], &i)) : (p += 1) {} // If there were no digits, or if the integer starts with 0 and has more than one digit, it's an error. // Optimization note: size_t is expected to be unsigned. var digit_count = try common.ptr_diff(u32, p, start_digits); // The longest negative 64-bit number is 19 digits. // The longest positive 64-bit number is 20 digits. // We do it this way so we don't trigger this branch unless we must. const longest_digit_count: u64 = if (negative) 19 else 20; // Optimization note: the compiler can probably merge // ((digit_count == 0) || (digit_count > longest_digit_count)) // into a single branch since digit_count is unsigned. if ((digit_count == 0) or (digit_count > longest_digit_count)) { return error.INCORRECT_TYPE; } // Here digit_count > 0. if (('0' == start_digits[0]) and (digit_count > 1)) { return error.NUMBER_ERROR; } // We can do the following... // if (!jsoncharutils::is_structural_or_whitespace(*p)) { // return (*p == '.' || *p == 'e' || *p == 'E') ? INCORRECT_TYPE : NUMBER_ERROR; // } // as a single table lookup: if (integer_string_finisher[p[0]]) {} else |err| return err; if (digit_count == longest_digit_count) { if (negative) { // Anything negative above INT64_MAX+1 is invalid if (i > std.math.maxInt(i64) + 1) return error.INCORRECT_TYPE; return ~i + 1; // Positive overflow check: // - A 20 digit number starting with 2-9 is overflow, because 18,446,744,073,709,551,615 is the // biggest uint64_t. // - A 20 digit number starting with 1 is overflow if it is less than INT64_MAX. // If we got here, it's a 20 digit number starting with the digit "1". // - If a 20 digit number starting with 1 overflowed (i*10+digit), the result will be smaller // than 1,553,255,926,290,448,384. // - That is smaller than the smallest possible 20-digit number the user could write: // 10,000,000,000,000,000,000. // - Therefore, if the number is positive and lower than that, it's overflow. // - The value we are looking at is less than or equal to 9,223,372,036,854,775,808 (INT64_MAX). // } else if (src[0] != '1' or i <= std.math.maxInt(i64)) return error.INCORRECT_TYPE; } return if (negative) (~i +% 1) else i; } pub fn parse_double(src_: [*]const u8) !f64 { // // Check for minus sign // const negative = (src_[0] == '-'); var src = src_ + @boolToInt(negative); // // Parse the integer part. // var i: u64 = 0; var p = src; p += @boolToInt(parse_digit(u64, p[0], &i)); const leading_zero = (i == 0); while (parse_digit(u64, p[0], &i)) { p += 1; } // no integer digits, or 0123 (zero must be solo) if (p == src) return error.INCORRECT_TYPE; if ((leading_zero and p != src + 1)) { return error.NUMBER_ERROR; } // // Parse the decimal part. // var exponent: i64 = 0; var overflow: bool = undefined; if (p[0] == '.') { p += 1; const start_decimal_digits = p; if (!parse_digit(u64, p[0], &i)) return error.NUMBER_ERROR; // no decimal digits p += 1; while (parse_digit(u64, p[0], &i)) { p += 1; } exponent = -try common.ptr_diff(i64, p, start_decimal_digits); // Overflow check. More than 19 digits (minus the decimal) may be overflow. 
overflow = (try common.ptr_diff(u16, p, src)) - 1 > 19; if (overflow and leading_zero) { // Skip leading 0.00000 and see if it still overflows var start_digits = src + 2; while (start_digits[0] == '0') { start_digits += 1; } overflow = (try common.ptr_diff(u32, start_digits, src)) > 19; } } else { overflow = (try common.ptr_diff(u16, p, src)) > 19; } // // Parse the exponent // if (p[0] == 'e' or p[0] == 'E') { p += 1; const exp_neg = p[0] == '-'; p += @boolToInt(exp_neg or p[0] == '+'); var exp: u64 = 0; const start_exp_digits = p; while (parse_digit(u64, p[0], &exp)) { p += 1; } // no exp digits, or 20+ exp digits const num_exp_digits = try common.ptr_diff(u16, p, start_exp_digits); if (num_exp_digits == 0 or num_exp_digits > 19) return error.NUMBER_ERROR; exponent += @bitCast(i64, if (exp_neg) 0 -% exp else exp); } if (CharUtils.is_not_structural_or_whitespace(p[0])) return error.NUMBER_ERROR; overflow = overflow or exponent < smallest_power or exponent > largest_power; // // Assemble (or slow-parse) the float // var d: f64 = undefined; if (!overflow) { if (compute_float_64(exponent, i, negative, &d)) { return d; } } if (!parse_float_fallback(src - @boolToInt(negative), &d)) { return error.NUMBER_ERROR; } return d; }
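// Editor's addition (not in the original port): a small sanity test for
// to_double above. That helper just packs IEEE-754 binary64 fields by hand,
// so an exponent field of 1023 with an empty mantissa is 1.0, 1024 is 2.0,
// the sign argument flips the sign, and setting bit 51 of the mantissa adds
// one half.
test "to_double packs IEEE-754 fields (editor sketch)" {
    try std.testing.expectEqual(@as(f64, 1.0), to_double(0, 1023, 0));
    try std.testing.expectEqual(@as(f64, 2.0), to_double(0, 1024, 0));
    try std.testing.expectEqual(@as(f64, -1.0), to_double(0, 1023, 1));
    try std.testing.expectEqual(@as(f64, 1.5), to_double(@as(u64, 1) << 51, 1023, 0));
}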
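// Editor's addition (not in the original port): the fast path in
// compute_float_64 relies on Clinger's observation that when the decimal
// mantissa fits in 53 bits and the power of ten is within the power_of_ten
// table, both operands are exact f64 values, so a single multiply or divide
// is already correctly rounded. A minimal sketch of that exact case, using
// the power_of_ten table from this file:
test "clinger exact fast path (editor sketch)" {
    var mantissa: u64 = 345;
    // 345 * 10^2 is exactly representable, so the product is exact.
    var d: f64 = @intToFloat(f64, mantissa) * power_of_ten[2];
    try std.testing.expectEqual(@as(f64, 34500.0), d);
    // 345 / 10^2 is correctly rounded, i.e. it equals the f64 literal 3.45.
    d = @intToFloat(f64, mantissa) / power_of_ten[2];
    try std.testing.expectEqual(@as(f64, 3.45), d);
}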
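// Editor's addition (not in the original port): a spot check of the integer
// approximation used in compute_float_64, ((152170 + 65536) * q) >> 16, which
// should equal floor(log2(5^q)) + q for q >= 0. The expected values below are
// hand-checked.
test "fast floor(log2(5^q)) + q approximation (editor sketch)" {
    // 5^3 = 125       -> floor(log2) = 6,  6 + 3  = 9
    try std.testing.expectEqual(@as(i64, 9), ((152170 + 65536) * @as(i64, 3)) >> 16);
    // 5^10 = 9765625  -> floor(log2) = 23, 23 + 10 = 33
    try std.testing.expectEqual(@as(i64, 33), ((152170 + 65536) * @as(i64, 10)) >> 16);
    // 5^22 ~= 2.38e15 -> floor(log2) = 51, 51 + 22 = 73
    try std.testing.expectEqual(@as(i64, 73), ((152170 + 65536) * @as(i64, 22)) >> 16);
}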
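// Editor's addition (not in the original port): the slow path in
// compute_float_64 uses CharUtils.full_multiplication to get the full 128-bit
// product of two 64-bit words as separate halves. The .high/.low field names
// are taken from the usage above; this is only a sketch of what that
// decomposition means, assuming that shape is correct.
test "full_multiplication splits the 128-bit product (editor sketch)" {
    // (1 << 63) * 4 = 1 << 65, so the high word is 2 and the low word is 0.
    const product = CharUtils.full_multiplication(@as(u64, 1) << 63, @as(u64, 4));
    try std.testing.expectEqual(@as(u64, 2), product.high);
    try std.testing.expectEqual(@as(u64, 0), product.low);
}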
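// Editor's addition (not in the original port): a usage sketch for
// parse_integer and parse_double above. The input has to end in a JSON
// structural character or whitespace (a trailing space here), because both
// parsers validate the byte that terminates the number. The expected double
// assumes the slow path is correctly rounded, as in upstream simdjson.
test "parse_integer and parse_double usage (editor sketch)" {
    try std.testing.expectEqual(@as(u64, 123), try parse_integer("123 "));
    try std.testing.expectEqual(@as(f64, 3.14), try parse_double("3.14 "));
    // A leading zero followed by more digits is rejected.
    try std.testing.expectError(error.NUMBER_ERROR, parse_integer("0123 "));
}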
src/number_parsing.zig
pub const ASN_UNIVERSAL = @as(u32, 0); pub const ASN_APPLICATION = @as(u32, 64); pub const ASN_CONTEXT = @as(u32, 128); pub const ASN_PRIVATE = @as(u32, 192); pub const ASN_PRIMITIVE = @as(u32, 0); pub const ASN_CONSTRUCTOR = @as(u32, 32); pub const SNMP_ACCESS_NONE = @as(u32, 0); pub const SNMP_ACCESS_NOTIFY = @as(u32, 1); pub const SNMP_ACCESS_READ_ONLY = @as(u32, 2); pub const SNMP_ACCESS_READ_WRITE = @as(u32, 3); pub const SNMP_ACCESS_READ_CREATE = @as(u32, 4); pub const SNMPAPI_NOERROR = @as(u32, 1); pub const SNMPAPI_ERROR = @as(u32, 0); pub const SNMP_OUTPUT_TO_EVENTLOG = @as(u32, 4); pub const DEFAULT_SNMP_PORT_UDP = @as(u32, 161); pub const DEFAULT_SNMP_PORT_IPX = @as(u32, 36879); pub const DEFAULT_SNMPTRAP_PORT_UDP = @as(u32, 162); pub const DEFAULT_SNMPTRAP_PORT_IPX = @as(u32, 36880); pub const SNMP_MAX_OID_LEN = @as(u32, 128); pub const SNMP_MEM_ALLOC_ERROR = @as(u32, 1); pub const SNMP_BERAPI_INVALID_LENGTH = @as(u32, 10); pub const SNMP_BERAPI_INVALID_TAG = @as(u32, 11); pub const SNMP_BERAPI_OVERFLOW = @as(u32, 12); pub const SNMP_BERAPI_SHORT_BUFFER = @as(u32, 13); pub const SNMP_BERAPI_INVALID_OBJELEM = @as(u32, 14); pub const SNMP_PDUAPI_UNRECOGNIZED_PDU = @as(u32, 20); pub const SNMP_PDUAPI_INVALID_ES = @as(u32, 21); pub const SNMP_PDUAPI_INVALID_GT = @as(u32, 22); pub const SNMP_AUTHAPI_INVALID_VERSION = @as(u32, 30); pub const SNMP_AUTHAPI_INVALID_MSG_TYPE = @as(u32, 31); pub const SNMP_AUTHAPI_TRIV_AUTH_FAILED = @as(u32, 32); pub const SNMP_MGMTAPI_TIMEOUT = @as(u32, 40); pub const SNMP_MGMTAPI_SELECT_FDERRORS = @as(u32, 41); pub const SNMP_MGMTAPI_TRAP_ERRORS = @as(u32, 42); pub const SNMP_MGMTAPI_TRAP_DUPINIT = @as(u32, 43); pub const SNMP_MGMTAPI_NOTRAPS = @as(u32, 44); pub const SNMP_MGMTAPI_AGAIN = @as(u32, 45); pub const SNMP_MGMTAPI_INVALID_CTL = @as(u32, 46); pub const SNMP_MGMTAPI_INVALID_SESSION = @as(u32, 47); pub const SNMP_MGMTAPI_INVALID_BUFFER = @as(u32, 48); pub const MGMCTL_SETAGENTPORT = @as(u32, 1); pub const MAXOBJIDSIZE = @as(u32, 128); pub const MAXOBJIDSTRSIZE = @as(u32, 1408); pub const SNMPLISTEN_USEENTITY_ADDR = @as(u32, 0); pub const SNMPLISTEN_ALL_ADDR = @as(u32, 1); pub const SNMP_TRAP_COLDSTART = @as(u32, 0); pub const SNMP_TRAP_WARMSTART = @as(u32, 1); pub const SNMP_TRAP_LINKDOWN = @as(u32, 2); pub const SNMP_TRAP_LINKUP = @as(u32, 3); pub const SNMP_TRAP_AUTHFAIL = @as(u32, 4); pub const SNMP_TRAP_EGPNEIGHBORLOSS = @as(u32, 5); pub const SNMP_TRAP_ENTERPRISESPECIFIC = @as(u32, 6); pub const SNMPAPI_NO_SUPPORT = @as(u32, 0); pub const SNMPAPI_V1_SUPPORT = @as(u32, 1); pub const SNMPAPI_V2_SUPPORT = @as(u32, 2); pub const SNMPAPI_M2M_SUPPORT = @as(u32, 3); pub const SNMPAPI_FAILURE = @as(u32, 0); pub const SNMPAPI_SUCCESS = @as(u32, 1); pub const SNMPAPI_ALLOC_ERROR = @as(u32, 2); pub const SNMPAPI_CONTEXT_INVALID = @as(u32, 3); pub const SNMPAPI_CONTEXT_UNKNOWN = @as(u32, 4); pub const SNMPAPI_ENTITY_INVALID = @as(u32, 5); pub const SNMPAPI_ENTITY_UNKNOWN = @as(u32, 6); pub const SNMPAPI_INDEX_INVALID = @as(u32, 7); pub const SNMPAPI_NOOP = @as(u32, 8); pub const SNMPAPI_OID_INVALID = @as(u32, 9); pub const SNMPAPI_OPERATION_INVALID = @as(u32, 10); pub const SNMPAPI_OUTPUT_TRUNCATED = @as(u32, 11); pub const SNMPAPI_PDU_INVALID = @as(u32, 12); pub const SNMPAPI_SESSION_INVALID = @as(u32, 13); pub const SNMPAPI_SYNTAX_INVALID = @as(u32, 14); pub const SNMPAPI_VBL_INVALID = @as(u32, 15); pub const SNMPAPI_MODE_INVALID = @as(u32, 16); pub const SNMPAPI_SIZE_INVALID = @as(u32, 17); pub const SNMPAPI_NOT_INITIALIZED = @as(u32, 18); pub const 
SNMPAPI_MESSAGE_INVALID = @as(u32, 19); pub const SNMPAPI_HWND_INVALID = @as(u32, 20); pub const SNMPAPI_OTHER_ERROR = @as(u32, 99); pub const SNMPAPI_TL_NOT_INITIALIZED = @as(u32, 100); pub const SNMPAPI_TL_NOT_SUPPORTED = @as(u32, 101); pub const SNMPAPI_TL_NOT_AVAILABLE = @as(u32, 102); pub const SNMPAPI_TL_RESOURCE_ERROR = @as(u32, 103); pub const SNMPAPI_TL_UNDELIVERABLE = @as(u32, 104); pub const SNMPAPI_TL_SRC_INVALID = @as(u32, 105); pub const SNMPAPI_TL_INVALID_PARAM = @as(u32, 106); pub const SNMPAPI_TL_IN_USE = @as(u32, 107); pub const SNMPAPI_TL_TIMEOUT = @as(u32, 108); pub const SNMPAPI_TL_PDU_TOO_BIG = @as(u32, 109); pub const SNMPAPI_TL_OTHER = @as(u32, 199); pub const MAXVENDORINFO = @as(u32, 32); //-------------------------------------------------------------------------------- // Section: Types (32) //-------------------------------------------------------------------------------- pub const SNMP_PDU_TYPE = enum(u32) { GET = 160, GETNEXT = 161, RESPONSE = 162, SET = 163, GETBULK = 165, TRAP = 167, }; pub const SNMP_PDU_GET = SNMP_PDU_TYPE.GET; pub const SNMP_PDU_GETNEXT = SNMP_PDU_TYPE.GETNEXT; pub const SNMP_PDU_RESPONSE = SNMP_PDU_TYPE.RESPONSE; pub const SNMP_PDU_SET = SNMP_PDU_TYPE.SET; pub const SNMP_PDU_GETBULK = SNMP_PDU_TYPE.GETBULK; pub const SNMP_PDU_TRAP = SNMP_PDU_TYPE.TRAP; pub const SNMP_EXTENSION_REQUEST_TYPE = enum(u32) { GET = 160, GET_NEXT = 161, SET_TEST = 224, SET_COMMIT = 163, SET_UNDO = 225, SET_CLEANUP = 226, }; pub const SNMP_EXTENSION_GET = SNMP_EXTENSION_REQUEST_TYPE.GET; pub const SNMP_EXTENSION_GET_NEXT = SNMP_EXTENSION_REQUEST_TYPE.GET_NEXT; pub const SNMP_EXTENSION_SET_TEST = SNMP_EXTENSION_REQUEST_TYPE.SET_TEST; pub const SNMP_EXTENSION_SET_COMMIT = SNMP_EXTENSION_REQUEST_TYPE.SET_COMMIT; pub const SNMP_EXTENSION_SET_UNDO = SNMP_EXTENSION_REQUEST_TYPE.SET_UNDO; pub const SNMP_EXTENSION_SET_CLEANUP = SNMP_EXTENSION_REQUEST_TYPE.SET_CLEANUP; pub const SNMP_API_TRANSLATE_MODE = enum(u32) { TRANSLATED = 0, UNTRANSLATED_V1 = 1, UNTRANSLATED_V2 = 2, }; pub const SNMPAPI_TRANSLATED = SNMP_API_TRANSLATE_MODE.TRANSLATED; pub const SNMPAPI_UNTRANSLATED_V1 = SNMP_API_TRANSLATE_MODE.UNTRANSLATED_V1; pub const SNMPAPI_UNTRANSLATED_V2 = SNMP_API_TRANSLATE_MODE.UNTRANSLATED_V2; pub const SNMP_GENERICTRAP = enum(u32) { COLDSTART = 0, WARMSTART = 1, LINKDOWN = 2, LINKUP = 3, AUTHFAILURE = 4, EGPNEIGHLOSS = 5, ENTERSPECIFIC = 6, }; pub const SNMP_GENERICTRAP_COLDSTART = SNMP_GENERICTRAP.COLDSTART; pub const SNMP_GENERICTRAP_WARMSTART = SNMP_GENERICTRAP.WARMSTART; pub const SNMP_GENERICTRAP_LINKDOWN = SNMP_GENERICTRAP.LINKDOWN; pub const SNMP_GENERICTRAP_LINKUP = SNMP_GENERICTRAP.LINKUP; pub const SNMP_GENERICTRAP_AUTHFAILURE = SNMP_GENERICTRAP.AUTHFAILURE; pub const SNMP_GENERICTRAP_EGPNEIGHLOSS = SNMP_GENERICTRAP.EGPNEIGHLOSS; pub const SNMP_GENERICTRAP_ENTERSPECIFIC = SNMP_GENERICTRAP.ENTERSPECIFIC; pub const SNMP_ERROR_STATUS = enum(u32) { NOERROR = 0, TOOBIG = 1, NOSUCHNAME = 2, BADVALUE = 3, READONLY = 4, GENERR = 5, NOACCESS = 6, WRONGTYPE = 7, WRONGLENGTH = 8, WRONGENCODING = 9, WRONGVALUE = 10, NOCREATION = 11, INCONSISTENTVALUE = 12, RESOURCEUNAVAILABLE = 13, COMMITFAILED = 14, UNDOFAILED = 15, AUTHORIZATIONERROR = 16, NOTWRITABLE = 17, INCONSISTENTNAME = 18, }; pub const SNMP_ERRORSTATUS_NOERROR = SNMP_ERROR_STATUS.NOERROR; pub const SNMP_ERRORSTATUS_TOOBIG = SNMP_ERROR_STATUS.TOOBIG; pub const SNMP_ERRORSTATUS_NOSUCHNAME = SNMP_ERROR_STATUS.NOSUCHNAME; pub const SNMP_ERRORSTATUS_BADVALUE = SNMP_ERROR_STATUS.BADVALUE; pub const 
SNMP_ERRORSTATUS_READONLY = SNMP_ERROR_STATUS.READONLY; pub const SNMP_ERRORSTATUS_GENERR = SNMP_ERROR_STATUS.GENERR; pub const SNMP_ERRORSTATUS_NOACCESS = SNMP_ERROR_STATUS.NOACCESS; pub const SNMP_ERRORSTATUS_WRONGTYPE = SNMP_ERROR_STATUS.WRONGTYPE; pub const SNMP_ERRORSTATUS_WRONGLENGTH = SNMP_ERROR_STATUS.WRONGLENGTH; pub const SNMP_ERRORSTATUS_WRONGENCODING = SNMP_ERROR_STATUS.WRONGENCODING; pub const SNMP_ERRORSTATUS_WRONGVALUE = SNMP_ERROR_STATUS.WRONGVALUE; pub const SNMP_ERRORSTATUS_NOCREATION = SNMP_ERROR_STATUS.NOCREATION; pub const SNMP_ERRORSTATUS_INCONSISTENTVALUE = SNMP_ERROR_STATUS.INCONSISTENTVALUE; pub const SNMP_ERRORSTATUS_RESOURCEUNAVAILABLE = SNMP_ERROR_STATUS.RESOURCEUNAVAILABLE; pub const SNMP_ERRORSTATUS_COMMITFAILED = SNMP_ERROR_STATUS.COMMITFAILED; pub const SNMP_ERRORSTATUS_UNDOFAILED = SNMP_ERROR_STATUS.UNDOFAILED; pub const SNMP_ERRORSTATUS_AUTHORIZATIONERROR = SNMP_ERROR_STATUS.AUTHORIZATIONERROR; pub const SNMP_ERRORSTATUS_NOTWRITABLE = SNMP_ERROR_STATUS.NOTWRITABLE; pub const SNMP_ERRORSTATUS_INCONSISTENTNAME = SNMP_ERROR_STATUS.INCONSISTENTNAME; pub const SNMP_STATUS = enum(u32) { N = 1, FF = 0, }; pub const SNMPAPI_ON = SNMP_STATUS.N; pub const SNMPAPI_OFF = SNMP_STATUS.FF; pub const SNMP_OUTPUT_LOG_TYPE = enum(u32) { CONSOLE = 1, LOGFILE = 2, DEBUGGER = 8, }; pub const SNMP_OUTPUT_TO_CONSOLE = SNMP_OUTPUT_LOG_TYPE.CONSOLE; pub const SNMP_OUTPUT_TO_LOGFILE = SNMP_OUTPUT_LOG_TYPE.LOGFILE; pub const SNMP_OUTPUT_TO_DEBUGGER = SNMP_OUTPUT_LOG_TYPE.DEBUGGER; pub const SNMP_LOG = enum(u32) { SILENT = 0, FATAL = 1, ERROR = 2, WARNING = 3, TRACE = 4, VERBOSE = 5, }; pub const SNMP_LOG_SILENT = SNMP_LOG.SILENT; pub const SNMP_LOG_FATAL = SNMP_LOG.FATAL; pub const SNMP_LOG_ERROR = SNMP_LOG.ERROR; pub const SNMP_LOG_WARNING = SNMP_LOG.WARNING; pub const SNMP_LOG_TRACE = SNMP_LOG.TRACE; pub const SNMP_LOG_VERBOSE = SNMP_LOG.VERBOSE; pub const SNMP_ERROR = enum(u32) { NOERROR = 0, TOOBIG = 1, NOSUCHNAME = 2, BADVALUE = 3, READONLY = 4, GENERR = 5, NOACCESS = 6, WRONGTYPE = 7, WRONGLENGTH = 8, WRONGENCODING = 9, WRONGVALUE = 10, NOCREATION = 11, INCONSISTENTVALUE = 12, RESOURCEUNAVAILABLE = 13, COMMITFAILED = 14, UNDOFAILED = 15, AUTHORIZATIONERROR = 16, NOTWRITABLE = 17, INCONSISTENTNAME = 18, }; pub const SNMP_ERROR_NOERROR = SNMP_ERROR.NOERROR; pub const SNMP_ERROR_TOOBIG = SNMP_ERROR.TOOBIG; pub const SNMP_ERROR_NOSUCHNAME = SNMP_ERROR.NOSUCHNAME; pub const SNMP_ERROR_BADVALUE = SNMP_ERROR.BADVALUE; pub const SNMP_ERROR_READONLY = SNMP_ERROR.READONLY; pub const SNMP_ERROR_GENERR = SNMP_ERROR.GENERR; pub const SNMP_ERROR_NOACCESS = SNMP_ERROR.NOACCESS; pub const SNMP_ERROR_WRONGTYPE = SNMP_ERROR.WRONGTYPE; pub const SNMP_ERROR_WRONGLENGTH = SNMP_ERROR.WRONGLENGTH; pub const SNMP_ERROR_WRONGENCODING = SNMP_ERROR.WRONGENCODING; pub const SNMP_ERROR_WRONGVALUE = SNMP_ERROR.WRONGVALUE; pub const SNMP_ERROR_NOCREATION = SNMP_ERROR.NOCREATION; pub const SNMP_ERROR_INCONSISTENTVALUE = SNMP_ERROR.INCONSISTENTVALUE; pub const SNMP_ERROR_RESOURCEUNAVAILABLE = SNMP_ERROR.RESOURCEUNAVAILABLE; pub const SNMP_ERROR_COMMITFAILED = SNMP_ERROR.COMMITFAILED; pub const SNMP_ERROR_UNDOFAILED = SNMP_ERROR.UNDOFAILED; pub const SNMP_ERROR_AUTHORIZATIONERROR = SNMP_ERROR.AUTHORIZATIONERROR; pub const SNMP_ERROR_NOTWRITABLE = SNMP_ERROR.NOTWRITABLE; pub const SNMP_ERROR_INCONSISTENTNAME = SNMP_ERROR.INCONSISTENTNAME; pub const AsnAny = extern struct { asnType: u8, asnValue: extern union { // WARNING: unable to add field alignment because it's not implemented for unions number: i32, 
unsigned32: u32, counter64: ULARGE_INTEGER, string: AsnOctetString, bits: AsnOctetString, object: AsnObjectIdentifier, sequence: AsnOctetString, address: AsnOctetString, counter: u32, gauge: u32, ticks: u32, arbitrary: AsnOctetString, }, }; pub const SnmpVarBind = extern struct { name: AsnObjectIdentifier, value: AsnAny, }; pub const PFNSNMPEXTENSIONINIT = fn( dwUpTimeReference: u32, phSubagentTrapEvent: ?*?HANDLE, pFirstSupportedRegion: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFNSNMPEXTENSIONINITEX = fn( pNextSupportedRegion: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFNSNMPEXTENSIONMONITOR = fn( pAgentMgmtData: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFNSNMPEXTENSIONQUERY = fn( bPduType: u8, pVarBindList: ?*SnmpVarBindList, pErrorStatus: ?*i32, pErrorIndex: ?*i32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFNSNMPEXTENSIONQUERYEX = fn( nRequestType: u32, nTransactionId: u32, pVarBindList: ?*SnmpVarBindList, pContextInfo: ?*AsnOctetString, pErrorStatus: ?*i32, pErrorIndex: ?*i32, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFNSNMPEXTENSIONTRAP = fn( pEnterpriseOid: ?*AsnObjectIdentifier, pGenericTrapId: ?*i32, pSpecificTrapId: ?*i32, pTimeStamp: ?*u32, pVarBindList: ?*SnmpVarBindList, ) callconv(@import("std").os.windows.WINAPI) BOOL; pub const PFNSNMPEXTENSIONCLOSE = fn( ) callconv(@import("std").os.windows.WINAPI) void; pub const smiOCTETS = extern struct { len: u32, ptr: ?*u8, }; pub const smiOID = extern struct { len: u32, ptr: ?*u32, }; pub const smiCNTR64 = extern struct { hipart: u32, lopart: u32, }; pub const smiVALUE = extern struct { syntax: u32, value: extern union { sNumber: i32, uNumber: u32, hNumber: smiCNTR64, string: smiOCTETS, oid: smiOID, empty: u8, }, }; pub const smiVENDORINFO = extern struct { vendorName: [64]CHAR, vendorContact: [64]CHAR, vendorVersionId: [32]CHAR, vendorVersionDate: [32]CHAR, vendorEnterprise: u32, }; pub const SNMPAPI_CALLBACK = fn( hSession: isize, hWnd: ?HWND, wMsg: u32, wParam: WPARAM, lParam: LPARAM, lpClientData: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) u32; pub const PFNSNMPSTARTUPEX = fn( param0: ?*u32, param1: ?*u32, param2: ?*u32, param3: ?*u32, param4: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; pub const PFNSNMPCLEANUPEX = fn( ) callconv(@import("std").os.windows.WINAPI) u32; pub const AsnOctetString = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug stream: ?*u8, length: u32, dynamic: BOOL, }, .X86 => extern struct { stream: ?*u8, length: u32, dynamic: BOOL, }, }; pub const AsnObjectIdentifier = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug idLength: u32, ids: ?*u32, }, .X86 => extern struct { idLength: u32, ids: ?*u32, }, }; pub const SnmpVarBindList = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug list: ?*SnmpVarBind, len: u32, }, .X86 => extern struct { list: ?*SnmpVarBind, len: u32, }, }; //-------------------------------------------------------------------------------- // Section: Functions (84) //-------------------------------------------------------------------------------- // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn 
SnmpUtilOidCpy( pOidDst: ?*AsnObjectIdentifier, pOidSrc: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOidAppend( pOidDst: ?*AsnObjectIdentifier, pOidSrc: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOidNCmp( pOid1: ?*AsnObjectIdentifier, pOid2: ?*AsnObjectIdentifier, nSubIds: u32, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOidCmp( pOid1: ?*AsnObjectIdentifier, pOid2: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOidFree( pOid: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOctetsCmp( pOctets1: ?*AsnOctetString, pOctets2: ?*AsnOctetString, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOctetsNCmp( pOctets1: ?*AsnOctetString, pOctets2: ?*AsnOctetString, nChars: u32, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOctetsCpy( pOctetsDst: ?*AsnOctetString, pOctetsSrc: ?*AsnOctetString, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOctetsFree( pOctets: ?*AsnOctetString, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilAsnAnyCpy( pAnyDst: ?*AsnAny, pAnySrc: ?*AsnAny, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilAsnAnyFree( pAny: ?*AsnAny, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilVarBindCpy( pVbDst: ?*SnmpVarBind, pVbSrc: ?*SnmpVarBind, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilVarBindFree( pVb: ?*SnmpVarBind, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilVarBindListCpy( pVblDst: ?*SnmpVarBindList, pVblSrc: ?*SnmpVarBindList, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilVarBindListFree( pVbl: ?*SnmpVarBindList, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilMemFree( pMem: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilMemAlloc( nBytes: u32, ) callconv(@import("std").os.windows.WINAPI) ?*c_void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilMemReAlloc( pMem: ?*c_void, nBytes: u32, ) callconv(@import("std").os.windows.WINAPI) ?*c_void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilOidToA( Oid: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) ?PSTR; // TODO: 
this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilIdsToA( Ids: ?*u32, IdLength: u32, ) callconv(@import("std").os.windows.WINAPI) ?PSTR; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilPrintOid( Oid: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilPrintAsnAny( pAny: ?*AsnAny, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpSvcGetUptime( ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpSvcSetLogLevel( nLogLevel: SNMP_LOG, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpSvcSetLogType( nLogType: SNMP_OUTPUT_LOG_TYPE, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "snmpapi" fn SnmpUtilDbgPrint( nLogLevel: SNMP_LOG, szFormat: ?PSTR, ) callconv(@import("std").os.windows.WINAPI) void; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrOpen( lpAgentAddress: ?PSTR, lpAgentCommunity: ?PSTR, nTimeOut: i32, nRetries: i32, ) callconv(@import("std").os.windows.WINAPI) ?*c_void; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrCtl( session: ?*c_void, dwCtlCode: u32, lpvInBuffer: ?*c_void, cbInBuffer: u32, lpvOUTBuffer: ?*c_void, cbOUTBuffer: u32, lpcbBytesReturned: ?*u32, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrClose( session: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrRequest( session: ?*c_void, requestType: u8, variableBindings: ?*SnmpVarBindList, errorStatus: ?*SNMP_ERROR_STATUS, errorIndex: ?*i32, ) callconv(@import("std").os.windows.WINAPI) i32; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrStrToOid( string: ?PSTR, oid: ?*AsnObjectIdentifier, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrOidToStr( oid: ?*AsnObjectIdentifier, string: ?*?PSTR, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrTrapListen( phTrapAvailable: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrGetTrap( enterprise: ?*AsnObjectIdentifier, IPAddress: ?*AsnOctetString, genericTrap: ?*SNMP_GENERICTRAP, specificTrap: ?*i32, timeStamp: ?*u32, variableBindings: ?*SnmpVarBindList, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "mgmtapi" fn SnmpMgrGetTrapEx( enterprise: ?*AsnObjectIdentifier, agentAddress: ?*AsnOctetString, sourceAddress: ?*AsnOctetString, genericTrap: ?*SNMP_GENERICTRAP, specificTrap: ?*i32, community: ?*AsnOctetString, timeStamp: ?*u32, variableBindings: ?*SnmpVarBindList, ) callconv(@import("std").os.windows.WINAPI) BOOL; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetTranslateMode( nTranslateMode: ?*SNMP_API_TRANSLATE_MODE, ) 
callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetTranslateMode( nTranslateMode: SNMP_API_TRANSLATE_MODE, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetRetransmitMode( nRetransmitMode: ?*SNMP_STATUS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetRetransmitMode( nRetransmitMode: SNMP_STATUS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetTimeout( hEntity: isize, nPolicyTimeout: ?*u32, nActualTimeout: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetTimeout( hEntity: isize, nPolicyTimeout: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetRetry( hEntity: isize, nPolicyRetry: ?*u32, nActualRetry: ?*u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetRetry( hEntity: isize, nPolicyRetry: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetVendorInfo( vendorInfo: ?*smiVENDORINFO, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpStartup( nMajorVersion: ?*u32, nMinorVersion: ?*u32, nLevel: ?*u32, nTranslateMode: ?*SNMP_API_TRANSLATE_MODE, nRetransmitMode: ?*SNMP_STATUS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpCleanup( ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpOpen( hWnd: ?HWND, wMsg: u32, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpClose( session: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSendMsg( session: isize, srcEntity: isize, dstEntity: isize, context: isize, PDU: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpRecvMsg( session: isize, srcEntity: ?*isize, dstEntity: ?*isize, context: ?*isize, PDU: ?*isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpRegister( session: isize, srcEntity: isize, dstEntity: isize, context: isize, notification: ?*smiOID, state: SNMP_STATUS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpCreateSession( hWnd: ?HWND, wMsg: u32, fCallBack: ?SNMPAPI_CALLBACK, lpClientData: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpListen( hEntity: isize, lStatus: SNMP_STATUS, ) callconv(@import("std").os.windows.WINAPI) u32; pub extern "wsnmp32" fn SnmpListenEx( hEntity: isize, lStatus: u32, nUseEntityAddr: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: 
this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpCancelMsg( session: isize, reqId: i32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpStartupEx( nMajorVersion: ?*u32, nMinorVersion: ?*u32, nLevel: ?*u32, nTranslateMode: ?*SNMP_API_TRANSLATE_MODE, nRetransmitMode: ?*SNMP_STATUS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpCleanupEx( ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpStrToEntity( session: isize, string: ?[*:0]const u8, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpEntityToStr( entity: isize, size: u32, string: [*:0]u8, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpFreeEntity( entity: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpStrToContext( session: isize, string: ?*smiOCTETS, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpContextToStr( context: isize, string: ?*smiOCTETS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpFreeContext( context: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetPort( hEntity: isize, nPort: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpCreatePdu( session: isize, PDU_type: SNMP_PDU_TYPE, request_id: i32, error_status: i32, error_index: i32, varbindlist: isize, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetPduData( PDU: isize, PDU_type: ?*SNMP_PDU_TYPE, request_id: ?*i32, error_status: ?*SNMP_ERROR, error_index: ?*i32, varbindlist: ?*isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetPduData( PDU: isize, PDU_type: ?*const i32, request_id: ?*const i32, non_repeaters: ?*const i32, max_repetitions: ?*const i32, varbindlist: ?*const isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpDuplicatePdu( session: isize, PDU: isize, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpFreePdu( PDU: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpCreateVbl( session: isize, name: ?*smiOID, value: ?*smiVALUE, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpDuplicateVbl( session: isize, vbl: isize, ) callconv(@import("std").os.windows.WINAPI) isize; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpFreeVbl( vbl: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 
'windows5.0' pub extern "wsnmp32" fn SnmpCountVbl( vbl: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetVb( vbl: isize, index: u32, name: ?*smiOID, value: ?*smiVALUE, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpSetVb( vbl: isize, index: u32, name: ?*smiOID, value: ?*smiVALUE, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpDeleteVb( vbl: isize, index: u32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpGetLastError( session: isize, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpStrToOid( string: ?[*:0]const u8, dstOID: ?*smiOID, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpOidToStr( srcOID: ?*smiOID, size: u32, string: [*:0]u8, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpOidCopy( srcOID: ?*smiOID, dstOID: ?*smiOID, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpOidCompare( xOID: ?*smiOID, yOID: ?*smiOID, maxlen: u32, result: ?*i32, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpEncodeMsg( session: isize, srcEntity: isize, dstEntity: isize, context: isize, pdu: isize, msgBufDesc: ?*smiOCTETS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpDecodeMsg( session: isize, srcEntity: ?*isize, dstEntity: ?*isize, context: ?*isize, pdu: ?*isize, msgBufDesc: ?*smiOCTETS, ) callconv(@import("std").os.windows.WINAPI) u32; // TODO: this type is limited to platform 'windows5.0' pub extern "wsnmp32" fn SnmpFreeDescriptor( syntax: u32, descriptor: ?*smiOCTETS, ) callconv(@import("std").os.windows.WINAPI) u32; //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (8) //-------------------------------------------------------------------------------- const BOOL = @import("../foundation.zig").BOOL; const CHAR = @import("../system/system_services.zig").CHAR; const HANDLE = @import("../foundation.zig").HANDLE; const HWND = @import("../foundation.zig").HWND; const LPARAM = @import("../foundation.zig").LPARAM; const PSTR = @import("../foundation.zig").PSTR; const ULARGE_INTEGER = @import("../system/system_services.zig").ULARGE_INTEGER; const WPARAM = @import("../foundation.zig").WPARAM; test { // The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476 if (@hasDecl(@This(), "PFNSNMPEXTENSIONINIT")) { _ = PFNSNMPEXTENSIONINIT; } if (@hasDecl(@This(), 
"PFNSNMPEXTENSIONINITEX")) { _ = PFNSNMPEXTENSIONINITEX; } if (@hasDecl(@This(), "PFNSNMPEXTENSIONMONITOR")) { _ = PFNSNMPEXTENSIONMONITOR; } if (@hasDecl(@This(), "PFNSNMPEXTENSIONQUERY")) { _ = PFNSNMPEXTENSIONQUERY; } if (@hasDecl(@This(), "PFNSNMPEXTENSIONQUERYEX")) { _ = PFNSNMPEXTENSIONQUERYEX; } if (@hasDecl(@This(), "PFNSNMPEXTENSIONTRAP")) { _ = PFNSNMPEXTENSIONTRAP; } if (@hasDecl(@This(), "PFNSNMPEXTENSIONCLOSE")) { _ = PFNSNMPEXTENSIONCLOSE; } if (@hasDecl(@This(), "SNMPAPI_CALLBACK")) { _ = SNMPAPI_CALLBACK; } if (@hasDecl(@This(), "PFNSNMPSTARTUPEX")) { _ = PFNSNMPSTARTUPEX; } if (@hasDecl(@This(), "PFNSNMPCLEANUPEX")) { _ = PFNSNMPCLEANUPEX; } @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
deps/zigwin32/win32/network_management/snmp.zig
const std = @import("std"); const prot = @import("protocols.zig"); const renderer = @import("renderer.zig"); const compositor = @import("compositor.zig"); const Client = @import("client.zig").Client; const Rectangle = @import("rectangle.zig").Rectangle; const Region = @import("region.zig").Region; const Positioner = @import("positioner.zig").Positioner; const LinearFifo = std.fifo.LinearFifo; const LinearFifoBufferType = std.fifo.LinearFifoBufferType; const View = @import("view.zig").View; const MAX_WINDOWS = 512; pub var WINDOWS: [MAX_WINDOWS]Window = undefined; pub const XdgConfigurations = LinearFifo(XdgConfiguration, LinearFifoBufferType{ .Static = 32 }); pub const Window = struct { index: usize = 0, in_use: bool = false, client: *Client, mapped: bool = false, view: ?*View, parent: ?*Window, popup: ?*Window, toplevel: Link, ready_for_callback: bool = false, texture: ?u32, width: i32, height: i32, wl_surface_id: u32, wl_buffer_id: ?u32, xdg_surface_id: ?u32, xdg_toplevel_id: ?u32, xdg_popup_id: ?u32, wl_subsurface_id: ?u32, positioner: ?*Positioner, window_geometry: ?Rectangle, synchronized: bool = false, state: [2]BufferedState = undefined, stateIndex: u1 = 0, // When not null, Rectangle defines the OLD unmaximised geometry maximized: ?Rectangle, xdg_configurations: XdgConfigurations, title: [128]u8 = undefined, app_id: [256]u8 = undefined, callbacks: LinearFifo(u32, LinearFifoBufferType{ .Static = 32 }), const Self = @This(); // flip double-buffered state pub fn flip(self: *Self) void { // std.debug.warn("flipping: {}\n", .{self.index}); self.stateIndex +%= 1; if (self.current().input_region != self.pending().input_region) { if (self.pending().input_region) |input_region| { try input_region.deinit(); } } if (self.current().opaque_region != self.pending().opaque_region) { if (self.pending().opaque_region) |opaque_region| { try opaque_region.deinit(); } } self.pending().* = self.current().*; // flip synchronized subwindows above self var forward_it = self.subwindowIterator(); while (forward_it.nextPending()) |subwindow| { if (subwindow != self and subwindow.synchronized) { subwindow.flip(); } } // flip synchronized subwindows below self var backward_it = self.subwindowIterator(); while (backward_it.prevPending()) |subwindow| { if (subwindow != self and subwindow.synchronized) { subwindow.flip(); } } } pub fn current(self: *Self) *BufferedState { return &self.state[self.stateIndex]; } pub fn pending(self: *Self) *BufferedState { return &self.state[self.stateIndex +% 1]; } pub fn render(self: *Self, x: i32, y: i32) anyerror!void { var it = self.forwardIterator(); while (it.next()) |window| { window.ready_for_callback = true; if (window == self) { const texture = window.texture orelse continue; try renderer.scale(1.0, 1.0); try renderer.translate(@intToFloat(f32, x + window.absoluteX()), @intToFloat(f32, y + window.absoluteY())); try renderer.setUniformMatrix(renderer.PROGRAM, "origin", renderer.identity); try renderer.setUniformMatrix(renderer.PROGRAM, "originInverse", renderer.identity); try renderer.setUniformFloat(renderer.PROGRAM, "opacity", 1.0); renderer.setGeometry(window.width, window.height); try renderer.renderSurface(renderer.PROGRAM, texture); } else { try window.render(x, y); } } if (self.popup) |popup| { try popup.render(x, y); } } pub fn absoluteX(self: *Self) i32 { var parent_x = (if (self.parent) |p| p.absoluteX() else 0); var self_x = self.current().x; var positioner_x: i32 = 0; if (self.positioner) |positioner| { var rect = positioner.anchor_rect; positioner_x = switch 
(positioner.anchor) { .none => rect.x + @divTrunc(rect.width, 2), .top => rect.x + @divTrunc(rect.width, 2), .bottom => rect.x + @divTrunc(rect.width, 2), .left => rect.x, .right => rect.x + rect.width, .top_left => rect.x, .bottom_left => rect.x, .top_right => rect.x + rect.width, .bottom_right => rect.x + rect.width, } + (if (self.parent) |parent| (if (parent.window_geometry) |wg| wg.x else 0) else 0); } var wg_x = (if (self.window_geometry) |wg| wg.x else 0); return parent_x + self_x + positioner_x - wg_x; } pub fn absoluteY(self: *Self) i32 { var parent_y = (if (self.parent) |p| p.absoluteY() else 0); var self_y = self.current().y; var positioner_y: i32 = 0; if (self.positioner) |positioner| { var rect = positioner.anchor_rect; positioner_y = switch (positioner.anchor) { .none => rect.y + @divTrunc(rect.height, 2), .top => rect.y, .bottom => rect.y + rect.height, .left => rect.y + @divTrunc(rect.height, 2), .right => rect.y + @divTrunc(rect.height, 2), .top_left => rect.y, .bottom_left => rect.y + rect.height, .top_right => rect.y, .bottom_right => rect.y + rect.height, } + (if (self.parent) |parent| (if (parent.window_geometry) |wg| wg.y else 0) else 0); } var wg_y = (if (self.window_geometry) |wg| wg.y else 0); return parent_y + self_y + positioner_y - wg_y; } pub fn frameCallback(self: *Self) !void { if (self.ready_for_callback == false) { return; } while (self.callbacks.readItem()) |wl_callback_id| { const wl_callback = self.client.context.get(wl_callback_id) orelse return error.CallbackIdNotFound; try prot.wl_callback_send_done(wl_callback, 23); try self.client.context.unregister(wl_callback); try prot.wl_display_send_delete_id(self.client.context.client.wl_display, wl_callback_id); } self.ready_for_callback = false; } pub fn root(self: *Window) *Window { if (self.parent) |parent| { return parent.root(); } else { return self; } } pub fn toplevelWindow(self: *Window) *Window { if (self.xdg_toplevel_id != null) { return self; } if (self.parent) |parent| { return parent.root(); } else { return self; } } pub fn toplevelUnderPointer(self: *Self, pointer_x: f64, pointer_y: f64) ?*Window { var it = self.backwardIterator(); while (it.prev()) |window| { if (self == window) { if (isPointerInside(self, pointer_x, pointer_y)) { return self; } } else { if (window.windowUnderPointer(pointer_x, pointer_y)) |child| { return self; } } } return null; } pub fn windowUnderPointer(self: *Self, pointer_x: f64, pointer_y: f64) ?*Window { if (self.popup) |popup| { var maybe_popup_window = popup.windowUnderPointer(pointer_x, pointer_y); if (maybe_popup_window) |popup_window| { return popup_window; } } var it = self.backwardIterator(); while (it.prev()) |window| { if (self == window) { if (isPointerInside(self, pointer_x, pointer_y)) { return window; } } else { if (window.windowUnderPointer(pointer_x, pointer_y)) |child| { return child; } } } return null; } fn isPointerInside(self: *Self, x: f64, y: f64) bool { if (self.current().input_region) |input_region| { return input_region.pointInside(x - @intToFloat(f64, self.absoluteX()), y - @intToFloat(f64, self.absoluteY())); } if (x >= @intToFloat(f64, self.absoluteX()) and x <= @intToFloat(f64, (self.absoluteX() + self.width))) { if (y >= @intToFloat(f64, self.absoluteY()) and y <= @intToFloat(f64, (self.absoluteY() + self.height))) { return true; } } return false; } pub fn mouseClick(self: *Self, button: u32, action: u32) !void { const client = self.client; const wl_pointer_id = client.wl_pointer_id orelse return; const wl_pointer = 
client.context.get(wl_pointer_id) orelse return; const now = @truncate(u32, @intCast(u64, std.time.milliTimestamp())); try prot.wl_pointer_send_button(wl_pointer, client.nextSerial(), now, button, action); } pub const SubwindowIterator = struct { current: ?*Window, parent: *Window, pub fn next(self: *SubwindowIterator) ?*Window { const window = self.current orelse return null; if (self.current == self.parent) { self.current = window.current().children.next; } else { self.current = window.current().siblings.next; } return window; } pub fn prev(self: *SubwindowIterator) ?*Window { const window = self.current orelse return null; if (self.current == self.parent) { self.current = window.current().children.prev; } else { self.current = window.current().siblings.prev; } return window; } pub fn nextPending(self: *SubwindowIterator) ?*Window { const window = self.current orelse return null; if (self.current == self.parent) { self.current = window.pending().children.next; } else { self.current = window.pending().siblings.next; } return window; } pub fn prevPending(self: *SubwindowIterator) ?*Window { const window = self.current orelse return null; if (self.current == self.parent) { self.current = window.pending().children.prev; } else { self.current = window.pending().siblings.prev; } return window; } }; pub fn subwindowIterator(self: *Self) SubwindowIterator { return SubwindowIterator{ .current = self, .parent = self, }; } pub fn forwardIterator(self: *Self) SubwindowIterator { var backward_it = self.subwindowIterator(); var rear: ?*Window = null; while (backward_it.prev()) |p| { rear = p; } return SubwindowIterator{ .current = rear, .parent = self, }; } pub fn backwardIterator(self: *Self) SubwindowIterator { var forward_it = self.subwindowIterator(); var front: ?*Window = null; while (forward_it.next()) |p| { front = p; } return SubwindowIterator{ .current = front, .parent = self, }; } // detach window from parent / siblings. Note this detaches the pending state only pub fn detach(self: *Self) void { var maybe_prev = self.pending().siblings.prev; var maybe_next = self.pending().siblings.next; if (maybe_prev) |prev| { if (prev == self.parent) { prev.pending().children.next = maybe_next; } else { prev.pending().siblings.next = maybe_next; } } if (maybe_next) |next| { if (next == self.parent) { next.pending().children.prev = maybe_prev; } else { next.pending().siblings.prev = maybe_prev; } } self.pending().siblings.prev = null; self.pending().siblings.next = null; } pub fn insertAbove(self: *Self, reference: *Self) void { if (reference == self.parent) { // If we're inserting above our parent we need to set our // sibling pointers but the parent's children pointers // Save the current next child of parent var next = reference.pending().children.next; // should this be current() // Set the next child to be our window reference.pending().children.next = self; // If next is not null set its previous to be our window if (next) |n| { n.pending().siblings.prev = self; } self.pending().siblings.next = next; self.pending().siblings.prev = reference; } else { // If we're inserting above a sibling we need to set our // sibling pointers and the sibling's sibling pointers var next = reference.pending().siblings.next; // should this be current()? reference.pending().siblings.next = self; // if next is non-null we have two options. Next is either our // parent or another sibling. Choose .children or .siblings appropriately. 
if (next) |n| { if (n == self.parent) { n.pending().children.prev = self; } else { n.pending().siblings.prev = self; } } self.pending().siblings.next = next; self.pending().siblings.prev = reference; } } pub fn insertBelow(self: *Self, reference: *Self) void { if (reference == self.parent) { var prev = reference.pending().children.prev; reference.pending().children.prev = self; if (prev) |p| { p.pending().siblings.next = self; } self.pending().siblings.next = reference; self.pending().siblings.prev = prev; } else { var prev = reference.pending().siblings.prev; reference.pending().siblings.prev = self; if (prev) |p| { if (p == self.parent) { p.pending().children.next = self; } else { p.pending().siblings.next = self; } } self.pending().siblings.next = reference; self.pending().siblings.prev = prev; } } pub fn placeAbove(self: *Self, reference: *Self) void { self.detach(); self.insertAbove(reference); } pub fn placeBelow(self: *Self, reference: *Self) void { self.detach(); self.insertBelow(reference); } pub fn activate(self: *Self) !void { var client = self.client; config: { const xdg_surface_id = self.xdg_surface_id orelse break :config; const xdg_surface = client.context.get(xdg_surface_id) orelse break :config; const xdg_toplevel_id = self.xdg_toplevel_id orelse break :config; const xdg_toplevel = client.context.get(xdg_toplevel_id) orelse break :config; var state: [1]u32 = [_]u32{@enumToInt(prot.xdg_toplevel_state.activated)}; if (self.window_geometry) |window_geometry| { try prot.xdg_toplevel_send_configure(xdg_toplevel, window_geometry.width, window_geometry.height, &state); } else { try prot.xdg_toplevel_send_configure(xdg_toplevel, self.width, self.height, &state); } try prot.xdg_surface_send_configure(xdg_surface, client.nextSerial()); } keyboard: { const wl_keyboard_id = client.wl_keyboard_id orelse break :keyboard; const wl_keyboard = client.context.get(wl_keyboard_id) orelse break :keyboard; try prot.wl_keyboard_send_enter(wl_keyboard, client.nextSerial(), self.wl_surface_id, &[_]u32{}); } } pub fn deactivate(self: *Self) !void { var client = self.client; config: { const xdg_surface_id = self.xdg_surface_id orelse break :config; const xdg_surface = client.context.get(xdg_surface_id) orelse break :config; const xdg_toplevel_id = self.xdg_toplevel_id orelse break :config; const xdg_toplevel = client.context.get(xdg_toplevel_id) orelse break :config; if (self.window_geometry) |window_geometry| { try prot.xdg_toplevel_send_configure(xdg_toplevel, window_geometry.width, window_geometry.height, &[_]u32{}); } else { try prot.xdg_toplevel_send_configure(xdg_toplevel, self.width, self.height, &[_]u32{}); } try prot.xdg_surface_send_configure(xdg_surface, client.nextSerial()); } keyboard: { const wl_keyboard_id = client.wl_keyboard_id orelse break :keyboard; const wl_keyboard = client.context.get(wl_keyboard_id) orelse break :keyboard; try prot.wl_keyboard_send_leave(wl_keyboard, client.nextSerial(), self.wl_surface_id); } } pub fn pointerEnter(self: *Self, pointer_x: f64, pointer_y: f64) !void { const client = self.client; const wl_pointer_id = client.wl_pointer_id orelse return; const wl_pointer = client.context.get(wl_pointer_id) orelse return; try prot.wl_pointer_send_enter(wl_pointer, client.nextSerial(), self.wl_surface_id, @floatCast(f32, pointer_x - @intToFloat(f64, self.current().x)), @floatCast(f32, pointer_y - @intToFloat(f64, self.current().y))); } pub fn pointerMotion(self: *Self, pointer_x: f64, pointer_y: f64) !void { const client = self.client; const wl_pointer_id = 
client.wl_pointer_id orelse return; const wl_pointer = client.context.get(wl_pointer_id) orelse return; try prot.wl_pointer_send_motion( wl_pointer, @truncate(u32, @intCast(u64, std.time.milliTimestamp())), @floatCast(f32, pointer_x - @intToFloat(f64, self.absoluteX())), @floatCast(f32, pointer_y - @intToFloat(f64, self.absoluteY())), ); } pub fn pointerLeave(self: *Self) !void { const client = self.client; const wl_pointer_id = client.wl_pointer_id orelse return; const wl_pointer = client.context.get(wl_pointer_id) orelse return; try prot.wl_pointer_send_leave( wl_pointer, client.nextSerial(), self.wl_surface_id, ); } pub fn mouseAxis(self: *Self, time: u32, axis: u32, value: f64) !void { const client = self.client; const wl_pointer_id = client.wl_pointer_id orelse return; const wl_pointer = client.context.get(wl_pointer_id) orelse return; const now = @truncate(u32, @intCast(u64, std.time.milliTimestamp())); try prot.wl_pointer_send_axis(wl_pointer, time, axis, @floatCast(f32, value)); } pub fn keyboardKey(self: *Self, time: u32, button: u32, action: u32) !void { const client = self.client; const wl_keyboard_id = client.wl_keyboard_id orelse return; const wl_keyboard = client.context.get(wl_keyboard_id) orelse return; try prot.wl_keyboard_send_key( wl_keyboard, client.nextSerial(), time, button, action, ); try prot.wl_keyboard_send_modifiers( wl_keyboard, client.nextSerial(), compositor.COMPOSITOR.mods_depressed, compositor.COMPOSITOR.mods_latched, compositor.COMPOSITOR.mods_locked, compositor.COMPOSITOR.mods_group, ); } pub fn deinit(self: *Self) !void { std.debug.warn("release window {}\n", .{self.index}); self.in_use = false; // Before doing anything else, such as deiniting the parent // detach this surface from its siblings self.detach(); // maybe we also need to detach current, i.e. self.detachCurrent()? 
if (self.xdg_popup_id != null) { if (self.parent) |parent| { parent.popup = null; } } self.parent = null; self.popup = null; self.wl_buffer_id = null; self.xdg_surface_id = null; self.xdg_toplevel_id = null; self.xdg_popup_id = null; self.wl_subsurface_id = null; if (self.positioner) |positioner| { try positioner.deinit(); } self.positioner = null; self.window_geometry = null; self.ready_for_callback = false; self.synchronized = false; if (self.view) |view| { view.remove(self); if (view.active_window == self) { view.active_window = null; } if (view.pointer_window == self) { view.pointer_window = null; } } self.view = null; self.mapped = false; self.state[0].deinit(); self.state[1].deinit(); if (self.texture) |texture| { self.texture = null; // Note that while this can fail, we're doing // the bits that can fail after deinitialising // enough so that this window could be reused try renderer.releaseTexture(texture); } } }; pub fn newWindow(client: *Client, wl_surface_id: u32) !*Window { var i: usize = 0; while (i < MAX_WINDOWS) { var window: *Window = &WINDOWS[i]; if (window.in_use == false) { window.index = i; window.in_use = true; window.client = client; window.wl_surface_id = wl_surface_id; window.wl_buffer_id = null; window.xdg_surface_id = null; window.xdg_toplevel_id = null; window.callbacks = LinearFifo(u32, LinearFifoBufferType{ .Static = 32 }).init(); window.texture = null; window.width = 0; window.height = 0; window.state[0].deinit(); window.state[1].deinit(); return window; } else { i = i + 1; continue; } } return error.WindowsExhausted; } pub fn debug(window: ?*Window) void { if (window) |self| { var next: ?usize = null; var prev: ?usize = null; if (self.toplevel.next) |toplevel_next| { next = toplevel_next.index; } if (self.toplevel.prev) |toplevel_prev| { prev = toplevel_prev.index; } std.debug.warn("debug: {} <-- window[{}, {}] --> {}\n", .{ prev, self.index, self.wl_surface_id, next }); } else { std.debug.warn("debug: null\n", .{}); } } pub fn debug_sibling(window: ?*Window) void { if (window) |self| { var next: ?usize = null; var prev: ?usize = null; if (self.current().siblings.next) |sibling_next| { next = sibling_next.index; } if (self.current().siblings.prev) |sibling_prev| { prev = sibling_prev.index; } var next_child: ?usize = null; var prev_child: ?usize = null; if (self.current().children.next) |children_next| { next_child = children_next.index; } if (self.current().children.prev) |children_prev| { prev_child = children_prev.index; } std.debug.warn("debug sibling: {} <-- window[{}, @{}] --> {}\n", .{ prev, self.index, self.wl_surface_id, next }); std.debug.warn("debug children: {} <-- window[{}, @{}] --> {}\n", .{ prev_child, self.index, self.wl_surface_id, next_child }); } else { std.debug.warn("debug_sibling: null\n", .{}); } } pub fn debug_sibling_pending(window: ?*Window) void { if (window) |self| { var next: ?usize = null; var prev: ?usize = null; if (self.pending().siblings.next) |sibling_next| { next = sibling_next.index; } if (self.pending().siblings.prev) |sibling_prev| { prev = sibling_prev.index; } var next_child: ?usize = null; var prev_child: ?usize = null; if (self.pending().children.next) |children_next| { next_child = children_next.index; } if (self.pending().children.prev) |children_prev| { prev_child = children_prev.index; } std.debug.warn("debug sibling (pending): {} <-- window[{}, @{}] --> {}\n", .{ prev, self.index, self.wl_surface_id, next }); std.debug.warn("debug children (pending): {} <-- window[{}, @{}] --> {}\n", .{ prev_child, self.index, 
self.wl_surface_id, next_child }); } else { std.debug.warn("debug_sibling (pending): null\n", .{}); } } pub const XdgOperation = enum { Maximize, Unmaximize, }; pub const XdgConfiguration = struct { serial: u32, operation: XdgOperation, }; const BufferedState = struct { sync: bool = false, siblings: Link, x: i32 = 0, y: i32 = 0, scale: i32 = 1, input_region: ?*Region, opaque_region: ?*Region, min_width: ?i32, min_height: ?i32, max_width: ?i32, max_height: ?i32, children: Link, const Self = @This(); fn deinit(self: *Self) void { self.sync = false; self.siblings.prev = null; self.siblings.next = null; self.x = 0; self.y = 0; self.scale = 1; self.input_region = null; self.opaque_region = null; self.min_width = null; self.min_height = null; self.max_width = null; self.max_height = null; self.children.prev = null; self.children.next = null; } }; pub fn releaseWindows(client: *Client) !void { var i: usize = 0; while (i < MAX_WINDOWS) { var window: *Window = &WINDOWS[i]; if (window.in_use and window.client == client) { try window.deinit(); } i = i + 1; } } pub const Link = struct { prev: ?*Window, next: ?*Window, mark: bool, pub fn unanchored(self: Link) bool { return (self.prev == null) and (self.next == null); } pub fn deinit(self: *Link) void { if (self.next) |next| { next.toplevel.prev = self.prev; } if (self.prev) |prev| { prev.toplevel.next = self.next; } self.prev = null; self.next = null; } }; pub const Cursor = struct { hotspot_x: i32, hotspot_y: i32, }; test "Window + View" { var c: Client = undefined; var v: View = View{ .top = null, .pointer_window = null, .active_window = null, .focus = .Click, }; var back = v.back(); std.debug.assert(back == null); var w1 = try newWindow(&c, 1); v.push(w1); std.debug.assert(v.top == w1); back = v.back(); std.debug.assert(back == w1); std.debug.assert(w1.toplevel.prev == null); std.debug.assert(w1.toplevel.next == null); var w2 = try newWindow(&c, 2); v.push(w2); std.debug.assert(v.top == w2); back = v.back(); std.debug.assert(back == w1); std.debug.assert(w1.toplevel.prev == null); std.debug.assert(w1.toplevel.next == w2); std.debug.assert(w2.toplevel.prev == w1); std.debug.assert(w2.toplevel.next == null); var w3 = try newWindow(&c, 3); v.push(w3); std.debug.assert(v.top == w3); back = v.back(); std.debug.assert(back == w1); std.debug.assert(w1.toplevel.prev == null); std.debug.assert(w1.toplevel.next == w2); std.debug.assert(w2.toplevel.prev == w1); std.debug.assert(w2.toplevel.next == w3); std.debug.assert(w3.toplevel.prev == w2); std.debug.assert(w3.toplevel.next == null); // Remove middle window v.remove(w2); std.debug.assert(v.top == w3); back = v.back(); std.debug.assert(back == w1); std.debug.assert(w1.toplevel.prev == null); std.debug.assert(w1.toplevel.next == w3); std.debug.assert(w3.toplevel.prev == w1); std.debug.assert(w3.toplevel.next == null); v.remove(w3); std.debug.assert(v.top == w1); back = v.back(); std.debug.assert(back == w1); std.debug.assert(w1.toplevel.prev == null); std.debug.assert(w1.toplevel.next == null); v.remove(w1); back = v.back(); std.debug.assert(back == null); }
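// A minimal sketch, in the same style as the test above, showing that restacking
// only touches the pending() sibling/children links: insertAbove and detach
// operate on pending state, leaving current() untouched until flip() is called.
// Fields that newWindow does not reset (stateIndex, parent) are set explicitly
// here because WINDOWS is declared undefined; the surface ids are illustrative.
test "sketch: insertAbove and detach modify pending links only" {
    var c: Client = undefined;
    var parent = try newWindow(&c, 100);
    parent.stateIndex = 0;
    var child = try newWindow(&c, 101);
    child.stateIndex = 0;
    child.parent = parent;

    // Link the child above its parent in the pending state.
    child.insertAbove(parent);
    std.debug.assert(parent.pending().children.next == child);
    std.debug.assert(child.pending().siblings.prev == parent);
    std.debug.assert(parent.current().children.next == null);

    // Walking the pending list from the parent yields parent, then child.
    var it = parent.subwindowIterator();
    std.debug.assert(it.nextPending() == parent);
    std.debug.assert(it.nextPending() == child);
    std.debug.assert(it.nextPending() == null);

    // Detaching unlinks the child from the pending list again.
    child.detach();
    std.debug.assert(parent.pending().children.next == null);
}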
src/window.zig
const std = @import("std"); const stdx = @import("stdx"); const build_options = @import("build_options"); const v8 = @import("v8"); const t = stdx.testing; const uv = @import("uv"); const runtime = @import("../runtime/runtime.zig"); const RuntimeContext = runtime.RuntimeContext; const main = @import("../runtime/main.zig"); const env_ns = @import("../runtime/env.zig"); const Environment = env_ns.Environment; const WriterIface = env_ns.WriterIface; const log = stdx.log.scoped(.behavior_test); const adapter = @import("../runtime/adapter.zig"); const FuncDataUserPtr = adapter.FuncDataUserPtr; // For tests that need to verify what the runtime is doing. // Not completely E2E tests (eg. writing to stderr is intercepted) but close enough. // For js behavior tests, see test/js. test "behavior: JS syntax error prints stack trace to stderr" { { const res = runScript( \\class { ); defer res.deinit(); try t.eq(res.success, false); try t.eqStr(res.stderr, \\class { \\ ^ \\Uncaught SyntaxError: Unexpected token '{' \\ at /test.js:1:6 \\ ); } { // Case where v8 returns the same message start/end column indicator. const res = runScript( \\class Foo { \\ x: 0 ); defer res.deinit(); try t.eq(res.success, false); try t.eqStr(res.stderr, \\ x: 0 \\ ^ \\Uncaught SyntaxError: Unexpected identifier \\ at /test.js:2:4 \\ ); } } test "behavior: JS main script runtime error prints stack trace to stderr" { { const res = runScript( \\foo ); defer res.deinit(); try t.eq(res.success, false); try t.eqStr(res.stderr, \\ReferenceError: foo is not defined \\ at /test.js:1:1 \\ ); } { // Async stack trace chain that fails in native async function. const res = runScript( \\async function foo2() { \\ await cs.files.getPathInfoAsync('does_not_exist') \\} \\async function foo1() { \\ await foo2() \\} \\await foo1() ); defer res.deinit(); try t.eq(res.success, true); var first_frame: []const u8 = undefined; defer t.alloc.free(first_frame); const stderr_rest = extractLine(res.stderr, 1, &first_frame); defer t.alloc.free(stderr_rest); try t.expect(std.mem.startsWith(u8, first_frame, " at cs.files.getPathInfoAsync gen_api.js")); try t.eqStr(stderr_rest, \\ApiError: FileNotFound \\ at async foo2 /test.js:2:5 \\ at async foo1 /test.js:5:5 \\ at async /test.js:7:1 \\ ); } } test "behavior: puts, print, dump prints to stdout" { const res = runScript( \\puts('foo') \\puts({ a: 123 }) \\print('foo\n') \\print({ a: 123 }, '\n') \\dump('foo') \\dump({ a: 123 }) \\dump(function foo() {}) \\dump(() => {}) ); defer res.deinit(); try t.eq(res.success, true); // puts should print the value as a string. // print should print the value as a string. // dump should print the value as a descriptive string. try t.eqStr(res.stdout, \\foo \\[object Object] \\foo \\[object Object] \\"foo" \\{ a: 123 } \\(Function: foo) \\(Function) \\ ); } test "behavior: CLI help, version, command usages." { { // "cosmic" prints out main usage. const res = runCmd(&.{"cosmic"}, .{}); defer res.deinit(); try t.eq(res.success, true); try t.expect(std.mem.startsWith(u8, res.stdout, "Usage: cosmic [command] [options]")); } { // "cosmic help" prints out main usage. const res = runCmd(&.{"cosmic", "help"}, .{}); defer res.deinit(); try t.eq(res.success, true); try t.expect(std.mem.startsWith(u8, res.stdout, "Usage: cosmic [command] [options]")); } { // "cosmic version" prints out the version and v8 version. 
const res = runCmd(&.{"cosmic", "version"}, .{}); defer res.deinit(); try t.eq(res.success, true); const exp_version = try std.fmt.allocPrint(t.alloc, \\cosmic {s} \\v8 {s} \\ , .{ build_options.VersionName, v8.getVersion() } ); defer t.alloc.free(exp_version); try t.eqStr(res.stdout, exp_version); } { // "cosmic run -h" prints out usage. const res = runCmd(&.{"cosmic", "run", "-h"}, .{}); defer res.deinit(); try t.eq(res.success, true); try t.eqStr(res.stdout, \\Usage: cosmic run [src-path] \\ cosmic [src-path] \\ \\Flags: \\ --test-api Include the cs.test api. \\ \\Run a js file. \\ ); } { // "cosmic run --help" prints out usage. const res = runCmd(&.{"cosmic", "run", "--help"}, .{}); defer res.deinit(); try t.eq(res.success, true); try t.eqStr(res.stdout, \\Usage: cosmic run [src-path] \\ cosmic [src-path] \\ \\Flags: \\ --test-api Include the cs.test api. \\ \\Run a js file. \\ ); } { // "cosmic dev -h" prints out usage. const res = runCmd(&.{"cosmic", "dev", "-h"}, .{}); defer res.deinit(); try t.eq(res.success, true); try t.eqStr(res.stdout, \\Usage: cosmic dev [src-path] \\ \\Flags: \\ --test-api Include the cs.test api. \\ \\Run a js file in dev mode. \\Dev mode enables hot reloading of your scripts whenever they are modified. \\It also includes a HUD for viewing debug output and running commands. \\ ); } { // "cosmic test -h" prints out usage. const res = runCmd(&.{"cosmic", "test", "-h"}, .{}); defer res.deinit(); try t.eq(res.success, true); try t.eqStr(res.stdout, \\Usage: cosmic test [src-path] \\ \\Run a js file with the test runner. \\Test runner also includes an additional API module `cs.test` \\which is not available during normal execution with `cosmic run`. \\A short test report will be printed at the end. \\Any test failure will result in a non 0 exit code. \\ ); } { // "cosmic shell -h" prints out usage. const res = runCmd(&.{"cosmic", "shell", "-h"}, .{}); defer res.deinit(); try t.eq(res.success, true); try t.eqStr(res.stdout, \\Usage: cosmic shell \\ \\Starts the runtime with an interactive shell. \\TODO: Support window API in the shell. \\ ); } { // "cosmic http -h" prints out usage. const res = runCmd(&.{"cosmic", "http", "-h"}, .{}); defer res.deinit(); try t.eq(res.success, true); try t.eqStr(res.stdout, \\Usage: cosmic http [dir-path] [addr=127.0.0.1:8081] \\ \\Starts an HTTP server binding to the address [addr] and serve files from the public directory root at [dir-path]. \\[addr] contains a host and port separated by `:`. The host is optional and defaults to `127.0.0.1`. \\The port is optional and defaults to 8081. \\ ); } { // "cosmic https -h" prints out usage. const res = runCmd(&.{"cosmic", "https", "-h"}, .{}); defer res.deinit(); try t.eq(res.success, true); try t.eqStr(res.stdout, \\Usage: cosmic https [dir-path] [public-key-path] [private-key-path] [port=127.0.0.1:8081] \\ \\Starts an HTTPS server binding to the address [addr] and serve files from the public directory root at [dir-path]. \\Paths to public and private keys must be absolute or relative to the public root path. \\[addr] contains a host and port separated by `:`. The host is optional and defaults to `127.0.0.1`. \\The port is optional and defaults to 8081. \\ ); } } test "behavior: 'cosmic http' starts server with 'localhost' as host address." 
{ const cwd = try std.fs.path.resolve(t.alloc, &.{}); defer { std.os.chdir(cwd) catch unreachable; t.alloc.free(cwd); } const S = struct { fn onMainScriptDone(_: ?*anyopaque, rt: *RuntimeContext) !void { defer rt.requestShutdown(); const ids = rt.allocResourceIdsByTag(.CsHttpServer); defer rt.alloc.free(ids); try t.eq(ids.len, 1); const server = rt.getResourcePtr(.CsHttpServer, ids[0]).?; const addr = server.allocBindAddress(rt.alloc); defer addr.deinit(rt.alloc); try t.eqStr(addr.host, "127.0.0.1"); try t.eq(addr.port, 8081); } }; const res = runCmd(&.{"cosmic", "http", "./test/assets", "localhost:8081"}, .{ .on_main_script_done = S.onMainScriptDone, }); defer res.deinit(); try t.eq(res.success, true); try t.eqStr(res.stdout, \\HTTP server started. Binded to 127.0.0.1:8081. \\ ); } test "behavior: 'cosmic http' starts an HTTP server and handles request" { const cwd = try std.fs.path.resolve(t.alloc, &.{}); defer { std.os.chdir(cwd) catch unreachable; t.alloc.free(cwd); } const Context = struct { const Self = @This(); passed: bool = false, }; var ctx: Context = .{}; const S = struct { fn onMainScriptDone(ptr: ?*anyopaque, rt: *RuntimeContext) !void { const ctx_ = stdx.mem.ptrCastAlign(*Context, ptr); var res = rt.evalModuleScript( \\const res = await cs.http.getAsync('http://localhost:8081/index.html') \\cs.test.eq(res, `<html> \\<head> \\ <link rel="stylesheet" href="style.css"> \\</head> \\<body> \\ <img src="logo.png" /> \\ <p>Hello World!</p> \\</body> \\</html> \\`) ) catch unreachable; defer res.deinit(rt.alloc); rt.attachPromiseHandlers(res.eval.?.inner, ctx_, onEvalSuccess, onEvalFailure) catch unreachable; } fn onEvalSuccess(ctx_: *Context, rt: *RuntimeContext, _: v8.Value) void { ctx_.passed = true; rt.requestShutdown(); } // fn onEvalFailure(ctx_: FuncDataUserPtr(*Context), rt: *RuntimeContext, err: v8.Value) void { fn onEvalFailure(ctx_: *Context, rt: *RuntimeContext, err: v8.Value) void { const trace_str = runtime.allocExceptionJsStackTraceString(rt, err); defer rt.alloc.free(trace_str); rt.env.errorFmt("{s}", .{trace_str}); ctx_.passed = false; rt.requestShutdown(); } }; const res = runCmd(&.{"cosmic", "http", "./test/assets", ":8081"}, .{ .on_main_script_done = S.onMainScriptDone, .on_main_script_done_ctx = &ctx, }); defer res.deinit(); try t.eq(res.success, true); try t.eq(ctx.passed, true); try t.eqStr(res.stdout, \\HTTP server started. Binded to 127.0.0.1:8081. \\GET /index.html [200] \\ ); } test "behavior: 'cosmic https' starts an HTTPS server and handles request." 
{ const cwd = try std.fs.path.resolve(t.alloc, &.{}); defer { std.os.chdir(cwd) catch unreachable; t.alloc.free(cwd); } const Context = struct { const Self = @This(); passed: bool = false, }; var ctx: Context = .{}; const S = struct { fn onMainScriptDone(ptr: ?*anyopaque, rt: *RuntimeContext) !void { const ctx_ = stdx.mem.ptrCastAlign(*Context, ptr); var res = rt.evalModuleScript( \\const res = await cs.http.requestAsync('https://localhost:8081/index.html', { \\ certFile: './localhost.crt', \\}) \\cs.test.eq(res.body, `<html> \\<head> \\ <link rel="stylesheet" href="style.css"> \\</head> \\<body> \\ <img src="logo.png" /> \\ <p>Hello World!</p> \\</body> \\</html> \\`) ) catch unreachable; defer res.deinit(rt.alloc); rt.attachPromiseHandlers(res.eval.?.inner, ctx_, onEvalSuccess, onEvalFailure) catch unreachable; } fn onEvalSuccess(ctx_: *Context, rt: *RuntimeContext, _: v8.Value) void { ctx_.passed = true; rt.requestShutdown(); } // fn onEvalFailure(ctx_: FuncDataUserPtr(*Context), rt: *RuntimeContext, err: v8.Value) void { fn onEvalFailure(ctx_: *Context, rt: *RuntimeContext, err: v8.Value) void { const trace_str = runtime.allocExceptionJsStackTraceString(rt, err); defer rt.alloc.free(trace_str); rt.env.errorFmt("{s}", .{trace_str}); ctx_.passed = false; rt.requestShutdown(); } }; const res = runCmd(&.{"cosmic", "https", "./test/assets", "./localhost.crt", "./localhost.key", ":8081"}, .{ .on_main_script_done = S.onMainScriptDone, .on_main_script_done_ctx = &ctx, }); defer res.deinit(); try t.eq(res.success, true); try t.eq(ctx.passed, true); try t.eqStr(res.stdout, \\HTTPS server started. Binded to 127.0.0.1:8081. \\GET /index.html [200] \\ ); } const RunResult = struct { const Self = @This(); success: bool, stdout: []const u8, stderr: []const u8, fn deinit(self: Self) void { t.alloc.free(self.stdout); t.alloc.free(self.stderr); } }; fn runCmd(cmd: []const []const u8, env: Environment) RunResult { var stdout_capture = std.ArrayList(u8).init(t.alloc); var stdout_writer = stdout_capture.writer(); var stderr_capture = std.ArrayList(u8).init(t.alloc); var stderr_writer = stderr_capture.writer(); var success = true; const S = struct { fn exit(code: u8) void { _ = code; // Nop. } }; var env_ = Environment{ .main_script_override = env.main_script_override, .main_script_origin = "/test.js", .err_writer = WriterIface.init(&stderr_writer), .out_writer = WriterIface.init(&stdout_writer), .on_main_script_done = env.on_main_script_done, .on_main_script_done_ctx = env.on_main_script_done_ctx, .exit_fn = S.exit, .pump_rt_on_graceful_shutdown = true, }; defer env_.deinit(t.alloc); main.runMain(t.alloc, cmd, &env_) catch |err| { std.debug.print("run error: {}\n", .{err}); success = false; }; return RunResult{ .success = success, .stdout = stdout_capture.toOwnedSlice(), .stderr = stderr_capture.toOwnedSlice(), }; } fn runScript(source: []const u8) RunResult { return runCmd(&.{"cosmic", "test.js"}, .{ .main_script_override = source, }); } fn root() []const u8 { return (std.fs.path.dirname(@src().file) orelse unreachable); } fn extractLine(str: []const u8, idx: u32, out: *[]const u8) []const u8 { var iter = std.mem.split(u8, str, "\n"); var rest = std.ArrayList([]const u8).init(t.alloc); defer rest.deinit(); var i: u32 = 0; while (iter.next()) |line| { if (i == idx) { out.* = t.alloc.dupe(u8, line) catch unreachable; } else { rest.append(line) catch unreachable; } i += 1; } return std.mem.join(t.alloc, "\n", rest.items) catch unreachable; }
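// A small sanity-check sketch for the extractLine helper above: it should copy
// out the requested line and hand back the remaining lines joined together.
test "extractLine returns the requested line and the remaining lines" {
    var line: []const u8 = undefined;
    const rest = extractLine("first\nsecond\nthird", 1, &line);
    defer t.alloc.free(line);
    defer t.alloc.free(rest);
    try t.eqStr(line, "second");
    try t.eqStr(rest, "first\nthird");
}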
test/behavior_test.zig
const std = @import("std"); const api = @import("api.zig"); const cache = @import("cache.zig"); const Engine = @import("Engine.zig"); const Dependency = @import("Dependency.zig"); const Project = @import("Project.zig"); const utils = @import("utils.zig"); const local = @import("local.zig"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; pub const name = "github"; pub const Resolution = []const u8; pub const ResolutionEntry = struct { user: []const u8, repo: []const u8, ref: []const u8, commit: []const u8, root: []const u8, dep_idx: ?usize = null, pub fn format( entry: ResolutionEntry, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { _ = fmt; _ = options; try writer.print("github.com/{s}/{s}:{s}/{s} -> {}", .{ entry.user, entry.repo, entry.commit, entry.root, entry.dep_idx, }); } }; pub const FetchError = error{Todo} || @typeInfo(@typeInfo(@TypeOf(api.getHeadCommit)).Fn.return_type.?).ErrorUnion.error_set || @typeInfo(@typeInfo(@TypeOf(fetch)).Fn.return_type.?).ErrorUnion.error_set; const FetchQueue = Engine.MultiQueueImpl(Resolution, FetchError); const ResolutionTable = std.ArrayListUnmanaged(ResolutionEntry); pub fn deserializeLockfileEntry( allocator: *Allocator, it: *std.mem.TokenIterator(u8), resolutions: *ResolutionTable, ) !void { try resolutions.append(allocator, .{ .user = it.next() orelse return error.NoUser, .repo = it.next() orelse return error.NoRepo, .ref = it.next() orelse return error.NoRef, .root = it.next() orelse return error.NoRoot, .commit = it.next() orelse return error.NoCommit, }); } pub fn serializeResolutions( resolutions: []const ResolutionEntry, writer: anytype, ) !void { for (resolutions) |entry| { if (entry.dep_idx != null) try writer.print("github {s} {s} {s} {s} {s}\n", .{ entry.user, entry.repo, entry.ref, entry.root, entry.commit, }); } } fn findResolution(dep: Dependency.Source, resolutions: []const ResolutionEntry) ?usize { const root = dep.github.root orelse utils.default_root; return for (resolutions) |entry, j| { if (std.mem.eql(u8, dep.github.user, entry.user) and std.mem.eql(u8, dep.github.repo, entry.repo) and std.mem.eql(u8, dep.github.ref, entry.ref) and std.mem.eql(u8, root, entry.root)) { break j; } } else null; } fn findMatch(dep_table: []const Dependency.Source, dep_idx: usize, edges: []const Engine.Edge) ?usize { const dep = dep_table[dep_idx].github; const root = dep.root orelse utils.default_root; return for (edges) |edge| { const other = dep_table[edge.to].github; const other_root = other.root orelse utils.default_root; if (std.mem.eql(u8, dep.user, other.user) and std.mem.eql(u8, dep.repo, other.repo) and std.mem.eql(u8, dep.ref, other.ref) and std.mem.eql(u8, root, other_root)) { break edge.to; } } else null; } // a partial would be one that matches the user and repo fn findPartialMatch( allocator: *Allocator, dep_table: []const Dependency.Source, commit: []const u8, dep_idx: usize, edges: []const Engine.Edge, ) !?usize { const dep = dep_table[dep_idx].github; return for (edges) |edge| { const other = dep_table[edge.to].github; if (std.mem.eql(u8, dep.user, other.user) and std.mem.eql(u8, dep.repo, other.repo)) { const other_commit = try api.getHeadCommit(allocator, other.user, other.repo, other.ref); defer allocator.free(other_commit); if (std.mem.eql(u8, commit, other_commit)) { break edge.to; } } } else null; } fn fetch( arena: *std.heap.ArenaAllocator, dep: Dependency.Source, done: bool, commit: Resolution, deps: *std.ArrayListUnmanaged(Dependency), path: 
*?[]const u8, ) !void { const allocator = arena.child_allocator; const entry_name = try std.fmt.allocPrint(allocator, "{s}-{s}-github-{s}", .{ dep.github.repo, dep.github.user, commit, }); defer allocator.free(entry_name); var entry = try cache.getEntry(entry_name); defer entry.deinit(); if (!done and !try entry.isDone()) { var content_dir = try entry.contentDir(); defer content_dir.close(); try api.getGithubTarGz( allocator, dep.github.user, dep.github.repo, commit, content_dir, ); try entry.done(); } const base_path = try std.fs.path.join(arena.child_allocator, &.{ ".gyro", entry_name, "pkg", }); defer arena.child_allocator.free(base_path); const root = dep.github.root orelse utils.default_root; path.* = try utils.joinPathConvertSep(arena, &.{ base_path, root }); if (!done) { var base_dir = try std.fs.cwd().openDir(base_path, .{}); defer base_dir.close(); const project_file = try base_dir.createFile("gyro.zzz", .{ .read = true, .truncate = false, .exclusive = false, }); defer project_file.close(); const text = try project_file.reader().readAllAlloc(&arena.allocator, std.math.maxInt(usize)); const project = try Project.fromUnownedText(arena.child_allocator, ".", text); defer project.destroy(); try deps.appendSlice(arena.child_allocator, project.deps.items); } } pub fn dedupeResolveAndFetch( dep_table: []const Dependency.Source, resolutions: []const ResolutionEntry, fetch_queue: *FetchQueue, i: usize, ) FetchError!void { const arena = &fetch_queue.items(.arena)[i]; const dep_idx = fetch_queue.items(.edge)[i].to; var commit: []const u8 = undefined; if (findResolution(dep_table[dep_idx], resolutions)) |res_idx| { if (resolutions[res_idx].dep_idx) |idx| { fetch_queue.items(.result)[i] = .{ .replace_me = idx, }; return; } else if (findMatch(dep_table, dep_idx, fetch_queue.items(.edge)[0..i])) |idx| { fetch_queue.items(.result)[i] = .{ .replace_me = idx, }; return; } else { fetch_queue.items(.result)[i] = .{ .fill_resolution = res_idx, }; } } else if (findMatch(dep_table, dep_idx, fetch_queue.items(.edge)[0..i])) |idx| { fetch_queue.items(.result)[i] = .{ .replace_me = idx, }; return; } else { commit = try api.getHeadCommit( &arena.allocator, dep_table[dep_idx].github.user, dep_table[dep_idx].github.repo, dep_table[dep_idx].github.ref, ); if (try findPartialMatch(arena.child_allocator, dep_table, commit, dep_idx, fetch_queue.items(.edge)[0..i])) |idx| { fetch_queue.items(.result)[i] = .{ .copy_deps = idx, }; } else { fetch_queue.items(.result)[i] = .{ .new_entry = commit, }; } } var done = false; const resolution = switch (fetch_queue.items(.result)[i]) { .fill_resolution => |res_idx| resolutions[res_idx].commit, .new_entry => |c| c, .copy_deps => blk: { done = true; break :blk commit; }, else => unreachable, }; try fetch( arena, dep_table[dep_idx], done, resolution, &fetch_queue.items(.deps)[i], &fetch_queue.items(.path)[i], ); assert(fetch_queue.items(.path)[i] != null); } pub fn updateResolution( allocator: *Allocator, resolutions: *ResolutionTable, dep_table: []const Dependency.Source, fetch_queue: *FetchQueue, i: usize, ) !void { switch (fetch_queue.items(.result)[i]) { .fill_resolution => |res_idx| { const dep_idx = fetch_queue.items(.edge)[i].to; assert(resolutions.items[res_idx].dep_idx == null); resolutions.items[res_idx].dep_idx = dep_idx; }, .new_entry => |commit| { const dep_idx = fetch_queue.items(.edge)[i].to; const gh = &dep_table[dep_idx].github; const root = gh.root orelse utils.default_root; try resolutions.append(allocator, .{ .user = gh.user, .repo = gh.repo, .ref = gh.ref, 
.root = root, .commit = commit, .dep_idx = dep_idx, }); }, .replace_me => |dep_idx| fetch_queue.items(.edge)[i].to = dep_idx, .err => |err| return err, .copy_deps => |queue_idx| { const commit = resolutions.items[ findResolution( dep_table[fetch_queue.items(.edge)[queue_idx].to], resolutions.items, ).? ].commit; const dep_idx = fetch_queue.items(.edge)[i].to; const gh = &dep_table[dep_idx].github; const root = gh.root orelse utils.default_root; try resolutions.append(allocator, .{ .user = gh.user, .repo = gh.repo, .ref = gh.ref, .root = root, .commit = commit, .dep_idx = dep_idx, }); try fetch_queue.items(.deps)[i].appendSlice( allocator, fetch_queue.items(.deps)[queue_idx].items, ); }, } }
src/github.zig
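A minimal standalone sketch of the cache-entry naming used by fetch() above. The repo, user, and commit strings are placeholders, and std.heap.page_allocator stands in for the arena's child allocator; only the "{s}-{s}-github-{s}" shape comes from the file itself.

const std = @import("std");

pub fn main() !void {
    const allocator = std.heap.page_allocator;

    // Hypothetical repo/user/commit values; fetch() composes the same
    // "<repo>-<user>-github-<commit>" string for its cache entry directory.
    const entry_name = try std.fmt.allocPrint(allocator, "{s}-{s}-github-{s}", .{
        "repo", "user", "0123abcd",
    });
    defer allocator.free(entry_name);

    std.debug.print("cache entry: {s}\n", .{entry_name});
}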
const std = @import("std"); const c = @import("c.zig"); const vt100 = @import("vt100.zig"); const fmt = std.fmt; const fs = std.fs; const mem = std.mem; const os = std.os; var old_tc_attr: ?os.termios = null; pub fn clear(stream: var) !void { try stream.writeAll(vt100.erase.inDisplay(vt100.erase.all) ++ vt100.cursor.position("")); } pub fn init(stdin: fs.File) !void { old_tc_attr = try os.tcgetattr(stdin.handle); try enableRawMode(stdin); } pub fn deinit(stdin: fs.File) !void { if (old_tc_attr) |old| try os.tcsetattr(stdin.handle, os.TCSA.FLUSH, old); } pub const Size = struct { rows: usize, columns: usize, }; pub fn size(stdout: fs.File, stdin: fs.File) !Size { var ws: c.winsize = undefined; if (c.ioctl(stdout.handle, c.TIOCGWINSZ, &ws) == -1 or ws.ws_col == 0) { // If getting the terminal size with ioctl didn't work, then we move // the cursor to the bottom right of the screen and asks for the cursor // position. // TODO: We sould probably restore the cursor position when we are done. // This program doesn't require that this is done, but if someone // copy pastes this, then they probably want that behavior. try stdout.writeAll(vt100.cursor.forward("999") ++ vt100.cursor.down("999")); const pos = try cursorPosition(stdout, stdin); return Size{ .columns = pos.x, .rows = pos.y, }; } return Size{ .columns = ws.ws_col, .rows = ws.ws_row, }; } pub const Pos = struct { x: usize, y: usize, }; pub fn cursorPosition(stdout: fs.File, stdin: fs.File) !Pos { try stdout.writeAll(vt100.device.statusReport(vt100.device.request.active_position)); var buf: [1024]u8 = undefined; var len: usize = 0; while (len < buf.len) : (len += 1) { const l = try stdin.read(buf[len .. len + 1]); if (l != 1 or buf[len] == 'R') break; } const response = buf[0..len]; if (len < vt100.escape.len) return error.CursorPosition; if (!mem.eql(u8, vt100.escape, buf[0..vt100.escape.len])) return error.CursorPosition; var iter = mem.split(buf[vt100.escape.len..len], ";"); const rows_str = iter.next() orelse return error.CursorPosition; const cols_str = iter.next() orelse return error.CursorPosition; if (iter.next()) |_| return error.CursorPosition; return Pos{ .x = try fmt.parseUnsigned(usize, cols_str, 10), .y = try fmt.parseUnsigned(usize, rows_str, 10), }; } fn enableRawMode(file: fs.File) !void { var raw = try os.tcgetattr(file.handle); raw.iflag &= ~@as(@TypeOf(raw.lflag), BRKINT | ICRNL | INPCK | ISTRIP | IXON); raw.oflag &= ~@as(@TypeOf(raw.lflag), os.OPOST); raw.cflag &= ~@as(@TypeOf(raw.lflag), os.CS8); raw.lflag &= ~@as(@TypeOf(raw.lflag), os.ECHO | os.ICANON | os.IEXTEN | os.ISIG); raw.cc[VMIN] = 0; raw.cc[VTIME] = 1; try os.tcsetattr(file.handle, os.TCSA.FLUSH, raw); } const builtin = @import("builtin"); const tcflag_t = c_uint; const cc_t = u8; const speed_t = c_uint; const VTIME = 5; const VMIN = 6; const BRKINT = 0o0002; const INPCK = 0o0020; const ISTRIP = 0o0040; const ICRNL = 0o0400; const IXON = 0o2000; const Termios = switch (builtin.arch) { .x86_64 => extern struct { iflag: tcflag_t, oflag: tcflag_t, cflag: tcflag_t, lflag: tcflag_t, line: cc_t, cc: [NCCS]cc_t, ispeed: speed_t, ospeed: speed_t, }, else => @compileError("Unsupported arch"), };
src/terminal.zig
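A self-contained sketch, with assumed names, of the row/column parsing that cursorPosition above performs on a VT100 status report once the leading escape bytes and the trailing 'R' have been stripped.

const std = @import("std");

const Pos = struct { row: usize, col: usize };

fn parseCursorReport(body: []const u8) !Pos {
    // Expect "<row>;<col>", e.g. "12;40".
    const sep = std.mem.indexOfScalar(u8, body, ';') orelse return error.CursorPosition;
    return Pos{
        .row = try std.fmt.parseUnsigned(usize, body[0..sep], 10),
        .col = try std.fmt.parseUnsigned(usize, body[sep + 1 ..], 10),
    };
}

test "parse cursor position report" {
    const pos = try parseCursorReport("12;40");
    std.debug.assert(pos.row == 12 and pos.col == 40);
}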
const std = @import("std"); const assert = std.debug.assert; const allocator = std.heap.page_allocator; pub const Pos = struct { x: usize, y: usize, pub fn encode(self: Pos) usize { return self.x * 10000 + self.y; } }; pub const Hull = struct { cells: std.AutoHashMap(usize, Color), curr: Pos, pmin: Pos, pmax: Pos, dir: Direction, painted: usize, pub const Color = enum(u8) { Black = 0, White = 1, }; pub const Direction = enum(u8) { U = 0, D = 1, L = 2, R = 3, }; pub const Rotation = enum(u8) { L = 0, R = 1, }; pub fn init(first_color: Color) Hull { var self = Hull{ .cells = std.AutoHashMap(usize, Color).init(allocator), .curr = Pos{ .x = 500, .y = 500 }, .pmin = Pos{ .x = std.math.maxInt(usize), .y = std.math.maxInt(usize) }, .pmax = Pos{ .x = 0, .y = 0 }, .dir = Direction.U, .painted = 0, }; self.paint(first_color); return self; } pub fn deinit(self: *Hull) void { self.cells.deinit(); } pub fn position(self: Hull) usize { return self.curr.encode(); } pub fn get_color(self: *Hull, pos: Pos) Color { const label = pos.encode(); if (self.cells.contains(label)) { return self.cells.get(label).?; } return Color.Black; } pub fn get_current_color(self: *Hull) Color { return self.get_color(self.curr); } pub fn paint(self: *Hull, c: Color) void { const pos = self.position(); if (!self.cells.contains(pos)) { self.painted += 1; } _ = self.cells.put(pos, c) catch unreachable; } pub fn move(self: *Hull, rotation: Rotation) void { self.dir = switch (rotation) { Rotation.L => switch (self.dir) { Direction.U => Direction.L, Direction.L => Direction.D, Direction.D => Direction.R, Direction.R => Direction.U, }, Rotation.R => switch (self.dir) { Direction.U => Direction.R, Direction.L => Direction.U, Direction.D => Direction.L, Direction.R => Direction.D, }, }; var dx: i32 = 0; var dy: i32 = 0; switch (self.dir) { Direction.U => dy = 1, Direction.D => dy = -1, Direction.L => dx = -1, Direction.R => dx = 1, } self.curr.x = @intCast(usize, @intCast(i32, self.curr.x) + dx); self.curr.y = @intCast(usize, @intCast(i32, self.curr.y) + dy); if (self.pmin.x > self.curr.x) self.pmin.x = self.curr.x; if (self.pmin.y > self.curr.y) self.pmin.y = self.curr.y; if (self.pmax.x < self.curr.x) self.pmax.x = self.curr.x; if (self.pmax.y < self.curr.y) self.pmax.y = self.curr.y; } }; test "simple hull control" { var hull = Hull.init(Hull.Color.Black); defer hull.deinit(); assert(hull.painted == 1); assert(hull.get_current_color() == Hull.Color.Black); hull.paint(Hull.Color.White); assert(hull.get_current_color() == Hull.Color.White); assert(hull.painted == 1); hull.paint(Hull.Color.Black); assert(hull.get_current_color() == Hull.Color.Black); assert(hull.painted == 1); hull.move(Hull.Rotation.L); assert(hull.position() == 4990500); hull.move(Hull.Rotation.L); assert(hull.position() == 4990499); hull.move(Hull.Rotation.L); assert(hull.position() == 5000499); hull.move(Hull.Rotation.L); assert(hull.position() == 5000500); hull.move(Hull.Rotation.R); assert(hull.position() == 5010500); hull.move(Hull.Rotation.R); assert(hull.position() == 5010499); hull.move(Hull.Rotation.R); assert(hull.position() == 5000499); hull.move(Hull.Rotation.R); assert(hull.position() == 5000500); }
2019/p11/ship.zig
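The position labels asserted in the test above (for example 4990500) follow from Pos.encode multiplying x by 10000 and adding y. A small sketch of the inverse, assuming y always stays below 10000 as it does for the 500-centered hull.

const std = @import("std");
const assert = std.debug.assert;

const Decoded = struct { x: usize, y: usize };

fn decode(code: usize) Decoded {
    // Inverse of Pos.encode: x * 10000 + y, valid while y < 10000.
    return Decoded{ .x = code / 10000, .y = code % 10000 };
}

test "decode hull position labels" {
    const pos = decode(4990500);
    assert(pos.x == 499 and pos.y == 500);
}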
const std = @import("std"); const assert = std.debug.assert; const tools = @import("tools"); const Vec2 = tools.Vec2; fn getParens(line: []const u8) []const u8 { assert(line[0] == '('); var nb: usize = 0; for (line) |c, i| { if (c == '(') nb += 1; if (c == ')') { nb -= 1; if (nb == 0) return line[1..i]; } } unreachable; } fn compute(line: []const u8, prio: enum { left, add }) usize { var l = line; var terms: [64]usize = undefined; var ops: [64]u8 = undefined; var nb: usize = 0; // std.debug.print(" examining {}...\n", .{line}); while (l.len > 0) { var term: usize = undefined; if (l[0] == '(') { const par = getParens(l); term = compute(par, prio); l = l[par.len + 2 ..]; } else if (l[0] >= '0' and l[0] <= '9') { term = l[0] - '0'; l = l[1..]; } else { unreachable; } if (l.len > 1) { assert(l[0] == ' '); l = l[1..]; } terms[nb] = term; if (l.len > 1) { ops[nb] = l[0]; assert(l[1] == ' '); l = l[2..]; } nb += 1; // std.debug.print(" term={}, op={c} reste {}\n", .{ term, ops[nb - 1], l }); } switch (prio) { .left => { var i: usize = 0; while (i < nb - 1) : (i += 1) { if (ops[i] == '+') { terms[0] += terms[i + 1]; } else if (ops[i] == '*') { terms[0] *= terms[i + 1]; } else unreachable; } nb = 1; }, .add => { // apply '+' var i = nb - 1; while (i > 0) : (i -= 1) { if (ops[i - 1] == '+') { terms[i - 1] += terms[i]; std.mem.copy(usize, terms[i .. nb - 1], terms[i + 1 .. nb]); std.mem.copy(u8, ops[i - 1 .. nb - 2], ops[i .. nb - 1]); nb -= 1; } } // apply '*' i = nb - 1; while (i > 0) : (i -= 1) { if (ops[i - 1] == '*') { terms[i - 1] *= terms[i]; std.mem.copy(usize, terms[i .. nb - 1], terms[i + 1 .. nb]); std.mem.copy(u8, ops[i - 1 .. nb - 2], ops[i .. nb - 1]); nb -= 1; } } }, } // std.debug.print(" ... res = {}\n", .{terms[0]}); assert(nb == 1); return terms[0]; } pub fn run(input_text: []const u8, allocator: std.mem.Allocator) ![2][]const u8 { var arena = std.heap.ArenaAllocator.init(allocator); defer arena.deinit(); const ans1 = ans: { var sum: usize = 0; var it = std.mem.tokenize(u8, input_text, "\n\r"); while (it.next()) |line| { const res = compute(std.mem.trim(u8, line, " "), .left); // std.debug.print("{} = {}\n", .{ res, line }); sum += res; } break :ans sum; }; const ans2 = ans: { var sum: usize = 0; var it = std.mem.tokenize(u8, input_text, "\n\r"); while (it.next()) |line| { const res = compute(std.mem.trim(u8, line, " "), .add); // std.debug.print("{} = {}\n", .{ res, line }); sum += res; } break :ans sum; }; return [_][]const u8{ try std.fmt.allocPrint(allocator, "{}", .{ans1}), try std.fmt.allocPrint(allocator, "{}", .{ans2}), }; } pub const main = tools.defaultMain("2020/input_day18.txt", run);
2020/day18.zig
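A standalone sketch of the `.left` folding pass in compute(): with every operator given equal precedence, terms are reduced strictly left to right, so the familiar example "1 + 2 * 3 + 4 * 5 + 6" comes out to 71. The helper name foldLeft is assumed for the sketch.

const std = @import("std");
const assert = std.debug.assert;

fn foldLeft(terms: []const usize, ops: []const u8) usize {
    var acc = terms[0];
    for (ops) |op, i| {
        switch (op) {
            '+' => acc += terms[i + 1],
            '*' => acc *= terms[i + 1],
            else => unreachable,
        }
    }
    return acc;
}

test "left-to-right evaluation without precedence" {
    // 1 + 2 * 3 + 4 * 5 + 6, folded left to right.
    const terms = [_]usize{ 1, 2, 3, 4, 5, 6 };
    const ops = [_]u8{ '+', '*', '+', '*', '+' };
    assert(foldLeft(&terms, &ops) == 71);
}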
const sf = @import("../sfml.zig"); pub const CircleShape = struct { const Self = @This(); // Constructor/destructor /// Inits a circle shape with a radius. The circle will be white and have 30 points pub fn init(radius: f32) !Self { var circle = sf.c.sfCircleShape_create(); if (circle == null) return sf.Error.nullptrUnknownReason; sf.c.sfCircleShape_setFillColor(circle, sf.c.sfWhite); sf.c.sfCircleShape_setRadius(circle, radius); return Self{ .ptr = circle.? }; } /// Destroys a circle shape pub fn deinit(self: Self) void { sf.c.sfCircleShape_destroy(self.ptr); } // Getters/setters /// Gets the fill color of this circle shape pub fn getFillColor(self: Self) sf.Color { _ = sf.c.sfCircleShape_getFillColor(self.ptr); // Register Rax holds the return val of function calls that can fit in a register const rax: usize = asm volatile ("" : [ret] "={rax}" (-> usize) ); std.debug.print("{}\n", rax); var x: u32 = @truncate(u32, (rax & 0x00000000FFFFFFFF) >> 00); var y: u32 = @truncate(u32, (rax & 0xFFFFFFFF00000000) >> 32); return sf.Color.fromInteger(x); } /// Sets the fill color of this circle shape pub fn setFillColor(self: Self, color: sf.Color) void { sf.c.sfCircleShape_setFillColor(self.ptr, color.toCSFML()); } /// Gets the radius of this circle shape pub fn getRadius(self: Self) f32 { return sf.c.sfCircleShape_getRadius(self.ptr); } /// Sets the radius of this circle shape pub fn setRadius(self: Self, radius: f32) void { sf.c.sfCircleShape_setRadius(self.ptr, radius); } /// Gets the position of this circle shape pub fn getPosition(self: Self) sf.Vector2f { return sf.Vector2f.fromCSFML(sf.c.sfCircleShape_getPosition(self.ptr)); } /// Sets the position of this circle shape pub fn setPosition(self: Self, pos: sf.Vector2f) void { sf.c.sfCircleShape_setPosition(self.ptr, pos.toCSFML()); } /// Adds the offset to this shape's position pub fn move(self: Self, offset: sf.Vector2f) void { sf.c.sfCircleShape_move(self.ptr, offset.toCSFML()); } /// Gets the origin of this circle shape pub fn getOrigin(self: Self) sf.Vector2f { return sf.Vector2f.fromCSFML(sf.c.sfCircleShape_getOrigin(self.ptr)); } /// Sets the origin of this circle shape pub fn setOrigin(self: Self, origin: sf.Vector2f) void { sf.c.sfCircleShape_setOrigin(self.ptr, origin.toCSFML()); } /// Gets the rotation of this circle shape pub fn getRotation(self: Self) f32 { return sf.c.sfCircleShape_getRotation(self.ptr); } /// Sets the rotation of this circle shape pub fn setRotation(self: Self, angle: f32) void { sf.c.sfCircleShape_setRotation(self.ptr, angle); } /// Rotates this shape by a given amount pub fn rotate(self: Self, angle: f32) void { sf.c.sfCircleShape_rotate(self.ptr, angle); } /// Gets the texture of this shape pub fn getTexture(self: Self) ?sf.Texture { var t = sf.c.sfCircleShape_getTexture(self.ptr); if (t != null) { return sf.Texture{ .const_ptr = t.? 
}; } else return null; } /// Sets the texture of this shape pub fn setTexture(self: Self, texture: ?sf.Texture) void { var tex = if (texture) |t| t.get() else null; sf.c.sfCircleShape_setTexture(self.ptr, tex, 0); } /// Gets the sub-rectangle of the texture that the shape will display pub fn getTextureRect(self: Self) sf.FloatRect { return sf.FloatRect.fromCSFML(sf.c.sfCircleShape_getTextureRect(self.ptr)); } /// Sets the sub-rectangle of the texture that the shape will display pub fn setTextureRect(self: Self, rect: sf.FloatRect) void { sf.c.sfCircleShape_setTextureRect(self.ptr, rect.toCSFML()); } /// Gets the bounds in the local coordinates system pub fn getLocalBounds(self: Self) sf.FloatRect { return sf.FloatRect.fromCSFML(sf.c.sfCircleShape_getLocalBounds(self.ptr)); } /// Gets the bounds in the global coordinates pub fn getGlobalBounds(self: Self) sf.FloatRect { return sf.FloatRect.fromCSFML(sf.c.sfCircleShape_getGlobalBounds(self.ptr)); } /// Pointer to the csfml structure ptr: *sf.c.sfCircleShape }; test "circle shape: sane getters and setters" { const tst = @import("std").testing; var circle = try CircleShape.init(30); defer circle.deinit(); circle.setFillColor(sf.Color.Yellow); circle.setRadius(50); circle.setRotation(15); circle.setPosition(.{ .x = 1, .y = 2 }); circle.setOrigin(.{ .x = 20, .y = 25 }); // TODO : issue #2 //tst.expectEqual(sf.Color.Yellow, circle.getFillColor()); tst.expectEqual(@as(f32, 50), circle.getRadius()); tst.expectEqual(@as(f32, 15), circle.getRotation()); tst.expectEqual(sf.Vector2f{ .x = 1, .y = 2 }, circle.getPosition()); tst.expectEqual(sf.Vector2f{ .x = 20, .y = 25 }, circle.getOrigin()); tst.expectEqual(@as(?sf.Texture, null), circle.getTexture()); circle.rotate(5); circle.move(.{ .x = -5, .y = 5 }); tst.expectEqual(@as(f32, 20), circle.getRotation()); tst.expectEqual(sf.Vector2f{ .x = -4, .y = 7 }, circle.getPosition()); }
src/sfml/graphics/circle_shape.zig
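getFillColor above works around a CSFML return-value issue by reading a 64-bit register and splitting it into two 32-bit halves. This standalone sketch shows only that bit-splitting arithmetic, on a made-up 64-bit value rather than anything returned by CSFML.

const std = @import("std");
const assert = std.debug.assert;

test "split a 64-bit value into two u32 halves" {
    const value: u64 = 0x11223344_AABBCCDD;
    // Same masking/truncation pattern as in getFillColor.
    const low = @truncate(u32, value & 0x00000000FFFFFFFF);
    const high = @truncate(u32, (value & 0xFFFFFFFF00000000) >> 32);
    assert(low == 0xAABBCCDD);
    assert(high == 0x11223344);
}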
const os = @import("root").os; const fmt = @import("std").fmt; const arch = @import("builtin").arch; const range = os.lib.range.range; const Printer = struct { pub fn writeAll(self: *const Printer, str: []const u8) !void { try @call(.{ .modifier = .never_inline }, print_str, .{str}); } pub fn print(self: *const Printer, comptime format: []const u8, args: anytype) !void { log_nolock(format, args); } pub fn writeByteNTimes(self: *const Printer, val: u8, num: usize) !void { var i: usize = 0; while (i < num) : (i += 1) { putch(val); } } pub const Error = anyerror; }; var log_lock: os.thread.Spinlock = .{}; var lock_owner: ?*os.platform.smp.CoreData = null; pub fn log(comptime format: []const u8, args: anytype) void { const current_cpu = os.platform.thread.get_current_cpu(); const require_locking = @atomicLoad(?*os.platform.smp.CoreData, &lock_owner, .Acquire) != current_cpu; const a = if(require_locking) log_lock.lock() else undefined; defer if(require_locking) log_lock.unlock(a); if(require_locking) @atomicStore(?*os.platform.smp.CoreData, &lock_owner, current_cpu, .Release); defer if(require_locking) { @atomicStore(?*os.platform.smp.CoreData, &lock_owner, null, .Release); }; return log_nolock(format, args); } fn log_nolock(comptime format: []const u8, args: anytype) void { var printer = Printer{}; fmt.format(printer, format, args) catch unreachable; } fn print_str(str: []const u8) !void { for (str) |c| { putch(c); } } fn protected_putchar(comptime putch_func: anytype) type { return struct { is_inside: bool = false, pub fn putch(self: *@This(), ch: u8) void { if(self.is_inside) return; self.is_inside = true; defer self.is_inside = false; putch_func(ch); } }; } var platform: protected_putchar(os.platform.debugputch) = .{}; var mmio_serial: protected_putchar(os.drivers.mmio_serial.putch) = .{}; var vesa_log: protected_putchar(os.drivers.vesa_log.putch) = .{}; var vga_log: protected_putchar(os.drivers.vga_log.putch) = .{}; fn putch(ch: u8) void { platform.putch(ch); mmio_serial.putch(ch); vesa_log.putch(ch); if (arch == .x86_64) { vga_log.putch(ch); } } pub fn hexdump_obj(val: anytype) void { hexdump(@ptrCast([*]u8, val)[0..@sizeOf(@TypeOf(val.*))]); } pub fn hexdump(in_bytes: []const u8) void { var bytes = in_bytes; while (bytes.len != 0) { log("{X:0>16}: ", .{@ptrToInt(bytes.ptr)}); inline for (range(0x10)) |offset| { if (offset < bytes.len) { const value = bytes[offset]; log("{X:0>2}{c}", .{ value, if (offset == 7) '-' else ' ' }); } else { log(" ", .{}); } } inline for (range(0x10)) |offset| { if (offset < bytes.len) { const value = bytes[offset]; if (0x20 <= value and value < 0x7F) { log("{c}", .{value}); } else { log(".", .{}); } } else { log(" ", .{}); } } log("\n", .{}); if (bytes.len < 0x10) return; bytes = bytes[0x10..]; } }
src/lib/logger.zig
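A standalone sketch of the std.fmt specifiers that hexdump() above relies on: "{X:0>16}" zero-pads a value to 16 uppercase hex digits and "{X:0>2}" prints one byte as two digits. The buffer sizes and sample values here are arbitrary.

const std = @import("std");
const assert = std.debug.assert;

test "hexdump-style format specifiers" {
    var addr_buf: [32]u8 = undefined;
    const addr = try std.fmt.bufPrint(&addr_buf, "{X:0>16}", .{@as(usize, 0x1000)});
    assert(std.mem.eql(u8, addr, "0000000000001000"));

    var byte_buf: [8]u8 = undefined;
    const byte = try std.fmt.bufPrint(&byte_buf, "{X:0>2}", .{@as(u8, 0xA)});
    assert(std.mem.eql(u8, byte, "0A"));
}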
const gmath = @import("gmath.zig").gmath(f64); pub const Pos01 = struct { x: f64, y: f64, }; pub const PosI = struct { const Self = @This(); x: isize, y: isize, pub fn toPosU(self: Self) ?PosU { if (self.x >= 0 and self.y >= 0) { return PosU{ .x = self.x, .y = self.y }; } else { return null; } } }; pub const PosU = struct { x: usize, y: usize, }; pub const PosUTo01 = struct { const Self = @This(); xf: gmath.Fma, yf: gmath.Fma, pub fn forCenter(width: usize, height: usize) Self { return init(UnitBoundsF64.init(UnitBounds.initCenter(width, height))); } pub fn init(ub: UnitBoundsF64) Self { return .{ .xf = gmath.Fma.coMix(ub.x0, ub.x1), .yf = gmath.Fma.coMix(ub.y0, ub.y1), }; } pub inline fn toPos01(self: Self, x: usize, y: usize) Pos01 { return .{ .x = self.xf.apply(@intToFloat(f64, x)), .y = self.yf.apply(@intToFloat(f64, y)), }; } }; pub const Pos01ToI = struct { const Self = @This(); xf: gmath.Fma, yf: gmath.Fma, pub fn forCenter(width: usize, height: usize, skew: f64) Self { return init(UnitBoundsF64.init(UnitBounds.initCenter(width, height)), skew); } pub fn init(ub: UnitBoundsF64, skew: f64) Self { return .{ .xf = gmath.Fma.mix(ub.x0 + skew, ub.x1 + skew), .yf = gmath.Fma.mix(ub.y0 + skew, ub.y1 + skew), }; } pub inline fn toPosI(self: Self, x: f64, y: f64) PosI { return .{ .x = @floatToInt(isize, @floor(self.xf.apply(x))), .y = @floatToInt(isize, @floor(self.yf.apply(y))), }; } }; pub const Pos01ToIndex = struct { const Self = @This(); res: Res, ub: Pos01ToI, pub fn forCenter(width: usize, height: usize, skew: f64) Self { return init(Res.init(width, height), Pos01ToI.forCenter(width, height, skew)); } pub fn init(res: Res, ub: Pos01ToI) Self { return .{ .res = res, .ub = ub, }; } pub inline fn index(self: Self, x: f64, y: f64) ?usize { const pos = self.ub.toPosI(x, y); return self.res.indexI(pos.x, pos.y); } pub inline fn indexOff(self: Self, x: f64, y: f64, xo: isize, yo: isize) ?usize { const pos = self.ub.toPosI(x, y); return self.res.indexI(pos.x + xo, pos.y + yo); } pub fn indexOffsets(self: Self, x: f64, y: f64, comptime n: usize, comptime offsets: [n]Offset) [n]?usize { const pos = self.ub.toPosI(x, y); var result = [_]?usize{null} ** n; inline for (offsets) |off, i| { result[i] = self.res.indexI(pos.x + off.x, pos.y + off.y); } return result; } pub const Offset = struct { x: isize, y: isize, }; pub const neighbors4 = comptime [4]Offset{ .{ .x = 0, .y = -1 }, .{ .x = -1, .y = 0 }, .{ .x = 1, .y = 0 }, .{ .x = 0, .y = 1 }, }; pub const neighbors5 = comptime [5]Offset{ .{ .x = 0, .y = -1 }, .{ .x = -1, .y = 0 }, .{ .x = 0, .y = 0 }, .{ .x = 1, .y = 0 }, .{ .x = 0, .y = 1 }, }; pub const neighbors8 = comptime [8]Offset{ .{ .x = -1, .y = -1 }, .{ .x = 0, .y = -1 }, .{ .x = 1, .y = -1 }, .{ .x = -1, .y = 0 }, .{ .x = 1, .y = 0 }, .{ .x = -1, .y = 1 }, .{ .x = 0, .y = 1 }, .{ .x = 1, .y = 1 }, }; pub const neighbors9 = comptime [9]Offset{ .{ .x = -1, .y = -1 }, .{ .x = 0, .y = -1 }, .{ .x = 1, .y = -1 }, .{ .x = -1, .y = 0 }, .{ .x = 0, .y = 0 }, .{ .x = 1, .y = 0 }, .{ .x = -1, .y = 1 }, .{ .x = 0, .y = 1 }, .{ .x = 1, .y = 1 }, }; }; pub const Res = struct { width: usize, height: usize, const Self = @This(); pub fn init(width: usize, height: usize) Self { return .{ .width = width, .height = height }; } pub inline fn indexU(self: Self, x: usize, y: usize) ?usize { if (xi < self.width and yi < self.height) { return y * self.width + x; } return null; } pub inline fn indexI(self: Self, x: isize, y: isize) ?usize { if (x >= 0 and y >= 0 and x < self.width and y < self.height) { const xu 
= @intCast(usize, x); const yu = @intCast(usize, y); return yu * self.width + xu; } return null; } }; pub const UnitBounds = struct { const Self = @This(); x0: usize, y0: usize, x1: usize, y1: usize, pub fn initCenter(width: usize, height: usize) Self { if (width > height) { const unit = height; const x0 = (width - unit) / 2; return Self{ .x0 = x0, .x1 = x0 + unit, .y0 = 0, .y1 = unit }; } else { const unit = width; const y0 = (height - unit) / 2; return Self{ .y0 = y0, .y1 = y0 + unit, .x0 = 0, .x1 = unit }; } } }; pub const UnitBoundsF64 = struct { const Self = @This(); x0: f64, y0: f64, x1: f64, y1: f64, pub fn init(ub: UnitBounds) Self { return .{ .x0 = @intToFloat(f64, ub.x0), .y0 = @intToFloat(f64, ub.y0), .x1 = @intToFloat(f64, ub.x1), .y1 = @intToFloat(f64, ub.y1), }; } };
lib/unitbounds.zig
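A minimal, self-contained restatement of the bounds check behind Res.indexI above: a signed coordinate maps to a row-major index only when it lands inside width x height. The free-function form used here is an assumption made for the sketch.

const std = @import("std");
const assert = std.debug.assert;

fn indexI(width: usize, height: usize, x: isize, y: isize) ?usize {
    if (x < 0 or y < 0) return null;
    const xu = @intCast(usize, x);
    const yu = @intCast(usize, y);
    if (xu >= width or yu >= height) return null;
    // Row-major flattening, as in Res.indexI.
    return yu * width + xu;
}

test "row-major index with bounds check" {
    assert(indexI(4, 3, 2, 1).? == 6);
    assert(indexI(4, 3, -1, 0) == null);
    assert(indexI(4, 3, 4, 0) == null);
}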
const std = @import("std"); const zap = @import("zap"); const hyperia = @import("hyperia"); const Reactor = hyperia.Reactor; const AsyncSocket = hyperia.AsyncSocket; const AsyncWaitGroupAllocator = hyperia.AsyncWaitGroupAllocator; const os = std.os; const net = std.net; const mem = std.mem; const builtin = std.builtin; const log = std.log.scoped(.server); usingnamespace hyperia.select; pub const log_level = .debug; var stopped: bool = false; pub const Server = struct { pub const Connection = struct { server: *Server, socket: AsyncSocket, address: net.Address, frame: @Frame(Connection.start), pub fn start(self: *Connection) !void { defer { log.info("{} has disconnected", .{self.address}); if (self.server.deregister(self.address)) { suspend { self.socket.deinit(); self.server.wga.allocator.destroy(self); } } } var buf: [4096]u8 = undefined; while (true) { const num_bytes = try self.socket.read(&buf); if (num_bytes == 0) return; const message = mem.trim(u8, buf[0..num_bytes], "\r\n"); log.info("got message from {}: '{s}'", .{ self.address, message }); } } }; listener: AsyncSocket, wga: AsyncWaitGroupAllocator, lock: std.Thread.Mutex = .{}, connections: std.AutoArrayHashMapUnmanaged(os.sockaddr, *Connection) = .{}, pub fn init(allocator: *mem.Allocator) Server { return Server{ .listener = undefined, .wga = .{ .backing_allocator = allocator }, }; } pub fn deinit(self: *Server, allocator: *mem.Allocator) void { { const held = self.lock.acquire(); defer held.release(); for (self.connections.items()) |entry| { entry.value.socket.shutdown(.both) catch {}; } } self.wga.wait(); self.connections.deinit(allocator); } pub fn close(self: *Server) void { self.listener.shutdown(.recv) catch {}; } pub fn start(self: *Server, reactor: Reactor, address: net.Address) !void { self.listener = try AsyncSocket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP); errdefer self.listener.deinit(); try reactor.add(self.listener.socket.fd, &self.listener.handle, .{ .readable = true }); try self.listener.setReuseAddress(true); try self.listener.setReusePort(true); try self.listener.setNoDelay(true); try self.listener.setFastOpen(true); try self.listener.setQuickAck(true); try self.listener.bind(address); try self.listener.listen(128); log.info("listening for connections on: {}", .{try self.listener.getName()}); } fn accept(self: *Server, allocator: *mem.Allocator, reactor: Reactor) callconv(.Async) !void { while (true) { var conn = try self.listener.accept(os.SOCK_CLOEXEC | os.SOCK_NONBLOCK); errdefer conn.socket.deinit(); try conn.socket.setNoDelay(true); log.info("got connection: {}", .{conn.address}); const wga_allocator = &self.wga.allocator; const connection = try wga_allocator.create(Connection); errdefer wga_allocator.destroy(connection); connection.server = self; connection.socket = AsyncSocket.from(conn.socket); connection.address = conn.address; try reactor.add(conn.socket.fd, &connection.socket.handle, .{ .readable = true, .writable = true }); { const held = self.lock.acquire(); defer held.release(); try self.connections.put(allocator, connection.address.any, connection); } connection.frame = async connection.start(); } } fn deregister(self: *Server, address: net.Address) bool { const held = self.lock.acquire(); defer held.release(); const entry = self.connections.swapRemove(address.any) orelse return false; return true; } }; pub fn runApp(reactor: Reactor, reactor_event: *Reactor.AutoResetEvent) !void { defer { log.info("shutting down...", .{}); @atomicStore(bool, &stopped, true, .Release); 
reactor_event.post(); } var server = Server.init(hyperia.allocator); defer server.deinit(hyperia.allocator); const address = net.Address.initIp4(.{ 0, 0, 0, 0 }, 9000); try server.start(reactor, address); const Cases = struct { accept: struct { run: Case(Server.accept), cancel: Case(Server.close), }, ctrl_c: struct { run: Case(hyperia.ctrl_c.wait), cancel: Case(hyperia.ctrl_c.cancel), }, }; switch (select( Cases{ .accept = .{ .run = call(Server.accept, .{ &server, hyperia.allocator, reactor }), .cancel = call(Server.close, .{&server}), }, .ctrl_c = .{ .run = call(hyperia.ctrl_c.wait, .{}), .cancel = call(hyperia.ctrl_c.cancel, .{}), }, }, )) { .accept => |result| return result, .ctrl_c => |result| return result, } } pub fn main() !void { hyperia.init(); defer hyperia.deinit(); hyperia.ctrl_c.init(); defer hyperia.ctrl_c.deinit(); const reactor = try Reactor.init(os.EPOLL_CLOEXEC); defer reactor.deinit(); var reactor_event = try Reactor.AutoResetEvent.init(os.EFD_CLOEXEC, reactor); defer reactor_event.deinit(); try reactor.add(reactor_event.fd, &reactor_event.handle, .{}); var frame = async runApp(reactor, &reactor_event); while (!@atomicLoad(bool, &stopped, .Acquire)) { const EventProcessor = struct { batch: zap.Pool.Batch = .{}, pub fn call(self: *@This(), event: Reactor.Event) void { const handle = @intToPtr(*Reactor.Handle, event.data); handle.call(&self.batch, event); } }; var processor: EventProcessor = .{}; defer hyperia.pool.schedule(.{}, processor.batch); try reactor.poll(128, &processor, null); } try nosuspend await frame; log.info("good bye!", .{}); }
example_tcp_server.zig
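A tiny standalone sketch of the line handling in Connection.start above: received bytes are trimmed of CR/LF with std.mem.trim before being treated as the message text. The sample input is made up.

const std = @import("std");
const assert = std.debug.assert;

test "trim CR/LF from an incoming message" {
    const raw = "hello server\r\n";
    const message = std.mem.trim(u8, raw, "\r\n");
    assert(std.mem.eql(u8, message, "hello server"));
}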
const std = @import("std"); const Arena = std.heap.ArenaAllocator; const Allocator = std.mem.Allocator; const eql = std.meta.eql; const panic = std.debug.panic; const assert = std.debug.assert; const init_codebase = @import("init_codebase.zig"); const initCodebase = init_codebase.initCodebase; const MockFileSystem = @import("file_system.zig").FileSystem; const analyzeSemantics = @import("semantic_analyzer.zig").analyzeSemantics; const ecs = @import("ecs.zig"); const Entity = ecs.Entity; const ECS = ecs.ECS; const components = @import("components.zig"); const query = @import("query.zig"); const literalOf = query.literalOf; const typeOf = query.typeOf; const parentType = query.parentType; const valueType = query.valueType; const sizeOf = query.sizeOf; const valueOf = query.valueOf; const List = @import("list.zig").List; const Strings = @import("strings.zig").Strings; const Context = struct { codebase: *ECS, wasm_instructions: *components.WasmInstructions, locals: *components.Locals, allocator: Allocator, builtins: components.Builtins, label: u64, data_segment: *components.DataSegment, }; fn codegenNumber(context: *Context, entity: Entity) !void { const type_of = typeOf(entity); const b = context.builtins; for (&[_]Entity{ b.IntLiteral, b.FloatLiteral }) |builtin| { if (eql(type_of, builtin)) { return; } } const builtins = &[_]Entity{ b.I64, b.I32, b.I16, b.I8, b.U64, b.U32, b.U16, b.U8, b.F64, b.F32 }; const kinds = &[_]components.WasmInstructionKind{ .i64_const, .i32_const, .i32_const, .i32_const, .i64_const, .i32_const, .i32_const, .i32_const, .f64_const, .f32_const }; for (builtins) |builtin, i| { if (!eql(type_of, builtin)) continue; const wasm_instruction = try context.codebase.createEntity(.{ kinds[i], components.Constant.init(entity), }); _ = try context.wasm_instructions.append(wasm_instruction); return; } if (type_of.has(components.ParentType)) |parent_type| { assert(eql(parent_type.entity, b.Ptr)); const wasm_instruction = try context.codebase.createEntity(.{ components.WasmInstructionKind.i32_const, components.Constant.init(entity), }); _ = try context.wasm_instructions.append(wasm_instruction); return; } panic("\ncodegen number unsupported type {s}\n", .{literalOf(type_of)}); } fn codegenCall(context: *Context, entity: Entity) !void { for (entity.get(components.Arguments).slice()) |argument| { try codegenEntity(context, argument); } for (entity.get(components.OrderedNamedArguments).slice()) |argument| { try codegenEntity(context, argument); } const wasm_instruction = try context.codebase.createEntity(.{ components.WasmInstructionKind.call, entity.get(components.Callable), }); _ = try context.wasm_instructions.append(wasm_instruction); } fn codegenDefine(context: *Context, entity: Entity) !void { const value = entity.get(components.Value).entity; const local = entity.get(components.Local).entity; if (!local.contains(components.Mutable)) { const type_of = typeOf(local); const b = context.builtins; for (&[_]Entity{ b.IntLiteral, b.FloatLiteral }) |builtin| { if (!eql(type_of, builtin)) continue; return; } const builtins = [_]Entity{ b.I64, b.I32, b.I16, b.I8, b.U64, b.U32, b.U16, b.U8, b.F64, b.F32 }; const types = [_]type{ i64, i32, i16, i8, u64, u32, u16, u8, f64, f32 }; inline for (&types) |T, i| { if (eql(builtins[i], type_of)) { if (try valueOf(T, value)) |_| { return; } } } } try codegenEntity(context, value); const wasm_instruction = try context.codebase.createEntity(.{ components.WasmInstructionKind.local_set, components.Local.init(local), }); _ = try 
context.wasm_instructions.append(wasm_instruction); try context.locals.put(local); } fn codegenAssign(context: *Context, entity: Entity) !void { const value = entity.get(components.Value).entity; try codegenEntity(context, value); const wasm_instruction = try context.codebase.createEntity(.{ components.WasmInstructionKind.local_set, entity.get(components.Local), }); _ = try context.wasm_instructions.append(wasm_instruction); return; } fn codegenLocal(context: *Context, local: Entity) !void { if (!local.contains(components.Mutable)) { const type_of = typeOf(local); const b = context.builtins; for (&[_]Entity{ b.IntLiteral, b.FloatLiteral }) |builtin| { if (!eql(type_of, builtin)) continue; return; } const builtins = [_]Entity{ b.I64, b.I32, b.I16, b.I8, b.U64, b.U32, b.U16, b.U8, b.F64, b.F32 }; const types = [_]type{ i64, i32, i16, i8, u64, u32, u16, u8, f64, f32 }; const kinds = &[_]components.WasmInstructionKind{ .i64_const, .i32_const, .i32_const, .i32_const, .i64_const, .i32_const, .i32_const, .i32_const, .f64_const, .f32_const }; if (local.has(components.Value)) |value_component| { const value = value_component.entity; inline for (&types) |T, i| { if (eql(builtins[i], type_of)) { if (try valueOf(T, value)) |_| { const wasm_instruction = try context.codebase.createEntity(.{ kinds[i], components.Constant.init(value), }); _ = try context.wasm_instructions.append(wasm_instruction); return; } } } } } const wasm_instruction = try context.codebase.createEntity(.{ components.WasmInstructionKind.local_get, components.Local.init(local), }); _ = try context.wasm_instructions.append(wasm_instruction); } const ArithmeticBinaryOps = struct { i64_fn: fn (lhs: i64, rhs: i64) i64, i32_fn: fn (lhs: i32, rhs: i32) i32, i16_fn: fn (lhs: i16, rhs: i16) i16, i8_fn: fn (lhs: i8, rhs: i8) i8, u64_fn: fn (lhs: u64, rhs: u64) u64, u32_fn: fn (lhs: u32, rhs: u32) u32, u16_fn: fn (lhs: u16, rhs: u16) u16, u8_fn: fn (lhs: u8, rhs: u8) u8, f64_fn: fn (lhs: f64, rhs: f64) f64, f32_fn: fn (lhs: f32, rhs: f32) f32, kinds: [10]components.WasmInstructionKind, simd_kinds: ?[8]components.WasmInstructionKind = null, float_simd_kinds: ?[2]components.WasmInstructionKind = null, types: [10]type = .{ i64, i32, i16, i8, u64, u32, u16, u8, f64, f32 }, argument_kinds: [10]components.WasmInstructionKind = .{ .i64_const, .i32_const, .i32_const, .i32_const, .i64_const, .i32_const, .i32_const, .i32_const, .f64_const, .f32_const, }, result_kinds: [10]components.WasmInstructionKind = .{ .i64_const, .i32_const, .i32_const, .i32_const, .i64_const, .i32_const, .i32_const, .i32_const, .f64_const, .f32_const, }, const Self = @This(); fn call(comptime self: Self, comptime T: type, lhs: T, rhs: T) T { return switch (T) { i64 => self.i64_fn(lhs, rhs), i32 => self.i32_fn(lhs, rhs), i16 => self.i16_fn(lhs, rhs), i8 => self.i8_fn(lhs, rhs), u64 => self.u64_fn(lhs, rhs), u32 => self.u32_fn(lhs, rhs), u16 => self.u16_fn(lhs, rhs), u8 => self.u8_fn(lhs, rhs), f64 => self.f64_fn(lhs, rhs), f32 => self.f32_fn(lhs, rhs), else => panic("\nunsupported type {s}\n", .{@typeName(T)}), }; } }; const IntBinaryOps = struct { i64_fn: fn (lhs: i64, rhs: i64) i64, i32_fn: fn (lhs: i32, rhs: i32) i32, i16_fn: fn (lhs: i16, rhs: i16) i16, i8_fn: fn (lhs: i8, rhs: i8) i8, u64_fn: fn (lhs: u64, rhs: u64) u64, u32_fn: fn (lhs: u32, rhs: u32) u32, u16_fn: fn (lhs: u16, rhs: u16) u16, u8_fn: fn (lhs: u8, rhs: u8) u8, kinds: [8]components.WasmInstructionKind, types: [8]type = .{ i64, i32, i16, i8, u64, u32, u16, u8 }, argument_kinds: [8]components.WasmInstructionKind = 
.{ .i64_const, .i32_const, .i32_const, .i32_const, .i64_const, .i32_const, .i32_const, .i32_const, }, result_kinds: [8]components.WasmInstructionKind = .{ .i64_const, .i32_const, .i32_const, .i32_const, .i64_const, .i32_const, .i32_const, .i32_const, }, const Self = @This(); fn call(comptime self: Self, comptime T: type, lhs: T, rhs: T) T { return switch (T) { i64 => self.i64_fn(lhs, rhs), i32 => self.i32_fn(lhs, rhs), i16 => self.i16_fn(lhs, rhs), i8 => self.i8_fn(lhs, rhs), u64 => self.u64_fn(lhs, rhs), u32 => self.u32_fn(lhs, rhs), u16 => self.u16_fn(lhs, rhs), u8 => self.u8_fn(lhs, rhs), else => panic("\nunsupported type {s}\n", .{@typeName(T)}), }; } }; const ComparisonBinaryOps = struct { i64_fn: fn (lhs: i64, rhs: i64) i32, i32_fn: fn (lhs: i32, rhs: i32) i32, i16_fn: fn (lhs: i16, rhs: i16) i32, i8_fn: fn (lhs: i8, rhs: i8) i32, u64_fn: fn (lhs: u64, rhs: u64) i32, u32_fn: fn (lhs: u32, rhs: u32) i32, u16_fn: fn (lhs: u16, rhs: u16) i32, u8_fn: fn (lhs: u8, rhs: u8) i32, f64_fn: fn (lhs: f64, rhs: f64) i32, f32_fn: fn (lhs: f32, rhs: f32) i32, kinds: [10]components.WasmInstructionKind, types: [10]type = .{ i64, i32, i16, i8, u64, u32, u16, u8, f64, f32 }, argument_kinds: [10]components.WasmInstructionKind = .{ .i64_const, .i32_const, .i32_const, .i32_const, .i64_const, .i32_const, .i32_const, .i32_const, .f64_const, .f32_const, }, result_kinds: [10]components.WasmInstructionKind = .{ .i32_const, .i32_const, .i32_const, .i32_const, .i32_const, .i32_const, .i32_const, .i32_const, .i32_const, .i32_const, }, const Self = @This(); fn call(comptime self: Self, comptime T: type, lhs: T, rhs: T) i32 { return switch (T) { i64 => self.i64_fn(lhs, rhs), i32 => self.i32_fn(lhs, rhs), i16 => self.i16_fn(lhs, rhs), i8 => self.i8_fn(lhs, rhs), u64 => self.u64_fn(lhs, rhs), u32 => self.u32_fn(lhs, rhs), u16 => self.u16_fn(lhs, rhs), u8 => self.u8_fn(lhs, rhs), f64 => self.f64_fn(lhs, rhs), f32 => self.f32_fn(lhs, rhs), else => panic("\nunsupported type {s}\n", .{@typeName(T)}), }; } }; fn addFn(comptime T: type) fn (T, T) T { return struct { fn f(lhs: T, rhs: T) T { return lhs + rhs; } }.f; } const addOps = ArithmeticBinaryOps{ .i64_fn = addFn(i64), .i32_fn = addFn(i32), .i16_fn = addFn(i16), .i8_fn = addFn(i8), .u64_fn = addFn(u64), .u32_fn = addFn(u32), .u16_fn = addFn(u16), .u8_fn = addFn(u8), .f64_fn = addFn(f64), .f32_fn = addFn(f32), .kinds = [_]components.WasmInstructionKind{ .i64_add, .i32_add, .i32_add_mod_16, .i32_add_mod_8, .i64_add, .i32_add, .i32_add_mod_16, .i32_add_mod_8, .f64_add, .f32_add, }, .simd_kinds = [_]components.WasmInstructionKind{ .i64x2_add, .i32x4_add, .i16x8_add, .i8x16_add, .i64x2_add, .i32x4_add, .i16x8_add, .i8x16_add, }, .float_simd_kinds = [_]components.WasmInstructionKind{ .f64x2_add, .f32x4_add, }, }; fn subtractFn(comptime T: type) fn (T, T) T { return struct { fn f(lhs: T, rhs: T) T { return lhs - rhs; } }.f; } const subtractOps = ArithmeticBinaryOps{ .i64_fn = subtractFn(i64), .i32_fn = subtractFn(i32), .i16_fn = subtractFn(i16), .i8_fn = subtractFn(i8), .u64_fn = subtractFn(u64), .u32_fn = subtractFn(u32), .u16_fn = subtractFn(u16), .u8_fn = subtractFn(u8), .f64_fn = subtractFn(f64), .f32_fn = subtractFn(f32), .kinds = [_]components.WasmInstructionKind{ .i64_sub, .i32_sub, .i32_sub_mod_16, .i32_sub_mod_8, .i64_sub, .i32_sub, .i32_sub_mod_16, .i32_sub_mod_8, .f64_sub, .f32_sub, }, .simd_kinds = [_]components.WasmInstructionKind{ .i64x2_sub, .i32x4_sub, .i16x8_sub, .i8x16_sub, .i64x2_sub, .i32x4_sub, .i16x8_sub, .i8x16_sub, }, .float_simd_kinds = 
[_]components.WasmInstructionKind{ .f64x2_sub, .f32x4_sub, }, }; fn multiplyFn(comptime T: type) fn (T, T) T { return struct { fn f(lhs: T, rhs: T) T { return lhs * rhs; } }.f; } const multiplyOps = ArithmeticBinaryOps{ .i64_fn = multiplyFn(i64), .i32_fn = multiplyFn(i32), .i16_fn = multiplyFn(i16), .i8_fn = multiplyFn(i8), .u64_fn = multiplyFn(u64), .u32_fn = multiplyFn(u32), .u16_fn = multiplyFn(u16), .u8_fn = multiplyFn(u8), .f64_fn = multiplyFn(f64), .f32_fn = multiplyFn(f32), .kinds = [_]components.WasmInstructionKind{ .i64_mul, .i32_mul, .i32_mul_mod_16, .i32_mul_mod_8, .i64_mul, .i32_mul, .i32_mul_mod_16, .i32_mul_mod_8, .f64_mul, .f32_mul, }, .simd_kinds = [_]components.WasmInstructionKind{ .i64x2_mul, .i32x4_mul, .i16x8_mul, .i8x16_mul, .i64x2_mul, .i32x4_mul, .i16x8_mul, .i8x16_mul, }, .float_simd_kinds = [_]components.WasmInstructionKind{ .f64x2_mul, .f32x4_mul, }, }; fn divideFn(comptime T: type) fn (T, T) T { return struct { fn f(lhs: T, rhs: T) T { return switch (T) { i64, i32, i16, i8, u64, u32, u16, u8 => @divFloor(lhs, rhs), else => lhs / rhs, }; } }.f; } const divideOps = ArithmeticBinaryOps{ .i64_fn = divideFn(i64), .i32_fn = divideFn(i32), .i16_fn = divideFn(i16), .i8_fn = divideFn(i8), .u64_fn = divideFn(u64), .u32_fn = divideFn(u32), .u16_fn = divideFn(u16), .u8_fn = divideFn(u8), .f64_fn = divideFn(f64), .f32_fn = divideFn(f32), .kinds = [_]components.WasmInstructionKind{ .i64_div, .i32_div, .i32_div, .i32_div, .u64_div, .u32_div, .u32_div, .u32_div, .f64_div, .f32_div, }, .float_simd_kinds = [_]components.WasmInstructionKind{ .f64x2_div, .f32x4_div, }, }; fn remainderFn(comptime T: type) fn (T, T) T { return struct { fn f(lhs: T, rhs: T) T { return @rem(lhs, rhs); } }.f; } const remainderOps = IntBinaryOps{ .i64_fn = remainderFn(i64), .i32_fn = remainderFn(i32), .i16_fn = remainderFn(i16), .i8_fn = remainderFn(i8), .u64_fn = remainderFn(u64), .u32_fn = remainderFn(u32), .u16_fn = remainderFn(u16), .u8_fn = remainderFn(u8), .kinds = [_]components.WasmInstructionKind{ .i64_rem, .i32_rem, .i32_rem, .i32_rem, .u64_rem, .u32_rem, .u32_rem, .u32_rem, }, }; fn bitAndFn(comptime T: type) fn (T, T) T { return struct { fn f(lhs: T, rhs: T) T { return lhs & rhs; } }.f; } const bitAndOps = IntBinaryOps{ .i64_fn = bitAndFn(i64), .i32_fn = bitAndFn(i32), .i16_fn = bitAndFn(i16), .i8_fn = bitAndFn(i8), .u64_fn = bitAndFn(u64), .u32_fn = bitAndFn(u32), .u16_fn = bitAndFn(u16), .u8_fn = bitAndFn(u8), .kinds = [_]components.WasmInstructionKind{ .i64_and, .i32_and, .i32_and, .i32_and, .i64_and, .i32_and, .i32_and, .i32_and, }, }; fn bitOrFn(comptime T: type) fn (T, T) T { return struct { fn f(lhs: T, rhs: T) T { return lhs | rhs; } }.f; } const bitOrOps = IntBinaryOps{ .i64_fn = bitOrFn(i64), .i32_fn = bitOrFn(i32), .i16_fn = bitOrFn(i16), .i8_fn = bitOrFn(i8), .u64_fn = bitOrFn(u64), .u32_fn = bitOrFn(u32), .u16_fn = bitOrFn(u16), .u8_fn = bitOrFn(u8), .kinds = [_]components.WasmInstructionKind{ .i64_or, .i32_or, .i32_or, .i32_or, .i64_or, .i32_or, .i32_or, .i32_or, }, }; fn bitXorFn(comptime T: type) fn (T, T) T { return struct { fn f(lhs: T, rhs: T) T { return lhs ^ rhs; } }.f; } const bitXorOps = IntBinaryOps{ .i64_fn = bitXorFn(i64), .i32_fn = bitXorFn(i32), .i16_fn = bitXorFn(i16), .i8_fn = bitXorFn(i8), .u64_fn = bitXorFn(u64), .u32_fn = bitXorFn(u32), .u16_fn = bitXorFn(u16), .u8_fn = bitXorFn(u8), .kinds = [_]components.WasmInstructionKind{ .i64_xor, .i32_xor, .i32_xor, .i32_xor, .i64_xor, .i32_xor, .i32_xor, .i32_xor, }, }; fn leftShiftFn(comptime T: type) fn (T, T) T { 
return struct { fn f(lhs: T, rhs: T) T { return lhs << @intCast(std.math.Log2Int(T), rhs); } }.f; } const leftShiftOps = IntBinaryOps{ .i64_fn = leftShiftFn(i64), .i32_fn = leftShiftFn(i32), .i16_fn = leftShiftFn(i16), .i8_fn = leftShiftFn(i8), .u64_fn = leftShiftFn(u64), .u32_fn = leftShiftFn(u32), .u16_fn = leftShiftFn(u16), .u8_fn = leftShiftFn(u8), .kinds = [_]components.WasmInstructionKind{ .i64_shl, .i32_shl, .i32_shl, .i32_shl, .u64_shl, .u32_shl, .u32_shl, .u32_shl, }, }; fn rightShiftFn(comptime T: type) fn (T, T) T { return struct { fn f(lhs: T, rhs: T) T { return lhs >> @intCast(std.math.Log2Int(T), rhs); } }.f; } const rightShiftOps = IntBinaryOps{ .i64_fn = rightShiftFn(i64), .i32_fn = rightShiftFn(i32), .i16_fn = rightShiftFn(i16), .i8_fn = rightShiftFn(i8), .u64_fn = rightShiftFn(u64), .u32_fn = rightShiftFn(u32), .u16_fn = rightShiftFn(u16), .u8_fn = rightShiftFn(u8), .kinds = [_]components.WasmInstructionKind{ .i64_shr, .i32_shr, .i32_shr, .i32_shr, .u64_shr, .u32_shr, .u32_shr, .u32_shr, }, }; fn equalFn(comptime T: type) fn (T, T) i32 { return struct { fn f(lhs: T, rhs: T) i32 { return if (lhs == rhs) 1 else 0; } }.f; } const equalOps = ComparisonBinaryOps{ .i64_fn = equalFn(i64), .i32_fn = equalFn(i32), .i16_fn = equalFn(i16), .i8_fn = equalFn(i8), .u64_fn = equalFn(u64), .u32_fn = equalFn(u32), .u16_fn = equalFn(u16), .u8_fn = equalFn(u8), .f64_fn = equalFn(f64), .f32_fn = equalFn(f32), .kinds = [_]components.WasmInstructionKind{ .i64_eq, .i32_eq, .i32_eq, .i32_eq, .i64_eq, .i32_eq, .i32_eq, .i32_eq, .f64_eq, .f32_eq, }, }; fn notEqualFn(comptime T: type) fn (T, T) i32 { return struct { fn f(lhs: T, rhs: T) i32 { return if (lhs != rhs) 1 else 0; } }.f; } const notEqualOps = ComparisonBinaryOps{ .i64_fn = notEqualFn(i64), .i32_fn = notEqualFn(i32), .i16_fn = notEqualFn(i16), .i8_fn = notEqualFn(i8), .u64_fn = notEqualFn(u64), .u32_fn = notEqualFn(u32), .u16_fn = notEqualFn(u16), .u8_fn = notEqualFn(u8), .f64_fn = notEqualFn(f64), .f32_fn = notEqualFn(f32), .kinds = [_]components.WasmInstructionKind{ .i64_ne, .i32_ne, .i32_ne, .i32_ne, .i64_ne, .i32_ne, .i32_ne, .i32_ne, .f64_ne, .f32_ne, }, }; fn lessThanFn(comptime T: type) fn (T, T) i32 { return struct { fn f(lhs: T, rhs: T) i32 { return if (lhs < rhs) 1 else 0; } }.f; } const lessThanOps = ComparisonBinaryOps{ .i64_fn = lessThanFn(i64), .i32_fn = lessThanFn(i32), .i16_fn = lessThanFn(i16), .i8_fn = lessThanFn(i8), .u64_fn = lessThanFn(u64), .u32_fn = lessThanFn(u32), .u16_fn = lessThanFn(u16), .u8_fn = lessThanFn(u8), .f64_fn = lessThanFn(f64), .f32_fn = lessThanFn(f32), .kinds = [_]components.WasmInstructionKind{ .i64_lt, .i32_lt, .i32_lt, .i32_lt, .i64_lt, .i32_lt, .i32_lt, .i32_lt, .f64_lt, .f32_lt, }, }; fn lessEqualFn(comptime T: type) fn (T, T) i32 { return struct { fn f(lhs: T, rhs: T) i32 { return if (lhs <= rhs) 1 else 0; } }.f; } const lessEqualOps = ComparisonBinaryOps{ .i64_fn = lessEqualFn(i64), .i32_fn = lessEqualFn(i32), .i16_fn = lessEqualFn(i16), .i8_fn = lessEqualFn(i8), .u64_fn = lessEqualFn(u64), .u32_fn = lessEqualFn(u32), .u16_fn = lessEqualFn(u16), .u8_fn = lessEqualFn(u8), .f64_fn = lessEqualFn(f64), .f32_fn = lessEqualFn(f32), .kinds = [_]components.WasmInstructionKind{ .i64_le, .i32_le, .i32_le, .i32_le, .i64_le, .i32_le, .i32_le, .i32_le, .f64_le, .f32_le, }, }; fn greaterThanFn(comptime T: type) fn (T, T) i32 { return struct { fn f(lhs: T, rhs: T) i32 { return if (lhs > rhs) 1 else 0; } }.f; } const greaterThanOps = ComparisonBinaryOps{ .i64_fn = greaterThanFn(i64), .i32_fn = 
greaterThanFn(i32), .i16_fn = greaterThanFn(i16), .i8_fn = greaterThanFn(i8), .u64_fn = greaterThanFn(u64), .u32_fn = greaterThanFn(u32), .u16_fn = greaterThanFn(u16), .u8_fn = greaterThanFn(u8), .f64_fn = greaterThanFn(f64), .f32_fn = greaterThanFn(f32), .kinds = [_]components.WasmInstructionKind{ .i64_gt, .i32_gt, .i32_gt, .i32_gt, .i64_gt, .i32_gt, .i32_gt, .i32_gt, .f64_gt, .f32_gt, }, }; fn greaterEqualFn(comptime T: type) fn (T, T) i32 { return struct { fn f(lhs: T, rhs: T) i32 { return if (lhs >= rhs) 1 else 0; } }.f; } const greaterEqualOps = ComparisonBinaryOps{ .i64_fn = greaterEqualFn(i64), .i32_fn = greaterEqualFn(i32), .i16_fn = greaterEqualFn(i16), .i8_fn = greaterEqualFn(i8), .u64_fn = greaterEqualFn(u64), .u32_fn = greaterEqualFn(u32), .u16_fn = greaterEqualFn(u16), .u8_fn = greaterEqualFn(u8), .f64_fn = greaterEqualFn(f64), .f32_fn = greaterEqualFn(f32), .kinds = [_]components.WasmInstructionKind{ .i64_ge, .i32_ge, .i32_ge, .i32_ge, .i64_ge, .i32_ge, .i32_ge, .i32_ge, .f64_ge, .f32_ge, }, }; fn codegenBinaryOp(context: *Context, entity: Entity, comptime ops: anytype) !void { const arguments = entity.get(components.Arguments).slice(); try codegenEntity(context, arguments[0]); try codegenEntity(context, arguments[1]); const type_of = typeOf(arguments[0]); const b = context.builtins; const builtins = [_]Entity{ b.I64, b.I32, b.I16, b.I8, b.U64, b.U32, b.U16, b.U8, b.F64, b.F32 }; inline for (&ops.types) |T, i| { if (eql(type_of, builtins[i])) { const instructions = context.wasm_instructions.mutSlice(); const rhs = instructions[instructions.len - 1]; const lhs = instructions[instructions.len - 2]; const lhs_kind = lhs.get(components.WasmInstructionKind); const rhs_kind = rhs.get(components.WasmInstructionKind); const kind = ops.argument_kinds[i]; if (lhs_kind == kind and rhs_kind == kind) { const lhs_value = (try valueOf(T, lhs.get(components.Constant).entity)).?; const rhs_value = (try valueOf(T, rhs.get(components.Constant).entity)).?; const result_value = ops.call(T, lhs_value, rhs_value); const result_literal = try std.fmt.allocPrint(context.allocator, "{}", .{result_value}); const interned = try context.codebase.getPtr(Strings).intern(result_literal); const result = try context.codebase.createEntity(.{ entity.get(components.Type), components.Literal.init(interned), result_value, }); instructions[instructions.len - 2] = try context.codebase.createEntity(.{ ops.result_kinds[i], components.Constant.init(result), }); context.wasm_instructions.shrink(1); return; } const instruction = try context.codebase.createEntity(.{ops.kinds[i]}); try context.wasm_instructions.append(instruction); return; } } const vectors = [_]Entity{ b.I64X2, b.I32X4, b.I16X8, b.I8X16, b.U64X2, b.U32X4, b.U16X8, b.U8X16 }; for (vectors) |vector, i| { if (eql(type_of, vector)) { if (@hasField(@TypeOf(ops), "simd_kinds")) { if (ops.simd_kinds) |simd_kinds| { const instruction = try context.codebase.createEntity(.{simd_kinds[i]}); try context.wasm_instructions.append(instruction); return; } } } } const float_vectors = [_]Entity{ b.F64X2, b.F32X4 }; for (float_vectors) |vector, i| { if (eql(type_of, vector)) { if (@hasField(@TypeOf(ops), "float_simd_kinds")) { if (ops.float_simd_kinds) |simd_kinds| { const instruction = try context.codebase.createEntity(.{simd_kinds[i]}); try context.wasm_instructions.append(instruction); return; } } } } assert(eql(parentType(type_of), b.Ptr)); const instructions = context.wasm_instructions.mutSlice(); const rhs = instructions[instructions.len - 1]; const lhs = 
instructions[instructions.len - 2]; const lhs_kind = lhs.get(components.WasmInstructionKind); const rhs_kind = rhs.get(components.WasmInstructionKind); const i32_index = 1; const kind = ops.argument_kinds[i32_index]; if (lhs_kind == kind and rhs_kind == kind) { const lhs_value = (try valueOf(i32, lhs.get(components.Constant).entity)).?; const rhs_value = (try valueOf(i32, rhs.get(components.Constant).entity)).?; const result_value = ops.call(i32, lhs_value, rhs_value); const result_literal = try std.fmt.allocPrint(context.allocator, "{}", .{result_value}); const interned = try context.codebase.getPtr(Strings).intern(result_literal); const result = try context.codebase.createEntity(.{ entity.get(components.Type), components.Literal.init(interned), result_value, }); instructions[instructions.len - 2] = try context.codebase.createEntity(.{ ops.result_kinds[i32_index], components.Constant.init(result), }); context.wasm_instructions.shrink(1); return; } const instruction = try context.codebase.createEntity(.{ops.kinds[i32_index]}); try context.wasm_instructions.append(instruction); } fn codegenPtrI32BinaryOp(context: *Context, entity: Entity, kind: components.WasmInstructionKind) !void { const arguments = entity.get(components.Arguments).slice(); try codegenEntity(context, arguments[0]); try codegenEntity(context, arguments[1]); const instructions = context.wasm_instructions.mutSlice(); const rhs = instructions[instructions.len - 1]; const rhs_kind = rhs.get(components.WasmInstructionKind); const bytes = valueType(typeOf(arguments[0])).get(components.Size).bytes; if (rhs_kind == .i32_const) { const rhs_value = (try valueOf(i32, rhs.get(components.Constant).entity)).?; const result_value = rhs_value * bytes; const result_literal = try std.fmt.allocPrint(context.allocator, "{}", .{result_value}); const interned = try context.codebase.getPtr(Strings).intern(result_literal); const result = try context.codebase.createEntity(.{ entity.get(components.Type), components.Literal.init(interned), result_value, }); instructions[instructions.len - 1] = try context.codebase.createEntity(.{ components.WasmInstructionKind.i32_const, components.Constant.init(result), }); const instruction = try context.codebase.createEntity(.{kind}); try context.wasm_instructions.append(instruction); return; } try codegenConstant(i32, context, bytes); const multiply = try context.codebase.createEntity(.{ components.WasmInstructionKind.i32_mul, }); const op = try context.codebase.createEntity(.{kind}); try context.wasm_instructions.append(multiply); try context.wasm_instructions.append(op); } fn codegenSubPtrPtr(context: *Context, entity: Entity) !void { const arguments = entity.get(components.Arguments).slice(); try codegenEntity(context, arguments[0]); try codegenEntity(context, arguments[1]); const bytes = valueType(typeOf(arguments[0])).get(components.Size).bytes; const literal = try std.fmt.allocPrint(context.allocator, "{}", .{bytes}); const interned = try context.codebase.getPtr(Strings).intern(literal); const subtract = try context.codebase.createEntity(.{ components.WasmInstructionKind.i32_sub, }); const result = try context.codebase.createEntity(.{ components.Type.init(context.builtins.I32), components.Literal.init(interned), bytes, }); const constant = try context.codebase.createEntity(.{ components.WasmInstructionKind.i32_const, components.Constant.init(result), }); const divide = try context.codebase.createEntity(.{ components.WasmInstructionKind.i32_div, }); try context.wasm_instructions.append(subtract); try 
context.wasm_instructions.append(constant); try context.wasm_instructions.append(divide); } fn codegenStore(context: *Context, entity: Entity) !void { try context.codebase.set(.{components.UsesMemory{ .value = true }}); const arguments = entity.get(components.Arguments).slice(); const pointer = arguments[0]; try codegenEntity(context, pointer); try codegenEntity(context, arguments[1]); const b = context.builtins; const builtins = [_]Entity{ b.I64, b.I32, b.U64, b.U32, b.F64, b.F32 }; const instructions = [_]components.WasmInstructionKind{ .i64_store, .i32_store, .i64_store, .i32_store, .f64_store, .f32_store }; const value_type = valueType(typeOf(pointer)); for (builtins) |builtin, i| { if (eql(value_type, builtin)) { const instruction = try context.codebase.createEntity(.{instructions[i]}); try context.wasm_instructions.append(instruction); return; } } if (value_type.has(components.ParentType)) |value_type_parent_type| { assert(eql(value_type_parent_type.entity, b.Ptr)); const instruction = try context.codebase.createEntity(.{components.WasmInstructionKind.i32_store}); try context.wasm_instructions.append(instruction); return; } panic("\ncodegen store unspported type {s}\n", .{literalOf(value_type)}); } fn codegenV128Store(context: *Context, entity: Entity) !void { try context.codebase.set(.{components.UsesMemory{ .value = true }}); const arguments = entity.get(components.Arguments).slice(); try codegenEntity(context, arguments[0]); try codegenEntity(context, arguments[1]); const instruction = try context.codebase.createEntity(.{ components.WasmInstructionKind.v128_store, }); try context.wasm_instructions.append(instruction); } fn codegenLoad(context: *Context, entity: Entity) !void { try context.codebase.set(.{components.UsesMemory{ .value = true }}); const arguments = entity.get(components.Arguments).slice(); const pointer = arguments[0]; try codegenEntity(context, pointer); const b = context.builtins; const builtins = [_]Entity{ b.I64, b.I32, b.U64, b.U32, b.U8, b.F64, b.F32 }; const instructions = [_]components.WasmInstructionKind{ .i64_load, .i32_load, .i64_load, .i32_load, .i32_load8_u, .f64_load, .f32_load }; const value_type = valueType(typeOf(pointer)); for (builtins) |builtin, i| { if (eql(value_type, builtin)) { const instruction = try context.codebase.createEntity(.{instructions[i]}); try context.wasm_instructions.append(instruction); return; } } panic("\ncodegen load unspported type {s}\n", .{literalOf(value_type)}); } fn codegenV128Load(context: *Context, entity: Entity) !void { try context.codebase.set(.{components.UsesMemory{ .value = true }}); const arguments = entity.get(components.Arguments).slice(); const pointer = arguments[0]; try codegenEntity(context, pointer); const instruction = try context.codebase.createEntity(.{ components.WasmInstructionKind.v128_load, }); try context.wasm_instructions.append(instruction); } fn codegenIntrinsic(context: *Context, entity: Entity) !void { const intrinsic = entity.get(components.Intrinsic); switch (intrinsic) { .add => try codegenBinaryOp(context, entity, addOps), .subtract => try codegenBinaryOp(context, entity, subtractOps), .multiply => try codegenBinaryOp(context, entity, multiplyOps), .divide => try codegenBinaryOp(context, entity, divideOps), .remainder => try codegenBinaryOp(context, entity, remainderOps), .bit_and => try codegenBinaryOp(context, entity, bitAndOps), .bit_or => try codegenBinaryOp(context, entity, bitOrOps), .bit_xor => try codegenBinaryOp(context, entity, bitXorOps), .left_shift => try 
codegenBinaryOp(context, entity, leftShiftOps), .right_shift => try codegenBinaryOp(context, entity, rightShiftOps), .equal => try codegenBinaryOp(context, entity, equalOps), .not_equal => try codegenBinaryOp(context, entity, notEqualOps), .less_than => try codegenBinaryOp(context, entity, lessThanOps), .less_equal => try codegenBinaryOp(context, entity, lessEqualOps), .greater_than => try codegenBinaryOp(context, entity, greaterThanOps), .greater_equal => try codegenBinaryOp(context, entity, greaterEqualOps), .store => try codegenStore(context, entity), .load => try codegenLoad(context, entity), .add_ptr_i32 => try codegenPtrI32BinaryOp(context, entity, .i32_add), .subtract_ptr_i32 => try codegenPtrI32BinaryOp(context, entity, .i32_sub), .subtract_ptr_ptr => try codegenSubPtrPtr(context, entity), .v128_load => try codegenV128Load(context, entity), .v128_store => try codegenV128Store(context, entity), } } fn codegenIf(context: *Context, entity: Entity) !void { const conditional = entity.get(components.Conditional).entity; try codegenEntity(context, conditional); const conditional_instruction = context.wasm_instructions.last(); const kind = conditional_instruction.get(components.WasmInstructionKind); if (kind == .i32_const) { context.wasm_instructions.shrink(1); if ((try valueOf(i32, conditional_instruction.get(components.Constant).entity)).? != 0) { for (entity.get(components.Then).slice()) |expression| { try codegenEntity(context, expression); } return; } for (entity.get(components.Else).slice()) |expression| { try codegenEntity(context, expression); } return; } try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.if_, entity.get(components.Type), })); for (entity.get(components.Then).slice()) |expression| { try codegenEntity(context, expression); } try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.else_, })); for (entity.get(components.Else).slice()) |expression| { try codegenEntity(context, expression); } try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.end, })); } fn codegenWhile(context: *Context, entity: Entity) !void { const block_label = components.Label{ .value = context.label }; const loop_label = components.Label{ .value = context.label + 1 }; context.label += 2; try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.block, block_label, })); try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.loop, loop_label, })); const conditional = entity.get(components.Conditional).entity; try codegenEntity(context, conditional); try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.i32_eqz, })); try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.br_if, block_label, })); for (entity.get(components.Body).slice()) |expression| { try codegenEntity(context, expression); } try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.br, loop_label, })); try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.end, loop_label, })); try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.end, block_label, })); context.label -= 2; } fn codegenFor(context: *Context, entity: Entity) !void { const 
loop_variable = entity.get(components.LoopVariable).entity; const local = loop_variable.get(components.Local).entity; try context.locals.put(local); const iterator = entity.get(components.Iterator).entity; const b = context.builtins; const builtins = [_]Entity{ b.I64, b.I32, b.I16, b.I8, b.U64, b.U32, b.U16, b.U8, b.F64, b.F32 }; const kinds = &[_]components.WasmInstructionKind{ .i64_const, .i32_const, .i32_const, .i32_const, .i64_const, .i32_const, .i32_const, .i32_const, .f64_const, .f32_const, }; const type_of = typeOf(local); const i: u64 = blk: { for (builtins) |builtin, i| { if (!eql(builtin, type_of)) continue; break :blk i; } panic("\nfor range unsupported type {s}\n", .{literalOf(type_of)}); }; try codegenEntity(context, iterator.get(components.First).entity); { const wasm_instruction = try context.codebase.createEntity(.{ components.WasmInstructionKind.local_set, components.Local.init(local), }); _ = try context.wasm_instructions.append(wasm_instruction); } const block_label = components.Label{ .value = context.label }; const loop_label = components.Label{ .value = context.label + 1 }; context.label += 2; try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.block, block_label, })); try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.loop, loop_label, })); { const wasm_instruction = try context.codebase.createEntity(.{ components.WasmInstructionKind.local_get, components.Local.init(local), }); _ = try context.wasm_instructions.append(wasm_instruction); } try codegenEntity(context, iterator.get(components.Last).entity); { const wasm_instruction = try context.codebase.createEntity(.{greaterEqualOps.kinds[i]}); _ = try context.wasm_instructions.append(wasm_instruction); } try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.br_if, block_label, })); for (entity.get(components.Body).slice()) |expression| { try codegenEntity(context, expression); } { const string = try std.fmt.allocPrint(context.allocator, "1", .{}); const interned = try context.codebase.getPtr(Strings).intern(string); const one = try context.codebase.createEntity(.{ components.Type.init(builtins[i]), components.Literal.init(interned), }); const wasm_instruction = try context.codebase.createEntity(.{ kinds[i], components.Constant.init(one), }); _ = try context.wasm_instructions.append(wasm_instruction); } { const wasm_instruction = try context.codebase.createEntity(.{ components.WasmInstructionKind.local_get, components.Local.init(local), }); _ = try context.wasm_instructions.append(wasm_instruction); } { const wasm_instruction = try context.codebase.createEntity(.{addOps.kinds[i]}); _ = try context.wasm_instructions.append(wasm_instruction); } { const wasm_instruction = try context.codebase.createEntity(.{ components.WasmInstructionKind.local_set, components.Local.init(local), }); _ = try context.wasm_instructions.append(wasm_instruction); } try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.br, loop_label, })); try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.end, loop_label, })); try context.wasm_instructions.append(try context.codebase.createEntity(.{ components.WasmInstructionKind.end, block_label, })); context.label -= 2; } fn codegenConstruct(context: *Context, entity: Entity) !void { for (entity.get(components.Arguments).slice()) |argument| { try 
codegenEntity(context, argument); } for (entity.get(components.OrderedNamedArguments).slice()) |argument| { try codegenEntity(context, argument); } } fn codegenField(context: *Context, entity: Entity) !void { _ = try entity.set(.{components.WasmInstructionKind.field}); try context.wasm_instructions.append(entity); } fn codegenAssignField(context: *Context, entity: Entity) !void { const value = entity.get(components.Value).entity; try codegenEntity(context, value); _ = try entity.set(.{components.WasmInstructionKind.assign_field}); _ = try context.wasm_instructions.append(entity); } fn codegenConstant(comptime T: type, context: *Context, value: T) !void { const KindAndType = struct { kind: components.WasmInstructionKind, Type: Entity, }; const kind_and_type: KindAndType = switch (T) { i32 => .{ .kind = .i32_const, .Type = context.builtins.I32 }, else => panic("\ncodegen number unsupported type {s}\n", .{@typeName(T)}), }; const literal = try std.fmt.allocPrint(context.allocator, "{}", .{value}); const interned = try context.codebase.getPtr(Strings).intern(literal); const result = try context.codebase.createEntity(.{ components.Type.init(kind_and_type.Type), components.Literal.init(interned), value, }); const wasm_instruction = try context.codebase.createEntity(.{ kind_and_type.kind, components.Constant.init(result), }); _ = try context.wasm_instructions.append(wasm_instruction); } fn codegenString(context: *Context, entity: Entity) !void { try context.codebase.set(.{components.UsesMemory{ .value = true }}); const length = entity.get(components.Length).value; try context.data_segment.entities.append(entity); try codegenConstant(i32, context, context.data_segment.end); try codegenConstant(i32, context, length); const location = components.Location{ .value = context.data_segment.end }; _ = try entity.set(.{location}); context.data_segment.end += length * 8; } fn codegenArrayLiteral(context: *Context, entity: Entity) !void { try context.codebase.set(.{components.UsesMemory{ .value = true }}); const values = entity.get(components.Values).slice(); const length = @intCast(i32, values.len); try context.data_segment.entities.append(entity); try codegenConstant(i32, context, context.data_segment.end); try codegenConstant(i32, context, length); const location = components.Location{ .value = context.data_segment.end }; _ = try entity.set(.{location}); context.data_segment.end += length * sizeOf(valueType(typeOf(entity))) * 8; } fn codegenIndex(context: *Context, entity: Entity) !void { const arguments = entity.get(components.Arguments).slice(); const array = arguments[0]; const array_type = typeOf(array); const strings = context.codebase.getPtr(Strings); const interned = try strings.intern("ptr"); for (array_type.get(components.Fields).slice()) |field| { if (!eql(field.get(components.Literal).interned, interned)) continue; const ptr = try context.codebase.createEntity(.{ components.AstKind.field, components.WasmInstructionKind.field, components.Type.init(typeOf(field)), components.Local.init(array), components.Field.init(field), }); try context.wasm_instructions.append(ptr); break; } try codegenEntity(context, arguments[1]); try codegenConstant(i32, context, sizeOf(typeOf(entity))); const mul = try context.codebase.createEntity(.{components.WasmInstructionKind.i32_mul}); try context.wasm_instructions.append(mul); const add = try context.codebase.createEntity(.{components.WasmInstructionKind.i32_add}); try context.wasm_instructions.append(add); const u8_load = try 
context.codebase.createEntity(.{components.WasmInstructionKind.i32_load8_u}); try context.wasm_instructions.append(u8_load); } fn codegenEntity(context: *Context, entity: Entity) error{ OutOfMemory, Overflow, InvalidCharacter }!void { const kind = entity.get(components.AstKind); switch (kind) { .int, .float => try codegenNumber(context, entity), .call => try codegenCall(context, entity), .define => try codegenDefine(context, entity), .assign => try codegenAssign(context, entity), .local => try codegenLocal(context, entity), .intrinsic => try codegenIntrinsic(context, entity), .if_ => try codegenIf(context, entity), .while_ => try codegenWhile(context, entity), .for_ => try codegenFor(context, entity), .cast => try codegenEntity(context, entity.get(components.Value).entity), .construct => try codegenConstruct(context, entity), .field => try codegenField(context, entity), .assign_field => try codegenAssignField(context, entity), .string => try codegenString(context, entity), .array_literal => try codegenArrayLiteral(context, entity), .index => try codegenIndex(context, entity), else => panic("\ncodegen entity {} not implemented\n", .{kind}), } } pub fn codegen(module: Entity) !void { const codebase = module.ecs; const allocator = codebase.arena.allocator(); const builtins = codebase.get(components.Builtins); try codebase.set(.{components.DataSegment.init(allocator)}); for (module.ecs.get(components.Functions).slice()) |function| { if (function.has(components.Body)) |body_component| { var locals = components.Locals.init(allocator); var wasm_instructions = components.WasmInstructions.init(allocator); var context = Context{ .codebase = codebase, .wasm_instructions = &wasm_instructions, .locals = &locals, .allocator = allocator, .builtins = builtins, .label = 0, .data_segment = codebase.getPtr(components.DataSegment), }; for (body_component.slice()) |entity| { try codegenEntity(&context, entity); } _ = try function.set(.{ wasm_instructions, locals }); } } }
src/codegen.zig
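// Hedged sketch (not part of the original repo): codegenWhile above lowers a `while`
// into the usual wasm block/loop skeleton -- emit the condition, invert it with
// i32.eqz, br_if out of the enclosing block, emit the body, then br back to the
// loop header. `Kind` below is a hypothetical stand-in for
// components.WasmInstructionKind, trimmed to just enough cases to spell out the
// ordering; `.cond` and `.body` mark where the nested expressions would be emitted.
const std = @import("std");

const Kind = enum { block, loop, cond, i32_eqz, br_if, body, br, end };

fn emitWhileSkeleton(list: *std.ArrayList(Kind)) !void {
    try list.append(.block); // outer block, target of the exit branch
    try list.append(.loop); // loop header, target of the back edge
    try list.append(.cond); // condition expression is codegen'd here
    try list.append(.i32_eqz); // invert the condition
    try list.append(.br_if); // exit the outer block when the condition was false
    try list.append(.body); // body expressions are codegen'd here
    try list.append(.br); // unconditional jump back to the loop header
    try list.append(.end); // end of loop
    try list.append(.end); // end of block
}

test "while lowering shape (sketch)" {
    var list = std.ArrayList(Kind).init(std.testing.allocator);
    defer list.deinit();
    try emitWhileSkeleton(&list);
    const expected = [_]Kind{ .block, .loop, .cond, .i32_eqz, .br_if, .body, .br, .end, .end };
    try std.testing.expectEqualSlices(Kind, &expected, list.items);
}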
const std = @import("std"); const wren = @import("wren"); pub var alloc = std.testing.allocator; pub fn main() anyerror!void { // Initialize the data structures for the wrapper wren.init(alloc); defer wren.deinit(); // Set up a VM configuration using the supplied default bindings // You can override the bindings after calling this to change them var config = wren.util.defaultConfig(); // Create a new VM from our config we generated previously const vm = wren.newVM(&config); defer wren.freeVM(vm); // Passing and returning a slew of different data types. // Values will be casted as appropriate. // Use a span for single-typed lists in wren, a tuple for // passing Wren a multi-typed list, and either an AutoHashMap or // StringHashMap for a map, depending on the key type. // Does not support returned multi-typed lists yet, not sure how to // fill in a pre-defined tuple at runtime. std.debug.print("\n=== Calling Wren from Zig ===\n",.{}); try wren.util.run(vm,"main", \\ class TestClass { \\ static doubleUp(num) { \\ return num + num \\ } \\ static text(txt) { \\ return txt + "return" \\ } \\ static splat(val,count) { \\ return [val] * count \\ } \\ static addArray(arr) { \\ return arr[0] + arr[1] \\ } \\ static addStr(str1,str2) { \\ return [str1,str2] \\ } \\ static fArr(flot,inte) { \\ return [flot] * inte \\ } \\ static notMe(iambool) { \\ return !iambool \\ } \\ static blah(farr) { \\ return farr + farr \\ } \\ static tup(vtup,vint) { \\ return vtup[1] + vint \\ } \\ static fmap(imap) { \\ imap["Add1"] = "Wren" \\ imap["Add2"] = "Wren" \\ return imap \\ } \\ } ); // Yes, this is ugly :) var needs_adding:usize = 41; std.debug.print("Before Call: {}\n",.{needs_adding}); // This defines a method call handle to call Wren methods from Zig. // Pass the module/class/method sig, then argument type tuple, followed by return type. // .init() takes the vm handle to the vm that this will be running on. // To call the method, run [methHand].callMethod, passing in a tuple of the argument values. 
var wm = try wren.MethodCallHandle( "main","TestClass","doubleUp(_)", .{usize}, usize ).init(vm); defer wm.deinit(); needs_adding = try wm.callMethod(.{needs_adding}); std.debug.print("Int->Int: {}\n",.{needs_adding}); var wm2 = try wren.MethodCallHandle( "main","TestClass","text(_)", .{[]const u8}, []const u8 ).init(vm); defer wm2.deinit(); var ostr = try wm2.callMethod(.{"Input "}); std.debug.print("String->String: {s}\n",.{ostr}); var wm3 = try wren.MethodCallHandle( "main","TestClass","splat(_,_)", .{i32,i32}, []i32 ).init(vm); defer wm3.deinit(); var oslc = try wm3.callMethod(.{3,5}); std.debug.print("IntArray->Slice: {any}\n",.{oslc}); var wm4 = try wren.MethodCallHandle( "main","TestClass","addArray(_)", .{[]u32}, i32 ).init(vm); defer wm4.deinit(); var oarr = try wm4.callMethod(.{ .{3,5} }); std.debug.print("IntSlice->Int: {any}\n",.{oarr}); var wm5 = try wren.MethodCallHandle( "main","TestClass","addStr(_,_)", .{[]const u8,[]const u8}, []const []const u8 ).init(vm); defer wm5.deinit(); var oast = try wm5.callMethod(.{"abc","def"}); std.debug.print("String->StringSlice: {s}\n",.{oast}); var wm6 = try wren.MethodCallHandle( "main","TestClass","fArr(_,_)", .{f32,i32}, []f32 ).init(vm); defer wm6.deinit(); var ofsp = try wm6.callMethod(.{2.34,5}); std.debug.print("Float,Int->FloatSlice: {any}\n",.{ofsp}); var wm7 = try wren.MethodCallHandle( "main","TestClass","notMe(_)", .{bool}, bool ).init(vm); defer wm7.deinit(); var oboo = try wm7.callMethod(.{false}); std.debug.print("Bool->Bool: {any}\n",.{oboo}); var wm8 = try wren.MethodCallHandle( "main","TestClass","blah(_)", .{[]f32}, []f32 ).init(vm); defer wm8.deinit(); var obla = try wm8.callMethod(.{ .{2.34,2.34} }); std.debug.print("FloatSlice->FloatSlice: {any}\n",.{obla}); var wm9 = try wren.MethodCallHandle( "main","TestClass","tup(_,_)", .{ std.meta.Tuple(&[_]type{[]const u8,i32}),i32 }, i32 ).init(vm); defer wm9.deinit(); var otup = try wm9.callMethod(.{ .{"poo",3}, 39 }); std.debug.print("Str,Int Tuple->Int: {any}\n",.{otup}); var wm10 = try wren.MethodCallHandle( "main","TestClass","fmap(_)", .{ std.StringHashMap([]const u8) }, std.StringHashMap([]const u8) ).init(vm); defer wm10.deinit(); var nmap = std.StringHashMap([]const u8).init(std.testing.allocator); defer nmap.deinit(); nmap.put("Init1","Zig") catch unreachable; nmap.put("Init2","Zig") catch unreachable; var omap = try wm10.callMethod(.{ nmap }); std.debug.print("SMap->Map: \n",.{}); var it = omap.iterator(); while(it.next()) |entry| { std.debug.print(" >> {s}: {s}\n",.{entry.key_ptr.*,entry.value_ptr.*}); } }
example/value_passing.zig
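// Hedged minimal sketch of the pattern demonstrated in example/value_passing.zig above,
// stripped down to a single method: set up a VM, load one Wren class, bind a
// MethodCallHandle, and call it with a Zig tuple. It uses only calls that already
// appear in that example (wren.init, wren.util.defaultConfig, wren.newVM,
// wren.util.run, wren.MethodCallHandle(...).init, callMethod); the `Math` class and
// `square` method are made up for illustration.
const std = @import("std");
const wren = @import("wren");

pub fn main() anyerror!void {
    // Mirrors the example above, which also uses std.testing.allocator.
    wren.init(std.testing.allocator);
    defer wren.deinit();

    var config = wren.util.defaultConfig();
    const vm = wren.newVM(&config);
    defer wren.freeVM(vm);

    try wren.util.run(vm, "main",
        \\ class Math {
        \\   static square(n) {
        \\     return n * n
        \\   }
        \\ }
    );

    // Module / class / method signature, argument type tuple, return type.
    var square = try wren.MethodCallHandle("main", "Math", "square(_)", .{i32}, i32).init(vm);
    defer square.deinit();

    var n: i32 = 7;
    const result = try square.callMethod(.{n});
    std.debug.print("square(7) = {}\n", .{result});
}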
const std = @import("std"); pub fn Bitset(num_bits: usize) type { const num_bytes = @divTrunc(num_bits + 7, 8); return struct { pub fn set(self: *@This(), idx: usize) void { self.data[idx / 8] |= (@as(u8, 1) << @intCast(u3, idx % 8)); } pub fn unset(self: *@This(), idx: usize) void { self.data[idx / 8] &= ~(@as(u8, 1) << @intCast(u3, idx % 8)); } pub fn isSet(self: *const @This(), idx: usize) bool { return (self.data[idx / 8] >> @intCast(u3, idx % 8)) == 1; } pub fn size(_: *const @This()) usize { return num_bits; } data: [num_bytes]u8 = [_]u8{0} ** num_bytes, }; } const DynamicBitset = struct { len: usize, data: [*]u8, pub fn bytesNeeded(len: usize) usize { return @divTrunc(len + 7, 8); } pub fn init(len: usize, data: []u8) DynamicBitset { std.debug.assert(data.len >= DynamicBitset.bytesNeeded(len)); for (data) |*cell| { cell.* = 0; } return DynamicBitset{ .len = len, .data = data.ptr }; } pub fn set(self: *@This(), idx: usize) void { std.debug.assert(idx < self.len); self.data[idx / 8] |= (@as(u8, 1) << @intCast(u3, idx % 8)); } pub fn unset(self: *@This(), idx: usize) void { std.debug.assert(idx < self.len); self.data[idx / 8] &= ~(@as(u8, 1) << @intCast(u3, idx % 8)); } pub fn isSet(self: *const @This(), idx: usize) bool { std.debug.assert(idx < self.len); return (self.data[idx / 8] >> @intCast(u3, idx % 8)) == 1; } }; test "bitset" { var bs: Bitset(8) = .{}; try std.testing.expect(!bs.isSet(0)); bs.set(0); try std.testing.expect(bs.isSet(0)); bs.unset(0); try std.testing.expect(!bs.isSet(0)); } test "dynamic bitset" { var mem: [2]u8 = undefined; var bs = DynamicBitset.init(16, &mem); try std.testing.expect(!bs.isSet(0)); bs.set(0); try std.testing.expect(bs.isSet(0)); bs.set(13); bs.unset(0); try std.testing.expect(!bs.isSet(0)); try std.testing.expect(bs.isSet(13)); }
lib/util/bitset.zig
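// Hedged follow-up sketch for the isSet fix above: the original tests never set two
// bits inside the same byte, which is exactly the case the missing mask affected.
// The import path below is an assumption; adjust it to wherever lib/util/bitset.zig
// lives relative to this test.
const std = @import("std");
const Bitset = @import("bitset.zig").Bitset;

test "bitset: two bits set in the same byte" {
    var bs: Bitset(8) = .{};
    bs.set(0);
    bs.set(1);
    try std.testing.expect(bs.isSet(0));
    try std.testing.expect(bs.isSet(1));
    bs.unset(1);
    try std.testing.expect(bs.isSet(0));
    try std.testing.expect(!bs.isSet(1));
}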
const std = @import("std"); const sqlite = @import("sqlite"); const manage_main = @import("main.zig"); const http = @import("apple_pie"); const ManageContext = manage_main.Context; const SqlGiver = @import("./find_main.zig").SqlGiver; const magick = @import("./magick.zig"); const Context = struct { manage: *ManageContext, given_args: *Args, }; const log = std.log.scoped(.ahydrus_api); const VERSION = "0.0.1"; const HELPTEXT = \\ ahydrus_api: hydrus client api provider for awtfdb (read only) \\ \\ usage: \\ ahydrus_api [options] \\ \\ options: \\ -h prints this help and exits \\ -V prints version and exits \\ --key <key> run the server with this access key \\ being able to be used in the server \\ (multiple can be defined) \\ \\ examples: \\ ahydrus_api --key a867e7abdf8e1a717928b8505be0c1cc776cb32773695 ; fn methodString(request: http.Request) []const u8 { return switch (request.method()) { .get => "GET", .head => "HEAD", .post => "POST", .put => "PUT", .delete => "DELETE", .connect => "CONNECT", .options => "OPTIONS", .trace => "TRACE", .patch => "PATCH", .any => "ANY", }; } const StringList = std.ArrayList([]const u8); const Args = struct { help: bool = false, version: bool = false, access_keys: StringList, pub fn deinit(self: *@This()) void { self.access_keys.deinit(); } }; pub fn main() anyerror!void { const rc = sqlite.c.sqlite3_config(sqlite.c.SQLITE_CONFIG_LOG, manage_main.sqliteLog, @as(?*anyopaque, null)); if (rc != sqlite.c.SQLITE_OK) { std.log.err("failed to configure: {d} '{s}'", .{ rc, sqlite.c.sqlite3_errstr(rc), }); return error.ConfigFail; } var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); var allocator = gpa.allocator(); var args_it = std.process.args(); _ = args_it.skip(); var given_args = Args{ .access_keys = StringList.init(allocator) }; defer given_args.deinit(); var state: enum { None, FetchAccessKey } = .None; while (args_it.next()) |arg| { if (state == .FetchAccessKey) { try given_args.access_keys.append(arg); state = .None; continue; } if (std.mem.eql(u8, arg, "-h")) { given_args.help = true; } else if (std.mem.eql(u8, arg, "-V")) { given_args.version = true; } else if (std.mem.eql(u8, arg, "--key")) { state = .FetchAccessKey; } else { log.err("unknown argument {s}", .{arg}); return error.UnknownArgument; } } if (given_args.help) { std.debug.print(HELPTEXT, .{}); return; } else if (given_args.version) { std.debug.print("ahydrus_api {s}\n", .{VERSION}); return; } var manage_ctx = ManageContext{ .home_path = null, .args_it = &args_it, .stdout = undefined, .db = null, .allocator = allocator, }; defer manage_ctx.deinit(); try manage_ctx.loadDatabase(.{}); if (given_args.access_keys.items.len == 0) { log.warn("no access keys defined, nobody will be able to access the api", .{}); } var ctx = Context{ .manage = &manage_ctx, .given_args = &given_args }; try http.listenAndServe( allocator, try std.net.Address.parseIp("127.0.0.1", 8080), &ctx, mainHandler, ); } fn mainHandler( ctx: *Context, response: *http.Response, request: http.Request, ) !void { const builder = http.router.Builder(*Context); @setEvalBranchQuota(1000000); const router = comptime http.router.Router(*Context, &.{ builder.get("/", null, index), builder.get("/api_version", null, apiVersion), builder.options("/verify_access_key", null, corsHandler), builder.get("/verify_access_key", null, verifyAccessKey), builder.options("/get_files/search_files", null, corsHandler), builder.get("/get_files/search_files", null, searchFiles), builder.options("/get_files/file_metadata", null, 
corsHandler), builder.get("/get_files/file_metadata", null, fileMetadata), builder.options("/get_files/thumbnail", null, corsHandler), builder.get("/get_files/thumbnail", null, fileThumbnail), builder.options("/get_files/file", null, corsHandler), builder.get("/get_files/file", null, fileContents), }); try writeCors(response); router(ctx, response, request) catch |err| { log.info("{s} {s} got error: {s}", .{ methodString(request), request.path(), @errorName(err), }); return err; }; log.info("{s} {s} {d}", .{ methodString(request), request.path(), @enumToInt(response.status_code), }); } fn corsHandler( ctx: *Context, response: *http.Response, request: http.Request, captures: ?*const anyopaque, ) !void { if (!wantMethod(request, response, .{.options})) return; std.debug.assert(captures == null); _ = request; _ = ctx; try writeCors(response); } fn index( ctx: *Context, response: *http.Response, request: http.Request, captures: ?*const anyopaque, ) !void { std.debug.assert(captures == null); _ = request; _ = ctx; try response.writer().writeAll("Hello Zig!\n"); } const hzzp = @import("hzzp"); test "index test" { if (@import("builtin").single_threaded) return error.SkipZigTest; const address = try std.net.Address.parseIp("0.0.0.0", 8080); var server = http.Server.init(); var manage_ctx = try manage_main.makeTestContext(); defer manage_ctx.deinit(); var main_ctx = Context{ .manage = &manage_ctx, .given_args = undefined }; const server_thread = struct { var _addr: std.net.Address = undefined; fn runServer(context: *http.Server, ctx: *Context) !void { try context.run(ctx.manage.allocator, _addr, ctx, mainHandler); } }; server_thread._addr = address; const thread = try std.Thread.spawn( .{}, server_thread.runServer, .{ &server, &main_ctx }, ); errdefer server.shutdown(); var stream = while (true) { var conn = std.net.tcpConnectToAddress(address) catch |err| switch (err) { error.ConnectionRefused => continue, else => return err, }; break conn; } else unreachable; errdefer stream.close(); // tell server to shutdown // fill finish current request and then shutdown server.shutdown(); var buffer: [256]u8 = undefined; var client = hzzp.base.client.create(&buffer, {}, stream.writer()); try client.writeStatusLine("GET", "/"); try client.writeHeaderValue("Host", "localhost"); try client.finishHeaders(); try client.writePayload(null); var output_buf: [512]u8 = undefined; const len = try stream.reader().read(&output_buf); const output = output_buf[0..len]; stream.close(); thread.join(); var buffer_stream = std.io.fixedBufferStream(output); var read_buffer: [256]u8 = undefined; var read_client = hzzp.base.client.create(&read_buffer, buffer_stream.reader(), {}); const received_status_line = (try read_client.next()).?; try std.testing.expectEqual(@as(u16, 200), received_status_line.status.code); } fn apiVersion( ctx: *Context, response: *http.Response, request: http.Request, captures: ?*const anyopaque, ) !void { std.debug.assert(captures == null); _ = request; _ = ctx; try std.json.stringify(.{ .version = 17, .hydrus_version = 441, }, .{}, response.writer()); } fn wantMethod(request: http.Request, response: *http.Response, allowed_methods: anytype) bool { var method_fail_count: usize = 0; inline for (allowed_methods) |allowed_method| { if (request.method() != allowed_method) method_fail_count += 1; } // if all method checks failed, then send 405 if (method_fail_count == allowed_methods.len) { response.status_code = .method_not_allowed; return false; } else { return true; } } fn writeCors(response: 
*http.Response) !void { try response.headers.put("access-control-allow-origin", "https://hydrus.app"); try response.headers.put("access-control-allow-headers", "*"); try response.headers.put("access-control-allow-methods", "*"); try response.headers.put("access-control-expose-headers", "*"); } const HydrusAPIInput = struct { const Self = @This(); pub fn deinit(self: Self) void { _ = self; } pub fn getAccessKey(self: Self) []const u8 { _ = self; return "TODO"; } pub fn getTags(self: Self) []const u8 { _ = self; return "TODO"; } }; const HydrusAPIInputResult = union(enum) { Error: struct {}, Ok: struct {}, }; const ACCESS_KEY_NAME = "Hydrus-Client-API-Access-Key"; fn fetchInput( ctx: *ManageContext, headers: http.Headers, raw_query: []const u8, ) !HydrusAPIInputResult { const param_map = try http.Uri.decodeQueryString(ctx.allocator, raw_query); errdefer param_map.deinit(); log.info("uri: {s}", .{param_map}); _ = ctx; _ = headers; return .{ .Ok = .{}, }; } test "hydrus api input parsing" { var ctx = try manage_main.makeTestContext(); defer ctx.deinit(); var headers = http.Headers.init(std.testing.allocator); defer headers.deinit(); // var wrapped_input = try fetchInput( // &ctx, // headers, // "file_sort_type=6&file_sort_asc=false&tags=%5B%22character%3Asamus%20aran%22%2C%20%22creator%3A%5Cu9752%5Cu3044%5Cu685c%22%2C%20%22system%3Aheight%20%3E%202000%22%5D", // ); // const input = wrapped_input.Ok; // _ = input; // return error.Thing; //try std.testing.expectEqual(http.Response.StatusCode.ok, response.status_code); } fn wantAuth(ctx: *Context, response: *http.Response, request: http.Request) bool { var headers_it = request.iterator(); var access_key: ?[]const u8 = null; while (headers_it.next()) |header| { if (std.mem.eql(u8, header.key, "Hydrus-Client-API-Access-Key")) { access_key = header.value; } } var param_map = request.context.uri.queryParameters(ctx.manage.allocator) catch |err| { writeError( response, .bad_request, "invalid query parameters: {s}", .{@errorName(err)}, ) catch return false; return false; }; defer param_map.deinit(ctx.manage.allocator); access_key = access_key orelse param_map.get("Hydrus-Client-API-Access-Key"); if (access_key == null) { response.status_code = .unauthorized; return false; } for (ctx.given_args.access_keys.items) |correct_access_key| { if (std.mem.eql(u8, access_key.?, correct_access_key)) return true; } response.status_code = .unauthorized; return false; } fn verifyAccessKey( ctx: *Context, response: *http.Response, request: http.Request, captures: ?*const anyopaque, ) !void { if (!wantMethod(request, response, .{.get})) return; std.debug.assert(captures == null); _ = request; _ = ctx; if (!wantAuth(ctx, response, request)) return; try std.json.stringify( .{ .basic_permissions = .{ 0, 1, 3 }, .human_description = "this is a test", }, .{}, response.writer(), ); } fn convertTagsToFindQuery(allocator: std.mem.Allocator, tags_string: []const u8) ![]const u8 { //parse json out of tags_string //construct afind query out of it log.debug("tags string: '{s}'", .{tags_string}); var tokens = std.json.TokenStream.init(tags_string); const opts = std.json.ParseOptions{ .allocator = allocator }; const tags = try std.json.parse([][]const u8, &tokens, opts); defer std.json.parseFree([][]const u8, tags, opts); return std.mem.join(allocator, " ", tags); } fn writeError( response: *http.Response, status_code: http.Response.Status, comptime fmt: []const u8, args: anytype, ) !void { response.status_code = status_code; try response.writer().print(fmt, args); } fn 
searchFiles( ctx: *Context, response: *http.Response, request: http.Request, captures: ?*const anyopaque, ) !void { if (!wantMethod(request, response, .{.get})) return; std.debug.assert(captures == null); _ = ctx; if (!wantAuth(ctx, response, request)) return; var param_map = try request.context.uri.queryParameters(ctx.manage.allocator); defer param_map.deinit(ctx.manage.allocator); const unsafe_tags_string = param_map.get("tags") orelse { writeError(response, .bad_request, "need tags", .{}) catch return; return; }; const safe_tags_string = try http.Uri.decode(ctx.manage.allocator, unsafe_tags_string); defer ctx.manage.allocator.free(safe_tags_string); const find_query = try convertTagsToFindQuery(ctx.manage.allocator, safe_tags_string); defer ctx.manage.allocator.free(find_query); var giver = try SqlGiver.init(); defer giver.deinit(); const wrapped_result = try giver.giveMeSql(ctx.manage.allocator, find_query); defer wrapped_result.deinit(); const result = switch (wrapped_result) { .Ok => |ok_body| ok_body, .Error => |error_body| { try writeError( response, .bad_request, "query has error at character {d}: {s}", .{ error_body.character, error_body.error_type }, ); return; }, }; var resolved_tag_cores = std.ArrayList(i64).init(ctx.manage.allocator); defer resolved_tag_cores.deinit(); for (result.tags) |tag_text| { const maybe_tag = try ctx.manage.fetchNamedTag(tag_text, "en"); if (maybe_tag) |tag| { try resolved_tag_cores.append(tag.core.id); } else { try writeError(response, .bad_request, "query has unknown tag: {s}", .{tag_text}); return; } } var stmt = try ctx.manage.db.?.prepareDynamic(result.query); defer stmt.deinit(); log.debug("generated query: {s}", .{result.query}); log.debug("found tag cores: {any}", .{resolved_tag_cores.items}); var it = try stmt.iterator(i64, resolved_tag_cores.items); var returned_file_ids = std.ArrayList(i64).init(ctx.manage.allocator); defer returned_file_ids.deinit(); while (try it.next(.{})) |file_hash_id| { //var file = (try ctx.manage.fetchFile(file_hash)).?; //defer file.deinit(); try returned_file_ids.append(file_hash_id); } try std.json.stringify( .{ .file_ids = returned_file_ids.items }, .{}, response.writer(), ); } fn fileMetadata( ctx: *Context, response: *http.Response, request: http.Request, captures: ?*const anyopaque, ) !void { _ = captures; if (!wantMethod(request, response, .{.get})) return; if (!wantAuth(ctx, response, request)) return; var param_map = try request.context.uri.queryParameters(ctx.manage.allocator); defer param_map.deinit(ctx.manage.allocator); const file_ids_serialized = param_map.get("file_ids") orelse { writeError(response, .bad_request, "need file_ids", .{}) catch return; return; }; log.info("file ids: {s}", .{file_ids_serialized}); var tokens = std.json.TokenStream.init(file_ids_serialized); const opts = std.json.ParseOptions{ .allocator = ctx.manage.allocator }; const file_ids = try std.json.parse([]i64, &tokens, opts); defer std.json.parseFree([]i64, file_ids, opts); const HydrusFile = struct { file_id: i64, hash: []const u8, size: i64 = 0, mime: []const u8, ext: []const u8, width: usize = 100, height: usize = 100, duration: ?usize = null, time_modified: ?usize = null, file_services: ?usize = null, // TODO add current+deleted has_audio: bool = false, num_frames: ?usize = null, num_words: ?usize = null, is_inbox: bool = false, is_local: bool = true, // should this be true? 
is_trashed: bool = false, known_urls: ?[]usize = null, // TODO this is array service_names_to_statuses_to_tags: ?[]usize = null, service_keys_to_statuses_to_tags: ?[]usize = null, service_names_to_statuses_to_display_tags: ?[]usize = null, service_keys_to_statuses_to_display_tags: ?[]usize = null, }; var hydrus_files = std.ArrayList(HydrusFile).init(ctx.manage.allocator); defer { for (hydrus_files.items) |file| { ctx.manage.allocator.free(file.hash); ctx.manage.allocator.free(file.mime); ctx.manage.allocator.free(file.ext); } hydrus_files.deinit(); } for (file_ids) |file_id| { var maybe_file = try ctx.manage.fetchFile(file_id); if (maybe_file) |file| { var hex_hash = file.hash.toHex(); try hydrus_files.append(.{ .file_id = file_id, .hash = try ctx.manage.allocator.dupe(u8, &hex_hash), .mime = try ctx.manage.allocator.dupe(u8, "image/png"), .ext = try ctx.manage.allocator.dupe(u8, ".png"), }); } } try std.json.stringify( .{ .metadata = hydrus_files.items }, .{}, response.writer(), ); } const c = @cImport({ @cInclude("magic.h"); }); const MagicResult = struct { cookie: c.magic_t, result: ?[]const u8, }; fn inferMimetype(response: *http.Response, allocator: std.mem.Allocator, local_path: []const u8) !?MagicResult { var cookie = c.magic_open( c.MAGIC_SYMLINK | c.MAGIC_MIME, ) orelse return error.UnableToMakeMagicCookie; // TODO use MAGIC variable if set here???? if (c.magic_load(cookie, "/usr/share/misc/magic.mgc") == -1) { const magic_error_value = c.magic_error(cookie); log.err("failed to load magic file: {s}", .{magic_error_value}); try writeError( response, .internal_server_error, "an error occoured while calculating magic: {s}", .{magic_error_value}, ); return null; } const local_path_cstr = try std.cstr.addNullByte(allocator, local_path); defer allocator.free(local_path_cstr); const mimetype = c.magic_file(cookie, local_path_cstr) orelse { const magic_error_value = c.magic_error(cookie); log.err("failed to infer mimetype: {s}", .{magic_error_value}); return error.MimetypeFail; }; return MagicResult{ .cookie = cookie, .result = std.mem.span(mimetype), }; } fn fileThumbnail( ctx: *Context, response: *http.Response, request: http.Request, captures: ?*const anyopaque, ) !void { _ = captures; if (!wantMethod(request, response, .{.get})) return; if (!wantAuth(ctx, response, request)) return; var param_map = try request.context.uri.queryParameters(ctx.manage.allocator); defer param_map.deinit(ctx.manage.allocator); const maybe_file_id = param_map.get("file_id"); const maybe_file_hash = param_map.get("hash"); if (maybe_file_id != null and maybe_file_hash != null) { writeError(response, .bad_request, "cant have both hash and id", .{}) catch return; return; } var hash_id_parsed: ?i64 = null; var hash_parsed: [32]u8 = undefined; if (maybe_file_hash) |file_hash| { var out = try std.fmt.hexToBytes(&hash_parsed, file_hash); if (out.len != 32) { writeError(response, .bad_request, "invalid file hash size", .{}) catch return; return; } } if (maybe_file_id) |file_id| { hash_id_parsed = std.fmt.parseInt(i64, file_id, 10) catch |err| { writeError( response, .bad_request, "invalid file id (must be number): {s}", .{@errorName(err)}, ) catch return; return; }; } log.debug("id? {s} hash? {s}", .{ maybe_file_id, maybe_file_hash }); var maybe_file: ?ManageContext.File = if (maybe_file_id != null) try ctx.manage.fetchFile(hash_id_parsed.?) 
else if (maybe_file_hash != null) try ctx.manage.fetchFileByHash(hash_parsed) else unreachable; if (maybe_file) |file| { defer file.deinit(); const mimetype_result = (try inferMimetype(response, ctx.manage.allocator, file.local_path)) orelse return; defer c.magic_close(mimetype_result.cookie); const mimetype_cstr = mimetype_result.result; if (mimetype_cstr == null) { const magic_error_value = c.magic_error(mimetype_result.cookie); log.err("failed to get mimetype: {s}", .{magic_error_value}); try writeError( response, .internal_server_error, "an error occoured while calculating magic: {s}", .{magic_error_value}, ); return; } else { const mimetype = mimetype_cstr.?; log.debug("mimetype found: {s}", .{mimetype}); if (std.mem.startsWith(u8, mimetype, "image/")) { //var mctx = try magick.loadImage(local_path_cstr); //defer mctx.deinit(); const file_fd = try std.fs.openFileAbsolute(file.local_path, .{ .mode = .read_only }); defer file_fd.close(); // we need a better API to pass header values whose lifetime are // beyond the request handler's, or else we're passing undefined // memory to the response. // // this hack is required so that the values live in constant // memory inside the executable, rather than stack/heap. if (std.mem.startsWith(u8, mimetype, "image/png")) { try response.headers.put("Content-Type", "image/png"); } else if (std.mem.startsWith(u8, mimetype, "application/pdf")) { try response.headers.put("Content-Type", "application/pdf"); } var buf: [4096]u8 = undefined; while (true) { const read_bytes = try file_fd.read(&buf); if (read_bytes == 0) break; try response.writer().writeAll(&buf); } } else if (std.mem.startsWith(u8, mimetype, "application/pdf")) { var PREFIX = "/tmp/awtf/ahydrus-thumbnails"; const dirpath = std.fs.path.dirname(file.local_path).?; const basename = std.fs.path.basename(file.local_path); const local_path_cstr = try std.cstr.addNullByte(ctx.manage.allocator, file.local_path); defer ctx.manage.allocator.free(local_path_cstr); const basename_png = try std.fmt.allocPrint(ctx.manage.allocator, "{s}.png", .{basename}); defer ctx.manage.allocator.free(basename_png); const thumbnail_path = try std.fs.path.resolve(ctx.manage.allocator, &[_][]const u8{ PREFIX, dirpath, basename_png, }); defer ctx.manage.allocator.free(thumbnail_path); try std.fs.cwd().makePath(std.fs.path.dirname(thumbnail_path).?); var maybe_file_fd: ?std.fs.File = std.fs.openFileAbsolute( thumbnail_path, .{ .mode = .read_only }, ) catch |err| switch (err) { error.FileNotFound => blk: { break :blk null; }, else => return err, }; defer if (maybe_file_fd) |file_fd| file_fd.close(); if (maybe_file_fd == null) { var mctx = try magick.loadImage(local_path_cstr); defer mctx.deinit(); const thumbnail_path_cstr = try std.cstr.addNullByte(ctx.manage.allocator, thumbnail_path); defer ctx.manage.allocator.free(thumbnail_path_cstr); if (magick.c.MagickWriteImage(mctx.wand, thumbnail_path_cstr) == 0) return error.MagickWriteFail; //should work now maybe_file_fd = try std.fs.openFileAbsolute( thumbnail_path, .{ .mode = .read_only }, ); } var file_fd = maybe_file_fd.?; var buf: [4096]u8 = undefined; while (true) { const read_bytes = try file_fd.read(&buf); if (read_bytes == 0) break; try response.writer().writeAll(&buf); } return; } else { // todo return default thumbnail try writeError(response, .internal_server_error, "unsupported mimetype: {s}", .{mimetype}); } } } else { response.status_code = .not_found; } } fn fileContents( ctx: *Context, response: *http.Response, request: http.Request, captures: ?*const 
anyopaque, ) !void { _ = captures; if (!wantMethod(request, response, .{.get})) return; if (!wantAuth(ctx, response, request)) return; var param_map = try request.context.uri.queryParameters(ctx.manage.allocator); defer param_map.deinit(ctx.manage.allocator); // TODO decrease repetition const maybe_file_id = param_map.get("file_id"); const maybe_file_hash = param_map.get("hash"); if (maybe_file_id != null and maybe_file_hash != null) { writeError(response, .bad_request, "cant have both hash and id", .{}) catch return; return; } var hash_id_parsed: ?i64 = null; var hash_parsed: [32]u8 = undefined; if (maybe_file_hash) |file_hash| { var out = try std.fmt.hexToBytes(&hash_parsed, file_hash); if (out.len != 32) { writeError(response, .bad_request, "invalid file hash size", .{}) catch return; return; } } if (maybe_file_id) |file_id| { hash_id_parsed = std.fmt.parseInt(i64, file_id, 10) catch |err| { writeError( response, .bad_request, "invalid file id (must be number): {s}", .{@errorName(err)}, ) catch return; return; }; } log.debug("id? {s} hash? {s}", .{ maybe_file_id, maybe_file_hash }); var maybe_file: ?ManageContext.File = if (maybe_file_id != null) try ctx.manage.fetchFile(hash_id_parsed.?) else if (maybe_file_hash != null) try ctx.manage.fetchFileByHash(hash_parsed) else unreachable; if (maybe_file) |file| { defer file.deinit(); const mimetype_result = (try inferMimetype(response, ctx.manage.allocator, file.local_path)) orelse return; defer c.magic_close(mimetype_result.cookie); const maybe_mimetype = mimetype_result.result; if (maybe_mimetype) |mimetype| { log.debug("mimetype found: {s}", .{mimetype}); const file_fd = try std.fs.openFileAbsolute(file.local_path, .{ .mode = .read_only }); defer file_fd.close(); // we need a better API to pass header values whose lifetime are // beyond the request handler's, or else we're passing undefined // memory to the response. // // this hack is required so that the values live in constant // memory inside the executable, rather than stack/heap. if (std.mem.startsWith(u8, mimetype, "image/png")) { try response.headers.put("Content-Type", "image/png"); } else if (std.mem.startsWith(u8, mimetype, "application/pdf")) { try response.headers.put("Content-Type", "application/pdf"); } var buf: [4096]u8 = undefined; while (true) { const read_bytes = try file_fd.read(&buf); if (read_bytes == 0) break; try response.writer().writeAll(&buf); } } else { const magic_error_value = c.magic_error(mimetype_result.cookie); log.err("failed to get mimetype: {s}", .{magic_error_value}); try writeError( response, .internal_server_error, "an error occoured while calculating magic: {s}", .{magic_error_value}, ); return; } } else { response.status_code = .not_found; } }
src/hydrus_api_main.zig
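// Hedged standalone sketch of the tag-conversion step used by searchFiles above: the
// hydrus `tags` query parameter arrives as a JSON array of strings, and the server
// joins it into one space-separated afind query. This mirrors convertTagsToFindQuery
// from src/hydrus_api_main.zig using the same std.json calls that file already uses;
// the function name and the sample tags below are local to the sketch.
const std = @import("std");

fn tagsJsonToQuery(allocator: std.mem.Allocator, tags_string: []const u8) ![]const u8 {
    var tokens = std.json.TokenStream.init(tags_string);
    const opts = std.json.ParseOptions{ .allocator = allocator };
    const tags = try std.json.parse([][]const u8, &tokens, opts);
    defer std.json.parseFree([][]const u8, tags, opts);
    // join copies the tag bytes, so freeing the parsed strings afterwards is safe.
    return std.mem.join(allocator, " ", tags);
}

test "tags json -> find query (sketch)" {
    const allocator = std.testing.allocator;
    const query = try tagsJsonToQuery(allocator, "[\"character:samus aran\", \"creator:foo\"]");
    defer allocator.free(query);
    try std.testing.expectEqualStrings("character:samus aran creator:foo", query);
}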
const ffs = @import("count0bits.zig"); const testing = @import("std").testing; fn test__ffsti2(a: u128, expected: i32) !void { var x = @bitCast(i128, a); var result = ffs.__ffsti2(x); try testing.expectEqual(expected, result); } test "ffsti2" { try test__ffsti2(0x00000000_00000000_00000000_00000001, 1); try test__ffsti2(0x00000000_00000000_00000000_00000002, 2); try test__ffsti2(0x00000000_00000000_00000000_00000003, 1); try test__ffsti2(0x00000000_00000000_00000000_00000004, 3); try test__ffsti2(0x00000000_00000000_00000000_00000005, 1); try test__ffsti2(0x00000000_00000000_00000000_00000006, 2); try test__ffsti2(0x00000000_00000000_00000000_00000007, 1); try test__ffsti2(0x00000000_00000000_00000000_00000008, 4); try test__ffsti2(0x00000000_00000000_00000000_00000009, 1); try test__ffsti2(0x00000000_00000000_00000000_0000000A, 2); try test__ffsti2(0x00000000_00000000_00000000_0000000B, 1); try test__ffsti2(0x00000000_00000000_00000000_0000000C, 3); try test__ffsti2(0x00000000_00000000_00000000_0000000D, 1); try test__ffsti2(0x00000000_00000000_00000000_0000000E, 2); try test__ffsti2(0x00000000_00000000_00000000_0000000F, 1); try test__ffsti2(0x00000000_00000000_00000000_00000010, 5); try test__ffsti2(0x00000000_00000000_00000000_00000011, 1); try test__ffsti2(0x00000000_00000000_00000000_00000012, 2); try test__ffsti2(0x00000000_00000000_00000000_00000013, 1); try test__ffsti2(0x00000000_00000000_00000000_00000014, 3); try test__ffsti2(0x00000000_00000000_00000000_00000015, 1); try test__ffsti2(0x00000000_00000000_00000000_00000016, 2); try test__ffsti2(0x00000000_00000000_00000000_00000017, 1); try test__ffsti2(0x00000000_00000000_00000000_00000018, 4); try test__ffsti2(0x00000000_00000000_00000000_00000019, 1); try test__ffsti2(0x00000000_00000000_00000000_0000001A, 2); try test__ffsti2(0x00000000_00000000_00000000_0000001B, 1); try test__ffsti2(0x00000000_00000000_00000000_0000001C, 3); try test__ffsti2(0x00000000_00000000_00000000_0000001D, 1); try test__ffsti2(0x00000000_00000000_00000000_0000001E, 2); try test__ffsti2(0x00000000_00000000_00000000_0000001F, 1); try test__ffsti2(0x00000000_00000000_00000000_00000020, 6); try test__ffsti2(0x00000000_00000000_00000000_00000021, 1); try test__ffsti2(0x00000000_00000000_00000000_00000022, 2); try test__ffsti2(0x00000000_00000000_00000000_00000023, 1); try test__ffsti2(0x00000000_00000000_00000000_00000024, 3); try test__ffsti2(0x00000000_00000000_00000000_00000025, 1); try test__ffsti2(0x00000000_00000000_00000000_00000026, 2); try test__ffsti2(0x00000000_00000000_00000000_00000027, 1); try test__ffsti2(0x00000000_00000000_00000000_00000028, 4); try test__ffsti2(0x00000000_00000000_00000000_00000029, 1); try test__ffsti2(0x00000000_00000000_00000000_0000002A, 2); try test__ffsti2(0x00000000_00000000_00000000_0000002B, 1); try test__ffsti2(0x00000000_00000000_00000000_0000002C, 3); try test__ffsti2(0x00000000_00000000_00000000_0000002D, 1); try test__ffsti2(0x00000000_00000000_00000000_0000002E, 2); try test__ffsti2(0x00000000_00000000_00000000_0000002F, 1); try test__ffsti2(0x00000000_00000000_00000000_00000030, 5); try test__ffsti2(0x00000000_00000000_00000000_00000031, 1); try test__ffsti2(0x00000000_00000000_00000000_00000032, 2); try test__ffsti2(0x00000000_00000000_00000000_00000033, 1); try test__ffsti2(0x00000000_00000000_00000000_00000034, 3); try test__ffsti2(0x00000000_00000000_00000000_00000035, 1); try test__ffsti2(0x00000000_00000000_00000000_00000036, 2); try test__ffsti2(0x00000000_00000000_00000000_00000037, 1); try 
test__ffsti2(0x00000000_00000000_00000000_00000038, 4); try test__ffsti2(0x00000000_00000000_00000000_00000039, 1); try test__ffsti2(0x00000000_00000000_00000000_0000003A, 2); try test__ffsti2(0x00000000_00000000_00000000_0000003B, 1); try test__ffsti2(0x00000000_00000000_00000000_0000003C, 3); try test__ffsti2(0x00000000_00000000_00000000_0000003D, 1); try test__ffsti2(0x00000000_00000000_00000000_0000003E, 2); try test__ffsti2(0x00000000_00000000_00000000_0000003F, 1); try test__ffsti2(0x00000000_00000000_00000000_00000040, 7); try test__ffsti2(0x00000000_00000000_00000000_00000041, 1); try test__ffsti2(0x00000000_00000000_00000000_00000042, 2); try test__ffsti2(0x00000000_00000000_00000000_00000043, 1); try test__ffsti2(0x00000000_00000000_00000000_00000044, 3); try test__ffsti2(0x00000000_00000000_00000000_00000045, 1); try test__ffsti2(0x00000000_00000000_00000000_00000046, 2); try test__ffsti2(0x00000000_00000000_00000000_00000047, 1); try test__ffsti2(0x00000000_00000000_00000000_00000048, 4); try test__ffsti2(0x00000000_00000000_00000000_00000049, 1); try test__ffsti2(0x00000000_00000000_00000000_0000004A, 2); try test__ffsti2(0x00000000_00000000_00000000_0000004B, 1); try test__ffsti2(0x00000000_00000000_00000000_0000004C, 3); try test__ffsti2(0x00000000_00000000_00000000_0000004D, 1); try test__ffsti2(0x00000000_00000000_00000000_0000004E, 2); try test__ffsti2(0x00000000_00000000_00000000_0000004F, 1); try test__ffsti2(0x00000000_00000000_00000000_00000050, 5); try test__ffsti2(0x00000000_00000000_00000000_00000051, 1); try test__ffsti2(0x00000000_00000000_00000000_00000052, 2); try test__ffsti2(0x00000000_00000000_00000000_00000053, 1); try test__ffsti2(0x00000000_00000000_00000000_00000054, 3); try test__ffsti2(0x00000000_00000000_00000000_00000055, 1); try test__ffsti2(0x00000000_00000000_00000000_00000056, 2); try test__ffsti2(0x00000000_00000000_00000000_00000057, 1); try test__ffsti2(0x00000000_00000000_00000000_00000058, 4); try test__ffsti2(0x00000000_00000000_00000000_00000059, 1); try test__ffsti2(0x00000000_00000000_00000000_0000005A, 2); try test__ffsti2(0x00000000_00000000_00000000_0000005B, 1); try test__ffsti2(0x00000000_00000000_00000000_0000005C, 3); try test__ffsti2(0x00000000_00000000_00000000_0000005D, 1); try test__ffsti2(0x00000000_00000000_00000000_0000005E, 2); try test__ffsti2(0x00000000_00000000_00000000_0000005F, 1); try test__ffsti2(0x00000000_00000000_00000000_00000060, 6); try test__ffsti2(0x00000000_00000000_00000000_00000061, 1); try test__ffsti2(0x00000000_00000000_00000000_00000062, 2); try test__ffsti2(0x00000000_00000000_00000000_00000063, 1); try test__ffsti2(0x00000000_00000000_00000000_00000064, 3); try test__ffsti2(0x00000000_00000000_00000000_00000065, 1); try test__ffsti2(0x00000000_00000000_00000000_00000066, 2); try test__ffsti2(0x00000000_00000000_00000000_00000067, 1); try test__ffsti2(0x00000000_00000000_00000000_00000068, 4); try test__ffsti2(0x00000000_00000000_00000000_00000069, 1); try test__ffsti2(0x00000000_00000000_00000000_0000006A, 2); try test__ffsti2(0x00000000_00000000_00000000_0000006B, 1); try test__ffsti2(0x00000000_00000000_00000000_0000006C, 3); try test__ffsti2(0x00000000_00000000_00000000_0000006D, 1); try test__ffsti2(0x00000000_00000000_00000000_0000006E, 2); try test__ffsti2(0x00000000_00000000_00000000_0000006F, 1); try test__ffsti2(0x00000000_00000000_00000000_00000070, 5); try test__ffsti2(0x00000000_00000000_00000000_00000071, 1); try test__ffsti2(0x00000000_00000000_00000000_00000072, 2); try 
test__ffsti2(0x00000000_00000000_00000000_00000073, 1); try test__ffsti2(0x00000000_00000000_00000000_00000074, 3); try test__ffsti2(0x00000000_00000000_00000000_00000075, 1); try test__ffsti2(0x00000000_00000000_00000000_00000076, 2); try test__ffsti2(0x00000000_00000000_00000000_00000077, 1); try test__ffsti2(0x00000000_00000000_00000000_00000078, 4); try test__ffsti2(0x00000000_00000000_00000000_00000079, 1); try test__ffsti2(0x00000000_00000000_00000000_0000007A, 2); try test__ffsti2(0x00000000_00000000_00000000_0000007B, 1); try test__ffsti2(0x00000000_00000000_00000000_0000007C, 3); try test__ffsti2(0x00000000_00000000_00000000_0000007D, 1); try test__ffsti2(0x00000000_00000000_00000000_0000007E, 2); try test__ffsti2(0x00000000_00000000_00000000_0000007F, 1); try test__ffsti2(0x00000000_00000000_00000000_00000080, 8); try test__ffsti2(0x00000000_00000000_00000000_00000081, 1); try test__ffsti2(0x00000000_00000000_00000000_00000082, 2); try test__ffsti2(0x00000000_00000000_00000000_00000083, 1); try test__ffsti2(0x00000000_00000000_00000000_00000084, 3); try test__ffsti2(0x00000000_00000000_00000000_00000085, 1); try test__ffsti2(0x00000000_00000000_00000000_00000086, 2); try test__ffsti2(0x00000000_00000000_00000000_00000087, 1); try test__ffsti2(0x00000000_00000000_00000000_00000088, 4); try test__ffsti2(0x00000000_00000000_00000000_00000089, 1); try test__ffsti2(0x00000000_00000000_00000000_0000008A, 2); try test__ffsti2(0x00000000_00000000_00000000_0000008B, 1); try test__ffsti2(0x00000000_00000000_00000000_0000008C, 3); try test__ffsti2(0x00000000_00000000_00000000_0000008D, 1); try test__ffsti2(0x00000000_00000000_00000000_0000008E, 2); try test__ffsti2(0x00000000_00000000_00000000_0000008F, 1); try test__ffsti2(0x00000000_00000000_00000000_00000090, 5); try test__ffsti2(0x00000000_00000000_00000000_00000091, 1); try test__ffsti2(0x00000000_00000000_00000000_00000092, 2); try test__ffsti2(0x00000000_00000000_00000000_00000093, 1); try test__ffsti2(0x00000000_00000000_00000000_00000094, 3); try test__ffsti2(0x00000000_00000000_00000000_00000095, 1); try test__ffsti2(0x00000000_00000000_00000000_00000096, 2); try test__ffsti2(0x00000000_00000000_00000000_00000097, 1); try test__ffsti2(0x00000000_00000000_00000000_00000098, 4); try test__ffsti2(0x00000000_00000000_00000000_00000099, 1); try test__ffsti2(0x00000000_00000000_00000000_0000009A, 2); try test__ffsti2(0x00000000_00000000_00000000_0000009B, 1); try test__ffsti2(0x00000000_00000000_00000000_0000009C, 3); try test__ffsti2(0x00000000_00000000_00000000_0000009D, 1); try test__ffsti2(0x00000000_00000000_00000000_0000009E, 2); try test__ffsti2(0x00000000_00000000_00000000_0000009F, 1); try test__ffsti2(0x00000000_00000000_00000000_000000A0, 6); try test__ffsti2(0x00000000_00000000_00000000_000000A1, 1); try test__ffsti2(0x00000000_00000000_00000000_000000A2, 2); try test__ffsti2(0x00000000_00000000_00000000_000000A3, 1); try test__ffsti2(0x00000000_00000000_00000000_000000A4, 3); try test__ffsti2(0x00000000_00000000_00000000_000000A5, 1); try test__ffsti2(0x00000000_00000000_00000000_000000A6, 2); try test__ffsti2(0x00000000_00000000_00000000_000000A7, 1); try test__ffsti2(0x00000000_00000000_00000000_000000A8, 4); try test__ffsti2(0x00000000_00000000_00000000_000000A9, 1); try test__ffsti2(0x00000000_00000000_00000000_000000AA, 2); try test__ffsti2(0x00000000_00000000_00000000_000000AB, 1); try test__ffsti2(0x00000000_00000000_00000000_000000AC, 3); try test__ffsti2(0x00000000_00000000_00000000_000000AD, 1); try 
test__ffsti2(0x00000000_00000000_00000000_000000AE, 2); try test__ffsti2(0x00000000_00000000_00000000_000000AF, 1); try test__ffsti2(0x00000000_00000000_00000000_000000B0, 5); try test__ffsti2(0x00000000_00000000_00000000_000000B1, 1); try test__ffsti2(0x00000000_00000000_00000000_000000B2, 2); try test__ffsti2(0x00000000_00000000_00000000_000000B3, 1); try test__ffsti2(0x00000000_00000000_00000000_000000B4, 3); try test__ffsti2(0x00000000_00000000_00000000_000000B5, 1); try test__ffsti2(0x00000000_00000000_00000000_000000B6, 2); try test__ffsti2(0x00000000_00000000_00000000_000000B7, 1); try test__ffsti2(0x00000000_00000000_00000000_000000B8, 4); try test__ffsti2(0x00000000_00000000_00000000_000000B9, 1); try test__ffsti2(0x00000000_00000000_00000000_000000BA, 2); try test__ffsti2(0x00000000_00000000_00000000_000000BB, 1); try test__ffsti2(0x00000000_00000000_00000000_000000BC, 3); try test__ffsti2(0x00000000_00000000_00000000_000000BD, 1); try test__ffsti2(0x00000000_00000000_00000000_000000BE, 2); try test__ffsti2(0x00000000_00000000_00000000_000000BF, 1); try test__ffsti2(0x00000000_00000000_00000000_000000C0, 7); try test__ffsti2(0x00000000_00000000_00000000_000000C1, 1); try test__ffsti2(0x00000000_00000000_00000000_000000C2, 2); try test__ffsti2(0x00000000_00000000_00000000_000000C3, 1); try test__ffsti2(0x00000000_00000000_00000000_000000C4, 3); try test__ffsti2(0x00000000_00000000_00000000_000000C5, 1); try test__ffsti2(0x00000000_00000000_00000000_000000C6, 2); try test__ffsti2(0x00000000_00000000_00000000_000000C7, 1); try test__ffsti2(0x00000000_00000000_00000000_000000C8, 4); try test__ffsti2(0x00000000_00000000_00000000_000000C9, 1); try test__ffsti2(0x00000000_00000000_00000000_000000CA, 2); try test__ffsti2(0x00000000_00000000_00000000_000000CB, 1); try test__ffsti2(0x00000000_00000000_00000000_000000CC, 3); try test__ffsti2(0x00000000_00000000_00000000_000000CD, 1); try test__ffsti2(0x00000000_00000000_00000000_000000CE, 2); try test__ffsti2(0x00000000_00000000_00000000_000000CF, 1); try test__ffsti2(0x00000000_00000000_00000000_000000D0, 5); try test__ffsti2(0x00000000_00000000_00000000_000000D1, 1); try test__ffsti2(0x00000000_00000000_00000000_000000D2, 2); try test__ffsti2(0x00000000_00000000_00000000_000000D3, 1); try test__ffsti2(0x00000000_00000000_00000000_000000D4, 3); try test__ffsti2(0x00000000_00000000_00000000_000000D5, 1); try test__ffsti2(0x00000000_00000000_00000000_000000D6, 2); try test__ffsti2(0x00000000_00000000_00000000_000000D7, 1); try test__ffsti2(0x00000000_00000000_00000000_000000D8, 4); try test__ffsti2(0x00000000_00000000_00000000_000000D9, 1); try test__ffsti2(0x00000000_00000000_00000000_000000DA, 2); try test__ffsti2(0x00000000_00000000_00000000_000000DB, 1); try test__ffsti2(0x00000000_00000000_00000000_000000DC, 3); try test__ffsti2(0x00000000_00000000_00000000_000000DD, 1); try test__ffsti2(0x00000000_00000000_00000000_000000DE, 2); try test__ffsti2(0x00000000_00000000_00000000_000000DF, 1); try test__ffsti2(0x00000000_00000000_00000000_000000E0, 6); try test__ffsti2(0x00000000_00000000_00000000_000000E1, 1); try test__ffsti2(0x00000000_00000000_00000000_000000E2, 2); try test__ffsti2(0x00000000_00000000_00000000_000000E3, 1); try test__ffsti2(0x00000000_00000000_00000000_000000E4, 3); try test__ffsti2(0x00000000_00000000_00000000_000000E5, 1); try test__ffsti2(0x00000000_00000000_00000000_000000E6, 2); try test__ffsti2(0x00000000_00000000_00000000_000000E7, 1); try test__ffsti2(0x00000000_00000000_00000000_000000E8, 4); try 
test__ffsti2(0x00000000_00000000_00000000_000000E9, 1); try test__ffsti2(0x00000000_00000000_00000000_000000EA, 2); try test__ffsti2(0x00000000_00000000_00000000_000000EB, 1); try test__ffsti2(0x00000000_00000000_00000000_000000EC, 3); try test__ffsti2(0x00000000_00000000_00000000_000000ED, 1); try test__ffsti2(0x00000000_00000000_00000000_000000EE, 2); try test__ffsti2(0x00000000_00000000_00000000_000000EF, 1); try test__ffsti2(0x00000000_00000000_00000000_000000F0, 5); try test__ffsti2(0x00000000_00000000_00000000_000000F1, 1); try test__ffsti2(0x00000000_00000000_00000000_000000F2, 2); try test__ffsti2(0x00000000_00000000_00000000_000000F3, 1); try test__ffsti2(0x00000000_00000000_00000000_000000F4, 3); try test__ffsti2(0x00000000_00000000_00000000_000000F5, 1); try test__ffsti2(0x00000000_00000000_00000000_000000F6, 2); try test__ffsti2(0x00000000_00000000_00000000_000000F7, 1); try test__ffsti2(0x00000000_00000000_00000000_000000F8, 4); try test__ffsti2(0x00000000_00000000_00000000_000000F9, 1); try test__ffsti2(0x00000000_00000000_00000000_000000FA, 2); try test__ffsti2(0x00000000_00000000_00000000_000000FB, 1); try test__ffsti2(0x00000000_00000000_00000000_000000FC, 3); try test__ffsti2(0x00000000_00000000_00000000_000000FD, 1); try test__ffsti2(0x00000000_00000000_00000000_000000FE, 2); try test__ffsti2(0x00000000_00000000_00000000_000000FF, 1); try test__ffsti2(0x00000000_00000000_00000000_00000000, 0); try test__ffsti2(0x80000000_00000000_00000000_00000000, 128); try test__ffsti2(0x40000000_00000000_00000000_00000000, 127); try test__ffsti2(0x20000000_00000000_00000000_00000000, 126); try test__ffsti2(0x10000000_00000000_00000000_00000000, 125); try test__ffsti2(0x08000000_00000000_00000000_00000000, 124); try test__ffsti2(0x04000000_00000000_00000000_00000000, 123); try test__ffsti2(0x02000000_00000000_00000000_00000000, 122); try test__ffsti2(0x01000000_00000000_00000000_00000000, 121); try test__ffsti2(0x00800000_00000000_00000000_00000000, 120); try test__ffsti2(0x00400000_00000000_00000000_00000000, 119); try test__ffsti2(0x00200000_00000000_00000000_00000000, 118); try test__ffsti2(0x00100000_00000000_00000000_00000000, 117); try test__ffsti2(0x00080000_00000000_00000000_00000000, 116); try test__ffsti2(0x00040000_00000000_00000000_00000000, 115); try test__ffsti2(0x00020000_00000000_00000000_00000000, 114); try test__ffsti2(0x00010000_00000000_00000000_00000000, 113); try test__ffsti2(0x00008000_00000000_00000000_00000000, 112); try test__ffsti2(0x00004000_00000000_00000000_00000000, 111); try test__ffsti2(0x00002000_00000000_00000000_00000000, 110); try test__ffsti2(0x00001000_00000000_00000000_00000000, 109); try test__ffsti2(0x00000800_00000000_00000000_00000000, 108); try test__ffsti2(0x00000400_00000000_00000000_00000000, 107); try test__ffsti2(0x00000200_00000000_00000000_00000000, 106); try test__ffsti2(0x00000100_00000000_00000000_00000000, 105); try test__ffsti2(0x00000080_00000000_00000000_00000000, 104); try test__ffsti2(0x00000040_00000000_00000000_00000000, 103); try test__ffsti2(0x00000020_00000000_00000000_00000000, 102); try test__ffsti2(0x00000010_00000000_00000000_00000000, 101); try test__ffsti2(0x00000008_00000000_00000000_00000000, 100); try test__ffsti2(0x00000004_00000000_00000000_00000000, 99); try test__ffsti2(0x00000002_00000000_00000000_00000000, 98); try test__ffsti2(0x00000001_00000000_00000000_00000000, 97); try test__ffsti2(0x00000000_80000000_00000000_00000000, 96); try test__ffsti2(0x00000000_40000000_00000000_00000000, 95); try 
test__ffsti2(0x00000000_20000000_00000000_00000000, 94); try test__ffsti2(0x00000000_10000000_00000000_00000000, 93); try test__ffsti2(0x00000000_08000000_00000000_00000000, 92); try test__ffsti2(0x00000000_04000000_00000000_00000000, 91); try test__ffsti2(0x00000000_02000000_00000000_00000000, 90); try test__ffsti2(0x00000000_01000000_00000000_00000000, 89); try test__ffsti2(0x00000000_00800000_00000000_00000000, 88); try test__ffsti2(0x00000000_00400000_00000000_00000000, 87); try test__ffsti2(0x00000000_00200000_00000000_00000000, 86); try test__ffsti2(0x00000000_00100000_00000000_00000000, 85); try test__ffsti2(0x00000000_00080000_00000000_00000000, 84); try test__ffsti2(0x00000000_00040000_00000000_00000000, 83); try test__ffsti2(0x00000000_00020000_00000000_00000000, 82); try test__ffsti2(0x00000000_00010000_00000000_00000000, 81); try test__ffsti2(0x00000000_00008000_00000000_00000000, 80); try test__ffsti2(0x00000000_00004000_00000000_00000000, 79); try test__ffsti2(0x00000000_00002000_00000000_00000000, 78); try test__ffsti2(0x00000000_00001000_00000000_00000000, 77); try test__ffsti2(0x00000000_00000800_00000000_00000000, 76); try test__ffsti2(0x00000000_00000400_00000000_00000000, 75); try test__ffsti2(0x00000000_00000200_00000000_00000000, 74); try test__ffsti2(0x00000000_00000100_00000000_00000000, 73); try test__ffsti2(0x00000000_00000080_00000000_00000000, 72); try test__ffsti2(0x00000000_00000040_00000000_00000000, 71); try test__ffsti2(0x00000000_00000020_00000000_00000000, 70); try test__ffsti2(0x00000000_00000010_00000000_00000000, 69); try test__ffsti2(0x00000000_00000008_00000000_00000000, 68); try test__ffsti2(0x00000000_00000004_00000000_00000000, 67); try test__ffsti2(0x00000000_00000002_00000000_00000000, 66); try test__ffsti2(0x00000000_00000001_00000000_00000000, 65); try test__ffsti2(0x00000000_00000000_80000000_00000000, 64); try test__ffsti2(0x00000000_00000000_40000000_00000000, 63); try test__ffsti2(0x00000000_00000000_20000000_00000000, 62); try test__ffsti2(0x00000000_00000000_10000000_00000000, 61); try test__ffsti2(0x00000000_00000000_08000000_00000000, 60); try test__ffsti2(0x00000000_00000000_04000000_00000000, 59); try test__ffsti2(0x00000000_00000000_02000000_00000000, 58); try test__ffsti2(0x00000000_00000000_01000000_00000000, 57); try test__ffsti2(0x00000000_00000000_00800000_00000000, 56); try test__ffsti2(0x00000000_00000000_00400000_00000000, 55); try test__ffsti2(0x00000000_00000000_00200000_00000000, 54); try test__ffsti2(0x00000000_00000000_00100000_00000000, 53); try test__ffsti2(0x00000000_00000000_00080000_00000000, 52); try test__ffsti2(0x00000000_00000000_00040000_00000000, 51); try test__ffsti2(0x00000000_00000000_00020000_00000000, 50); try test__ffsti2(0x00000000_00000000_00010000_00000000, 49); try test__ffsti2(0x00000000_00000000_00008000_00000000, 48); try test__ffsti2(0x00000000_00000000_00004000_00000000, 47); try test__ffsti2(0x00000000_00000000_00002000_00000000, 46); try test__ffsti2(0x00000000_00000000_00001000_00000000, 45); try test__ffsti2(0x00000000_00000000_00000800_00000000, 44); try test__ffsti2(0x00000000_00000000_00000400_00000000, 43); try test__ffsti2(0x00000000_00000000_00000200_00000000, 42); try test__ffsti2(0x00000000_00000000_00000100_00000000, 41); try test__ffsti2(0x00000000_00000000_00000080_00000000, 40); try test__ffsti2(0x00000000_00000000_00000040_00000000, 39); try test__ffsti2(0x00000000_00000000_00000020_00000000, 38); try test__ffsti2(0x00000000_00000000_00000010_00000000, 37); try 
    test__ffsti2(0x00000000_00000000_00000008_00000000, 36);
    try test__ffsti2(0x00000000_00000000_00000004_00000000, 35);
    try test__ffsti2(0x00000000_00000000_00000002_00000000, 34);
    try test__ffsti2(0x00000000_00000000_00000001_00000000, 33);
    try test__ffsti2(0x00000000_00000000_00000000_80000000, 32);
    try test__ffsti2(0x00000000_00000000_00000000_40000000, 31);
    try test__ffsti2(0x00000000_00000000_00000000_20000000, 30);
    try test__ffsti2(0x00000000_00000000_00000000_10000000, 29);
    try test__ffsti2(0x00000000_00000000_00000000_08000000, 28);
    try test__ffsti2(0x00000000_00000000_00000000_04000000, 27);
    try test__ffsti2(0x00000000_00000000_00000000_02000000, 26);
    try test__ffsti2(0x00000000_00000000_00000000_01000000, 25);
    try test__ffsti2(0x00000000_00000000_00000000_00800000, 24);
    try test__ffsti2(0x00000000_00000000_00000000_00400000, 23);
    try test__ffsti2(0x00000000_00000000_00000000_00200000, 22);
    try test__ffsti2(0x00000000_00000000_00000000_00100000, 21);
    try test__ffsti2(0x00000000_00000000_00000000_00080000, 20);
    try test__ffsti2(0x00000000_00000000_00000000_00040000, 19);
    try test__ffsti2(0x00000000_00000000_00000000_00020000, 18);
    try test__ffsti2(0x00000000_00000000_00000000_00010000, 17);
    try test__ffsti2(0x00000000_00000000_00000000_00008000, 16);
    try test__ffsti2(0x00000000_00000000_00000000_00004000, 15);
    try test__ffsti2(0x00000000_00000000_00000000_00002000, 14);
    try test__ffsti2(0x00000000_00000000_00000000_00001000, 13);
    try test__ffsti2(0x00000000_00000000_00000000_00000800, 12);
    try test__ffsti2(0x00000000_00000000_00000000_00000400, 11);
    try test__ffsti2(0x00000000_00000000_00000000_00000200, 10);
    try test__ffsti2(0x00000000_00000000_00000000_00000100, 9);
}
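// A minimal reference sketch of the __ffsti2 contract exercised above, assuming the
// usual ffs semantics (1-based index of the least significant set bit, 0 for an input
// of 0); the helper name below is illustrative only, not part of compiler-rt.
fn ffsti2Reference(a: i128) i32 {
    const x = @bitCast(u128, a); // reinterpret the sign bit as an ordinary bit
    if (x == 0) return 0; // ffs(0) is defined as 0
    return @as(i32, @ctz(u128, x)) + 1; // trailing zero count, converted to a 1-based index
}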
lib/std/special/compiler_rt/ffsti2_test.zig
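// The code below is the Zig compiler's command-line driver: it dispatches the
// build-exe/build-lib/build-obj/test/run/cc/c++/translate-c/fmt/libc subcommands,
// parses both native flags and Clang-style flags, and then configures a Compilation.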
const std = @import("std"); const assert = std.debug.assert; const io = std.io; const fs = std.fs; const mem = std.mem; const process = std.process; const Allocator = mem.Allocator; const ArrayList = std.ArrayList; const ast = std.zig.ast; const warn = std.log.warn; const Compilation = @import("Compilation.zig"); const link = @import("link.zig"); const Package = @import("Package.zig"); const zir = @import("zir.zig"); const build_options = @import("build_options"); const introspect = @import("introspect.zig"); const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const translate_c = @import("translate_c.zig"); const Cache = @import("Cache.zig"); const target_util = @import("target.zig"); const ThreadPool = @import("ThreadPool.zig"); pub fn fatal(comptime format: []const u8, args: anytype) noreturn { std.log.emerg(format, args); process.exit(1); } pub const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB pub const Color = enum { auto, off, on, }; const usage = \\Usage: zig [command] [options] \\ \\Commands: \\ \\ build Build project from build.zig \\ build-exe Create executable from source or object files \\ build-lib Create library from source or object files \\ build-obj Create object from source or object files \\ cc Use Zig as a drop-in C compiler \\ c++ Use Zig as a drop-in C++ compiler \\ env Print lib path, std path, cache directory, and version \\ fmt Reformat Zig source into canonical form \\ help Print this help and exit \\ init-exe Initialize a `zig build` application in the cwd \\ init-lib Initialize a `zig build` library in the cwd \\ libc Display native libc paths file or validate one \\ run Create executable and run immediately \\ translate-c Convert C code to Zig code \\ targets List available compilation targets \\ test Create and run a test build \\ version Print version number and exit \\ zen Print Zen of Zig and exit \\ \\General Options: \\ \\ -h, --help Print command-specific usage \\ ; pub const log_level: std.log.Level = switch (std.builtin.mode) { .Debug => .debug, .ReleaseSafe, .ReleaseFast => .info, .ReleaseSmall => .crit, }; pub fn log( comptime level: std.log.Level, comptime scope: @TypeOf(.EnumLiteral), comptime format: []const u8, args: anytype, ) void { // Hide debug messages unless added with `-Dlog=foo`. if (@enumToInt(level) > @enumToInt(std.log.level) or @enumToInt(level) > @enumToInt(std.log.Level.info)) { const scope_name = @tagName(scope); const ok = comptime for (build_options.log_scopes) |log_scope| { if (mem.eql(u8, log_scope, scope_name)) break true; } else return; } // We only recognize 4 log levels in this application. 
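// Map the syslog-style std.log levels onto those four (error, warning, info, debug)
// and print the formatted message with its scope prefix straight to stderr.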
const level_txt = switch (level) { .emerg, .alert, .crit, .err => "error", .warn => "warning", .notice, .info => "info", .debug => "debug", }; const prefix1 = level_txt; const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): "; // Print the message to stderr, silently ignoring any errors std.debug.print(prefix1 ++ prefix2 ++ format ++ "\n", args); } var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){}; pub fn main() anyerror!void { const gpa = if (std.builtin.link_libc) std.heap.raw_c_allocator else &general_purpose_allocator.allocator; defer if (!std.builtin.link_libc) { _ = general_purpose_allocator.deinit(); }; var arena_instance = std.heap.ArenaAllocator.init(gpa); defer arena_instance.deinit(); const arena = &arena_instance.allocator; const args = try process.argsAlloc(arena); return mainArgs(gpa, arena, args); } const os_can_execve = std.builtin.os.tag != .windows; pub fn mainArgs(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !void { if (args.len <= 1) { std.log.info("{}", .{usage}); fatal("expected command argument", .{}); } if (os_can_execve and std.os.getenvZ("ZIG_IS_DETECTING_LIBC_PATHS") != null) { // In this case we have accidentally invoked ourselves as "the system C compiler" // to figure out where libc is installed. This is essentially infinite recursion // via child process execution due to the CC environment variable pointing to Zig. // Here we ignore the CC environment variable and exec `cc` as a child process. // However it's possible Zig is installed as *that* C compiler as well, which is // why we have this additional environment variable here to check. var env_map = try std.process.getEnvMap(arena); const inf_loop_env_key = "ZIG_IS_TRYING_TO_NOT_CALL_ITSELF"; if (env_map.get(inf_loop_env_key) != null) { fatal("The compilation links against libc, but Zig is unable to provide a libc " ++ "for this operating system, and no --libc " ++ "parameter was provided, so Zig attempted to invoke the system C compiler " ++ "in order to determine where libc is installed. However the system C " ++ "compiler is `zig cc`, so no libc installation was found.", .{}); } try env_map.set(inf_loop_env_key, "1"); // Some programs such as CMake will strip the `cc` and subsequent args from the // CC environment variable. We detect and support this scenario here because of // the ZIG_IS_DETECTING_LIBC_PATHS environment variable. 
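// Re-exec the real system C compiler: either forward the original "cc ..." argv
// unchanged, or (when a build tool stripped the subcommand) rewrite argv[0] to "cc".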
if (mem.eql(u8, args[1], "cc")) { return std.os.execvpe(arena, args[1..], &env_map); } else { const modified_args = try arena.dupe([]const u8, args); modified_args[0] = "cc"; return std.os.execvpe(arena, modified_args, &env_map); } } const cmd = args[1]; const cmd_args = args[2..]; if (mem.eql(u8, cmd, "build-exe")) { return buildOutputType(gpa, arena, args, .{ .build = .Exe }); } else if (mem.eql(u8, cmd, "build-lib")) { return buildOutputType(gpa, arena, args, .{ .build = .Lib }); } else if (mem.eql(u8, cmd, "build-obj")) { return buildOutputType(gpa, arena, args, .{ .build = .Obj }); } else if (mem.eql(u8, cmd, "test")) { return buildOutputType(gpa, arena, args, .zig_test); } else if (mem.eql(u8, cmd, "run")) { return buildOutputType(gpa, arena, args, .run); } else if (mem.eql(u8, cmd, "cc")) { return buildOutputType(gpa, arena, args, .cc); } else if (mem.eql(u8, cmd, "c++")) { return buildOutputType(gpa, arena, args, .cpp); } else if (mem.eql(u8, cmd, "translate-c")) { return buildOutputType(gpa, arena, args, .translate_c); } else if (mem.eql(u8, cmd, "clang") or mem.eql(u8, cmd, "-cc1") or mem.eql(u8, cmd, "-cc1as")) { return punt_to_clang(arena, args); } else if (mem.eql(u8, cmd, "ld.lld") or mem.eql(u8, cmd, "ld64.lld") or mem.eql(u8, cmd, "lld-link") or mem.eql(u8, cmd, "wasm-ld")) { return punt_to_lld(arena, args); } else if (mem.eql(u8, cmd, "build")) { return cmdBuild(gpa, arena, cmd_args); } else if (mem.eql(u8, cmd, "fmt")) { return cmdFmt(gpa, cmd_args); } else if (mem.eql(u8, cmd, "libc")) { return cmdLibC(gpa, cmd_args); } else if (mem.eql(u8, cmd, "init-exe")) { return cmdInit(gpa, arena, cmd_args, .Exe); } else if (mem.eql(u8, cmd, "init-lib")) { return cmdInit(gpa, arena, cmd_args, .Lib); } else if (mem.eql(u8, cmd, "targets")) { const info = try detectNativeTargetInfo(arena, .{}); const stdout = io.getStdOut().outStream(); return @import("print_targets.zig").cmdTargets(arena, cmd_args, stdout, info.target); } else if (mem.eql(u8, cmd, "version")) { try std.io.getStdOut().writeAll(build_options.version ++ "\n"); } else if (mem.eql(u8, cmd, "env")) { try @import("print_env.zig").cmdEnv(arena, cmd_args, io.getStdOut().outStream()); } else if (mem.eql(u8, cmd, "zen")) { try io.getStdOut().writeAll(info_zen); } else if (mem.eql(u8, cmd, "help") or mem.eql(u8, cmd, "-h") or mem.eql(u8, cmd, "--help")) { try io.getStdOut().writeAll(usage); } else { std.log.info("{}", .{usage}); fatal("unknown command: {}", .{args[1]}); } } const usage_build_generic = \\Usage: zig build-exe <options> [files] \\ zig build-lib <options> [files] \\ zig build-obj <options> [files] \\ zig test <options> [files] \\ zig run <options> [file] [-- [args]] \\ \\Supported file types: \\ .zig Zig source code \\ .zir Zig Intermediate Representation code \\ .o ELF object file \\ .o MACH-O (macOS) object file \\ .obj COFF (Windows) object file \\ .lib COFF (Windows) static library \\ .a ELF static library \\ .so ELF shared object (dynamic link) \\ .dll Windows Dynamic Link Library \\ .dylib MACH-O (macOS) dynamic library \\ .tbd (macOS) text-based dylib definition \\ .s Target-specific assembly source code \\ .S Assembly with C preprocessor (requires LLVM extensions) \\ .c C source code (requires LLVM extensions) \\ .cpp C++ source code (requires LLVM extensions) \\ Other C++ extensions: .C .cc .cxx \\ \\General Options: \\ -h, --help Print this help and exit \\ --watch Enable compiler REPL \\ --color [auto|off|on] Enable or disable colored error messages \\ -femit-bin[=path] (default) Output machine code \\ 
-fno-emit-bin Do not output machine code \\ -femit-asm[=path] Output .s (assembly code) \\ -fno-emit-asm (default) Do not output .s (assembly code) \\ -femit-zir[=path] Produce a .zir file with Zig IR \\ -fno-emit-zir (default) Do not produce a .zir file with Zig IR \\ -femit-llvm-ir[=path] Produce a .ll file with LLVM IR (requires LLVM extensions) \\ -fno-emit-llvm-ir (default) Do not produce a .ll file with LLVM IR \\ -femit-h[=path] Generate a C header file (.h) \\ -fno-emit-h (default) Do not generate a C header file (.h) \\ -femit-docs[=path] Create a docs/ dir with html documentation \\ -fno-emit-docs (default) Do not produce docs/ dir with html documentation \\ -femit-analysis[=path] Write analysis JSON file with type information \\ -fno-emit-analysis (default) Do not write analysis JSON file with type information \\ --show-builtin Output the source of @import("builtin") then exit \\ --cache-dir [path] Override the local cache directory \\ --global-cache-dir [path] Override the global cache directory \\ --override-lib-dir [path] Override path to Zig installation lib directory \\ --enable-cache Output to cache directory; print path to stdout \\ \\Compile Options: \\ -target [name] <arch><sub>-<os>-<abi> see the targets command \\ -mcpu [cpu] Specify target CPU and feature set \\ -mcmodel=[default|tiny| Limit range of code and data virtual addresses \\ small|kernel| \\ medium|large] \\ --name [name] Override root name (not a file path) \\ -O [mode] Choose what to optimize for \\ Debug (default) Optimizations off, safety on \\ ReleaseFast Optimizations on, safety off \\ ReleaseSafe Optimizations on, safety on \\ ReleaseSmall Optimize for small binary, safety off \\ --pkg-begin [name] [path] Make pkg available to import and push current pkg \\ --pkg-end Pop current pkg \\ --main-pkg-path Set the directory of the root package \\ -fPIC Force-enable Position Independent Code \\ -fno-PIC Force-disable Position Independent Code \\ -fPIE Force-enable Position Independent Executable \\ -fno-PIE Force-disable Position Independent Executable \\ -fstack-check Enable stack probing in unsafe builds \\ -fno-stack-check Disable stack probing in safe builds \\ -fsanitize-c Enable C undefined behavior detection in unsafe builds \\ -fno-sanitize-c Disable C undefined behavior detection in safe builds \\ -fvalgrind Include valgrind client requests in release builds \\ -fno-valgrind Omit valgrind client requests in debug builds \\ -fdll-export-fns Mark exported functions as DLL exports (Windows) \\ -fno-dll-export-fns Force-disable marking exported functions as DLL exports \\ -fLLVM Force using LLVM as the codegen backend \\ -fno-LLVM Prevent using LLVM as a codegen backend \\ -fClang Force using Clang as the C/C++ compilation backend \\ -fno-Clang Prevent using Clang as the C/C++ compilation backend \\ --strip Omit debug symbols \\ --single-threaded Code assumes it is only used single-threaded \\ -ofmt=[mode] Override target object format \\ elf Executable and Linking Format \\ c Compile to C source code \\ wasm WebAssembly \\ pe Portable Executable (Windows) \\ coff Common Object File Format (Windows) \\ macho macOS relocatables \\ hex (planned) Intel IHEX \\ raw (planned) Dump machine code directly \\ -dirafter [dir] Add directory to AFTER include search path \\ -isystem [dir] Add directory to SYSTEM include search path \\ -I[dir] Add directory to include search path \\ -D[macro]=[value] Define C [macro] to [value] (1 if [value] omitted) \\ --libc [file] Provide a file which specifies libc paths \\ 
-cflags [flags] -- Set extra flags for the next positional C source files \\ -ffunction-sections Places each function in a separate section \\ \\Link Options: \\ -l[lib], --library [lib] Link against system library \\ -L[d], --library-directory [d] Add a directory to the library search path \\ -T[script], --script [script] Use a custom linker script \\ --version-script [path] Provide a version .map file \\ --dynamic-linker [path] Set the dynamic interpreter path (usually ld.so) \\ --version [ver] Dynamic library semver \\ -fsoname[=name] (Linux) Override the default SONAME value \\ -fno-soname (Linux) Disable emitting a SONAME \\ -fLLD Force using LLD as the linker \\ -fno-LLD Prevent using LLD as the linker \\ -fcompiler-rt Always include compiler-rt symbols in output \\ -fno-compiler-rt Prevent including compiler-rt symbols in output \\ -rdynamic Add all symbols to the dynamic symbol table \\ -rpath [path] Add directory to the runtime library search path \\ -feach-lib-rpath Ensure adding rpath for each used dynamic library \\ -fno-each-lib-rpath Prevent adding rpath for each used dynamic library \\ --eh-frame-hdr Enable C++ exception handling by passing --eh-frame-hdr to linker \\ --emit-relocs Enable output of relocation sections for post build tools \\ -dynamic Force output to be dynamically linked \\ -static Force output to be statically linked \\ -Bsymbolic Bind global references locally \\ --subsystem [subsystem] (Windows) /SUBSYSTEM:<subsystem> to the linker\n" \\ --stack [size] Override default stack size \\ --image-base [addr] Set base address for executable image \\ -framework [name] (Darwin) link against framework \\ -F[dir] (Darwin) add search path for frameworks \\ \\Test Options: \\ --test-filter [text] Skip tests that do not match filter \\ --test-name-prefix [text] Add prefix to all tests \\ --test-cmd [arg] Specify test execution command one arg at a time \\ --test-cmd-bin Appends test binary path to test cmd args \\ --test-evented-io Runs the test in evented I/O mode \\ \\Debug Options (Zig Compiler Development): \\ -ftime-report Print timing diagnostics \\ -fstack-report Print stack size diagnostics \\ --verbose-link Display linker invocations \\ --verbose-cc Display C compiler invocations \\ --verbose-tokenize Enable compiler debug output for tokenization \\ --verbose-ast Enable compiler debug output for AST parsing \\ --verbose-ir Enable compiler debug output for Zig IR \\ --verbose-llvm-ir Enable compiler debug output for LLVM IR \\ --verbose-cimport Enable compiler debug output for C imports \\ --verbose-llvm-cpu-features Enable compiler debug output for LLVM CPU features \\ ; const repl_help = \\Commands: \\ update Detect changes to source files and update output files. 
\\ help Print this text \\ exit Quit this repl \\ ; const SOName = union(enum) { no, yes_default_value, yes: []const u8, }; const EmitBin = union(enum) { no, yes_default_path, yes: []const u8, yes_a_out, }; const Emit = union(enum) { no, yes_default_path, yes: []const u8, const Resolved = struct { data: ?Compilation.EmitLoc, dir: ?fs.Dir, fn deinit(self: *Resolved) void { if (self.dir) |*dir| { dir.close(); } } }; fn resolve(emit: Emit, default_basename: []const u8) !Resolved { var resolved: Resolved = .{ .data = null, .dir = null }; errdefer resolved.deinit(); switch (emit) { .no => {}, .yes_default_path => { resolved.data = Compilation.EmitLoc{ .directory = .{ .path = null, .handle = fs.cwd() }, .basename = default_basename, }; }, .yes => |full_path| { const basename = fs.path.basename(full_path); if (fs.path.dirname(full_path)) |dirname| { const handle = try fs.cwd().openDir(dirname, .{}); resolved = .{ .dir = handle, .data = Compilation.EmitLoc{ .basename = basename, .directory = .{ .path = dirname, .handle = handle, }, }, }; } else { resolved.data = Compilation.EmitLoc{ .basename = basename, .directory = .{ .path = null, .handle = fs.cwd() }, }; } }, } return resolved; } }; fn optionalStringEnvVar(arena: *Allocator, name: []const u8) !?[]const u8 { if (std.process.getEnvVarOwned(arena, name)) |value| { return value; } else |err| switch (err) { error.EnvironmentVariableNotFound => return null, else => |e| return e, } } fn buildOutputType( gpa: *Allocator, arena: *Allocator, all_args: []const []const u8, arg_mode: union(enum) { build: std.builtin.OutputMode, cc, cpp, translate_c, zig_test, run, }, ) !void { var color: Color = .auto; var optimize_mode: std.builtin.Mode = .Debug; var provided_name: ?[]const u8 = null; var link_mode: ?std.builtin.LinkMode = null; var dll_export_fns: ?bool = null; var root_src_file: ?[]const u8 = null; var version: std.builtin.Version = .{ .major = 0, .minor = 0, .patch = 0 }; var have_version = false; var strip = false; var single_threaded = false; var function_sections = false; var watch = false; var verbose_link = false; var verbose_cc = false; var verbose_tokenize = false; var verbose_ast = false; var verbose_ir = false; var verbose_llvm_ir = false; var verbose_cimport = false; var verbose_llvm_cpu_features = false; var time_report = false; var stack_report = false; var show_builtin = false; var emit_bin: EmitBin = .yes_default_path; var emit_asm: Emit = .no; var emit_llvm_ir: Emit = .no; var emit_zir: Emit = .no; var emit_docs: Emit = .no; var emit_analysis: Emit = .no; var target_arch_os_abi: []const u8 = "native"; var target_mcpu: ?[]const u8 = null; var target_dynamic_linker: ?[]const u8 = null; var target_ofmt: ?[]const u8 = null; var output_mode: std.builtin.OutputMode = undefined; var emit_h: Emit = .no; var soname: SOName = undefined; var ensure_libc_on_non_freestanding = false; var ensure_libcpp_on_non_freestanding = false; var link_libc = false; var link_libcpp = false; var want_native_include_dirs = false; var enable_cache: ?bool = null; var want_pic: ?bool = null; var want_pie: ?bool = null; var want_sanitize_c: ?bool = null; var want_stack_check: ?bool = null; var want_valgrind: ?bool = null; var want_compiler_rt: ?bool = null; var rdynamic: bool = false; var linker_script: ?[]const u8 = null; var version_script: ?[]const u8 = null; var disable_c_depfile = false; var linker_gc_sections: ?bool = null; var linker_allow_shlib_undefined: ?bool = null; var linker_bind_global_refs_locally: ?bool = null; var linker_z_nodelete = false; var 
linker_z_defs = false; var test_evented_io = false; var stack_size_override: ?u64 = null; var image_base_override: ?u64 = null; var use_llvm: ?bool = null; var use_lld: ?bool = null; var use_clang: ?bool = null; var link_eh_frame_hdr = false; var link_emit_relocs = false; var each_lib_rpath: ?bool = null; var libc_paths_file: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LIBC"); var machine_code_model: std.builtin.CodeModel = .default; var runtime_args_start: ?usize = null; var test_filter: ?[]const u8 = null; var test_name_prefix: ?[]const u8 = null; var override_local_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LOCAL_CACHE_DIR"); var override_global_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_GLOBAL_CACHE_DIR"); var override_lib_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LIB_DIR"); var main_pkg_path: ?[]const u8 = null; var clang_preprocessor_mode: Compilation.ClangPreprocessorMode = .no; var subsystem: ?std.Target.SubSystem = null; var system_libs = std.ArrayList([]const u8).init(gpa); defer system_libs.deinit(); var clang_argv = std.ArrayList([]const u8).init(gpa); defer clang_argv.deinit(); var extra_cflags = std.ArrayList([]const u8).init(gpa); defer extra_cflags.deinit(); var lld_argv = std.ArrayList([]const u8).init(gpa); defer lld_argv.deinit(); var lib_dirs = std.ArrayList([]const u8).init(gpa); defer lib_dirs.deinit(); var rpath_list = std.ArrayList([]const u8).init(gpa); defer rpath_list.deinit(); var c_source_files = std.ArrayList(Compilation.CSourceFile).init(gpa); defer c_source_files.deinit(); var link_objects = std.ArrayList([]const u8).init(gpa); defer link_objects.deinit(); var framework_dirs = std.ArrayList([]const u8).init(gpa); defer framework_dirs.deinit(); var frameworks = std.ArrayList([]const u8).init(gpa); defer frameworks.deinit(); // null means replace with the test executable binary var test_exec_args = std.ArrayList(?[]const u8).init(gpa); defer test_exec_args.deinit(); const pkg_tree_root = try gpa.create(Package); // This package only exists to clean up the code parsing --pkg-begin and // --pkg-end flags. Use dummy values that are safe for the destroy call. 
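// (The packages parsed into this placeholder tree are transferred onto the real root
// package once it has been created, further below.)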
pkg_tree_root.* = .{ .root_src_directory = .{ .path = null, .handle = fs.cwd() }, .root_src_path = &[0]u8{}, }; defer pkg_tree_root.destroy(gpa); var cur_pkg: *Package = pkg_tree_root; switch (arg_mode) { .build, .translate_c, .zig_test, .run => { var optimize_mode_string: ?[]const u8 = null; switch (arg_mode) { .build => |m| { output_mode = m; }, .translate_c => { emit_bin = .no; output_mode = .Obj; }, .zig_test, .run => { output_mode = .Exe; }, else => unreachable, } soname = .yes_default_value; const args = all_args[2..]; var i: usize = 0; args_loop: while (i < args.len) : (i += 1) { const arg = args[i]; if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { try io.getStdOut().writeAll(usage_build_generic); return cleanExit(); } else if (mem.eql(u8, arg, "--")) { if (arg_mode == .run) { // The index refers to all_args so skip `zig` `run` // and `--` runtime_args_start = i + 3; break :args_loop; } else { fatal("unexpected end-of-parameter mark: --", .{}); } } else if (mem.eql(u8, arg, "--pkg-begin")) { if (i + 2 >= args.len) fatal("Expected 2 arguments after {}", .{arg}); i += 1; const pkg_name = args[i]; i += 1; const pkg_path = args[i]; const new_cur_pkg = Package.create( gpa, fs.path.dirname(pkg_path), fs.path.basename(pkg_path), ) catch |err| { fatal("Failed to add package at path {}: {}", .{ pkg_path, @errorName(err) }); }; new_cur_pkg.parent = cur_pkg; try cur_pkg.add(gpa, pkg_name, new_cur_pkg); cur_pkg = new_cur_pkg; } else if (mem.eql(u8, arg, "--pkg-end")) { cur_pkg = cur_pkg.parent orelse fatal("encountered --pkg-end with no matching --pkg-begin", .{}); } else if (mem.eql(u8, arg, "--main-pkg-path")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; main_pkg_path = args[i]; } else if (mem.eql(u8, arg, "-cflags")) { extra_cflags.shrinkRetainingCapacity(0); while (true) { i += 1; if (i + 1 >= args.len) fatal("expected -- after -cflags", .{}); if (mem.eql(u8, args[i], "--")) break; try extra_cflags.append(args[i]); } } else if (mem.eql(u8, arg, "--color")) { if (i + 1 >= args.len) { fatal("expected [auto|on|off] after --color", .{}); } i += 1; const next_arg = args[i]; color = std.meta.stringToEnum(Color, next_arg) orelse { fatal("expected [auto|on|off] after --color, found '{}'", .{next_arg}); }; } else if (mem.eql(u8, arg, "--subsystem")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; if (mem.eql(u8, args[i], "console")) { subsystem = .Console; } else if (mem.eql(u8, args[i], "windows")) { subsystem = .Windows; } else if (mem.eql(u8, args[i], "posix")) { subsystem = .Posix; } else if (mem.eql(u8, args[i], "native")) { subsystem = .Native; } else if (mem.eql(u8, args[i], "efi_application")) { subsystem = .EfiApplication; } else if (mem.eql(u8, args[i], "efi_boot_service_driver")) { subsystem = .EfiBootServiceDriver; } else if (mem.eql(u8, args[i], "efi_rom")) { subsystem = .EfiRom; } else if (mem.eql(u8, args[i], "efi_runtime_driver")) { subsystem = .EfiRuntimeDriver; } else { fatal("invalid: --subsystem: '{s}'. 
Options are:\n{s}", .{ args[i], \\ console \\ windows \\ posix \\ native \\ efi_application \\ efi_boot_service_driver \\ efi_rom \\ efi_runtime_driver \\ }); } } else if (mem.eql(u8, arg, "-O")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; optimize_mode_string = args[i]; } else if (mem.eql(u8, arg, "--stack")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; stack_size_override = std.fmt.parseUnsigned(u64, args[i], 0) catch |err| { fatal("unable to parse '{}': {}", .{ arg, @errorName(err) }); }; } else if (mem.eql(u8, arg, "--image-base")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; image_base_override = std.fmt.parseUnsigned(u64, args[i], 0) catch |err| { fatal("unable to parse '{}': {}", .{ arg, @errorName(err) }); }; } else if (mem.eql(u8, arg, "--name")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; provided_name = args[i]; } else if (mem.eql(u8, arg, "-rpath")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; try rpath_list.append(args[i]); } else if (mem.eql(u8, arg, "--library-directory") or mem.eql(u8, arg, "-L")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; try lib_dirs.append(args[i]); } else if (mem.eql(u8, arg, "-F")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; try framework_dirs.append(args[i]); } else if (mem.eql(u8, arg, "-framework")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; try frameworks.append(args[i]); } else if (mem.eql(u8, arg, "-T") or mem.eql(u8, arg, "--script")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; linker_script = args[i]; } else if (mem.eql(u8, arg, "--version-script")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; version_script = args[i]; } else if (mem.eql(u8, arg, "--library") or mem.eql(u8, arg, "-l")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); // We don't know whether this library is part of libc or libc++ until we resolve the target. // So we simply append to the list for now. 
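// (Entries that turn out to name libc or libc++ are removed from system_libs again
// after target detection and replaced by the link_libc/link_libcpp flags.)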
i += 1; try system_libs.append(args[i]); } else if (mem.eql(u8, arg, "-D") or mem.eql(u8, arg, "-isystem") or mem.eql(u8, arg, "-I") or mem.eql(u8, arg, "-dirafter")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; try clang_argv.append(arg); try clang_argv.append(args[i]); } else if (mem.eql(u8, arg, "--version")) { if (i + 1 >= args.len) { fatal("expected parameter after --version", .{}); } i += 1; version = std.builtin.Version.parse(args[i]) catch |err| { fatal("unable to parse --version '{}': {}", .{ args[i], @errorName(err) }); }; have_version = true; } else if (mem.eql(u8, arg, "-target")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; target_arch_os_abi = args[i]; } else if (mem.eql(u8, arg, "-mcpu")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; target_mcpu = args[i]; } else if (mem.eql(u8, arg, "-mcmodel")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; machine_code_model = parseCodeModel(args[i]); } else if (mem.startsWith(u8, arg, "-ofmt=")) { target_ofmt = arg["-ofmt=".len..]; } else if (mem.startsWith(u8, arg, "-mcpu=")) { target_mcpu = arg["-mcpu=".len..]; } else if (mem.startsWith(u8, arg, "-mcmodel=")) { machine_code_model = parseCodeModel(arg["-mcmodel=".len..]); } else if (mem.startsWith(u8, arg, "-O")) { optimize_mode_string = arg["-O".len..]; } else if (mem.eql(u8, arg, "--dynamic-linker")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; target_dynamic_linker = args[i]; } else if (mem.eql(u8, arg, "--libc")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; libc_paths_file = args[i]; } else if (mem.eql(u8, arg, "--test-filter")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; test_filter = args[i]; } else if (mem.eql(u8, arg, "--test-name-prefix")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; test_name_prefix = args[i]; } else if (mem.eql(u8, arg, "--test-cmd")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; try test_exec_args.append(args[i]); } else if (mem.eql(u8, arg, "--cache-dir")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; override_local_cache_dir = args[i]; } else if (mem.eql(u8, arg, "--global-cache-dir")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; override_global_cache_dir = args[i]; } else if (mem.eql(u8, arg, "--override-lib-dir")) { if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg}); i += 1; override_lib_dir = args[i]; } else if (mem.eql(u8, arg, "-fcompiler-rt")) { want_compiler_rt = true; } else if (mem.eql(u8, arg, "-fno-compiler-rt")) { want_compiler_rt = false; } else if (mem.eql(u8, arg, "-feach-lib-rpath")) { each_lib_rpath = true; } else if (mem.eql(u8, arg, "-fno-each-lib-rpath")) { each_lib_rpath = false; } else if (mem.eql(u8, arg, "--enable-cache")) { enable_cache = true; } else if (mem.eql(u8, arg, "--test-cmd-bin")) { try test_exec_args.append(null); } else if (mem.eql(u8, arg, "--test-evented-io")) { test_evented_io = true; } else if (mem.eql(u8, arg, "--watch")) { watch = true; } else if (mem.eql(u8, arg, "-ftime-report")) { time_report = true; } else if (mem.eql(u8, arg, "-fstack-report")) { stack_report = true; } else if (mem.eql(u8, arg, "-fPIC")) { want_pic = true; } else if (mem.eql(u8, arg, "-fno-PIC")) { want_pic = false; } else if (mem.eql(u8, arg, "-fPIE")) { 
want_pie = true; } else if (mem.eql(u8, arg, "-fno-PIE")) { want_pie = false; } else if (mem.eql(u8, arg, "-fstack-check")) { want_stack_check = true; } else if (mem.eql(u8, arg, "-fno-stack-check")) { want_stack_check = false; } else if (mem.eql(u8, arg, "-fsanitize-c")) { want_sanitize_c = true; } else if (mem.eql(u8, arg, "-fno-sanitize-c")) { want_sanitize_c = false; } else if (mem.eql(u8, arg, "-fvalgrind")) { want_valgrind = true; } else if (mem.eql(u8, arg, "-fno-valgrind")) { want_valgrind = false; } else if (mem.eql(u8, arg, "-fLLVM")) { use_llvm = true; } else if (mem.eql(u8, arg, "-fno-LLVM")) { use_llvm = false; } else if (mem.eql(u8, arg, "-fLLD")) { use_lld = true; } else if (mem.eql(u8, arg, "-fno-LLD")) { use_lld = false; } else if (mem.eql(u8, arg, "-fClang")) { use_clang = true; } else if (mem.eql(u8, arg, "-fno-Clang")) { use_clang = false; } else if (mem.eql(u8, arg, "-rdynamic")) { rdynamic = true; } else if (mem.eql(u8, arg, "-fsoname")) { soname = .yes_default_value; } else if (mem.startsWith(u8, arg, "-fsoname=")) { soname = .{ .yes = arg["-fsoname=".len..] }; } else if (mem.eql(u8, arg, "-fno-soname")) { soname = .no; } else if (mem.eql(u8, arg, "-femit-bin")) { emit_bin = .yes_default_path; } else if (mem.startsWith(u8, arg, "-femit-bin=")) { emit_bin = .{ .yes = arg["-femit-bin=".len..] }; } else if (mem.eql(u8, arg, "-fno-emit-bin")) { emit_bin = .no; } else if (mem.eql(u8, arg, "-femit-zir")) { emit_zir = .yes_default_path; } else if (mem.startsWith(u8, arg, "-femit-zir=")) { emit_zir = .{ .yes = arg["-femit-zir=".len..] }; } else if (mem.eql(u8, arg, "-fno-emit-zir")) { emit_zir = .no; } else if (mem.eql(u8, arg, "-femit-h")) { emit_h = .yes_default_path; } else if (mem.startsWith(u8, arg, "-femit-h=")) { emit_h = .{ .yes = arg["-femit-h=".len..] }; } else if (mem.eql(u8, arg, "-fno-emit-h")) { emit_h = .no; } else if (mem.eql(u8, arg, "-femit-asm")) { emit_asm = .yes_default_path; } else if (mem.startsWith(u8, arg, "-femit-asm=")) { emit_asm = .{ .yes = arg["-femit-asm=".len..] }; } else if (mem.eql(u8, arg, "-fno-emit-asm")) { emit_asm = .no; } else if (mem.eql(u8, arg, "-femit-llvm-ir")) { emit_llvm_ir = .yes_default_path; } else if (mem.startsWith(u8, arg, "-femit-llvm-ir=")) { emit_llvm_ir = .{ .yes = arg["-femit-llvm-ir=".len..] }; } else if (mem.eql(u8, arg, "-fno-emit-llvm-ir")) { emit_llvm_ir = .no; } else if (mem.eql(u8, arg, "-femit-docs")) { emit_docs = .yes_default_path; } else if (mem.startsWith(u8, arg, "-femit-docs=")) { emit_docs = .{ .yes = arg["-femit-docs=".len..] }; } else if (mem.eql(u8, arg, "-fno-emit-docs")) { emit_docs = .no; } else if (mem.eql(u8, arg, "-femit-analysis")) { emit_analysis = .yes_default_path; } else if (mem.startsWith(u8, arg, "-femit-analysis=")) { emit_analysis = .{ .yes = arg["-femit-analysis=".len..] 
}; } else if (mem.eql(u8, arg, "-fno-emit-analysis")) { emit_analysis = .no; } else if (mem.eql(u8, arg, "-dynamic")) { link_mode = .Dynamic; } else if (mem.eql(u8, arg, "-static")) { link_mode = .Static; } else if (mem.eql(u8, arg, "-fdll-export-fns")) { dll_export_fns = true; } else if (mem.eql(u8, arg, "-fno-dll-export-fns")) { dll_export_fns = false; } else if (mem.eql(u8, arg, "--show-builtin")) { show_builtin = true; emit_bin = .no; } else if (mem.eql(u8, arg, "--strip")) { strip = true; } else if (mem.eql(u8, arg, "--single-threaded")) { single_threaded = true; } else if (mem.eql(u8, arg, "-ffunction-sections")) { function_sections = true; } else if (mem.eql(u8, arg, "--eh-frame-hdr")) { link_eh_frame_hdr = true; } else if (mem.eql(u8, arg, "--emit-relocs")) { link_emit_relocs = true; } else if (mem.eql(u8, arg, "-Bsymbolic")) { linker_bind_global_refs_locally = true; } else if (mem.eql(u8, arg, "--verbose-link")) { verbose_link = true; } else if (mem.eql(u8, arg, "--verbose-cc")) { verbose_cc = true; } else if (mem.eql(u8, arg, "--verbose-tokenize")) { verbose_tokenize = true; } else if (mem.eql(u8, arg, "--verbose-ast")) { verbose_ast = true; } else if (mem.eql(u8, arg, "--verbose-ir")) { verbose_ir = true; } else if (mem.eql(u8, arg, "--verbose-llvm-ir")) { verbose_llvm_ir = true; } else if (mem.eql(u8, arg, "--verbose-cimport")) { verbose_cimport = true; } else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) { verbose_llvm_cpu_features = true; } else if (mem.startsWith(u8, arg, "-T")) { linker_script = arg[2..]; } else if (mem.startsWith(u8, arg, "-L")) { try lib_dirs.append(arg[2..]); } else if (mem.startsWith(u8, arg, "-F")) { try framework_dirs.append(arg[2..]); } else if (mem.startsWith(u8, arg, "-l")) { // We don't know whether this library is part of libc or libc++ until we resolve the target. // So we simply append to the list for now. 
try system_libs.append(arg[2..]); } else if (mem.startsWith(u8, arg, "-D") or mem.startsWith(u8, arg, "-I")) { try clang_argv.append(arg); } else { fatal("unrecognized parameter: '{}'", .{arg}); } } else switch (Compilation.classifyFileExt(arg)) { .object, .static_library, .shared_library => { try link_objects.append(arg); }, .assembly, .c, .cpp, .h, .ll, .bc => { try c_source_files.append(.{ .src_path = arg, .extra_flags = try arena.dupe([]const u8, extra_cflags.items), }); }, .zig, .zir => { if (root_src_file) |other| { fatal("found another zig file '{}' after root source file '{}'", .{ arg, other }); } else { root_src_file = arg; } }, .unknown => { fatal("unrecognized file extension of parameter '{}'", .{arg}); }, } } if (optimize_mode_string) |s| { optimize_mode = std.meta.stringToEnum(std.builtin.Mode, s) orelse fatal("unrecognized optimization mode: '{}'", .{s}); } }, .cc, .cpp => { emit_h = .no; soname = .no; strip = true; ensure_libc_on_non_freestanding = true; ensure_libcpp_on_non_freestanding = arg_mode == .cpp; want_native_include_dirs = true; const COutMode = enum { link, object, assembly, preprocessor, }; var c_out_mode: COutMode = .link; var out_path: ?[]const u8 = null; var is_shared_lib = false; var linker_args = std.ArrayList([]const u8).init(arena); var it = ClangArgIterator.init(arena, all_args); while (it.has_next) { it.next() catch |err| { fatal("unable to parse command line parameters: {}", .{@errorName(err)}); }; switch (it.zig_equivalent) { .target => target_arch_os_abi = it.only_arg, // example: -target riscv64-linux-unknown .o => out_path = it.only_arg, // -o .c => c_out_mode = .object, // -c .asm_only => c_out_mode = .assembly, // -S .preprocess_only => c_out_mode = .preprocessor, // -E .other => { try clang_argv.appendSlice(it.other_args); }, .positional => { const file_ext = Compilation.classifyFileExt(mem.spanZ(it.only_arg)); switch (file_ext) { .assembly, .c, .cpp, .ll, .bc, .h => try c_source_files.append(.{ .src_path = it.only_arg }), .unknown, .shared_library, .object, .static_library => { try link_objects.append(it.only_arg); }, .zig, .zir => { if (root_src_file) |other| { fatal("found another zig file '{}' after root source file '{}'", .{ it.only_arg, other }); } else { root_src_file = it.only_arg; } }, } }, .l => { // -l // We don't know whether this library is part of libc or libc++ until we resolve the target. // So we simply append to the list for now. try system_libs.append(it.only_arg); }, .ignore => {}, .driver_punt => { // Never mind what we're doing, just pass the args directly. For example --help. return punt_to_clang(arena, all_args); }, .pic => want_pic = true, .no_pic => want_pic = false, .pie => want_pie = true, .no_pie => want_pie = false, .nostdlib => ensure_libc_on_non_freestanding = false, .nostdlib_cpp => ensure_libcpp_on_non_freestanding = false, .shared => { link_mode = .Dynamic; is_shared_lib = true; }, .rdynamic => rdynamic = true, .wl => { var split_it = mem.split(it.only_arg, ","); while (split_it.next()) |linker_arg| { try linker_args.append(linker_arg); } }, .optimize => { // Alright, what release mode do they want? const level = if (it.only_arg.len >= 1 and it.only_arg[0] == 'O') it.only_arg[1..] 
else it.only_arg; if (mem.eql(u8, level, "s") or mem.eql(u8, level, "z")) { optimize_mode = .ReleaseSmall; } else if (mem.eql(u8, level, "1") or mem.eql(u8, level, "2") or mem.eql(u8, level, "3") or mem.eql(u8, level, "4") or mem.eql(u8, level, "fast")) { optimize_mode = .ReleaseFast; } else if (mem.eql(u8, level, "g") or mem.eql(u8, level, "0")) { optimize_mode = .Debug; } else { try clang_argv.appendSlice(it.other_args); } }, .debug => { strip = false; if (mem.eql(u8, it.only_arg, "g")) { // We handled with strip = false above. } else if (mem.eql(u8, it.only_arg, "g1") or mem.eql(u8, it.only_arg, "gline-tables-only")) { // We handled with strip = false above. but we also want reduced debug info. try clang_argv.append("-gline-tables-only"); } else { try clang_argv.appendSlice(it.other_args); } }, .sanitize => { if (mem.eql(u8, it.only_arg, "undefined")) { want_sanitize_c = true; } else { try clang_argv.appendSlice(it.other_args); } }, .linker_script => linker_script = it.only_arg, .verbose => { verbose_link = true; // Have Clang print more infos, some tools such as CMake // parse this to discover any implicit include and // library dir to look-up into. try clang_argv.append("-v"); }, .dry_run => { verbose_link = true; try clang_argv.append("-###"); // XXX: Don't execute anything! }, .for_linker => try linker_args.append(it.only_arg), .linker_input_z => { try linker_args.append("-z"); try linker_args.append(it.only_arg); }, .lib_dir => try lib_dirs.append(it.only_arg), .mcpu => target_mcpu = it.only_arg, .dep_file => { disable_c_depfile = true; try clang_argv.appendSlice(it.other_args); }, .framework_dir => try framework_dirs.append(it.only_arg), .framework => try frameworks.append(it.only_arg), .nostdlibinc => want_native_include_dirs = false, } } // Parse linker args. var i: usize = 0; while (i < linker_args.items.len) : (i += 1) { const arg = linker_args.items[i]; if (mem.eql(u8, arg, "-soname")) { i += 1; if (i >= linker_args.items.len) { fatal("expected linker arg after '{}'", .{arg}); } const name = linker_args.items[i]; soname = .{ .yes = name }; // Use it as --name. 
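// Derive the root name from the SONAME by stripping a leading "lib" and a trailing
// ".so" or ".so.<version>" suffix.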
// Example: libsoundio.so.2 var prefix: usize = 0; if (mem.startsWith(u8, name, "lib")) { prefix = 3; } var end: usize = name.len; if (mem.endsWith(u8, name, ".so")) { end -= 3; } else { var found_digit = false; while (end > 0 and std.ascii.isDigit(name[end - 1])) { found_digit = true; end -= 1; } if (found_digit and end > 0 and name[end - 1] == '.') { end -= 1; } else { end = name.len; } if (mem.endsWith(u8, name[prefix..end], ".so")) { end -= 3; } } provided_name = name[prefix..end]; } else if (mem.eql(u8, arg, "-rpath")) { i += 1; if (i >= linker_args.items.len) { fatal("expected linker arg after '{}'", .{arg}); } try rpath_list.append(linker_args.items[i]); } else if (mem.eql(u8, arg, "-I") or mem.eql(u8, arg, "--dynamic-linker") or mem.eql(u8, arg, "-dynamic-linker")) { i += 1; if (i >= linker_args.items.len) { fatal("expected linker arg after '{}'", .{arg}); } target_dynamic_linker = linker_args.items[i]; } else if (mem.eql(u8, arg, "-E") or mem.eql(u8, arg, "--export-dynamic") or mem.eql(u8, arg, "-export-dynamic")) { rdynamic = true; } else if (mem.eql(u8, arg, "--version-script")) { i += 1; if (i >= linker_args.items.len) { fatal("expected linker arg after '{}'", .{arg}); } version_script = linker_args.items[i]; } else if (mem.startsWith(u8, arg, "-O")) { try lld_argv.append(arg); } else if (mem.eql(u8, arg, "--gc-sections")) { linker_gc_sections = true; } else if (mem.eql(u8, arg, "--no-gc-sections")) { linker_gc_sections = false; } else if (mem.eql(u8, arg, "--allow-shlib-undefined") or mem.eql(u8, arg, "-allow-shlib-undefined")) { linker_allow_shlib_undefined = true; } else if (mem.eql(u8, arg, "--no-allow-shlib-undefined") or mem.eql(u8, arg, "-no-allow-shlib-undefined")) { linker_allow_shlib_undefined = false; } else if (mem.eql(u8, arg, "-Bsymbolic")) { linker_bind_global_refs_locally = true; } else if (mem.eql(u8, arg, "-z")) { i += 1; if (i >= linker_args.items.len) { fatal("expected linker arg after '{}'", .{arg}); } const z_arg = linker_args.items[i]; if (mem.eql(u8, z_arg, "nodelete")) { linker_z_nodelete = true; } else if (mem.eql(u8, z_arg, "defs")) { linker_z_defs = true; } else { warn("unsupported linker arg: -z {}", .{z_arg}); } } else if (mem.eql(u8, arg, "--major-image-version")) { i += 1; if (i >= linker_args.items.len) { fatal("expected linker arg after '{}'", .{arg}); } version.major = std.fmt.parseUnsigned(u32, linker_args.items[i], 10) catch |err| { fatal("unable to parse '{}': {}", .{ arg, @errorName(err) }); }; have_version = true; } else if (mem.eql(u8, arg, "--minor-image-version")) { i += 1; if (i >= linker_args.items.len) { fatal("expected linker arg after '{}'", .{arg}); } version.minor = std.fmt.parseUnsigned(u32, linker_args.items[i], 10) catch |err| { fatal("unable to parse '{}': {}", .{ arg, @errorName(err) }); }; have_version = true; } else if (mem.eql(u8, arg, "--stack")) { i += 1; if (i >= linker_args.items.len) { fatal("expected linker arg after '{}'", .{arg}); } stack_size_override = std.fmt.parseUnsigned(u64, linker_args.items[i], 0) catch |err| { fatal("unable to parse '{}': {}", .{ arg, @errorName(err) }); }; } else if (mem.eql(u8, arg, "--image-base")) { i += 1; if (i >= linker_args.items.len) { fatal("expected linker arg after '{}'", .{arg}); } image_base_override = std.fmt.parseUnsigned(u64, linker_args.items[i], 0) catch |err| { fatal("unable to parse '{}': {}", .{ arg, @errorName(err) }); }; } else { warn("unsupported linker arg: {}", .{arg}); } } if (want_sanitize_c) |wsc| { if (wsc and optimize_mode == .ReleaseFast) { optimize_mode 
= .ReleaseSafe; } } switch (c_out_mode) { .link => { output_mode = if (is_shared_lib) .Lib else .Exe; emit_bin = if (out_path) |p| .{ .yes = p } else EmitBin.yes_a_out; enable_cache = true; }, .object => { output_mode = .Obj; if (out_path) |p| { emit_bin = .{ .yes = p }; } else { emit_bin = .yes_default_path; } }, .assembly => { output_mode = .Obj; emit_bin = .no; if (out_path) |p| { emit_asm = .{ .yes = p }; } else { emit_asm = .yes_default_path; } }, .preprocessor => { output_mode = .Obj; // An error message is generated when there is more than 1 C source file. if (c_source_files.items.len != 1) { // For example `zig cc` and no args should print the "no input files" message. return punt_to_clang(arena, all_args); } if (out_path) |p| { emit_bin = .{ .yes = p }; clang_preprocessor_mode = .yes; } else { clang_preprocessor_mode = .stdout; } }, } if (c_source_files.items.len == 0 and link_objects.items.len == 0) { // For example `zig cc` and no args should print the "no input files" message. return punt_to_clang(arena, all_args); } }, } if (arg_mode == .translate_c and c_source_files.items.len != 1) { fatal("translate-c expects exactly 1 source file (found {})", .{c_source_files.items.len}); } if (root_src_file == null and arg_mode == .zig_test) { fatal("`zig test` expects a zig source file argument", .{}); } const root_name = if (provided_name) |n| n else blk: { if (arg_mode == .zig_test) { break :blk "test"; } else if (root_src_file) |file| { const basename = fs.path.basename(file); break :blk basename[0 .. basename.len - fs.path.extension(basename).len]; } else if (c_source_files.items.len >= 1) { const basename = fs.path.basename(c_source_files.items[0].src_path); break :blk basename[0 .. basename.len - fs.path.extension(basename).len]; } else if (link_objects.items.len >= 1) { const basename = fs.path.basename(link_objects.items[0]); break :blk basename[0 .. basename.len - fs.path.extension(basename).len]; } else if (emit_bin == .yes) { const basename = fs.path.basename(emit_bin.yes); break :blk basename[0 .. basename.len - fs.path.extension(basename).len]; } else if (show_builtin) { break :blk "builtin"; } else if (arg_mode == .run) { fatal("`zig run` expects at least one positional argument", .{}); // TODO once the attempt to unwrap error: LinkingWithoutZigSourceUnimplemented // is solved, remove the above fatal() and uncomment the `break` below. 
//break :blk "run"; } else { fatal("expected a positional argument, -femit-bin=[path], --show-builtin, or --name [name]", .{}); } }; var diags: std.zig.CrossTarget.ParseOptions.Diagnostics = .{}; const cross_target = std.zig.CrossTarget.parse(.{ .arch_os_abi = target_arch_os_abi, .cpu_features = target_mcpu, .dynamic_linker = target_dynamic_linker, .diagnostics = &diags, }) catch |err| switch (err) { error.UnknownCpuModel => { help: { var help_text = std.ArrayList(u8).init(arena); for (diags.arch.?.allCpuModels()) |cpu| { help_text.writer().print(" {}\n", .{cpu.name}) catch break :help; } std.log.info("Available CPUs for architecture '{}': {}", .{ @tagName(diags.arch.?), help_text.items, }); } fatal("Unknown CPU: '{}'", .{diags.cpu_name.?}); }, error.UnknownCpuFeature => { help: { var help_text = std.ArrayList(u8).init(arena); for (diags.arch.?.allFeaturesList()) |feature| { help_text.writer().print(" {}: {}\n", .{ feature.name, feature.description }) catch break :help; } std.log.info("Available CPU features for architecture '{}': {}", .{ @tagName(diags.arch.?), help_text.items, }); } fatal("Unknown CPU feature: '{}'", .{diags.unknown_feature_name}); }, else => |e| return e, }; const target_info = try detectNativeTargetInfo(gpa, cross_target); if (target_info.target.os.tag != .freestanding) { if (ensure_libc_on_non_freestanding) link_libc = true; if (ensure_libcpp_on_non_freestanding) link_libcpp = true; } // Now that we have target info, we can find out if any of the system libraries // are part of libc or libc++. We remove them from the list and communicate their // existence via flags instead. { var i: usize = 0; while (i < system_libs.items.len) { const lib_name = system_libs.items[i]; if (target_util.is_libc_lib_name(target_info.target, lib_name)) { link_libc = true; _ = system_libs.orderedRemove(i); continue; } if (target_util.is_libcpp_lib_name(target_info.target, lib_name)) { link_libcpp = true; _ = system_libs.orderedRemove(i); continue; } if (std.fs.path.isAbsolute(lib_name)) { fatal("cannot use absolute path as a system library: {s}", .{lib_name}); } i += 1; } } if (cross_target.isNativeOs() and (system_libs.items.len != 0 or want_native_include_dirs)) { const paths = std.zig.system.NativePaths.detect(arena) catch |err| { fatal("unable to detect native system paths: {}", .{@errorName(err)}); }; for (paths.warnings.items) |warning| { warn("{}", .{warning}); } try clang_argv.ensureCapacity(clang_argv.items.len + paths.include_dirs.items.len * 2); for (paths.include_dirs.items) |include_dir| { clang_argv.appendAssumeCapacity("-isystem"); clang_argv.appendAssumeCapacity(include_dir); } for (paths.lib_dirs.items) |lib_dir| { try lib_dirs.append(lib_dir); } for (paths.rpaths.items) |rpath| { try rpath_list.append(rpath); } } const object_format: std.Target.ObjectFormat = blk: { const ofmt = target_ofmt orelse break :blk target_info.target.getObjectFormat(); if (mem.eql(u8, ofmt, "elf")) { break :blk .elf; } else if (mem.eql(u8, ofmt, "c")) { break :blk .c; } else if (mem.eql(u8, ofmt, "coff")) { break :blk .coff; } else if (mem.eql(u8, ofmt, "pe")) { break :blk .pe; } else if (mem.eql(u8, ofmt, "macho")) { break :blk .macho; } else if (mem.eql(u8, ofmt, "wasm")) { break :blk .wasm; } else if (mem.eql(u8, ofmt, "hex")) { break :blk .hex; } else if (mem.eql(u8, ofmt, "raw")) { break :blk .raw; } else { fatal("unsupported object format: {}", .{ofmt}); } }; if (output_mode == .Obj and (object_format == .coff or object_format == .macho)) { const total_obj_count = c_source_files.items.len + 
@boolToInt(root_src_file != null) + link_objects.items.len; if (total_obj_count > 1) { fatal("{s} does not support linking multiple objects into one", .{@tagName(object_format)}); } } var cleanup_emit_bin_dir: ?fs.Dir = null; defer if (cleanup_emit_bin_dir) |*dir| dir.close(); const have_enable_cache = enable_cache orelse false; const optional_version = if (have_version) version else null; const resolved_soname: ?[]const u8 = switch (soname) { .yes => |explicit| explicit, .no => null, .yes_default_value => switch (object_format) { .elf => if (have_version) try std.fmt.allocPrint(arena, "lib{s}.so.{d}", .{ root_name, version.major }) else try std.fmt.allocPrint(arena, "lib{s}.so", .{root_name}), else => null, }, }; const a_out_basename = switch (object_format) { .pe, .coff => "a.exe", else => "a.out", }; const emit_bin_loc: ?Compilation.EmitLoc = switch (emit_bin) { .no => null, .yes_default_path => Compilation.EmitLoc{ .directory = blk: { switch (arg_mode) { .run, .zig_test => break :blk null, else => { if (have_enable_cache) { break :blk null; } else { break :blk .{ .path = null, .handle = fs.cwd() }; } }, } }, .basename = try std.zig.binNameAlloc(arena, .{ .root_name = root_name, .target = target_info.target, .output_mode = output_mode, .link_mode = link_mode, .object_format = object_format, .version = optional_version, }), }, .yes => |full_path| b: { const basename = fs.path.basename(full_path); if (have_enable_cache) { break :b Compilation.EmitLoc{ .basename = basename, .directory = null, }; } if (fs.path.dirname(full_path)) |dirname| { const handle = fs.cwd().openDir(dirname, .{}) catch |err| { fatal("unable to open output directory '{}': {}", .{ dirname, @errorName(err) }); }; cleanup_emit_bin_dir = handle; break :b Compilation.EmitLoc{ .basename = basename, .directory = .{ .path = dirname, .handle = handle, }, }; } else { break :b Compilation.EmitLoc{ .basename = basename, .directory = .{ .path = null, .handle = fs.cwd() }, }; } }, .yes_a_out => Compilation.EmitLoc{ .directory = null, .basename = a_out_basename, }, }; const default_h_basename = try std.fmt.allocPrint(arena, "{}.h", .{root_name}); var emit_h_resolved = try emit_h.resolve(default_h_basename); defer emit_h_resolved.deinit(); const default_asm_basename = try std.fmt.allocPrint(arena, "{}.s", .{root_name}); var emit_asm_resolved = try emit_asm.resolve(default_asm_basename); defer emit_asm_resolved.deinit(); const default_llvm_ir_basename = try std.fmt.allocPrint(arena, "{}.ll", .{root_name}); var emit_llvm_ir_resolved = try emit_llvm_ir.resolve(default_llvm_ir_basename); defer emit_llvm_ir_resolved.deinit(); const default_analysis_basename = try std.fmt.allocPrint(arena, "{}-analysis.json", .{root_name}); var emit_analysis_resolved = try emit_analysis.resolve(default_analysis_basename); defer emit_analysis_resolved.deinit(); var emit_docs_resolved = try emit_docs.resolve("docs"); defer emit_docs_resolved.deinit(); const zir_out_path: ?[]const u8 = switch (emit_zir) { .no => null, .yes_default_path => blk: { if (root_src_file) |rsf| { if (mem.endsWith(u8, rsf, ".zir")) { break :blk try std.fmt.allocPrint(arena, "{}.out.zir", .{root_name}); } } break :blk try std.fmt.allocPrint(arena, "{}.zir", .{root_name}); }, .yes => |p| p, }; const root_pkg: ?*Package = if (root_src_file) |src_path| blk: { if (main_pkg_path) |p| { const rel_src_path = try fs.path.relative(gpa, p, src_path); defer gpa.free(rel_src_path); break :blk try Package.create(gpa, p, rel_src_path); } else { break :blk try Package.create(gpa, 
fs.path.dirname(src_path), fs.path.basename(src_path)); } } else null; defer if (root_pkg) |p| p.destroy(gpa); // Transfer packages added with --pkg-begin/--pkg-end to the root package if (root_pkg) |pkg| { pkg.table = pkg_tree_root.table; pkg_tree_root.table = .{}; } const self_exe_path = try fs.selfExePathAlloc(arena); var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir| .{ .path = lib_dir, .handle = try fs.cwd().openDir(lib_dir, .{}), } else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| { fatal("unable to find zig installation directory: {}", .{@errorName(err)}); }; defer zig_lib_directory.handle.close(); var thread_pool: ThreadPool = undefined; try thread_pool.init(gpa); defer thread_pool.deinit(); var libc_installation: ?LibCInstallation = null; defer if (libc_installation) |*l| l.deinit(gpa); if (libc_paths_file) |paths_file| { libc_installation = LibCInstallation.parse(gpa, paths_file) catch |err| { fatal("unable to parse libc paths file: {}", .{@errorName(err)}); }; } var global_cache_directory: Compilation.Directory = l: { const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena); break :l .{ .handle = try fs.cwd().makeOpenPath(p, .{}), .path = p, }; }; defer global_cache_directory.handle.close(); var cleanup_local_cache_dir: ?fs.Dir = null; defer if (cleanup_local_cache_dir) |*dir| dir.close(); var local_cache_directory: Compilation.Directory = l: { if (override_local_cache_dir) |local_cache_dir_path| { const dir = try fs.cwd().makeOpenPath(local_cache_dir_path, .{}); cleanup_local_cache_dir = dir; break :l .{ .handle = dir, .path = local_cache_dir_path, }; } if (arg_mode == .run) { break :l global_cache_directory; } if (root_pkg) |pkg| { const cache_dir_path = try pkg.root_src_directory.join(arena, &[_][]const u8{"zig-cache"}); const dir = try pkg.root_src_directory.handle.makeOpenPath("zig-cache", .{}); cleanup_local_cache_dir = dir; break :l .{ .handle = dir, .path = cache_dir_path, }; } // Otherwise we really don't have a reasonable place to put the local cache directory, // so we utilize the global one. break :l global_cache_directory; }; if (build_options.have_llvm and emit_asm != .no) { // LLVM has no way to set this non-globally. 
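// The first element merely fills the argv[0] (program name) slot that LLVM's option parser expects; the Intel syntax flag is the only real setting being passed.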
const argv = [_][*:0]const u8{ "zig (LLVM option parsing)", "--x86-asm-syntax=intel" }; @import("llvm.zig").ParseCommandLineOptions(argv.len, &argv); } gimmeMoreOfThoseSweetSweetFileDescriptors(); const comp = Compilation.create(gpa, .{ .zig_lib_directory = zig_lib_directory, .local_cache_directory = local_cache_directory, .global_cache_directory = global_cache_directory, .root_name = root_name, .target = target_info.target, .is_native_os = cross_target.isNativeOs(), .is_native_abi = cross_target.isNativeAbi(), .dynamic_linker = target_info.dynamic_linker.get(), .output_mode = output_mode, .root_pkg = root_pkg, .emit_bin = emit_bin_loc, .emit_h = emit_h_resolved.data, .emit_asm = emit_asm_resolved.data, .emit_llvm_ir = emit_llvm_ir_resolved.data, .emit_docs = emit_docs_resolved.data, .emit_analysis = emit_analysis_resolved.data, .link_mode = link_mode, .dll_export_fns = dll_export_fns, .object_format = object_format, .optimize_mode = optimize_mode, .keep_source_files_loaded = zir_out_path != null, .clang_argv = clang_argv.items, .lld_argv = lld_argv.items, .lib_dirs = lib_dirs.items, .rpath_list = rpath_list.items, .c_source_files = c_source_files.items, .link_objects = link_objects.items, .framework_dirs = framework_dirs.items, .frameworks = frameworks.items, .system_libs = system_libs.items, .link_libc = link_libc, .link_libcpp = link_libcpp, .want_pic = want_pic, .want_pie = want_pie, .want_sanitize_c = want_sanitize_c, .want_stack_check = want_stack_check, .want_valgrind = want_valgrind, .want_compiler_rt = want_compiler_rt, .use_llvm = use_llvm, .use_lld = use_lld, .use_clang = use_clang, .rdynamic = rdynamic, .linker_script = linker_script, .version_script = version_script, .disable_c_depfile = disable_c_depfile, .soname = resolved_soname, .linker_gc_sections = linker_gc_sections, .linker_allow_shlib_undefined = linker_allow_shlib_undefined, .linker_bind_global_refs_locally = linker_bind_global_refs_locally, .linker_z_nodelete = linker_z_nodelete, .linker_z_defs = linker_z_defs, .link_eh_frame_hdr = link_eh_frame_hdr, .link_emit_relocs = link_emit_relocs, .stack_size_override = stack_size_override, .image_base_override = image_base_override, .strip = strip, .single_threaded = single_threaded, .function_sections = function_sections, .self_exe_path = self_exe_path, .thread_pool = &thread_pool, .clang_passthrough_mode = arg_mode != .build, .clang_preprocessor_mode = clang_preprocessor_mode, .version = optional_version, .libc_installation = if (libc_installation) |*lci| lci else null, .verbose_cc = verbose_cc, .verbose_link = verbose_link, .verbose_tokenize = verbose_tokenize, .verbose_ast = verbose_ast, .verbose_ir = verbose_ir, .verbose_llvm_ir = verbose_llvm_ir, .verbose_cimport = verbose_cimport, .verbose_llvm_cpu_features = verbose_llvm_cpu_features, .machine_code_model = machine_code_model, .color = color, .time_report = time_report, .stack_report = stack_report, .is_test = arg_mode == .zig_test, .each_lib_rpath = each_lib_rpath, .test_evented_io = test_evented_io, .test_filter = test_filter, .test_name_prefix = test_name_prefix, .disable_lld_caching = !have_enable_cache, .subsystem = subsystem, }) catch |err| { fatal("unable to create compilation: {}", .{@errorName(err)}); }; defer comp.destroy(); if (show_builtin) { return std.io.getStdOut().writeAll(try comp.generateBuiltinZigSource(arena)); } if (arg_mode == .translate_c) { return cmdTranslateC(comp, arena, have_enable_cache); } const hook: AfterUpdateHook = blk: { if (!have_enable_cache) break :blk .none; switch (emit_bin) { 
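// The hook decides what happens after a successful update: print the directory the cached binary landed in, or copy it out of the cache to the path the user asked for.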
.no => break :blk .none, .yes_default_path => break :blk .{ .print = comp.bin_file.options.emit.?.directory.path orelse ".", }, .yes => |full_path| break :blk .{ .update = full_path }, .yes_a_out => break :blk .{ .update = a_out_basename }, } }; updateModule(gpa, comp, zir_out_path, hook) catch |err| switch (err) { error.SemanticAnalyzeFail => process.exit(1), else => |e| return e, }; try comp.makeBinFileExecutable(); if (build_options.is_stage1 and comp.stage1_lock != null and watch) { warn("--watch is not recommended with the stage1 backend; it leaks memory and is not capable of incremental compilation", .{}); } const run_or_test = switch (arg_mode) { .run, .zig_test => true, else => false, }; if (run_or_test) run: { const exe_loc = emit_bin_loc orelse break :run; const exe_directory = exe_loc.directory orelse comp.bin_file.options.emit.?.directory; const exe_path = try fs.path.join(arena, &[_][]const u8{ exe_directory.path orelse ".", exe_loc.basename, }); var argv = std.ArrayList([]const u8).init(gpa); defer argv.deinit(); if (test_exec_args.items.len == 0) { if (!std.Target.current.canExecBinariesOf(target_info.target)) { switch (arg_mode) { .zig_test => { warn("created {s} but skipping execution because it is non-native", .{exe_path}); if (!watch) return cleanExit(); break :run; }, .run => fatal("unable to execute {s}: non-native", .{exe_path}), else => unreachable, } } try argv.append(exe_path); } else { for (test_exec_args.items) |arg| { try argv.append(arg orelse exe_path); } } if (runtime_args_start) |i| { try argv.appendSlice(all_args[i..]); } // We do not execve for tests because if the test fails we want to print the error message and // invocation below. if (os_can_execve and arg_mode == .run and !watch) { // TODO improve the std lib so that we don't need a call to getEnvMap here. 
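// execvpe replaces the current process image on success, so if it returns at all the exec failed and the error is reported below.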
var env_vars = try process.getEnvMap(arena); const err = std.os.execvpe(gpa, argv.items, &env_vars); const cmd = try argvCmd(arena, argv.items); fatal("the following command failed to execve with '{s}':\n{s}", .{ @errorName(err), cmd }); } else { const child = try std.ChildProcess.init(argv.items, gpa); defer child.deinit(); child.stdin_behavior = .Inherit; child.stdout_behavior = .Inherit; child.stderr_behavior = .Inherit; const term = try child.spawnAndWait(); switch (arg_mode) { .run => { switch (term) { .Exited => |code| { if (code == 0) { if (!watch) return cleanExit(); } else { // TODO https://github.com/ziglang/zig/issues/6342 process.exit(1); } }, else => process.exit(1), } }, .zig_test => { switch (term) { .Exited => |code| { if (code == 0) { if (!watch) return cleanExit(); } else { const cmd = try argvCmd(arena, argv.items); fatal("the following test command failed with exit code {}:\n{}", .{ code, cmd }); } }, else => { const cmd = try argvCmd(arena, argv.items); fatal("the following test command crashed:\n{}", .{cmd}); }, } }, else => unreachable, } } } const stdin = std.io.getStdIn().inStream(); const stderr = std.io.getStdErr().outStream(); var repl_buf: [1024]u8 = undefined; while (watch) { try stderr.print("(zig) ", .{}); try comp.makeBinFileExecutable(); if (stdin.readUntilDelimiterOrEof(&repl_buf, '\n') catch |err| { try stderr.print("\nUnable to parse command: {}\n", .{@errorName(err)}); continue; }) |line| { const actual_line = mem.trimRight(u8, line, "\r\n "); if (mem.eql(u8, actual_line, "update")) { if (output_mode == .Exe) { try comp.makeBinFileWritable(); } updateModule(gpa, comp, zir_out_path, hook) catch |err| switch (err) { error.SemanticAnalyzeFail => continue, else => |e| return e, }; } else if (mem.eql(u8, actual_line, "exit")) { break; } else if (mem.eql(u8, actual_line, "help")) { try stderr.writeAll(repl_help); } else { try stderr.print("unknown command: {}\n", .{actual_line}); } } else { break; } } } const AfterUpdateHook = union(enum) { none, print: []const u8, update: []const u8, }; fn updateModule(gpa: *Allocator, comp: *Compilation, zir_out_path: ?[]const u8, hook: AfterUpdateHook) !void { try comp.update(); var errors = try comp.getAllErrorsAlloc(); defer errors.deinit(comp.gpa); if (errors.list.len != 0) { for (errors.list) |full_err_msg| { full_err_msg.renderToStdErr(); } return error.SemanticAnalyzeFail; } else switch (hook) { .none => {}, .print => |bin_path| try io.getStdOut().writer().print("{s}\n", .{bin_path}), .update => |full_path| _ = try comp.bin_file.options.emit.?.directory.handle.updateFile( comp.bin_file.options.emit.?.sub_path, fs.cwd(), full_path, .{}, ), } if (zir_out_path) |zop| { const module = comp.bin_file.options.module orelse fatal("-femit-zir with no zig source code", .{}); var new_zir_module = try zir.emit(gpa, module); defer new_zir_module.deinit(gpa); const baf = try io.BufferedAtomicFile.create(gpa, fs.cwd(), zop, .{}); defer baf.destroy(); try new_zir_module.writeToStream(gpa, baf.stream()); try baf.finish(); } } fn cmdTranslateC(comp: *Compilation, arena: *Allocator, enable_cache: bool) !void { if (!build_options.have_llvm) fatal("cannot translate-c: compiler built without LLVM extensions", .{}); assert(comp.c_source_files.len == 1); const c_source_file = comp.c_source_files[0]; const translated_zig_basename = try std.fmt.allocPrint(arena, "{}.zig", .{comp.bin_file.options.root_name}); var man: Cache.Manifest = comp.obtainCObjectCacheManifest(); defer if (enable_cache) man.deinit(); man.hash.add(@as(u16, 0xb945)); // 
Random number to distinguish translate-c from compiling C objects _ = man.addFile(c_source_file.src_path, null) catch |err| { fatal("unable to process '{}': {}", .{ c_source_file.src_path, @errorName(err) }); }; const digest = if (try man.hit()) man.final() else digest: { var argv = std.ArrayList([]const u8).init(arena); var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath("tmp", .{}); defer zig_cache_tmp_dir.close(); const ext = Compilation.classifyFileExt(c_source_file.src_path); const out_dep_path: ?[]const u8 = blk: { if (comp.disable_c_depfile or !ext.clangSupportsDepFile()) break :blk null; const c_src_basename = fs.path.basename(c_source_file.src_path); const dep_basename = try std.fmt.allocPrint(arena, "{}.d", .{c_src_basename}); const out_dep_path = try comp.tmpFilePath(arena, dep_basename); break :blk out_dep_path; }; try comp.addTranslateCCArgs(arena, &argv, ext, out_dep_path); try argv.append(c_source_file.src_path); if (comp.verbose_cc) { std.debug.print("clang ", .{}); Compilation.dump_argv(argv.items); } // Convert to null terminated args. const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, argv.items.len + 1); new_argv_with_sentinel[argv.items.len] = null; const new_argv = new_argv_with_sentinel[0..argv.items.len :null]; for (argv.items) |arg, i| { new_argv[i] = try arena.dupeZ(u8, arg); } const c_headers_dir_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{"include"}); const c_headers_dir_path_z = try arena.dupeZ(u8, c_headers_dir_path); var clang_errors: []translate_c.ClangErrMsg = &[0]translate_c.ClangErrMsg{}; const tree = translate_c.translate( comp.gpa, new_argv.ptr, new_argv.ptr + new_argv.len, &clang_errors, c_headers_dir_path_z, ) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ASTUnitFailure => fatal("clang API returned errors but due to a clang bug, it is not exposing the errors for zig to see. For more details: https://github.com/ziglang/zig/issues/4455", .{}), error.SemanticAnalyzeFail => { for (clang_errors) |clang_err| { std.debug.print("{}:{}:{}: {}\n", .{ if (clang_err.filename_ptr) |p| p[0..clang_err.filename_len] else "(no file)", clang_err.line + 1, clang_err.column + 1, clang_err.msg_ptr[0..clang_err.msg_len], }); } process.exit(1); }, }; defer tree.deinit(); if (out_dep_path) |dep_file_path| { const dep_basename = std.fs.path.basename(dep_file_path); // Add the files depended on to the cache system. try man.addDepFilePost(zig_cache_tmp_dir, dep_basename); // Just to save disk space, we delete the file because it is never needed again. 
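// Its contents have already been folded into the cache manifest by addDepFilePost above.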
zig_cache_tmp_dir.deleteFile(dep_basename) catch |err| { warn("failed to delete '{}': {}", .{ dep_file_path, @errorName(err) }); }; } const digest = man.final(); const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest }); var o_dir = try comp.local_cache_directory.handle.makeOpenPath(o_sub_path, .{}); defer o_dir.close(); var zig_file = try o_dir.createFile(translated_zig_basename, .{}); defer zig_file.close(); var bos = io.bufferedOutStream(zig_file.writer()); _ = try std.zig.render(comp.gpa, bos.writer(), tree); try bos.flush(); man.writeManifest() catch |err| warn("failed to write cache manifest: {}", .{@errorName(err)}); break :digest digest; }; if (enable_cache) { const full_zig_path = try comp.local_cache_directory.join(arena, &[_][]const u8{ "o", &digest, translated_zig_basename, }); try io.getStdOut().writer().print("{}\n", .{full_zig_path}); return cleanExit(); } else { const out_zig_path = try fs.path.join(arena, &[_][]const u8{ "o", &digest, translated_zig_basename }); const zig_file = try comp.local_cache_directory.handle.openFile(out_zig_path, .{}); defer zig_file.close(); try io.getStdOut().writeFileAll(zig_file, .{}); return cleanExit(); } } pub const usage_libc = \\Usage: zig libc \\ \\ Detect the native libc installation and print the resulting \\ paths to stdout. You can save this into a file and then edit \\ the paths to create a cross compilation libc kit. Then you \\ can pass `--libc [file]` for Zig to use it. \\ \\Usage: zig libc [paths_file] \\ \\ Parse a libc installation text file and validate it. \\ ; pub fn cmdLibC(gpa: *Allocator, args: []const []const u8) !void { var input_file: ?[]const u8 = null; { var i: usize = 0; while (i < args.len) : (i += 1) { const arg = args[i]; if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { const stdout = io.getStdOut().writer(); try stdout.writeAll(usage_libc); return cleanExit(); } else { fatal("unrecognized parameter: '{}'", .{arg}); } } else if (input_file != null) { fatal("unexpected extra parameter: '{}'", .{arg}); } else { input_file = arg; } } } if (input_file) |libc_file| { var libc = LibCInstallation.parse(gpa, libc_file) catch |err| { fatal("unable to parse libc file: {}", .{@errorName(err)}); }; defer libc.deinit(gpa); } else { var libc = LibCInstallation.findNative(.{ .allocator = gpa, .verbose = true, }) catch |err| { fatal("unable to detect native libc: {}", .{@errorName(err)}); }; defer libc.deinit(gpa); var bos = io.bufferedOutStream(io.getStdOut().writer()); try libc.render(bos.writer()); try bos.flush(); } } pub const usage_init = \\Usage: zig init-exe \\ zig init-lib \\ \\ Initializes a `zig build` project in the current working \\ directory. 
\\ \\Options: \\ -h, --help Print this help and exit \\ \\ ; pub fn cmdInit( gpa: *Allocator, arena: *Allocator, args: []const []const u8, output_mode: std.builtin.OutputMode, ) !void { { var i: usize = 0; while (i < args.len) : (i += 1) { const arg = args[i]; if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { try io.getStdOut().writeAll(usage_init); return cleanExit(); } else { fatal("unrecognized parameter: '{}'", .{arg}); } } else { fatal("unexpected extra parameter: '{}'", .{arg}); } } } const self_exe_path = try fs.selfExePathAlloc(arena); var zig_lib_directory = introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| { fatal("unable to find zig installation directory: {}\n", .{@errorName(err)}); }; defer zig_lib_directory.handle.close(); const s = fs.path.sep_str; const template_sub_path = switch (output_mode) { .Obj => unreachable, .Lib => "std" ++ s ++ "special" ++ s ++ "init-lib", .Exe => "std" ++ s ++ "special" ++ s ++ "init-exe", }; var template_dir = try zig_lib_directory.handle.openDir(template_sub_path, .{}); defer template_dir.close(); const cwd_path = try process.getCwdAlloc(arena); const cwd_basename = fs.path.basename(cwd_path); const max_bytes = 10 * 1024 * 1024; const build_zig_contents = template_dir.readFileAlloc(arena, "build.zig", max_bytes) catch |err| { fatal("unable to read template file 'build.zig': {}", .{@errorName(err)}); }; var modified_build_zig_contents = std.ArrayList(u8).init(arena); try modified_build_zig_contents.ensureCapacity(build_zig_contents.len); for (build_zig_contents) |c| { if (c == '$') { try modified_build_zig_contents.appendSlice(cwd_basename); } else { try modified_build_zig_contents.append(c); } } const main_zig_contents = template_dir.readFileAlloc(arena, "src" ++ s ++ "main.zig", max_bytes) catch |err| { fatal("unable to read template file 'main.zig': {}", .{@errorName(err)}); }; if (fs.cwd().access("build.zig", .{})) |_| { fatal("existing build.zig file would be overwritten", .{}); } else |err| switch (err) { error.FileNotFound => {}, else => fatal("unable to test existence of build.zig: {}\n", .{@errorName(err)}), } var src_dir = try fs.cwd().makeOpenPath("src", .{}); defer src_dir.close(); try src_dir.writeFile("main.zig", main_zig_contents); try fs.cwd().writeFile("build.zig", modified_build_zig_contents.items); std.log.info("Created build.zig", .{}); std.log.info("Created src" ++ s ++ "main.zig", .{}); switch (output_mode) { .Lib => std.log.info("Next, try `zig build --help` or `zig build test`", .{}), .Exe => std.log.info("Next, try `zig build --help` or `zig build run`", .{}), .Obj => unreachable, } } pub const usage_build = \\Usage: zig build [steps] [options] \\ \\ Build a project from build.zig. \\ \\Options: \\ -h, --help Print this help and exit \\ \\ ; pub fn cmdBuild(gpa: *Allocator, arena: *Allocator, args: []const []const u8) !void { // We want to release all the locks before executing the child process, so we make a nice // big block here to ensure the cleanup gets run when we extract out our argv. 
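// A few argv slots (the build runner exe path, the build root, and the cache directories) are reserved up front and patched in below once their values are known.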
const child_argv = argv: { const self_exe_path = try fs.selfExePathAlloc(arena); var build_file: ?[]const u8 = null; var override_lib_dir: ?[]const u8 = null; var override_global_cache_dir: ?[]const u8 = null; var override_local_cache_dir: ?[]const u8 = null; var child_argv = std.ArrayList([]const u8).init(arena); const argv_index_exe = child_argv.items.len; _ = try child_argv.addOne(); try child_argv.append(self_exe_path); const argv_index_build_file = child_argv.items.len; _ = try child_argv.addOne(); const argv_index_cache_dir = child_argv.items.len; _ = try child_argv.addOne(); const argv_index_global_cache_dir = child_argv.items.len; _ = try child_argv.addOne(); { var i: usize = 0; while (i < args.len) : (i += 1) { const arg = args[i]; if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "--build-file")) { if (i + 1 >= args.len) fatal("expected argument after '{}'", .{arg}); i += 1; build_file = args[i]; continue; } else if (mem.eql(u8, arg, "--override-lib-dir")) { if (i + 1 >= args.len) fatal("expected argument after '{}'", .{arg}); i += 1; override_lib_dir = args[i]; try child_argv.appendSlice(&[_][]const u8{ arg, args[i] }); continue; } else if (mem.eql(u8, arg, "--cache-dir")) { if (i + 1 >= args.len) fatal("expected argument after '{}'", .{arg}); i += 1; override_local_cache_dir = args[i]; continue; } else if (mem.eql(u8, arg, "--global-cache-dir")) { if (i + 1 >= args.len) fatal("expected argument after '{}'", .{arg}); i += 1; override_global_cache_dir = args[i]; continue; } } try child_argv.append(arg); } } var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir| .{ .path = lib_dir, .handle = try fs.cwd().openDir(lib_dir, .{}), } else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| { fatal("unable to find zig installation directory: {}", .{@errorName(err)}); }; defer zig_lib_directory.handle.close(); const std_special = "std" ++ fs.path.sep_str ++ "special"; const special_dir_path = try zig_lib_directory.join(arena, &[_][]const u8{std_special}); var root_pkg: Package = .{ .root_src_directory = .{ .path = special_dir_path, .handle = try zig_lib_directory.handle.openDir(std_special, .{}), }, .root_src_path = "build_runner.zig", }; defer root_pkg.root_src_directory.handle.close(); var cleanup_build_dir: ?fs.Dir = null; defer if (cleanup_build_dir) |*dir| dir.close(); const cwd_path = try process.getCwdAlloc(arena); const build_zig_basename = if (build_file) |bf| fs.path.basename(bf) else "build.zig"; const build_directory: Compilation.Directory = blk: { if (build_file) |bf| { if (fs.path.dirname(bf)) |dirname| { const dir = try fs.cwd().openDir(dirname, .{}); cleanup_build_dir = dir; break :blk .{ .path = dirname, .handle = dir }; } break :blk .{ .path = null, .handle = fs.cwd() }; } // Search up parent directories until we find build.zig. var dirname: []const u8 = cwd_path; while (true) { const joined_path = try fs.path.join(arena, &[_][]const u8{ dirname, build_zig_basename }); if (fs.cwd().access(joined_path, .{})) |_| { const dir = try fs.cwd().openDir(dirname, .{}); break :blk .{ .path = dirname, .handle = dir }; } else |err| switch (err) { error.FileNotFound => { dirname = fs.path.dirname(dirname) orelse { std.log.info("{}", .{ \\Initialize a 'build.zig' template file with `zig init-lib` or `zig init-exe`, \\or see `zig --help` for more options. 
}); fatal("No 'build.zig' file found, in the current directory or any parent directories.", .{}); }; continue; }, else => |e| return e, } } }; child_argv.items[argv_index_build_file] = build_directory.path orelse cwd_path; var build_pkg: Package = .{ .root_src_directory = build_directory, .root_src_path = build_zig_basename, }; try root_pkg.table.put(arena, "@build", &build_pkg); var global_cache_directory: Compilation.Directory = l: { const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena); break :l .{ .handle = try fs.cwd().makeOpenPath(p, .{}), .path = p, }; }; defer global_cache_directory.handle.close(); child_argv.items[argv_index_global_cache_dir] = global_cache_directory.path orelse cwd_path; var local_cache_directory: Compilation.Directory = l: { if (override_local_cache_dir) |local_cache_dir_path| { break :l .{ .handle = try fs.cwd().makeOpenPath(local_cache_dir_path, .{}), .path = local_cache_dir_path, }; } const cache_dir_path = try build_directory.join(arena, &[_][]const u8{"zig-cache"}); break :l .{ .handle = try build_directory.handle.makeOpenPath("zig-cache", .{}), .path = cache_dir_path, }; }; defer local_cache_directory.handle.close(); child_argv.items[argv_index_cache_dir] = local_cache_directory.path orelse cwd_path; gimmeMoreOfThoseSweetSweetFileDescriptors(); const cross_target: std.zig.CrossTarget = .{}; const target_info = try detectNativeTargetInfo(gpa, cross_target); const exe_basename = try std.zig.binNameAlloc(arena, .{ .root_name = "build", .target = target_info.target, .output_mode = .Exe, }); const emit_bin: Compilation.EmitLoc = .{ .directory = null, // Use the local zig-cache. .basename = exe_basename, }; var thread_pool: ThreadPool = undefined; try thread_pool.init(gpa); defer thread_pool.deinit(); const comp = Compilation.create(gpa, .{ .zig_lib_directory = zig_lib_directory, .local_cache_directory = local_cache_directory, .global_cache_directory = global_cache_directory, .root_name = "build", .target = target_info.target, .is_native_os = cross_target.isNativeOs(), .is_native_abi = cross_target.isNativeAbi(), .dynamic_linker = target_info.dynamic_linker.get(), .output_mode = .Exe, .root_pkg = &root_pkg, .emit_bin = emit_bin, .emit_h = null, .optimize_mode = .Debug, .self_exe_path = self_exe_path, .thread_pool = &thread_pool, }) catch |err| { fatal("unable to create compilation: {}", .{@errorName(err)}); }; defer comp.destroy(); try updateModule(gpa, comp, null, .none); try comp.makeBinFileExecutable(); child_argv.items[argv_index_exe] = try comp.bin_file.options.emit.?.directory.join( arena, &[_][]const u8{exe_basename}, ); break :argv child_argv.items; }; const child = try std.ChildProcess.init(child_argv, gpa); defer child.deinit(); child.stdin_behavior = .Inherit; child.stdout_behavior = .Inherit; child.stderr_behavior = .Inherit; const term = try child.spawnAndWait(); switch (term) { .Exited => |code| { if (code == 0) return cleanExit(); const cmd = try argvCmd(arena, child_argv); fatal("the following build command failed with exit code {}:\n{}", .{ code, cmd }); }, else => { const cmd = try argvCmd(arena, child_argv); fatal("the following build command crashed:\n{}", .{cmd}); }, } } fn argvCmd(allocator: *Allocator, argv: []const []const u8) ![]u8 { var cmd = std.ArrayList(u8).init(allocator); defer cmd.deinit(); for (argv[0 .. argv.len - 1]) |arg| { try cmd.appendSlice(arg); try cmd.append(' '); } try cmd.appendSlice(argv[argv.len - 1]); return cmd.toOwnedSlice(); } pub const usage_fmt = \\Usage: zig fmt [file]... 
\\ \\ Formats the input files and modifies them in-place. \\ Arguments can be files or directories, which are searched \\ recursively. \\ \\Options: \\ -h, --help Print this help and exit \\ --color [auto|off|on] Enable or disable colored error messages \\ --stdin Format code from stdin; output to stdout \\ --check List non-conforming files and exit with an error \\ if the list is non-empty \\ \\ ; const Fmt = struct { seen: SeenMap, any_error: bool, color: Color, gpa: *Allocator, out_buffer: std.ArrayList(u8), const SeenMap = std.AutoHashMap(fs.File.INode, void); }; pub fn cmdFmt(gpa: *Allocator, args: []const []const u8) !void { const stderr_file = io.getStdErr(); var color: Color = .auto; var stdin_flag: bool = false; var check_flag: bool = false; var input_files = ArrayList([]const u8).init(gpa); defer input_files.deinit(); { var i: usize = 0; while (i < args.len) : (i += 1) { const arg = args[i]; if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { const stdout = io.getStdOut().outStream(); try stdout.writeAll(usage_fmt); return cleanExit(); } else if (mem.eql(u8, arg, "--color")) { if (i + 1 >= args.len) { fatal("expected [auto|on|off] after --color", .{}); } i += 1; const next_arg = args[i]; color = std.meta.stringToEnum(Color, next_arg) orelse { fatal("expected [auto|on|off] after --color, found '{}'", .{next_arg}); }; } else if (mem.eql(u8, arg, "--stdin")) { stdin_flag = true; } else if (mem.eql(u8, arg, "--check")) { check_flag = true; } else { fatal("unrecognized parameter: '{}'", .{arg}); } } else { try input_files.append(arg); } } } if (stdin_flag) { if (input_files.items.len != 0) { fatal("cannot use --stdin with positional arguments", .{}); } const stdin = io.getStdIn().inStream(); const source_code = try stdin.readAllAlloc(gpa, max_src_size); defer gpa.free(source_code); const tree = std.zig.parse(gpa, source_code) catch |err| { fatal("error parsing stdin: {}", .{err}); }; defer tree.deinit(); for (tree.errors) |parse_error| { try printErrMsgToFile(gpa, parse_error, tree, "<stdin>", stderr_file, color); } if (tree.errors.len != 0) { process.exit(1); } if (check_flag) { const anything_changed = try std.zig.render(gpa, io.null_out_stream, tree); const code = if (anything_changed) @as(u8, 1) else @as(u8, 0); process.exit(code); } var bos = io.bufferedOutStream(io.getStdOut().writer()); _ = try std.zig.render(gpa, bos.writer(), tree); try bos.flush(); return; } if (input_files.items.len == 0) { fatal("expected at least one source file argument", .{}); } var fmt = Fmt{ .gpa = gpa, .seen = Fmt.SeenMap.init(gpa), .any_error = false, .color = color, .out_buffer = std.ArrayList(u8).init(gpa), }; defer fmt.seen.deinit(); defer fmt.out_buffer.deinit(); for (input_files.items) |file_path| { // Get the real path here to avoid Windows failing on relative file paths with . or .. in them. 
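// The user-supplied path is still the one shown in messages; the resolved path is only used to open the file.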
const real_path = fs.realpathAlloc(gpa, file_path) catch |err| { fatal("unable to open '{}': {}", .{ file_path, err }); }; defer gpa.free(real_path); try fmtPath(&fmt, file_path, check_flag, fs.cwd(), real_path); } if (fmt.any_error) { process.exit(1); } } const FmtError = error{ SystemResources, OperationAborted, IoPending, BrokenPipe, Unexpected, WouldBlock, FileClosed, DestinationAddressRequired, DiskQuota, FileTooBig, InputOutput, NoSpaceLeft, AccessDenied, OutOfMemory, RenameAcrossMountPoints, ReadOnlyFileSystem, LinkQuotaExceeded, FileBusy, EndOfStream, Unseekable, NotOpenForWriting, } || fs.File.OpenError; fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8) FmtError!void { fmtPathFile(fmt, file_path, check_mode, dir, sub_path) catch |err| switch (err) { error.IsDir, error.AccessDenied => return fmtPathDir(fmt, file_path, check_mode, dir, sub_path), else => { warn("unable to format '{}': {}", .{ file_path, err }); fmt.any_error = true; return; }, }; } fn fmtPathDir( fmt: *Fmt, file_path: []const u8, check_mode: bool, parent_dir: fs.Dir, parent_sub_path: []const u8, ) FmtError!void { var dir = try parent_dir.openDir(parent_sub_path, .{ .iterate = true }); defer dir.close(); const stat = try dir.stat(); if (try fmt.seen.fetchPut(stat.inode, {})) |_| return; var dir_it = dir.iterate(); while (try dir_it.next()) |entry| { const is_dir = entry.kind == .Directory; if (is_dir and std.mem.eql(u8, entry.name, "zig-cache")) continue; if (is_dir or mem.endsWith(u8, entry.name, ".zig")) { const full_path = try fs.path.join(fmt.gpa, &[_][]const u8{ file_path, entry.name }); defer fmt.gpa.free(full_path); if (is_dir) { try fmtPathDir(fmt, full_path, check_mode, dir, entry.name); } else { fmtPathFile(fmt, full_path, check_mode, dir, entry.name) catch |err| { warn("unable to format '{}': {}", .{ full_path, err }); fmt.any_error = true; return; }; } } } } fn fmtPathFile( fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8, ) FmtError!void { const source_file = try dir.openFile(sub_path, .{}); var file_closed = false; errdefer if (!file_closed) source_file.close(); const stat = try source_file.stat(); if (stat.kind == .Directory) return error.IsDir; const source_code = source_file.readToEndAllocOptions( fmt.gpa, max_src_size, std.math.cast(usize, stat.size) catch return error.FileTooBig, @alignOf(u8), null, ) catch |err| switch (err) { error.ConnectionResetByPeer => unreachable, error.ConnectionTimedOut => unreachable, error.NotOpenForReading => unreachable, else => |e| return e, }; source_file.close(); file_closed = true; defer fmt.gpa.free(source_code); // Add to set after no longer possible to get error.IsDir. if (try fmt.seen.fetchPut(stat.inode, {})) |_| return; const tree = try std.zig.parse(fmt.gpa, source_code); defer tree.deinit(); for (tree.errors) |parse_error| { try printErrMsgToFile(fmt.gpa, parse_error, tree, file_path, std.io.getStdErr(), fmt.color); } if (tree.errors.len != 0) { fmt.any_error = true; return; } if (check_mode) { const anything_changed = try std.zig.render(fmt.gpa, io.null_out_stream, tree); if (anything_changed) { const stdout = io.getStdOut().writer(); try stdout.print("{}\n", .{file_path}); fmt.any_error = true; } } else { // As a heuristic, we make enough capacity for the same as the input source. 
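// The render below can still grow the buffer if the formatted output ends up larger than the input.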
try fmt.out_buffer.ensureCapacity(source_code.len); fmt.out_buffer.items.len = 0; const writer = fmt.out_buffer.writer(); const anything_changed = try std.zig.render(fmt.gpa, writer, tree); if (!anything_changed) return; // Good thing we didn't waste any file system access on this. var af = try dir.atomicFile(sub_path, .{ .mode = stat.mode }); defer af.deinit(); try af.file.writeAll(fmt.out_buffer.items); try af.finish(); const stdout = io.getStdOut().writer(); try stdout.print("{}\n", .{file_path}); } } fn printErrMsgToFile( gpa: *mem.Allocator, parse_error: ast.Error, tree: *ast.Tree, path: []const u8, file: fs.File, color: Color, ) !void { const color_on = switch (color) { .auto => file.isTty(), .on => true, .off => false, }; const lok_token = parse_error.loc(); const span_first = lok_token; const span_last = lok_token; const first_token = tree.token_locs[span_first]; const last_token = tree.token_locs[span_last]; const start_loc = tree.tokenLocationLoc(0, first_token); const end_loc = tree.tokenLocationLoc(first_token.end, last_token); var text_buf = std.ArrayList(u8).init(gpa); defer text_buf.deinit(); const out_stream = text_buf.outStream(); try parse_error.render(tree.token_ids, out_stream); const text = text_buf.items; const stream = file.outStream(); try stream.print("{}:{}:{}: error: {}\n", .{ path, start_loc.line + 1, start_loc.column + 1, text }); if (!color_on) return; // Print \r and \t as one space each so that column counts line up for (tree.source[start_loc.line_start..start_loc.line_end]) |byte| { try stream.writeByte(switch (byte) { '\r', '\t' => ' ', else => byte, }); } try stream.writeByte('\n'); try stream.writeByteNTimes(' ', start_loc.column); try stream.writeByteNTimes('~', last_token.end - first_token.start); try stream.writeByte('\n'); } pub const info_zen = \\ \\ * Communicate intent precisely. \\ * Edge cases matter. \\ * Favor reading code over writing code. \\ * Only one obvious way to do things. \\ * Runtime crashes are better than bugs. \\ * Compile errors are better than runtime crashes. \\ * Incremental improvements. \\ * Avoid local maximums. \\ * Reduce the amount one must remember. \\ * Focus on code rather than style. \\ * Resource allocation may fail; resource deallocation must succeed. \\ * Memory is a resource. \\ * Together we serve the users. \\ \\ ; extern "c" fn ZigClang_main(argc: c_int, argv: [*:null]?[*:0]u8) c_int; /// TODO https://github.com/ziglang/zig/issues/3257 fn punt_to_clang(arena: *Allocator, args: []const []const u8) error{OutOfMemory} { if (!build_options.have_llvm) fatal("`zig cc` and `zig c++` unavailable: compiler built without LLVM extensions", .{}); // Convert the args to the format Clang expects. const argv = try arena.alloc(?[*:0]u8, args.len + 1); for (args) |arg, i| { argv[i] = try arena.dupeZ(u8, arg); // TODO If there was an argsAllocZ we could avoid this allocation. } argv[args.len] = null; const exit_code = ZigClang_main(@intCast(c_int, args.len), argv[0..args.len :null].ptr); process.exit(@bitCast(u8, @truncate(i8, exit_code))); } /// The first argument determines which backend is invoked. The options are: /// * `ld.lld` - ELF /// * `ld64.lld` - Mach-O /// * `lld-link` - COFF /// * `wasm-ld` - WebAssembly /// TODO https://github.com/ziglang/zig/issues/3257 pub fn punt_to_lld(arena: *Allocator, args: []const []const u8) error{OutOfMemory} { if (!build_options.have_llvm) fatal("`zig {s}` unavailable: compiler built without LLVM extensions", .{args[0]}); // Convert the args to the format LLD expects. 
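// That format is a C-style argv: a null-terminated array of null-terminated strings.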
// We subtract 1 to shave off the zig binary from args[0]. const argv = try arena.allocSentinel(?[*:0]const u8, args.len - 1, null); for (args[1..]) |arg, i| { argv[i] = try arena.dupeZ(u8, arg); // TODO If there was an argsAllocZ we could avoid this allocation. } const exit_code = rc: { const llvm = @import("llvm.zig"); const argc = @intCast(c_int, argv.len); if (mem.eql(u8, args[1], "ld.lld")) { break :rc llvm.LinkELF(argc, argv.ptr, true); } else if (mem.eql(u8, args[1], "ld64.lld")) { break :rc llvm.LinkMachO(argc, argv.ptr, true); } else if (mem.eql(u8, args[1], "lld-link")) { break :rc llvm.LinkCOFF(argc, argv.ptr, true); } else if (mem.eql(u8, args[1], "wasm-ld")) { break :rc llvm.LinkWasm(argc, argv.ptr, true); } else { unreachable; } }; process.exit(@bitCast(u8, @truncate(i8, exit_code))); } const clang_args = @import("clang_options.zig").list; pub const ClangArgIterator = struct { has_next: bool, zig_equivalent: ZigEquivalent, only_arg: []const u8, second_arg: []const u8, other_args: []const []const u8, argv: []const []const u8, next_index: usize, root_args: ?*Args, allocator: *Allocator, pub const ZigEquivalent = enum { target, o, c, other, positional, l, ignore, driver_punt, pic, no_pic, pie, no_pie, nostdlib, nostdlib_cpp, shared, rdynamic, wl, preprocess_only, asm_only, optimize, debug, sanitize, linker_script, dry_run, verbose, for_linker, linker_input_z, lib_dir, mcpu, dep_file, framework_dir, framework, nostdlibinc, }; const Args = struct { next_index: usize, argv: []const []const u8, }; fn init(allocator: *Allocator, argv: []const []const u8) ClangArgIterator { return .{ .next_index = 2, // `zig cc foo` this points to `foo` .has_next = argv.len > 2, .zig_equivalent = undefined, .only_arg = undefined, .second_arg = undefined, .other_args = undefined, .argv = argv, .root_args = null, .allocator = allocator, }; } fn next(self: *ClangArgIterator) !void { assert(self.has_next); assert(self.next_index < self.argv.len); // In this state we know that the parameter we are looking at is a root parameter // rather than an argument to a parameter. // We adjust the len below when necessary. self.other_args = (self.argv.ptr + self.next_index)[0..1]; var arg = mem.span(self.argv[self.next_index]); self.incrementArgIndex(); if (mem.startsWith(u8, arg, "@")) { if (self.root_args != null) return error.NestedResponseFile; // This is a "compiler response file". We must parse the file and treat its // contents as command line parameters. const allocator = self.allocator; const max_bytes = 10 * 1024 * 1024; // 10 MiB of command line arguments is a reasonable limit const resp_file_path = arg[1..]; const resp_contents = fs.cwd().readFileAlloc(allocator, resp_file_path, max_bytes) catch |err| { fatal("unable to read response file '{}': {}", .{ resp_file_path, @errorName(err) }); }; defer allocator.free(resp_contents); // TODO is there a specification for this file format? Let's find it and make this parsing more robust // at the very least I'm guessing this needs to handle quotes and `#` comments. 
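// For now the response file is simply split on whitespace.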
var it = mem.tokenize(resp_contents, " \t\r\n"); var resp_arg_list = std.ArrayList([]const u8).init(allocator); defer resp_arg_list.deinit(); { errdefer { for (resp_arg_list.items) |item| { allocator.free(mem.span(item)); } } while (it.next()) |token| { const dupe_token = try mem.dupeZ(allocator, u8, token); errdefer allocator.free(dupe_token); try resp_arg_list.append(dupe_token); } const args = try allocator.create(Args); errdefer allocator.destroy(args); args.* = .{ .next_index = self.next_index, .argv = self.argv, }; self.root_args = args; } const resp_arg_slice = resp_arg_list.toOwnedSlice(); self.next_index = 0; self.argv = resp_arg_slice; if (resp_arg_slice.len == 0) { self.resolveRespFileArgs(); return; } self.has_next = true; self.other_args = (self.argv.ptr + self.next_index)[0..1]; // We adjust len below when necessary. arg = mem.span(self.argv[self.next_index]); self.incrementArgIndex(); } if (!mem.startsWith(u8, arg, "-")) { self.zig_equivalent = .positional; self.only_arg = arg; return; } find_clang_arg: for (clang_args) |clang_arg| switch (clang_arg.syntax) { .flag => { const prefix_len = clang_arg.matchEql(arg); if (prefix_len > 0) { self.zig_equivalent = clang_arg.zig_equivalent; self.only_arg = arg[prefix_len..]; break :find_clang_arg; } }, .joined, .comma_joined => { // joined example: --target=foo // comma_joined example: -Wl,-soname,libsoundio.so.2 const prefix_len = clang_arg.matchStartsWith(arg); if (prefix_len != 0) { self.zig_equivalent = clang_arg.zig_equivalent; self.only_arg = arg[prefix_len..]; // This will skip over the "--target=" part. break :find_clang_arg; } }, .joined_or_separate => { // Examples: `-lfoo`, `-l foo` const prefix_len = clang_arg.matchStartsWith(arg); if (prefix_len == arg.len) { if (self.next_index >= self.argv.len) { fatal("Expected parameter after '{}'", .{arg}); } self.only_arg = self.argv[self.next_index]; self.incrementArgIndex(); self.other_args.len += 1; self.zig_equivalent = clang_arg.zig_equivalent; break :find_clang_arg; } else if (prefix_len != 0) { self.zig_equivalent = clang_arg.zig_equivalent; self.only_arg = arg[prefix_len..]; break :find_clang_arg; } }, .joined_and_separate => { // Example: `-Xopenmp-target=riscv64-linux-unknown foo` const prefix_len = clang_arg.matchStartsWith(arg); if (prefix_len != 0) { self.only_arg = arg[prefix_len..]; if (self.next_index >= self.argv.len) { fatal("Expected parameter after '{}'", .{arg}); } self.second_arg = self.argv[self.next_index]; self.incrementArgIndex(); self.other_args.len += 1; self.zig_equivalent = clang_arg.zig_equivalent; break :find_clang_arg; } }, .separate => if (clang_arg.matchEql(arg) > 0) { if (self.next_index >= self.argv.len) { fatal("Expected parameter after '{}'", .{arg}); } self.only_arg = self.argv[self.next_index]; self.incrementArgIndex(); self.other_args.len += 1; self.zig_equivalent = clang_arg.zig_equivalent; break :find_clang_arg; }, .remaining_args_joined => { const prefix_len = clang_arg.matchStartsWith(arg); if (prefix_len != 0) { @panic("TODO"); } }, .multi_arg => if (clang_arg.matchEql(arg) > 0) { @panic("TODO"); }, } else { fatal("Unknown Clang option: '{}'", .{arg}); } } fn incrementArgIndex(self: *ClangArgIterator) void { self.next_index += 1; self.resolveRespFileArgs(); } fn resolveRespFileArgs(self: *ClangArgIterator) void { const allocator = self.allocator; if (self.next_index >= self.argv.len) { if (self.root_args) |root_args| { self.next_index = root_args.next_index; self.argv = root_args.argv; allocator.destroy(root_args); self.root_args = 
null; } if (self.next_index >= self.argv.len) { self.has_next = false; } } } }; fn parseCodeModel(arg: []const u8) std.builtin.CodeModel { return std.meta.stringToEnum(std.builtin.CodeModel, arg) orelse fatal("unsupported machine code model: '{}'", .{arg}); } /// Raise the open file descriptor limit. Ask and ye shall receive. /// For one example of why this is handy, consider the case of building musl libc. /// We keep a lock open for each of the object files in the form of a file descriptor /// until they are finally put into an archive file. This is to allow a zig-cache /// garbage collector to run concurrently to zig processes, and to allow multiple /// zig processes to run concurrently with each other, without clobbering each other. fn gimmeMoreOfThoseSweetSweetFileDescriptors() void { if (!@hasDecl(std.os, "rlimit")) return; const posix = std.os; var lim = posix.getrlimit(.NOFILE) catch return; // Oh well; we tried. if (comptime std.Target.current.isDarwin()) { // On Darwin, `NOFILE` is bounded by a hardcoded value `OPEN_MAX`. // According to the man pages for setrlimit(): // setrlimit() now returns with errno set to EINVAL in places that historically succeeded. // It no longer accepts "rlim_cur = RLIM_INFINITY" for RLIM_NOFILE. // Use "rlim_cur = min(OPEN_MAX, rlim_max)". lim.max = std.math.min(std.os.darwin.OPEN_MAX, lim.max); } if (lim.cur == lim.max) return; // Do a binary search for the limit. var min: posix.rlim_t = lim.cur; var max: posix.rlim_t = 1 << 20; // But if there's a defined upper bound, don't search, just set it. if (lim.max != posix.RLIM_INFINITY) { min = lim.max; max = lim.max; } while (true) { lim.cur = min + @divTrunc(max - min, 2); // on freebsd rlim_t is signed if (posix.setrlimit(.NOFILE, lim)) |_| { min = lim.cur; } else |_| { max = lim.cur; } if (min + 1 >= max) break; } } test "fds" { gimmeMoreOfThoseSweetSweetFileDescriptors(); } fn detectNativeCpuWithLLVM( arch: std.Target.Cpu.Arch, llvm_cpu_name_z: ?[*:0]const u8, llvm_cpu_features_opt: ?[*:0]const u8, ) !std.Target.Cpu { var result = std.Target.Cpu.baseline(arch); if (llvm_cpu_name_z) |cpu_name_z| { const llvm_cpu_name = mem.spanZ(cpu_name_z); for (arch.allCpuModels()) |model| { const this_llvm_name = model.llvm_name orelse continue; if (mem.eql(u8, this_llvm_name, llvm_cpu_name)) { // Here we use the non-dependencies-populated set, // so that subtracting features later in this function // affect the prepopulated set. 
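// Feature dependencies are populated once at the end, after all +/- adjustments below.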
result = std.Target.Cpu{ .arch = arch, .model = model, .features = model.features, }; break; } } } const all_features = arch.allFeaturesList(); if (llvm_cpu_features_opt) |llvm_cpu_features| { var it = mem.tokenize(mem.spanZ(llvm_cpu_features), ","); while (it.next()) |decorated_llvm_feat| { var op: enum { add, sub, } = undefined; var llvm_feat: []const u8 = undefined; if (mem.startsWith(u8, decorated_llvm_feat, "+")) { op = .add; llvm_feat = decorated_llvm_feat[1..]; } else if (mem.startsWith(u8, decorated_llvm_feat, "-")) { op = .sub; llvm_feat = decorated_llvm_feat[1..]; } else { return error.InvalidLlvmCpuFeaturesFormat; } for (all_features) |feature, index_usize| { const this_llvm_name = feature.llvm_name orelse continue; if (mem.eql(u8, llvm_feat, this_llvm_name)) { const index = @intCast(std.Target.Cpu.Feature.Set.Index, index_usize); switch (op) { .add => result.features.addFeature(index), .sub => result.features.removeFeature(index), } break; } } } } result.features.populateDependencies(all_features); return result; } fn detectNativeTargetInfo(gpa: *Allocator, cross_target: std.zig.CrossTarget) !std.zig.system.NativeTargetInfo { var info = try std.zig.system.NativeTargetInfo.detect(gpa, cross_target); if (info.cpu_detection_unimplemented) { const arch = std.Target.current.cpu.arch; // We want to just use detected_info.target but implementing // CPU model & feature detection is todo so here we rely on LLVM. // https://github.com/ziglang/zig/issues/4591 if (!build_options.have_llvm) fatal("CPU features detection is not yet available for {} without LLVM extensions", .{@tagName(arch)}); const llvm = @import("llvm.zig"); const llvm_cpu_name = llvm.GetHostCPUName(); const llvm_cpu_features = llvm.GetNativeFeatures(); info.target.cpu = try detectNativeCpuWithLLVM(arch, llvm_cpu_name, llvm_cpu_features); cross_target.updateCpuFeatures(&info.target.cpu.features); info.target.cpu.arch = cross_target.getCpuArch(); } return info; } /// Indicate that we are now terminating with a successful exit code. /// In debug builds, this is a no-op, so that the calling code's /// cleanup mechanisms are tested and so that external tools that /// check for resource leaks can be accurate. In release builds, this /// calls exit(0), and does not return. pub fn cleanExit() void { if (std.builtin.mode == .Debug) { return; } else { process.exit(0); } }
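// A minimal illustrative test, not part of the original src/main.zig: it exercises
// parseCodeModel (defined above) on a known-good name only, since an unknown name
// calls fatal() and would abort the test runner.
test "parseCodeModel accepts a known code model name" {
    const model = parseCodeModel("default");
    std.debug.assert(model == .default);
}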
src/main.zig