code
stringlengths
38
801k
repo_path
stringlengths
6
263
const c = @import("c.zig"); const getElapsedTime = @import("glfw.zig").getElapsedTime; const utils = @import("utils.zig"); const time = @import("std").time; const print = @import("std").debug.print; pub const FrameTime = struct { update: f64 = 0, draw: f64 = 0, delta: f64 = 0, last: f64 = 0, current: f64 = 0, /// Start updating frametime pub fn start(fr: *FrameTime) void { fr.current = getElapsedTime(); fr.update = fr.current - fr.last; fr.last = fr.current; } /// Stop updating frametime pub fn stop(fr: *FrameTime) void { fr.current = getElapsedTime(); fr.draw = fr.current - fr.last; fr.last = fr.current; fr.delta = fr.update + fr.draw; } /// Sleep for the sake of cpu pub fn sleep(fr: *FrameTime, targetfps: f64) void { if (fr.delta < targetfps) { const ms = (targetfps - fr.delta) * 1000; const sleep_time = ms * 1000000; time.sleep(@floatToInt(u64, sleep_time)); fr.current = getElapsedTime(); fr.delta += fr.current - fr.last; fr.last = fr.current; } } }; pub const FpsDirect = struct { counter: u32 = 0, fps: u32 = 0, last: f64 = 0, /// Calculates the fps pub fn calculate(fp: FpsDirect, fr: FrameTime) FpsDirect { var fps = fp; const fuck = fr.current - fps.last; fps.counter += 1; if (fuck >= 1.0) { fps.fps = fps.counter; fps.counter = 0; fps.last = fr.current; } return fps; } }; pub const Info = struct { handle: ?*c_void = null, title: []const u8 = "<Insert Title>", size: Size = Size{}, minsize: Size = Size{}, maxsize: Size = Size{}, position: Position = Position{}, callbacks: Callbacks = Callbacks{}, pub const Size = struct { width: i32 = 1024, height: i32 = 768, }; pub const Position = struct { x: i32 = 0, y: i32 = 0, }; pub const UpdateProperty = enum { size, sizelimits, title, position, all }; pub const Callbacks = struct { close: ?fn (handle: ?*c_void) void = null, resize: ?fn (handle: ?*c_void, w: i32, h: i32) void = null, mousepos: ?fn (handle: ?*c_void, x: f64, y: f64) void = null, mouseinp: ?fn (handle: ?*c_void, key: i32, ac: i32, mods: i32) void = null, 
keyinp: ?fn (handle: ?*c_void, key: i32, sc: i32, ac: i32, mods: i32) void = null, textinp: ?fn (handle: ?*c_void, codepoint: u32) void = null, }; /// Create the window pub fn create(win: *Info, fullscreen: bool) !void { try utils.check(win.handle != null, "kira/window -> handle must be null", .{}); win.handle = @ptrCast(?*c_void, c.glfwCreateWindow(win.size.width, win.size.height, @ptrCast([*c]const u8, win.title), if (fullscreen) c.glfwGetPrimaryMonitor() else null, null)); try utils.check(win.handle == null, "kira/window -> glfw could not create window handle!", .{}); if (win.callbacks.close != null) { _ = c.glfwSetWindowCloseCallback(@ptrCast(?*c.struct_GLFWwindow, win.handle), @ptrCast(c.GLFWwindowclosefun, win.callbacks.close)); } if (win.callbacks.resize != null) { _ = c.glfwSetWindowSizeCallback(@ptrCast(?*c.struct_GLFWwindow, win.handle), @ptrCast(c.GLFWwindowsizefun, win.callbacks.resize)); } if (win.callbacks.mousepos != null) { _ = c.glfwSetCursorPosCallback(@ptrCast(?*c.struct_GLFWwindow, win.handle), @ptrCast(c.GLFWcursorposfun, win.callbacks.mousepos)); } if (win.callbacks.mouseinp != null) { _ = c.glfwSetMouseButtonCallback(@ptrCast(?*c.struct_GLFWwindow, win.handle), @ptrCast(c.GLFWmousebuttonfun, win.callbacks.mouseinp)); } if (win.callbacks.keyinp != null) { _ = c.glfwSetKeyCallback(@ptrCast(?*c.struct_GLFWwindow, win.handle), @ptrCast(c.GLFWkeyfun, win.callbacks.keyinp)); } if (win.callbacks.textinp != null) { _ = c.glfwSetCharCallback(@ptrCast(?*c.struct_GLFWwindow, win.handle), @ptrCast(c.GLFWcharfun, win.callbacks.textinp)); } win.update(UpdateProperty.all); } /// Destroys the window pub fn destroy(win: *Info) !void { try utils.check(win.handle == null, "kira/window -> handle must be valid", .{}); c.glfwDestroyWindow(@ptrCast(?*c.struct_GLFWwindow, win.handle)); win.handle = null; } /// Updates the properties pub fn update(win: *Info, p: UpdateProperty) void { switch (p) { UpdateProperty.size => { 
c.glfwSetWindowSize(@ptrCast(?*c.struct_GLFWwindow, win.handle), win.size.width, win.size.height); }, UpdateProperty.sizelimits => { c.glfwSetWindowSizeLimits(@ptrCast(?*c.struct_GLFWwindow, win.handle), win.minsize.width, win.minsize.height, win.maxsize.width, win.maxsize.height); }, UpdateProperty.title => { c.glfwSetWindowTitle(@ptrCast(?*c.struct_GLFWwindow, win.handle), @ptrCast([*c]const u8, win.title)); }, UpdateProperty.position => { c.glfwSetWindowPos(@ptrCast(?*c.struct_GLFWwindow, win.handle), win.position.x, win.position.y); }, UpdateProperty.all => { c.glfwSetWindowSize(@ptrCast(?*c.struct_GLFWwindow, win.handle), win.size.width, win.size.height); c.glfwSetWindowSizeLimits(@ptrCast(?*c.struct_GLFWwindow, win.handle), win.minsize.width, win.minsize.height, win.maxsize.width, win.maxsize.height); c.glfwSetWindowTitle(@ptrCast(?*c.struct_GLFWwindow, win.handle), @ptrCast([*c]const u8, win.title)); c.glfwSetWindowPos(@ptrCast(?*c.struct_GLFWwindow, win.handle), win.position.x, win.position.y); }, } } };
src/kiragine/kira/window.zig
const std = @import("std"); usingnamespace (@import("../machine.zig")); usingnamespace (@import("../util.zig")); test "MMX" { const m32 = Machine.init(.x86_32); const m64 = Machine.init(.x64); const reg = Operand.register; const regRm = Operand.registerRm; const imm = Operand.immediate; debugPrint(false); const rm32 = Operand.memoryRm(.DefaultSeg, .DWORD, .EAX, 0); const rm64 = Operand.memoryRm(.DefaultSeg, .QWORD, .EAX, 0); const mem_64 = rm64; { testOp0(m32, .EMMS, "0F 77"); testOp0(m64, .EMMS, "0F 77"); } { { testOp2(m32, .MOVD, reg(.MM0), rm32, "0F 6E 00"); testOp2(m32, .MOVD, reg(.MM1), rm32, "0F 6E 08"); testOp2(m32, .MOVD, reg(.MM2), rm32, "0F 6E 10"); testOp2(m32, .MOVD, reg(.MM3), rm32, "0F 6E 18"); testOp2(m32, .MOVD, reg(.MM4), rm32, "0F 6E 20"); testOp2(m32, .MOVD, reg(.MM5), rm32, "0F 6E 28"); testOp2(m32, .MOVD, reg(.MM6), rm32, "0F 6E 30"); testOp2(m32, .MOVD, reg(.MM7), rm32, "0F 6E 38"); // testOp2(m64, .MOVD, reg(.MM0), rm32, "67 0F 6E 00"); testOp2(m64, .MOVD, reg(.MM1), rm32, "67 0F 6E 08"); testOp2(m64, .MOVD, reg(.MM2), rm32, "67 0F 6E 10"); testOp2(m64, .MOVD, reg(.MM3), rm32, "67 0F 6E 18"); testOp2(m64, .MOVD, reg(.MM4), rm32, "67 0F 6E 20"); testOp2(m64, .MOVD, reg(.MM5), rm32, "67 0F 6E 28"); testOp2(m64, .MOVD, reg(.MM6), rm32, "67 0F 6E 30"); testOp2(m64, .MOVD, reg(.MM7), rm32, "67 0F 6E 38"); } { testOp2(m32, .MOVD, reg(.MM0), rm64, AsmError.InvalidOperand); testOp2(m32, .MOVD, reg(.MM1), rm64, AsmError.InvalidOperand); testOp2(m32, .MOVD, reg(.MM2), rm64, AsmError.InvalidOperand); testOp2(m32, .MOVD, reg(.MM3), rm64, AsmError.InvalidOperand); testOp2(m32, .MOVD, reg(.MM4), rm64, AsmError.InvalidOperand); testOp2(m32, .MOVD, reg(.MM5), rm64, AsmError.InvalidOperand); testOp2(m32, .MOVD, reg(.MM6), rm64, AsmError.InvalidOperand); testOp2(m32, .MOVD, reg(.MM7), rm64, AsmError.InvalidOperand); // testOp2(m64, .MOVD, reg(.MM0), rm64, "67 48 0F 6E 00"); testOp2(m64, .MOVD, reg(.MM1), rm64, "67 48 0F 6E 08"); testOp2(m64, .MOVD, reg(.MM2), 
rm64, "67 48 0F 6E 10"); testOp2(m64, .MOVD, reg(.MM3), rm64, "67 48 0F 6E 18"); testOp2(m64, .MOVD, reg(.MM4), rm64, "67 48 0F 6E 20"); testOp2(m64, .MOVD, reg(.MM5), rm64, "67 48 0F 6E 28"); testOp2(m64, .MOVD, reg(.MM6), rm64, "67 48 0F 6E 30"); testOp2(m64, .MOVD, reg(.MM7), rm64, "67 48 0F 6E 38"); } { testOp2(m32, .MOVQ, reg(.MM0), reg(.RAX), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.MM1), reg(.RAX), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.MM2), reg(.RAX), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.MM3), reg(.RAX), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.MM4), reg(.RAX), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.MM5), reg(.RAX), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.MM6), reg(.RAX), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.MM7), reg(.RAX), AsmError.InvalidOperand); // testOp2(m64, .MOVQ, reg(.MM0), reg(.RAX), "48 0F 6E C0"); testOp2(m64, .MOVQ, reg(.MM1), reg(.RAX), "48 0F 6E C8"); testOp2(m64, .MOVQ, reg(.MM2), reg(.RAX), "48 0F 6E D0"); testOp2(m64, .MOVQ, reg(.MM3), reg(.RAX), "48 0F 6E D8"); testOp2(m64, .MOVQ, reg(.MM4), reg(.RAX), "48 0F 6E E0"); testOp2(m64, .MOVQ, reg(.MM5), reg(.RAX), "48 0F 6E E8"); testOp2(m64, .MOVQ, reg(.MM6), reg(.RAX), "48 0F 6E F0"); testOp2(m64, .MOVQ, reg(.MM7), reg(.RAX), "48 0F 6E F8"); } { testOp2(m32, .MOVQ, reg(.RAX), reg(.MM0), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.RAX), reg(.MM1), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.RAX), reg(.MM2), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.RAX), reg(.MM3), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.RAX), reg(.MM4), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.RAX), reg(.MM5), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.RAX), reg(.MM6), AsmError.InvalidOperand); testOp2(m32, .MOVQ, reg(.RAX), reg(.MM7), AsmError.InvalidOperand); // testOp2(m64, .MOVQ, reg(.RAX), reg(.MM0), "48 0F 7E C0"); testOp2(m64, .MOVQ, reg(.RAX), reg(.MM1), "48 0F 7E C8"); testOp2(m64, 
.MOVQ, reg(.RAX), reg(.MM2), "48 0F 7E D0"); testOp2(m64, .MOVQ, reg(.RAX), reg(.MM3), "48 0F 7E D8"); testOp2(m64, .MOVQ, reg(.RAX), reg(.MM4), "48 0F 7E E0"); testOp2(m64, .MOVQ, reg(.RAX), reg(.MM5), "48 0F 7E E8"); testOp2(m64, .MOVQ, reg(.RAX), reg(.MM6), "48 0F 7E F0"); testOp2(m64, .MOVQ, reg(.RAX), reg(.MM7), "48 0F 7E F8"); } { testOp2(m32, .MOVQ, reg(.MM0), mem_64, "0F 6F 00"); testOp2(m32, .MOVQ, reg(.MM1), mem_64, "0F 6F 08"); testOp2(m32, .MOVQ, reg(.MM2), mem_64, "0F 6F 10"); testOp2(m32, .MOVQ, reg(.MM3), mem_64, "0F 6F 18"); testOp2(m32, .MOVQ, reg(.MM4), mem_64, "0F 6F 20"); testOp2(m32, .MOVQ, reg(.MM5), mem_64, "0F 6F 28"); testOp2(m32, .MOVQ, reg(.MM6), mem_64, "0F 6F 30"); testOp2(m32, .MOVQ, reg(.MM7), mem_64, "0F 6F 38"); // testOp2(m64, .MOVQ, reg(.MM0), mem_64, "67 0F 6F 00"); testOp2(m64, .MOVQ, reg(.MM1), mem_64, "67 0F 6F 08"); testOp2(m64, .MOVQ, reg(.MM2), mem_64, "67 0F 6F 10"); testOp2(m64, .MOVQ, reg(.MM3), mem_64, "67 0F 6F 18"); testOp2(m64, .MOVQ, reg(.MM4), mem_64, "67 0F 6F 20"); testOp2(m64, .MOVQ, reg(.MM5), mem_64, "67 0F 6F 28"); testOp2(m64, .MOVQ, reg(.MM6), mem_64, "67 0F 6F 30"); testOp2(m64, .MOVQ, reg(.MM7), mem_64, "67 0F 6F 38"); } { testOp2(m32, .MOVQ, mem_64, reg(.MM0), "0F 7F 00"); testOp2(m32, .MOVQ, mem_64, reg(.MM1), "0F 7F 08"); testOp2(m32, .MOVQ, mem_64, reg(.MM2), "0F 7F 10"); testOp2(m32, .MOVQ, mem_64, reg(.MM3), "0F 7F 18"); testOp2(m32, .MOVQ, mem_64, reg(.MM4), "0F 7F 20"); testOp2(m32, .MOVQ, mem_64, reg(.MM5), "0F 7F 28"); testOp2(m32, .MOVQ, mem_64, reg(.MM6), "0F 7F 30"); testOp2(m32, .MOVQ, mem_64, reg(.MM7), "0F 7F 38"); // testOp2(m64, .MOVQ, mem_64, reg(.MM0), "67 0F 7F 00"); testOp2(m64, .MOVQ, mem_64, reg(.MM1), "67 0F 7F 08"); testOp2(m64, .MOVQ, mem_64, reg(.MM2), "67 0F 7F 10"); testOp2(m64, .MOVQ, mem_64, reg(.MM3), "67 0F 7F 18"); testOp2(m64, .MOVQ, mem_64, reg(.MM4), "67 0F 7F 20"); testOp2(m64, .MOVQ, mem_64, reg(.MM5), "67 0F 7F 28"); testOp2(m64, .MOVQ, mem_64, reg(.MM6), "67 0F 7F 
30"); testOp2(m64, .MOVQ, mem_64, reg(.MM7), "67 0F 7F 38"); } { testOp2(m32, .MOVQ, reg(.MM0), reg(.MM0), "0F 6F c0"); testOp2(m32, .MOVQ, reg(.MM1), reg(.MM1), "0F 6F c9"); testOp2(m32, .MOVQ, reg(.MM2), reg(.MM2), "0F 6F d2"); testOp2(m32, .MOVQ, reg(.MM3), reg(.MM3), "0F 6F db"); testOp2(m32, .MOVQ, reg(.MM4), reg(.MM4), "0F 6F e4"); testOp2(m32, .MOVQ, reg(.MM5), reg(.MM5), "0F 6F ed"); testOp2(m32, .MOVQ, reg(.MM6), reg(.MM6), "0F 6F f6"); testOp2(m32, .MOVQ, reg(.MM7), reg(.MM7), "0F 6F ff"); // testOp2(m64, .MOVQ, reg(.MM0), reg(.MM0), "0F 6F c0"); testOp2(m64, .MOVQ, reg(.MM1), reg(.MM1), "0F 6F c9"); testOp2(m64, .MOVQ, reg(.MM2), reg(.MM2), "0F 6F d2"); testOp2(m64, .MOVQ, reg(.MM3), reg(.MM3), "0F 6F db"); testOp2(m64, .MOVQ, reg(.MM4), reg(.MM4), "0F 6F e4"); testOp2(m64, .MOVQ, reg(.MM5), reg(.MM5), "0F 6F ed"); testOp2(m64, .MOVQ, reg(.MM6), reg(.MM6), "0F 6F f6"); testOp2(m64, .MOVQ, reg(.MM7), reg(.MM7), "0F 6F ff"); } } { testOp2(m32, .PACKSSWB, reg(.MM0), reg(.MM0), "0F 63 c0"); testOp2(m32, .PACKSSWB, reg(.MM0), reg(.MM1), "0F 63 c1"); testOp2(m32, .PACKSSWB, reg(.MM1), reg(.MM0), "0F 63 c8"); testOp2(m32, .PACKSSWB, reg(.MM0), mem_64, "0F 63 00"); testOp2(m32, .PACKSSWB, mem_64, mem_64, AsmError.InvalidOperand); testOp2(m32, .PACKSSWB, mem_64, reg(.MM1), AsmError.InvalidOperand); testOp2(m32, .PACKSSDW, reg(.MM0), reg(.MM0), "0F 6B c0"); testOp2(m32, .PACKSSDW, reg(.MM0), reg(.MM1), "0F 6B c1"); testOp2(m32, .PACKSSDW, reg(.MM1), reg(.MM0), "0F 6B c8"); testOp2(m32, .PACKSSDW, reg(.MM0), mem_64, "0F 6B 00"); testOp2(m32, .PACKSSDW, mem_64, mem_64, AsmError.InvalidOperand); testOp2(m32, .PACKSSDW, mem_64, reg(.MM1), AsmError.InvalidOperand); testOp2(m32, .PACKUSWB, reg(.MM0), reg(.MM0), "0F 67 c0"); testOp2(m32, .PACKUSWB, reg(.MM0), reg(.MM1), "0F 67 c1"); testOp2(m32, .PACKUSWB, reg(.MM1), reg(.MM0), "0F 67 c8"); testOp2(m32, .PACKUSWB, reg(.MM0), mem_64, "0F 67 00"); testOp2(m32, .PACKUSWB, mem_64, mem_64, AsmError.InvalidOperand); 
testOp2(m32, .PACKUSWB, mem_64, reg(.MM1), AsmError.InvalidOperand); } { testOp2(m32, .PADDB, reg(.MM0), reg(.MM0), "0f fc c0"); testOp2(m32, .PADDW, reg(.MM0), reg(.MM0), "0f fd c0"); testOp2(m32, .PADDD, reg(.MM0), reg(.MM0), "0f fe c0"); testOp2(m32, .PADDQ, reg(.MM0), reg(.MM0), "0f d4 c0"); testOp2(m32, .PADDSB, reg(.MM0), reg(.MM0), "0f ec c0"); testOp2(m32, .PADDSW, reg(.MM0), reg(.MM0), "0f ed c0"); testOp2(m32, .PADDUSB, reg(.MM0), reg(.MM0), "0f dc c0"); testOp2(m32, .PADDUSW, reg(.MM0), reg(.MM0), "0f dd c0"); testOp2(m32, .PAND, reg(.MM0), reg(.MM0), "0f db c0"); testOp2(m32, .PANDN, reg(.MM0), reg(.MM0), "0f df c0"); testOp2(m32, .POR, reg(.MM0), reg(.MM0), "0f eb c0"); testOp2(m32, .PXOR, reg(.MM0), reg(.MM0), "0f ef c0"); testOp2(m32, .PCMPEQB, reg(.MM0), reg(.MM0), "0f 74 c0"); testOp2(m32, .PCMPEQW, reg(.MM0), reg(.MM0), "0f 75 c0"); testOp2(m32, .PCMPEQD, reg(.MM0), reg(.MM0), "0f 76 c0"); testOp2(m32, .PCMPGTB, reg(.MM0), reg(.MM0), "0f 64 c0"); testOp2(m32, .PCMPGTW, reg(.MM0), reg(.MM0), "0f 65 c0"); testOp2(m32, .PCMPGTD, reg(.MM0), reg(.MM0), "0f 66 c0"); testOp3(m32, .PEXTRW, reg(.EAX), reg(.MM0), imm(0), "0f c5 c0 00"); testOp3(m64, .PEXTRW, reg(.RAX), reg(.MM0), imm(0), "0f c5 c0 00"); testOp2(m32, .PMADDWD, reg(.MM0), reg(.MM0), "0f f5 c0"); testOp2(m32, .PMULHW, reg(.MM0), reg(.MM0), "0f e5 c0"); testOp2(m32, .PMULLW, reg(.MM0), reg(.MM0), "0f d5 c0"); testOp2(m32, .PSLLW, reg(.MM0), reg(.MM0), "0f f1 c0"); testOp2(m32, .PSLLW, reg(.MM0), imm(0), "0f 71 f0 00"); testOp2(m32, .PSLLD, reg(.MM0), reg(.MM0), "0f f2 c0"); testOp2(m32, .PSLLD, reg(.MM0), imm(0), "0f 72 f0 00"); testOp2(m32, .PSLLQ, reg(.MM0), reg(.MM0), "0f f3 c0"); testOp2(m32, .PSLLQ, reg(.MM0), imm(0), "0f 73 f0 00"); testOp2(m32, .PSRAW, reg(.MM0), reg(.MM0), "0f e1 c0"); testOp2(m32, .PSRAW, reg(.MM0), imm(0), "0f 71 e0 00"); testOp2(m32, .PSRAD, reg(.MM0), reg(.MM0), "0f e2 c0"); testOp2(m32, .PSRAD, reg(.MM0), imm(0), "0f 72 e0 00"); testOp2(m32, .PSRLW, reg(.MM0), 
reg(.MM0), "0f d1 c0"); testOp2(m32, .PSRLW, reg(.MM0), imm(0), "0f 71 d0 00"); testOp2(m32, .PSRLD, reg(.MM0), reg(.MM0), "0f d2 c0"); testOp2(m32, .PSRLD, reg(.MM0), imm(0), "0f 72 d0 00"); testOp2(m32, .PSRLQ, reg(.MM0), reg(.MM0), "0f d3 c0"); testOp2(m32, .PSRLQ, reg(.MM0), imm(0), "0f 73 d0 00"); testOp2(m32, .PSUBB, reg(.MM0), reg(.MM0), "0f f8 c0"); testOp2(m32, .PSUBW, reg(.MM0), reg(.MM0), "0f f9 c0"); testOp2(m32, .PSUBD, reg(.MM0), reg(.MM0), "0f fa c0"); testOp2(m32, .PSUBUSB, reg(.MM0), reg(.MM0), "0f d8 c0"); testOp2(m32, .PSUBUSW, reg(.MM0), reg(.MM0), "0f d9 c0"); testOp2(m32, .PUNPCKHBW, reg(.MM0), reg(.MM0), "0f 68 c0"); testOp2(m32, .PUNPCKHWD, reg(.MM0), reg(.MM0), "0f 69 c0"); testOp2(m32, .PUNPCKHDQ, reg(.MM0), reg(.MM0), "0f 6a c0"); // testOp2(m32, .PUNPCKHQDQ,reg(.MM0), reg(.MM0), "0f 6d c0"); testOp2(m32, .PUNPCKLBW, reg(.MM0), reg(.MM0), "0f 60 c0"); testOp2(m32, .PUNPCKLWD, reg(.MM0), reg(.MM0), "0f 61 c0"); testOp2(m32, .PUNPCKLDQ, reg(.MM0), reg(.MM0), "0f 62 c0"); // testOp2(m32, .PUNPCKLQDQ,reg(.MM0), reg(.MM0), "0f 6c c0"); } // // 3DNow! 
// { // FEMMS testOp0(m64, .FEMMS, "0f 0e"); // PAVGUSB testOp2(m64, .PAVGUSB, reg(.MM1), regRm(.MM0), "0f 0f c8 bf"); // PF2ID testOp2(m64, .PF2ID, reg(.MM1), regRm(.MM0), "0f 0f c8 1d"); // PFACC testOp2(m64, .PFACC, reg(.MM1), regRm(.MM0), "0f 0f c8 ae"); // PFADD testOp2(m64, .PFADD, reg(.MM1), regRm(.MM0), "0f 0f c8 9e"); // PFCMPEQ testOp2(m64, .PFCMPEQ, reg(.MM1), regRm(.MM0), "0f 0f c8 b0"); // PFCMPGE testOp2(m64, .PFCMPGE, reg(.MM1), regRm(.MM0), "0f 0f c8 90"); // PFCMPGT testOp2(m64, .PFCMPGT, reg(.MM1), regRm(.MM0), "0f 0f c8 a0"); // PFMAX testOp2(m64, .PFMAX, reg(.MM1), regRm(.MM0), "0f 0f c8 a4"); // PFMIN testOp2(m64, .PFMIN, reg(.MM1), regRm(.MM0), "0f 0f c8 94"); // PFMUL testOp2(m64, .PFMUL, reg(.MM1), regRm(.MM0), "0f 0f c8 b4"); // PFRCP testOp2(m64, .PFRCP, reg(.MM1), regRm(.MM0), "0f 0f c8 96"); // PFRCPIT1 testOp2(m64, .PFRCPIT1, reg(.MM1), regRm(.MM0), "0f 0f c8 a6"); // PFRCPIT2 testOp2(m64, .PFRCPIT2, reg(.MM1), regRm(.MM0), "0f 0f c8 b6"); // PFRSQIT1 testOp2(m64, .PFRSQIT1, reg(.MM1), regRm(.MM0), "0f 0f c8 a7"); // PFRSQRT testOp2(m64, .PFRSQRT, reg(.MM1), regRm(.MM0), "0f 0f c8 97"); // PFSUB testOp2(m64, .PFSUB, reg(.MM1), regRm(.MM0), "0f 0f c8 9a"); // PFSUBR testOp2(m64, .PFSUBR, reg(.MM1), regRm(.MM0), "0f 0f c8 aa"); // PI2FD testOp2(m64, .PI2FD, reg(.MM1), regRm(.MM0), "0f 0f c8 0d"); // PMULHRW testOp2(m64, .PMULHRW, reg(.MM1), regRm(.MM0), "0f 0f c8 0c"); } { // PFRCPV testOp2(m64, .PFRCPV, reg(.MM1), regRm(.MM0), "0f 0f c8 87"); // PFRSQRTV testOp2(m64, .PFRSQRTV, reg(.MM1), regRm(.MM0), "0f 0f c8 86"); } // // 3DNow! 
Extensions // { // PF2IW testOp2(m64, .PF2IW, reg(.MM1), regRm(.MM0), "0f 0f c8 1c"); // PFNACC testOp2(m64, .PFNACC, reg(.MM1), regRm(.MM0), "0f 0f c8 8a"); // PFPNACC testOp2(m64, .PFPNACC, reg(.MM1), regRm(.MM0), "0f 0f c8 8e"); // PI2FW testOp2(m64, .PI2FW, reg(.MM1), regRm(.MM0), "0f 0f c8 0c"); // PSWAPD testOp2(m64, .PSWAPD, reg(.MM1), regRm(.MM0), "0f 0f c8 bb"); } // // Cyrix EMMI (Extended Multi-Media Instructions) // { // NOTE: some instructions are overloaded elsewhere by AMD/Intel, so // so need to use separate cpu features here const cpu_features = [_]CpuFeature { .Cyrix, .EMMI }; const cyrix32 = Machine.init_with_features(.x86_32, cpu_features[0..]); // PADDSIW testOp2(cyrix32, .PADDSIW, reg(.MM1), regRm(.MM0), "0f 51 c8"); // PAVEB testOp2(cyrix32, .PAVEB, reg(.MM1), regRm(.MM0), "0f 50 c8"); // PDISTIB testOp2(cyrix32, .PDISTIB, reg(.MM1), mem_64, "0f 54 08"); // PMACHRIW testOp2(cyrix32, .PMACHRIW, reg(.MM1), mem_64, "0f 5e 08"); // PMAGW testOp2(cyrix32, .PMAGW, reg(.MM1), regRm(.MM0), "0f 52 c8"); // PMULHRW / PMULHRIW testOp2(cyrix32, .PMULHRW, reg(.MM1), regRm(.MM0), "0f 59 c8"); testOp2(cyrix32, .PMULHRIW, reg(.MM1), regRm(.MM0), "0f 5d c8"); // PMVZB / PMVNZB / PMVLZB / PMVGEZB testOp2(cyrix32, .PMVZB, reg(.MM1), mem_64, "0f 58 08"); testOp2(cyrix32, .PMVNZB, reg(.MM1), mem_64, "0f 5a 08"); testOp2(cyrix32, .PMVLZB, reg(.MM1), mem_64, "0f 5b 08"); testOp2(cyrix32, .PMVGEZB, reg(.MM1), mem_64, "0f 5c 08"); // PSUBSIW testOp2(cyrix32, .PSUBSIW, reg(.MM1), regRm(.MM0), "0f 55 c8"); } }
src/x86/tests/mmx.zig
const builtin = @import("builtin"); const build_options = @import("build_options"); const std = @import("std"); const big = std.math.big; const heap = std.heap; const io = std.io; const log = std.log; const mem = std.mem; const net = std.net; const Connection = @import("connection.zig").Connection; const PrimitiveWriter = @import("primitive/writer.zig").PrimitiveWriter; const PreparedMetadata = @import("metadata.zig").PreparedMetadata; const RowsMetadata = @import("metadata.zig").RowsMetadata; const ColumnSpec = @import("metadata.zig").ColumnSpec; const bigint = @import("bigint.zig"); usingnamespace @import("primitive_types.zig"); usingnamespace @import("iterator.zig"); usingnamespace @import("query_parameters.zig"); usingnamespace @import("error.zig"); usingnamespace @import("frames/query.zig"); usingnamespace @import("frames/prepare.zig"); usingnamespace @import("frames/execute.zig"); const testing = @import("testing.zig"); const casstest = @import("casstest.zig"); pub const Client = struct { const Self = @This(); pub const InitOptions = struct { /// The default consistency to use for queries. consistency: Consistency = .One, }; pub const QueryOptions = struct { /// If this is provided the client will try to limit the size of the resultset. /// Note that even if the query is paged, cassandra doesn't guarantee that there will /// be at most `page_size` rows, it can be slightly smaller or bigger. /// /// If page_size is not null, after execution the `paging_state` field in this struct will be /// populated if paging is necessary, otherwise it will be null. page_size: ?u32 = null, /// If this is provided it will be used to page the result of the query. /// Additionally, this will be populated by the client with the next paging state if any. paging_state: ?[]const u8 = null, /// If this is provided it will be populated in case of failures. /// This will provide more detail than an error can. 
diags: ?*Diagnostics = null, pub const Diagnostics = struct { /// The error message returned by the Cassandra node. message: []const u8 = "", unavailable_replicas: ?UnavailableReplicasError = null, function_failure: ?FunctionFailureError = null, write_timeout: ?WriteError.Timeout = null, read_timeout: ?ReadError.Timeout = null, write_failure: ?WriteError.Failure = null, read_failure: ?ReadError.Failure = null, cas_write_unknown: ?WriteError.CASUnknown = null, already_exists: ?AlreadyExistsError = null, unprepared: ?UnpreparedError = null, execute: Execute = .{}, const Execute = struct { not_enough_args: ?bool = null, first_incompatible_arg: ?ExecuteIncompatibleArg = null, const ExecuteIncompatibleArg = struct { position: usize = 0, prepared: ColumnSpec = .{}, argument: ?OptionID = null, }; pub fn format(value: Execute, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { var buf: [1024]u8 = undefined; var fbs = io.fixedBufferStream(&buf); var fbw = fbs.writer(); if (value.not_enough_args) |v| { if (v) { try std.fmt.format(fbw, "not enough args, ", .{}); } } if (value.first_incompatible_arg) |v| { try std.fmt.format(fbw, "first incompatible arg: {{position: {}, prepared: (name: {s}, type: {}), argument: {}}}", .{ v.position, v.prepared.name, v.prepared.option, v.argument, }); } try writer.writeAll(fbs.getWritten()); } }; }; }; const PreparedStatementMetadataValue = struct { result_metadata_id: ?[]const u8, metadata: PreparedMetadata, rows_metadata: RowsMetadata, }; /// Maps a prepared statement id to the types of the arguments needed when executing it. const PreparedStatementsMetadata = std.HashMap([]const u8, PreparedStatementMetadataValue, std.hash_map.hashString, std.hash_map.eqlString, std.hash_map.DefaultMaxLoadPercentage); allocator: *mem.Allocator, connection: *Connection, options: InitOptions, /// TODO(vincent): need to implement some sort of TLL or size limit for this. 
prepared_statements_metadata: PreparedStatementsMetadata, pub fn initWithConnection(allocator: *mem.Allocator, connection: *Connection, options: InitOptions) Self { var self: Self = undefined; self.allocator = allocator; self.connection = connection; self.options = options; self.prepared_statements_metadata = PreparedStatementsMetadata.init(allocator); return self; } pub fn deinit(self: *Self) void {} pub fn prepare(self: *Self, allocator: *mem.Allocator, options: QueryOptions, comptime query_string: []const u8, args: anytype) ![]const u8 { var dummy_diags = QueryOptions.Diagnostics{}; var diags = options.diags orelse &dummy_diags; // Check that the query makes sense for the arguments provided. comptime { const bind_markers = countBindMarkers(query_string); const fields = @typeInfo(@TypeOf(args)).Struct.fields.len; if (bind_markers != fields) { @compileLog("number of arguments = ", fields); @compileLog("number of bind markers = ", bind_markers); @compileError("Query string has different number of bind markers than the number of arguments provided"); } } var option_ids = OptionIDArrayList{}; try computeValues(allocator, null, &option_ids, args); // Write PREPARE, expect RESULT { var prepare_frame = PrepareFrame{ .query = query_string, .keyspace = null, }; try self.connection.writeFrame(allocator, .{ .opcode = .Prepare, .body = prepare_frame, }); } var read_frame = try self.connection.readFrame(allocator, Connection.ReadFrameOptions{ .frame_allocator = self.allocator, }); switch (read_frame) { .Result => |frame| switch (frame.result) { .Prepared => |prepared| { const id = prepared.query_id; // Store the metadata for later use with `execute`. 
const gop = try self.prepared_statements_metadata.getOrPut(id); if (gop.found_existing) { if (gop.entry.value.result_metadata_id) |result_metadata_id| { self.allocator.free(result_metadata_id); } gop.entry.value.metadata.deinit(self.allocator); gop.entry.value.rows_metadata.deinit(self.allocator); } gop.entry.value = undefined; gop.entry.value.result_metadata_id = prepared.result_metadata_id; gop.entry.value.metadata = prepared.metadata; gop.entry.value.rows_metadata = prepared.rows_metadata; return id; }, else => return error.InvalidServerResponse, }, .Error => |err| { diags.message = err.message; return error.QueryPreparationFailed; }, else => return error.InvalidServerResponse, } } // TODO(vincent): maybe add not comptime equivalent ? pub fn query(self: *Self, allocator: *mem.Allocator, options: QueryOptions, comptime query_string: []const u8, args: anytype) !?Iterator { var dummy_diags = QueryOptions.Diagnostics{}; var diags = options.diags orelse &dummy_diags; // Check that the query makes sense for the arguments provided. 
comptime { const bind_markers = countBindMarkers(query_string); const fields = @typeInfo(@TypeOf(args)).Struct.fields.len; if (bind_markers != fields) { @compileLog("number of arguments = ", fields); @compileLog("number of bind markers = ", bind_markers); @compileError("Query string has different number of bind markers than the number of arguments provided"); } } var values = std.ArrayList(Value).init(allocator); try computeValues(allocator, &values, null, args); // TODO(vincent): handle named values // TODO(vincent): handle skip_metadata (see §4.1.4 in the spec) var query_parameters = QueryParameters{ .consistency_level = self.options.consistency, .values = undefined, .skip_metadata = false, .page_size = options.page_size, .paging_state = options.paging_state, .serial_consistency_level = null, .timestamp = null, .keyspace = null, .now_in_seconds = null, }; query_parameters.values = Values{ .Normal = values.toOwnedSlice() }; // Write QUERY { var query_frame = QueryFrame{ .query = query_string, .query_parameters = query_parameters, }; try self.connection.writeFrame(allocator, .{ .opcode = .Query, .body = query_frame, }); } // Read either RESULT or ERROR return switch (try self.connection.readFrame(allocator, null)) { .Result => |frame| { return switch (frame.result) { .Rows => |rows| blk: { break :blk Iterator.init(rows.metadata, rows.data); }, else => null, }; }, .Error => |err| { diags.message = err.message; return error.QueryExecutionFailed; }, else => return error.InvalidServerResponse, }; } pub fn execute(self: *Self, allocator: *mem.Allocator, options: QueryOptions, query_id: []const u8, args: anytype) !?Iterator { var dummy_diags = QueryOptions.Diagnostics{}; var diags = options.diags orelse &dummy_diags; // If the metadata doesn't exist we can't proceed. 
const prepared_statement_metadata_kv = self.prepared_statements_metadata.get(query_id); if (prepared_statement_metadata_kv == null) { return error.InvalidPreparedQueryID; } const ps_result_metadata_id = prepared_statement_metadata_kv.?.result_metadata_id; const ps_metadata = prepared_statement_metadata_kv.?.metadata; const ps_rows_metadata = prepared_statement_metadata_kv.?.rows_metadata; var values = try std.ArrayList(Value).initCapacity(allocator, 16); var option_ids = OptionIDArrayList{}; try computeValues(allocator, &values, &option_ids, args); // Now that we have both prepared and compute option IDs, check that they're compatible // If not compatible we produce a diagnostic. { const prepared = ps_metadata.column_specs; const computed = option_ids.getItems(); if (prepared.len != computed.len) { diags.execute.not_enough_args = true; return error.InvalidPreparedStatementExecuteArgs; } for (prepared) |column_spec, i| { if (computed[i]) |option| { if (column_spec.option != option) { diags.execute.first_incompatible_arg = .{ .position = i, .prepared = prepared[i], .argument = computed[i], }; return error.InvalidPreparedStatementExecuteArgs; } } } } // Check that the values provided are compatible with the prepared statement // TODO(vincent): handle named values var query_parameters = QueryParameters{ .consistency_level = self.options.consistency, .values = undefined, .skip_metadata = true, .page_size = options.page_size, .paging_state = options.paging_state, .serial_consistency_level = null, .timestamp = null, .keyspace = null, .now_in_seconds = null, }; query_parameters.values = Values{ .Normal = values.toOwnedSlice() }; // Write EXECUTE { var execute_frame = ExecuteFrame{ .query_id = query_id, .result_metadata_id = ps_result_metadata_id, .query_parameters = query_parameters, }; try self.connection.writeFrame(allocator, .{ .opcode = .Execute, .body = execute_frame, }); } // Read either RESULT or ERROR return switch (try self.connection.readFrame(allocator, null)) { 
.Result => |frame| { return switch (frame.result) { .Rows => |rows| blk: { const metadata = if (rows.metadata.column_specs.len > 0) rows.metadata else ps_rows_metadata; break :blk Iterator.init(metadata, rows.data); }, else => null, }; }, .Error => |err| { diags.message = err.message; return error.QueryExecutionFailed; }, else => return error.InvalidServerResponse, }; } }; fn testWithCassandra(harness: *casstest.Harness) !void { // Insert some data const nb_rows = 2; try harness.insertTestData(.AgeToIDs, nb_rows); try harness.insertTestData(.User, nb_rows); // Read and validate the data for the age_to_ids table { const Callback = struct { pub fn do(h: *casstest.Harness, i: usize, row: *casstest.Row.AgeToIDs) !bool { testing.expectEqual(row.age, 0); testing.expectEqualSlices(u8, &[_]u8{ 0, 2, 4, 8 }, row.ids); testing.expectEqualStrings("<NAME>", row.name); testing.expect(h.positive_varint.toConst().eq(row.balance)); return true; } }; const res = try harness.selectAndScan( casstest.Row.AgeToIDs, "SELECT age, name, ids, balance FROM foobar.age_to_ids WHERE age = ?", .{ @intCast(u32, 0), }, Callback.do, ); testing.expect(res); } { const Callback = struct { pub fn do(h: *casstest.Harness, i: usize, row: *casstest.Row.AgeToIDs) !bool { testing.expectEqual(@as(u32, 1), row.age); testing.expectEqualSlices(u8, &[_]u8{ 0, 2, 4, 8 }, row.ids); testing.expectEqualStrings("", row.name); testing.expect(h.negative_varint.toConst().eq(row.balance)); return true; } }; const res = try harness.selectAndScan( casstest.Row.AgeToIDs, "SELECT age, name, ids, balance FROM foobar.age_to_ids WHERE age = ?", .{ @intCast(u32, 1), }, Callback.do, ); testing.expect(res); } // Read and validate the data for the user table { const Callback = struct { pub fn do(h: *casstest.Harness, i: usize, row: *casstest.Row.User) !bool { testing.expectEqual(@as(u64, 2000), row.id); testing.expectEqual(i + 25, row.secondary_id); return true; } }; const res = try harness.selectAndScan( casstest.Row.User, 
"SELECT id, secondary_id FROM foobar.user WHERE id = 2000", .{}, Callback.do, ); testing.expect(res); } } test "client: insert then query" { if (build_options.with_cassandra == null) return error.SkipZigTest; const testParameters = struct { const Self = @This(); compression: ?CompressionAlgorithm, protocol_version: ?ProtocolVersion, pub fn init(s: []const u8) !Self { var self: Self = undefined; var it = mem.tokenize(s, ","); while (true) { const token = it.next() orelse break; var it2 = mem.tokenize(token, ":"); const key = it2.next() orelse continue; const value = it2.next() orelse std.debug.panic("invalid token {s}\n", .{token}); if (mem.eql(u8, "compression", key)) { self.compression = try CompressionAlgorithm.fromString(value); } else if (mem.eql(u8, "protocol", key)) { self.protocol_version = try ProtocolVersion.fromString(value); } } return self; } }; var params = try testParameters.init(build_options.with_cassandra.?); var arena = testing.arenaAllocator(); defer arena.deinit(); var harness: casstest.Harness = undefined; try harness.init( &arena.allocator, params.compression, params.protocol_version, ); defer harness.deinit(); try testWithCassandra(&harness); } const OptionIDArrayList = struct { const Self = @This(); items: [128]?OptionID = undefined, pos: usize = 0, pub fn append(self: *Self, option_id: ?OptionID) !void { self.items[self.pos] = option_id; self.pos += 1; } pub fn getItems(self: *Self) []?OptionID { return self.items[0..self.pos]; } }; test "option id array list" { var option_ids = OptionIDArrayList{}; try option_ids.append(.Tinyint); try option_ids.append(.Smallint); const items = option_ids.getItems(); testing.expectEqual(@as(usize, 2), items.len); testing.expectEqual(OptionID.Tinyint, items[0].?); testing.expectEqual(OptionID.Smallint, items[1].?); } /// Compute a list of Value and OptionID for each field in the tuple or struct args. /// It resolves the values recursively too. 
/// /// TODO(vincent): it's not clear to the caller that data in `args` must outlive `values` because we don't duplicating memory /// unless absolutely necessary in the case of arrays. /// Think of a way to communicate that. fn computeValues(allocator: *mem.Allocator, values: ?*std.ArrayList(Value), options: ?*OptionIDArrayList, args: anytype) !void { if (@typeInfo(@TypeOf(args)) != .Struct) { @compileError("Expected tuple or struct argument, found " ++ @typeName(args) ++ " of type " ++ @tagName(@typeInfo(args))); } var dummy_vals = try std.ArrayList(Value).initCapacity(allocator, 16); defer dummy_vals.deinit(); var vals = values orelse &dummy_vals; var dummy_opts = OptionIDArrayList{}; var opts = options orelse &dummy_opts; inline for (@typeInfo(@TypeOf(args)).Struct.fields) |struct_field, i| { const Type = struct_field.field_type; const arg = @field(args, struct_field.name); try computeSingleValue(allocator, vals, opts, Type, arg); } } fn resolveOption(comptime Type: type) OptionID { // Special case [16]u8 since we consider it a UUID. if (Type == [16]u8) return .UUID; // Special case []const u8 because it's used for strings. if (Type == []const u8) return .Varchar; // Special case big.int types because it's used for varint. 
if (Type == big.int.Mutable or Type == big.int.Const) return .Varint; const type_info = @typeInfo(Type); switch (type_info) { .Bool => return .Boolean, .Int => |info| switch (Type) { i8, u8 => return .Tinyint, i16, u16 => return .Smallint, i32, u32 => return .Int, i64, u64 => return .Bigint, else => @compileError("field type " ++ @typeName(Type) ++ " is not compatible with CQL"), }, .Float => |info| switch (Type) { f32 => return .Float, f64 => return .Double, else => @compileError("field type " ++ @typeName(Type) ++ " is not compatible with CQL"), }, .Pointer => |pointer| switch (pointer.size) { .One => { return resolveOption(pointer.child); }, else => @compileError("invalid pointer size " ++ @tagName(pointer.size)), }, .Optional => |optional| { return resolveOption(optional.child); }, else => @compileError("field type " ++ @typeName(Type) ++ " not handled yet (type id: " ++ @tagName(type_info) ++ ")"), } } fn computeSingleValue(allocator: *mem.Allocator, values: *std.ArrayList(Value), options: *OptionIDArrayList, comptime Type: type, arg: Type) !void { const type_info = @typeInfo(Type); var value: Value = undefined; // Special case [16]u8 since we consider it a UUID. if (Type == [16]u8) { try options.append(.UUID); value = Value{ .Set = try mem.dupe(allocator, u8, &arg) }; try values.append(value); return; } // Special case []const u8 because it's used for strings. if (Type == []const u8) { try options.append(.Varchar); // TODO(vincent): should we make a copy ? value = Value{ .Set = arg }; try values.append(value); return; } // Special case big.int types because it's used for varint. if (Type == big.int.Const) { try options.append(.Varint); value = Value{ .Set = try bigint.toBytes(allocator, arg) }; try values.append(value); return; } // The NotSet struct allows the caller to not set a value, which according to the // protocol will not result in any change to the existing value. 
if (Type == NotSet) { try options.append(resolveOption(arg.type)); value = Value{ .NotSet = {} }; try values.append(value); return; } switch (type_info) { .Bool => { var buf = try allocator.alloc(u8, 1); errdefer allocator.free(buf); buf[0] = if (arg) 0x01 else 0x00; try options.append(.Boolean); value = Value{ .Set = buf }; try values.append(value); }, .Int => |info| { try options.append(resolveOption(Type)); var buf = try allocator.alloc(u8, info.bits / 8); errdefer allocator.free(buf); mem.writeIntBig(Type, @ptrCast(*[info.bits / 8]u8, buf), arg); value = Value{ .Set = buf }; try values.append(value); }, .Float => |info| { try options.append(resolveOption(Type)); var buf = try allocator.alloc(u8, info.bits / 8); errdefer allocator.free(buf); @ptrCast(*align(1) Type, buf).* = arg; value = Value{ .Set = buf }; try values.append(value); }, .Pointer => |pointer| switch (pointer.size) { .One => { try computeValues(allocator, values, options, .{arg.*}); return; }, .Slice => { // Otherwise it's a list or a set, encode a new list of values. var inner_values = std.ArrayList(Value).init(allocator); for (arg) |item| { try computeValues(allocator, &inner_values, null, .{item}); } try options.append(null); value = Value{ .Set = try serializeValues(allocator, inner_values.toOwnedSlice()) }; try values.append(value); }, else => @compileError("invalid pointer size " ++ @tagName(pointer.size)), }, .Array => |array| { // Otherwise it's a list or a set, encode a new list of values. 
var inner_values = std.ArrayList(Value).init(allocator); for (arg) |item| { try computeValues(allocator, &inner_values, null, .{item}); } try options.append(null); value = Value{ .Set = try serializeValues(allocator, inner_values.toOwnedSlice()) }; try values.append(value); }, .Optional => |optional| { if (arg) |a| { try computeSingleValue(allocator, values, options, optional.child, a); } else { try options.append(resolveOption(optional.child)); value = Value{ .Null = {} }; try values.append(value); } }, else => @compileError("field type " ++ @typeName(Type) ++ " not handled yet (type id: " ++ @tagName(type_info) ++ ")"), } } fn serializeValues(allocator: *mem.Allocator, values: []const Value) ![]const u8 { var pw: PrimitiveWriter = undefined; try pw.reset(allocator); try pw.writeInt(u32, @intCast(u32, values.len)); for (values) |value| { switch (value) { .Set => |v| { try pw.writeBytes(v); }, else => {}, } } return pw.toOwnedSlice(); } test "serialize values" { var arenaAllocator = testing.arenaAllocator(); defer arenaAllocator.deinit(); var v1 = Value{ .Set = "foobar" }; var v2 = Value{ .Set = "barbaz" }; var data = try serializeValues(&arenaAllocator.allocator, &[_]Value{ v1, v2 }); testing.expectEqual(@as(usize, 24), data.len); testing.expectEqualSlices(u8, "\x00\x00\x00\x02", data[0..4]); testing.expectEqualStrings("\x00\x00\x00\x06foobar\x00\x00\x00\x06barbaz", data[4..]); } test "compute values: ints" { var arenaAllocator = testing.arenaAllocator(); defer arenaAllocator.deinit(); var allocator = &arenaAllocator.allocator; var values = std.ArrayList(Value).init(allocator); var options = OptionIDArrayList{}; const my_u64 = @as(u64, 20000); _ = try computeValues(allocator, &values, &options, .{ .i_tinyint = @as(i8, 0x7f), .u_tinyint = @as(u8, 0xff), .i_smallint = @as(i16, 0x7fff), .u_smallint = @as(u16, 0xdedf), .i_int = @as(i32, 0x7fffffff), .u_int = @as(u32, 0xabababaf), .i_bigint = @as(i64, 0x7fffffffffffffff), .u_bigint = @as(u64, 0xdcdcdcdcdcdcdcdf), 
.u_bigint_ptr = &my_u64, }); var v = values.items; var o = options.getItems(); testing.expectEqual(@as(usize, 9), v.len); testing.expectEqual(@as(usize, 9), o.len); testing.expectEqualSlices(u8, "\x7f", v[0].Set); testing.expectEqual(OptionID.Tinyint, o[0].?); testing.expectEqualSlices(u8, "\xff", v[1].Set); testing.expectEqual(OptionID.Tinyint, o[1].?); testing.expectEqualSlices(u8, "\x7f\xff", v[2].Set); testing.expectEqual(OptionID.Smallint, o[2].?); testing.expectEqualSlices(u8, "\xde\xdf", v[3].Set); testing.expectEqual(OptionID.Smallint, o[3].?); testing.expectEqualSlices(u8, "\x7f\xff\xff\xff", v[4].Set); testing.expectEqual(OptionID.Int, o[4].?); testing.expectEqualSlices(u8, "\xab\xab\xab\xaf", v[5].Set); testing.expectEqual(OptionID.Int, o[5].?); testing.expectEqualSlices(u8, "\x7f\xff\xff\xff\xff\xff\xff\xff", v[6].Set); testing.expectEqual(OptionID.Bigint, o[6].?); testing.expectEqualSlices(u8, "\xdc\xdc\xdc\xdc\xdc\xdc\xdc\xdf", v[7].Set); testing.expectEqual(OptionID.Bigint, o[7].?); testing.expectEqualSlices(u8, "\x00\x00\x00\x00\x00\x00\x4e\x20", v[8].Set); testing.expectEqual(OptionID.Bigint, o[8].?); } test "compute values: floats" { var arenaAllocator = testing.arenaAllocator(); defer arenaAllocator.deinit(); var allocator = &arenaAllocator.allocator; var values = std.ArrayList(Value).init(allocator); var options = OptionIDArrayList{}; const my_f64 = @as(f64, 402.240); _ = try computeValues(allocator, &values, &options, .{ .f32 = @as(f32, 0.002), .f64 = @as(f64, 245601.000240305603), .f64_ptr = &my_f64, }); var v = values.items; var o = options.getItems(); testing.expectEqual(@as(usize, 3), v.len); testing.expectEqual(@as(usize, 3), o.len); testing.expectEqualSlices(u8, "\x6f\x12\x03\x3b", v[0].Set); testing.expectEqual(OptionID.Float, o[0].?); testing.expectEqualSlices(u8, "\x46\xfd\x7d\x00\x08\xfb\x0d\x41", v[1].Set); testing.expectEqual(OptionID.Double, o[1].?); testing.expectEqualSlices(u8, "\xa4\x70\x3d\x0a\xd7\x23\x79\x40", v[2].Set); 
testing.expectEqual(OptionID.Double, o[2].?); } test "compute values: strings" { var arenaAllocator = testing.arenaAllocator(); defer arenaAllocator.deinit(); var allocator = &arenaAllocator.allocator; var values = std.ArrayList(Value).init(allocator); var options = OptionIDArrayList{}; _ = try computeValues(allocator, &values, &options, .{ .string = @as([]const u8, try mem.dupe(allocator, u8, "foobar")), }); var v = values.items; var o = options.getItems(); testing.expectEqual(@as(usize, 1), v.len); testing.expectEqual(@as(usize, 1), o.len); testing.expectEqualStrings("foobar", v[0].Set); testing.expectEqual(OptionID.Varchar, o[0].?); } test "compute values: bool" { var arenaAllocator = testing.arenaAllocator(); defer arenaAllocator.deinit(); var allocator = &arenaAllocator.allocator; var values = std.ArrayList(Value).init(allocator); var options = OptionIDArrayList{}; _ = try computeValues(allocator, &values, &options, .{ .bool1 = true, .bool2 = false, }); var v = values.items; var o = options.getItems(); testing.expectEqual(@as(usize, 2), v.len); testing.expectEqual(@as(usize, 2), o.len); testing.expectEqualSlices(u8, "\x01", v[0].Set); testing.expectEqual(OptionID.Boolean, o[0].?); testing.expectEqualSlices(u8, "\x00", v[1].Set); testing.expectEqual(OptionID.Boolean, o[1].?); } test "compute values: set/list" { var arenaAllocator = testing.arenaAllocator(); defer arenaAllocator.deinit(); var allocator = &arenaAllocator.allocator; var values = std.ArrayList(Value).init(allocator); var options = OptionIDArrayList{}; _ = try computeValues(allocator, &values, &options, .{ .string = &[_]u16{ 0x01, 0x2050 }, .string2 = @as([]const u16, &[_]u16{ 0x01, 0x2050 }), }); var v = values.items; var o = options.getItems(); testing.expectEqual(@as(usize, 2), v.len); testing.expectEqual(@as(usize, 2), o.len); testing.expectEqualSlices(u8, "\x00\x00\x00\x02\x00\x00\x00\x02\x00\x01\x00\x00\x00\x02\x20\x50", v[0].Set); testing.expect(o[0] == null); testing.expectEqualSlices(u8, 
"\x00\x00\x00\x02\x00\x00\x00\x02\x00\x01\x00\x00\x00\x02\x20\x50", v[1].Set); testing.expect(o[1] == null); } test "compute values: uuid" { var arenaAllocator = testing.arenaAllocator(); defer arenaAllocator.deinit(); var allocator = &arenaAllocator.allocator; var values = std.ArrayList(Value).init(allocator); var options = OptionIDArrayList{}; _ = try computeValues(allocator, &values, &options, .{ .uuid = [16]u8{ 0x55, 0x94, 0xd5, 0xb1, 0xef, 0x84, 0x41, 0xc4, 0xb2, 0x4e, 0x68, 0x48, 0x8d, 0xcf, 0xa1, 0xc9, }, }); var v = values.items; var o = options.getItems(); testing.expectEqual(@as(usize, 1), v.len); testing.expectEqual(@as(usize, 1), o.len); testing.expectEqualSlices(u8, "\x55\x94\xd5\xb1\xef\x84\x41\xc4\xb2\x4e\x68\x48\x8d\xcf\xa1\xc9", v[0].Set); testing.expectEqual(OptionID.UUID, o[0].?); } test "compute values: not set and null" { var arenaAllocator = testing.arenaAllocator(); defer arenaAllocator.deinit(); var allocator = &arenaAllocator.allocator; var values = std.ArrayList(Value).init(allocator); var options = OptionIDArrayList{}; const Args = struct { not_set: NotSet, nullable: ?u64, }; _ = try computeValues(allocator, &values, &options, Args{ .not_set = NotSet{ .type = i32 }, .nullable = null, }); var v = values.items; var o = options.getItems(); testing.expectEqual(@as(usize, 2), v.len); testing.expectEqual(@as(usize, 2), o.len); testing.expect(v[0] == .NotSet); testing.expectEqual(OptionID.Int, o[0].?); testing.expect(v[1] == .Null); testing.expectEqual(OptionID.Bigint, o[1].?); } fn areOptionIDsEqual(prepared: []const ColumnSpec, computed: []const ?OptionID) bool {} fn countBindMarkers(query_string: []const u8) usize { var pos: usize = 0; var count: usize = 0; while (mem.indexOfScalarPos(u8, query_string, pos, '?')) |i| { count += 1; pos = i + 1; } return count; } test "count bind markers" { const query_string = "select * from foobar.user where id = ? and name = ? 
and age < ?"; const count = countBindMarkers(query_string); testing.expectEqual(@as(usize, 3), count); } test "" { _ = @import("bigint.zig"); if (build_options.with_snappy) { _ = @import("snappy.zig"); } _ = @import("lz4.zig"); }
// src/client.zig
const std = @import("std");
const webgpu = @import("./webgpu.zig");

/// Backend-agnostic GPU buffer handle.
/// All operations dispatch through `__vtable`, which a concrete backend
/// implementation fills in; this struct itself carries no buffer storage.
pub const Buffer = struct {
    /// Function table a backend must provide for a Buffer.
    pub const VTable = struct {
        destroy_fn: fn(*Buffer) void,
        get_const_mapped_range_fn: fn(*Buffer, usize, usize) GetConstMappedRangeError![]align(16) const u8,
        get_mapped_range_fn: fn(*Buffer, usize, usize) GetMappedRangeError![]align(16) u8,
        map_async_fn: fn(*Buffer, webgpu.MapMode, usize, usize) MapAsyncError!void,
        unmap_fn: fn(*Buffer) UnmapError!void,
    };

    // Backend dispatch table; set by the backend that created this buffer.
    __vtable: *const VTable,
    // Device that owns this buffer.
    device: *webgpu.Device,

    /// Destroys the buffer via the backend implementation.
    pub inline fn destroy(buffer: *Buffer) void {
        buffer.__vtable.destroy_fn(buffer);
    }

    pub const GetConstMappedRangeError = error {
        Failed,
    };

    /// Returns a read-only view of the mapped range [offset, offset+size).
    /// Presumably the buffer must already be mapped — TODO confirm against backends.
    pub inline fn getConstMappedRange(buffer: *Buffer, offset: usize, size: usize) GetConstMappedRangeError![]align(16) const u8 {
        return buffer.__vtable.get_const_mapped_range_fn(buffer, offset, size);
    }

    pub const GetMappedRangeError = error {
        Failed,
    };

    /// Returns a writable view of the mapped range [offset, offset+size).
    pub inline fn getMappedRange(buffer: *Buffer, offset: usize, size: usize) GetMappedRangeError![]align(16) u8 {
        return buffer.__vtable.get_mapped_range_fn(buffer, offset, size);
    }

    pub const MapAsyncError = error {};

    /// Requests mapping of the range [offset, offset+size) for `mode` access.
    pub inline fn mapAsync(buffer: *Buffer, mode: webgpu.MapMode, offset: usize, size: usize) MapAsyncError!void {
        return buffer.__vtable.map_async_fn(buffer, mode, offset, size);
    }

    pub const UnmapError = error {};

    /// Unmaps a previously mapped buffer.
    pub inline fn unmap(buffer: *Buffer) UnmapError!void {
        return buffer.__vtable.unmap_fn(buffer);
    }
};

/// Backend-agnostic query set handle; only destruction is dispatched.
pub const QuerySet = struct {
    pub const VTable = struct {
        destroy_fn: fn(*QuerySet) void,
    };

    __vtable: *const VTable,
    device: *webgpu.Device,

    /// Destroys the query set via the backend implementation.
    pub inline fn destroy(query_set: *QuerySet) void {
        query_set.__vtable.destroy_fn(query_set);
    }
};

/// Backend-agnostic sampler handle; only destruction is dispatched.
pub const Sampler = struct {
    pub const VTable = struct {
        destroy_fn: fn(*Sampler) void,
    };

    __vtable: *const VTable,
    device: *webgpu.Device,

    /// Destroys the sampler via the backend implementation.
    pub inline fn destroy(sampler: *Sampler) void {
        sampler.__vtable.destroy_fn(sampler);
    }
};

/// Backend-agnostic swap chain handle.
pub const SwapChain = struct {
    pub const VTable = struct {
        destroy_fn: fn(*SwapChain) void,
        get_current_texture_view_fn: fn(*SwapChain) ?*webgpu.TextureView,
    };

    __vtable: *const VTable,
    device: *webgpu.Device,

    /// Destroys the swap chain via the backend implementation.
    pub inline fn destroy(swap_chain: *SwapChain) void {
        return swap_chain.__vtable.destroy_fn(swap_chain);
    }

    /// Returns the texture view for the current frame, or null if unavailable.
    pub inline fn getCurrentTextureView(swap_chain: *SwapChain) ?*webgpu.TextureView {
        return swap_chain.__vtable.get_current_texture_view_fn(swap_chain);
    }
};

/// Backend-agnostic texture handle.
pub const Texture = struct {
    pub const VTable = struct {
        destroy_fn: fn(*Texture) void,
        create_view_fn: fn(*Texture, webgpu.TextureViewDescriptor) CreateViewError!*webgpu.TextureView,
    };

    __vtable: *const VTable,
    device: *webgpu.Device,

    /// Destroys the texture via the backend implementation.
    pub inline fn destroy(texture: *Texture) void {
        texture.__vtable.destroy_fn(texture);
    }

    pub const CreateViewError = error {
        OutOfMemory,
    };

    /// Creates a view over this texture as described by `descriptor`.
    pub inline fn createView(texture: *Texture, descriptor: webgpu.TextureViewDescriptor) CreateViewError!*webgpu.TextureView {
        return texture.__vtable.create_view_fn(texture, descriptor);
    }
};

/// Backend-agnostic texture view handle; keeps a pointer to its parent texture.
pub const TextureView = struct {
    pub const VTable = struct {
        destroy_fn: fn(*TextureView) void,
    };

    __vtable: *const VTable,
    // Texture this view was created from.
    texture: *Texture,
    device: *webgpu.Device,

    /// Destroys the texture view via the backend implementation.
    pub inline fn destroy(texture_view: *TextureView) void {
        texture_view.__vtable.destroy_fn(texture_view);
    }
};
// src/resource.zig
const std = @import("std"); const sqlite = @import("sqlite"); const manage_main = @import("main.zig"); const libpcre = @import("libpcre"); const Context = manage_main.Context; const log = std.log.scoped(.atags); const VERSION = "0.0.1"; const HELPTEXT = \\ atags: manage your tags \\ \\ usage: \\ atags action [arguments...] \\ \\ options: \\ -h prints this help and exits \\ -V prints version and exits \\ --no-confirm do not ask for confirmation on remove \\ commands. \\ \\ examples: \\ atags create tag \\ atags create --core lkdjfalskjg tag \\ atags search tag \\ atags remove --tag tag \\ atags remove --core dslkjfsldkjf ; const ActionConfig = union(enum) { Create: CreateAction.Config, Remove: RemoveAction.Config, Search: SearchAction.Config, }; const CreateAction = struct { pub const Config = struct { tag_core: ?[]const u8 = null, tag_alias: ?[]const u8 = null, tag: ?[]const u8 = null, }; pub fn processArgs(args_it: *std.process.ArgIterator, given_args: *Args) !ActionConfig { _ = given_args; var config = Config{}; const ArgState = enum { None, NeedTagCore, NeedTagAlias }; var state: ArgState = .None; while (args_it.next()) |arg| { if (state == .NeedTagCore) { config.tag_core = arg; state = .None; } else if (state == .NeedTagAlias) { config.tag_alias = arg; state = .None; } else if (std.mem.eql(u8, arg, "--core")) { state = .NeedTagCore; } else if (std.mem.eql(u8, arg, "--alias")) { state = .NeedTagAlias; } else { config.tag = arg; } if (config.tag_core != null and config.tag_alias != null) { log.err("only one of --core or --alias may be provided", .{}); return error.OnlyOneAliasOrCore; } } return ActionConfig{ .Create = config }; } ctx: *Context, config: Config, const Self = @This(); pub fn init(ctx: *Context, config: Config) !Self { return Self{ .ctx = ctx, .config = config }; } pub fn deinit(self: *Self) void { _ = self; } pub fn run(self: *Self) !void { _ = self; var stdout = std.io.getStdOut().writer(); var raw_core_hash_buffer: [32]u8 = undefined; var 
maybe_core: ?Context.Hash = null; if (self.config.tag_core) |tag_core_hex_string| { maybe_core = try consumeCoreHash(self.ctx, &raw_core_hash_buffer, tag_core_hex_string); } else if (self.config.tag_alias) |tag_core_hex_string| { // tag aliasing is a process where you have two separate tags // and you want them both to refer to the same core, in a non // destructive manner, by relinking files from the tag that's going // to become the alias. // // for purposes of explanation, we'll consider that we have // tag A and tag B, and we want B to be an alias of A // // to do so, we need to // - find all files that are linked to B // - link them to A // - delete tag B // - create tag B, with core set to A var savepoint = try self.ctx.db.?.savepoint("tag_aliasing"); errdefer savepoint.rollback(); defer savepoint.commit(); var tag_to_be_aliased_to = try consumeCoreHash(self.ctx, &raw_core_hash_buffer, tag_core_hex_string); var tag_to_be_aliased_from = if (try self.ctx.fetchNamedTag(self.config.tag.?, "en")) |tag_text| tag_text else return error.UnknownTag; if (tag_to_be_aliased_from.core.id == tag_to_be_aliased_to.id) { log.err( "tag {s} already is pointing to core {s}, making a new alias of an existing alias is a destructive operation", .{ self.config.tag.?, tag_to_be_aliased_to }, ); return error.TagAlreadyAliased; } // find all tags with that single tag (tag_to_be_aliased_from) const SqlGiver = @import("./find_main.zig").SqlGiver; var giver = try SqlGiver.init(); defer giver.deinit(); // always wrap given tag text in quotemarks so that its // properly parsed by SqlGiver var find_query_text = try std.fmt.allocPrint(self.ctx.allocator, "\"{s}\"", .{self.config.tag.?}); defer self.ctx.allocator.free(find_query_text); var wrapped_sql_result = try giver.giveMeSql(self.ctx.allocator, find_query_text); defer wrapped_sql_result.deinit(); const sql_result = switch (wrapped_sql_result) { .Ok => |ok_body| ok_body, .Error => |error_body| { log.err("parse error at character {d}: {s}", 
.{ error_body.character, error_body.error_type }); return error.ParseErrorHappened; }, }; if (sql_result.tags.len != 1) { log.err("expected 1 tag to bind from find query: '{s}', got {d}", .{ self.config.tag.?, sql_result.tags.len }); return error.ExpectedSingleTag; } std.debug.assert(std.mem.eql(u8, sql_result.tags[0], self.config.tag.?)); // execute query and bind to tag_to_be_aliased_from var stmt = try self.ctx.db.?.prepareDynamic(sql_result.query); defer stmt.deinit(); var args = [1]i64{tag_to_be_aliased_from.core.id}; var it = try stmt.iterator(i64, args); // add tag_to_be_aliased_to to all returned files while (try it.next(.{})) |file_hash_id| { var file = (try self.ctx.fetchFile(file_hash_id)).?; defer file.deinit(); try file.addTag(tag_to_be_aliased_to); try stdout.print("relinked {s}", .{file.local_path}); try file.printTagsTo(self.ctx.allocator, stdout); try stdout.print("\n", .{}); } // delete tag_to_be_aliased_from const deleted_tag_names = try tag_to_be_aliased_from.deleteAll(&self.ctx.db.?); log.info("deleted {d} tag names", .{deleted_tag_names}); // and create the proper alias (can only be done after deletion) const aliased_tag = try self.ctx.createNamedTag(self.config.tag.?, "en", tag_to_be_aliased_to); log.info("full tag info: {}", .{aliased_tag}); return; } const tag = try self.ctx.createNamedTag(self.config.tag.?, "en", maybe_core); try stdout.print( "created tag with core '{s}' name '{s}'\n", .{ tag.core, tag }, ); } }; test "create action" { const config = CreateAction.Config{ .tag_core = null, .tag = "test tag", }; var ctx = try manage_main.makeTestContext(); defer ctx.deinit(); var action = try CreateAction.init(&ctx, config); defer action.deinit(); try action.run(); _ = (try ctx.fetchNamedTag("test tag", "en")) orelse return error.ExpectedTag; } test "create action (aliasing)" { var ctx = try manage_main.makeTestContext(); defer ctx.deinit(); var tag1 = try ctx.createNamedTag("test tag1", "en", null); var tag2_before_alias = try 
ctx.createNamedTag("test tag2", "en", null); try std.testing.expect(tag2_before_alias.core.id != tag1.core.id); const tag1_core = tag1.core.toHex(); // turn tag2 into an alias of tag1 const config = CreateAction.Config{ .tag_core = null, .tag_alias = &tag1_core, .tag = "test tag2", }; var action = try CreateAction.init(&ctx, config); defer action.deinit(); try action.run(); // tag1 must still exist // tag2 must still exist, but with same core now var tag1_after_alias = (try ctx.fetchNamedTag("test tag1", "en")).?; var tag2_after_alias = (try ctx.fetchNamedTag("test tag2", "en")).?; try std.testing.expectEqual(tag1.core.id, tag1_after_alias.core.id); try std.testing.expectEqual(tag1.core.id, tag2_after_alias.core.id); } fn consumeCoreHash(ctx: *Context, raw_core_hash_buffer: *[32]u8, tag_core_hex_string: []const u8) !Context.Hash { if (tag_core_hex_string.len != 64) { log.err("hashes myst be 64 bytes long, got {d}", .{tag_core_hex_string.len}); return error.InvalidHashLength; } var raw_core_hash = try std.fmt.hexToBytes(raw_core_hash_buffer, tag_core_hex_string); const hash_blob = sqlite.Blob{ .data = raw_core_hash }; const hash_id = (try ctx.db.?.one( i64, \\ select hashes.id \\ from hashes \\ join tag_cores \\ on tag_cores.core_hash = hashes.id \\ where hashes.hash_data = ? 
, .{}, .{hash_blob}, )) orelse { return error.UnknownTagCore; }; log.debug("found hash_id for the given core: {d}", .{hash_id}); return Context.Hash{ .id = hash_id, .hash_data = raw_core_hash_buffer.* }; } const RemoveAction = struct { pub const Config = struct { tag_core: ?[]const u8 = null, tag: ?[]const u8 = null, given_args: *const Args, }; pub fn processArgs(args_it: *std.process.ArgIterator, given_args: *Args) !ActionConfig { _ = given_args; var config = Config{ .given_args = given_args }; const ArgState = enum { None, NeedTagCore, NeedTag }; var state: ArgState = .None; while (args_it.next()) |arg| { if (state == .NeedTagCore) { config.tag_core = arg; state = .None; } else if (state == .NeedTag) { config.tag = arg; state = .None; } else if (std.mem.eql(u8, arg, "--core")) { state = .NeedTagCore; } else if (std.mem.eql(u8, arg, "--tag")) { state = .NeedTag; } else { return error.InvalidArgument; } } return ActionConfig{ .Remove = config }; } ctx: *Context, config: Config, const Self = @This(); pub fn init(ctx: *Context, config: Config) !Self { return Self{ .ctx = ctx, .config = config }; } pub fn deinit(self: *Self) void { _ = self; } pub fn run(self: *Self) !void { _ = self; var stdout = std.io.getStdOut().writer(); var stdin = std.io.getStdIn().reader(); var raw_core_hash_buffer: [32]u8 = undefined; var amount: usize = 0; var core_hash_id: ?i64 = null; try stdout.print("the following tags will be removed:\n", .{}); if (self.config.tag_core) |tag_core_hex_string| { var core = try consumeCoreHash(self.ctx, &raw_core_hash_buffer, tag_core_hex_string); core_hash_id = core.id; // to delete the core, we need to delete every tag that references this tag core // // since this is a VERY destructive operation, we print the tag // names that are affected by this command, requiring user // confirmation to continue. 
var stmt = try self.ctx.db.?.prepare( "select tag_text, tag_language from tag_names where core_hash = ?", ); defer stmt.deinit(); var it = try stmt.iteratorAlloc( struct { tag_text: []const u8, tag_language: []const u8, }, self.ctx.allocator, .{core.id}, ); while (try it.nextAlloc(self.ctx.allocator, .{})) |tag_name| { defer { self.ctx.allocator.free(tag_name.tag_text); self.ctx.allocator.free(tag_name.tag_language); } try stdout.print(" {s}", .{tag_name.tag_text}); amount += 1; } try stdout.print("\n", .{}); } else if (self.config.tag) |tag_text| { var maybe_tag = try self.ctx.fetchNamedTag(tag_text, "en"); if (maybe_tag) |tag| { try stdout.print(" {s}", .{tag.kind.Named.text}); core_hash_id = tag.core.id; amount += 1; } else { return error.NamedTagNotFound; } try stdout.print("\n", .{}); } else { unreachable; } { const referenced_files = try self.ctx.db.?.one( i64, "select count(*) from tag_files where core_hash = ?", .{}, .{core_hash_id}, ); try stdout.print("{d} files reference this tag.\n", .{referenced_files}); } if (self.config.given_args.ask_confirmation) { var outcome: [1]u8 = undefined; try stdout.print("do you want to remove {d} tags (y/n)? ", .{amount}); _ = try stdin.read(&outcome); if (!std.mem.eql(u8, &outcome, "y")) return error.NotConfirmed; } var deleted_count: ?i64 = null; if (self.config.tag_core) |tag_core_hex_string| { var core = try consumeCoreHash(self.ctx, &raw_core_hash_buffer, tag_core_hex_string); // TODO fix deleted_count here deleted_count = (try self.ctx.db.?.one( i64, \\ delete from tag_names \\ where core_hash = ? \\ returning ( \\ select count(*) \\ from tag_names \\ where core_hash = ? 
\\ ) as deleted_count , .{}, .{ core.id, core.id }, )).?; try self.ctx.db.?.exec("delete from tag_cores where core_hash = ?", .{}, .{core.id}); try self.ctx.db.?.exec("delete from hashes where id = ?", .{}, .{core.id}); } else if (self.config.tag) |tag_text| { deleted_count = (try self.ctx.db.?.one( i64, \\ delete from tag_names \\ where tag_text = ? and tag_language = ? \\ returning ( \\ select count(*) \\ from tag_names \\ where tag_text = ? and tag_language = ? \\ ) as deleted_count , .{}, .{ tag_text, "en", tag_text, "en" }, )).?; } try stdout.print("deleted {d} tags\n", .{deleted_count.?}); } }; test "remove action" { var ctx = try manage_main.makeTestContext(); defer ctx.deinit(); var tag = try ctx.createNamedTag("test tag", "en", null); var tag2 = try ctx.createNamedTag("test tag2", "en", tag.core); _ = tag2; var tag3 = try ctx.createNamedTag("test tag3", "en", null); _ = tag3; var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); var file = try tmp.dir.createFile("test_file", .{}); defer file.close(); _ = try file.write("awooga"); var indexed_file = try ctx.createFileFromDir(tmp.dir, "test_file"); defer indexed_file.deinit(); // setup file tags to 1, 2, 3 try indexed_file.addTag(tag.core); try indexed_file.addTag(tag3.core); const tag1_core = tag.core.toHex(); const args = Args{ .ask_confirmation = false }; const config = RemoveAction.Config{ .tag_core = &tag1_core, .tag = null, .given_args = &args, }; var action = try RemoveAction.init(&ctx, config); defer action.deinit(); try action.run(); // tag must be gone var maybe_tag1 = try ctx.fetchNamedTag("test tag1", "en"); try std.testing.expectEqual(@as(?Context.Tag, null), maybe_tag1); var maybe_tag2 = try ctx.fetchNamedTag("test tag2", "en"); try std.testing.expectEqual(@as(?Context.Tag, null), maybe_tag2); var maybe_tag3 = try ctx.fetchNamedTag("test tag3", "en"); try std.testing.expect(maybe_tag3 != null); // file should only have tag3 var tag_cores = try indexed_file.fetchTags(std.testing.allocator); 
defer std.testing.allocator.free(tag_cores); try std.testing.expectEqual(@as(usize, 1), tag_cores.len); try std.testing.expectEqual(tag3.core.id, tag_cores[0].id); } const SearchAction = struct { pub const Config = struct { query: ?[]const u8 = null, }; pub fn processArgs(args_it: *std.process.ArgIterator, given_args: *Args) !ActionConfig { _ = given_args; var config = Config{}; config.query = args_it.next() orelse return error.MissingQuery; return ActionConfig{ .Search = config }; } ctx: *Context, config: Config, const Self = @This(); pub fn init(ctx: *Context, config: Config) !Self { return Self{ .ctx = ctx, .config = config }; } pub fn deinit(self: *Self) void { _ = self; } pub fn run(self: *Self) !void { var stdout = std.io.getStdOut().writer(); var stmt = try self.ctx.db.?.prepare( \\ select distinct core_hash core_hash, hashes.hash_data \\ from tag_names \\ join hashes \\ on hashes.id = tag_names.core_hash \\ where tag_text LIKE '%' || ? || '%' ); defer stmt.deinit(); var tag_names = try stmt.all( struct { core_hash: i64, hash_data: sqlite.Blob, }, self.ctx.allocator, .{}, .{self.config.query.?}, ); defer { for (tag_names) |tag| { self.ctx.allocator.free(tag.hash_data.data); } self.ctx.allocator.free(tag_names); } for (tag_names) |tag_name| { const fake_hash = Context.HashWithBlob{ .id = tag_name.core_hash, .hash_data = tag_name.hash_data, }; var related_tags = try self.ctx.fetchTagsFromCore( self.ctx.allocator, fake_hash.toRealHash(), ); defer related_tags.deinit(); const full_tag_core = related_tags.items[0].core; try stdout.print("{s}", .{full_tag_core}); for (related_tags.items) |tag| { try stdout.print(" '{s}'", .{tag}); } try stdout.print("\n", .{}); } } }; const Args = struct { help: bool = false, version: bool = false, ask_confirmation: bool = true, action_config: ?ActionConfig = null, dry_run: bool = false, }; pub fn main() anyerror!void { const rc = sqlite.c.sqlite3_config(sqlite.c.SQLITE_CONFIG_LOG, manage_main.sqliteLog, @as(?*anyopaque, null)); 
if (rc != sqlite.c.SQLITE_OK) { std.log.err("failed to configure: {d} '{s}'", .{ rc, sqlite.c.sqlite3_errstr(rc), }); return error.ConfigFail; } var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); var allocator = gpa.allocator(); var args_it = std.process.args(); _ = args_it.skip(); var given_args = Args{}; while (args_it.next()) |arg| { if (std.mem.eql(u8, arg, "-h")) { given_args.help = true; } else if (std.mem.eql(u8, arg, "-V")) { given_args.version = true; } else if (std.mem.eql(u8, arg, "--no-confirm")) { given_args.ask_confirmation = false; } else if (std.mem.eql(u8, arg, "--dry-run")) { given_args.dry_run = true; } else { if (std.mem.eql(u8, arg, "search")) { given_args.action_config = try SearchAction.processArgs(&args_it, &given_args); } else if (std.mem.eql(u8, arg, "create")) { given_args.action_config = try CreateAction.processArgs(&args_it, &given_args); } else if (std.mem.eql(u8, arg, "remove")) { given_args.action_config = try RemoveAction.processArgs(&args_it, &given_args); } else { log.err("{s} is an invalid action", .{arg}); return error.InvalidAction; } } } if (given_args.help) { std.debug.print(HELPTEXT, .{}); return; } else if (given_args.version) { std.debug.print("ainclude {s}\n", .{VERSION}); return; } if (given_args.action_config == null) { std.log.err("action is a required argument", .{}); return error.MissingAction; } const action_config = given_args.action_config.?; var ctx = Context{ .home_path = null, .args_it = undefined, .stdout = undefined, .db = null, .allocator = allocator, }; defer ctx.deinit(); try ctx.loadDatabase(.{}); if (given_args.dry_run) try ctx.turnIntoMemoryDb(); switch (action_config) { .Search => |search_config| { var self = try SearchAction.init(&ctx, search_config); defer self.deinit(); try self.run(); }, .Create => |create_config| { var self = try CreateAction.init(&ctx, create_config); defer self.deinit(); try self.run(); }, .Remove => |remove_config| { var self = try 
RemoveAction.init(&ctx, remove_config); defer self.deinit(); try self.run(); }, } }
// src/tags_main.zig
const std = @import("std");
const builtin = @import("builtin");
const mem = std.mem;
const math = std.math;
const assert = std.debug.assert;
const Air = @import("Air.zig");
const Zir = @import("Zir.zig");
const Liveness = @import("Liveness.zig");
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const TypedValue = @import("TypedValue.zig");
const link = @import("link.zig");
const Module = @import("Module.zig");
const Compilation = @import("Compilation.zig");
const ErrorMsg = Module.ErrorMsg;
const Target = std.Target;
const Allocator = mem.Allocator;
const trace = @import("tracy.zig").trace;
const DW = std.dwarf;
const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
const RegisterManager = @import("register_manager.zig").RegisterManager;

/// Outcome of generating machine code for a single function body.
pub const FnResult = union(enum) {
    /// The `code` parameter passed to `generateSymbol` has the value appended.
    appended: void,
    /// Code generation failed; the error message is owned by the caller's allocator.
    fail: *ErrorMsg,
};

/// Outcome of lowering a single constant value to bytes.
pub const Result = union(enum) {
    /// The `code` parameter passed to `generateSymbol` has the value appended.
    appended: void,
    /// The value is available externally, `code` is unused.
    externally_managed: []const u8,
    fail: *ErrorMsg,
};

pub const GenerateSymbolError = error{
    OutOfMemory,
    Overflow,
    /// A Decl that this symbol depends on had a semantic analysis failure.
    AnalysisFail,
};

/// Destination for the debug information emitted alongside machine code.
pub const DebugInfoOutput = union(enum) {
    dwarf: struct {
        dbg_line: *std.ArrayList(u8),
        dbg_info: *std.ArrayList(u8),
        dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
    },
    /// the plan9 debuginfo output is a bytecode with 4 opcodes
    /// assume all numbers/variables are bytes
    /// 0 w x y z -> interpret w x y z as a big-endian i32, and add it to the line offset
    /// x when x < 65 -> add x to line offset
    /// x when x < 129 -> subtract 64 from x and subtract it from the line offset
    /// x -> subtract 129 from x, multiply it by the quanta of the instruction size
    /// (1 on x86_64), and add it to the pc
    /// after every opcode, add the quanta of the instruction size to the pc
    plan9: struct {
        /// the actual opcodes
        dbg_line: *std.ArrayList(u8),
        /// what line the debuginfo starts on
        /// this helps because the linker might have to insert some opcodes to make sure that the line count starts at the right amount for the next decl
        start_line: *?u32,
        /// what the line count ends on after codegen
        /// this helps because the linker might have to insert some opcodes to make sure that the line count starts at the right amount for the next decl
        end_line: *u32,
        /// the last pc change op
        /// This is very useful for adding quanta
        /// to it if its not actually the last one.
        pcop_change_index: *?u32,
    },
    none,
};

/// Dispatches machine-code generation for `func` to the per-architecture
/// backend selected by the binary file's target CPU architecture.
/// The generated bytes are appended to `code`; debug info goes to `debug_output`.
pub fn generateFunction(
    bin_file: *link.File,
    src_loc: Module.SrcLoc,
    func: *Module.Fn,
    air: Air,
    liveness: Liveness,
    code: *std.ArrayList(u8),
    debug_output: DebugInfoOutput,
) GenerateSymbolError!FnResult {
    switch (bin_file.options.target.cpu.arch) {
        .wasm32 => unreachable, // has its own code path
        .wasm64 => unreachable, // has its own code path
        .arm,
        .armeb,
        => return @import("arch/arm/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        .aarch64,
        .aarch64_be,
        .aarch64_32,
        => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.arc => return Function(.arc).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.avr => return Function(.avr).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.bpfel => return Function(.bpfel).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.bpfeb => return Function(.bpfeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.hexagon => return Function(.hexagon).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.mips => return Function(.mips).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.mipsel => return Function(.mipsel).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.mips64 => return Function(.mips64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.mips64el => return Function(.mips64el).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.msp430 => return Function(.msp430).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.powerpc => return Function(.powerpc).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.powerpc64 => return Function(.powerpc64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.powerpc64le => return Function(.powerpc64le).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.r600 => return Function(.r600).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.amdgcn => return Function(.amdgcn).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.riscv32 => return Function(.riscv32).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.sparc => return Function(.sparc).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.sparcv9 => return Function(.sparcv9).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.sparcel => return Function(.sparcel).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.s390x => return Function(.s390x).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.tce => return Function(.tce).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.tcele => return Function(.tcele).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.thumb => return Function(.thumb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.thumbeb => return Function(.thumbeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.i386 => return Function(.i386).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.xcore => return Function(.xcore).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.nvptx => return Function(.nvptx).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.nvptx64 => return Function(.nvptx64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.le32 => return Function(.le32).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.le64 => return Function(.le64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.amdil => return Function(.amdil).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.amdil64 => return Function(.amdil64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.hsail => return Function(.hsail).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.hsail64 => return Function(.hsail64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.spir => return Function(.spir).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.spir64 => return Function(.spir64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.kalimba => return Function(.kalimba).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.shave => return Function(.shave).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.lanai => return Function(.lanai).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.renderscript32 => return Function(.renderscript32).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.renderscript64 => return Function(.renderscript64).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        //.ve => return Function(.ve).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
        else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. Eventually these will be completed, but stage1 is slow and a memory hog."),
    }
}

/// Lowers a comptime-known `typed_value` to raw bytes, appending them to `code`
/// (or returning an externally-owned slice via `Result.externally_managed`).
/// Recurses for aggregate types; each recursion appends in declaration order.
pub fn generateSymbol(
    bin_file: *link.File,
    parent_atom_index: u32,
    src_loc: Module.SrcLoc,
    typed_value: TypedValue,
    code: *std.ArrayList(u8),
    debug_output: DebugInfoOutput,
) GenerateSymbolError!Result {
    const tracy = trace(@src());
    defer tracy.end();

    log.debug("generateSymbol: ty = {}, val = {}", .{ typed_value.ty, typed_value.val });

    // Fully-undefined values are filled with the 0xaa debug pattern.
    if (typed_value.val.isUndefDeep()) {
        const target = bin_file.options.target;
        const abi_size = try math.cast(usize, typed_value.ty.abiSize(target));
        try code.appendNTimes(0xaa, abi_size);
        return Result{ .appended = {} };
    }

    switch (typed_value.ty.zigTypeTag()) {
        .Fn => {
            return Result{
                .fail = try ErrorMsg.create(
                    bin_file.allocator,
                    src_loc,
                    "TODO implement generateSymbol function pointers",
                    .{},
                ),
            };
        },
        .Array => switch (typed_value.val.tag()) {
            .bytes => {
                // TODO populate .debug_info for the array
                const payload = typed_value.val.castTag(.bytes).?;
                if (typed_value.ty.sentinel()) |sentinel| {
                    try code.ensureUnusedCapacity(payload.data.len + 1);
                    code.appendSliceAssumeCapacity(payload.data);
                    switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
                        .ty = typed_value.ty.elemType(),
                        .val = sentinel,
                    }, code, debug_output)) {
                        .appended => return Result{ .appended = {} },
                        .externally_managed => |slice| {
                            code.appendSliceAssumeCapacity(slice);
                            return Result{ .appended = {} };
                        },
                        .fail => |em| return Result{ .fail = em },
                    }
                } else {
                    // No sentinel: the byte payload can be used in place.
                    return Result{ .externally_managed = payload.data };
                }
            },
            .array => {
                // TODO populate .debug_info for the array
                const elem_vals = typed_value.val.castTag(.array).?.data;
                const elem_ty = typed_value.ty.elemType();
                for (elem_vals) |elem_val| {
                    switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
                        .ty = elem_ty,
                        .val = elem_val,
                    }, code, debug_output)) {
                        .appended => {},
                        .externally_managed => |slice| {
                            code.appendSliceAssumeCapacity(slice);
                        },
                        .fail => |em| return Result{ .fail = em },
                    }
                }
                return Result{ .appended = {} };
            },
            .repeated => {
                // One value repeated `arrayLen` times, plus an optional sentinel.
                const array = typed_value.val.castTag(.repeated).?.data;
                const elem_ty = typed_value.ty.childType();
                const sentinel = typed_value.ty.sentinel();
                const len = typed_value.ty.arrayLen();

                var index: u64 = 0;
                while (index < len) : (index += 1) {
                    switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
                        .ty = elem_ty,
                        .val = array,
                    }, code, debug_output)) {
                        .appended => {},
                        .externally_managed => |slice| {
                            code.appendSliceAssumeCapacity(slice);
                        },
                        .fail => |em| return Result{ .fail = em },
                    }
                }

                if (sentinel) |sentinel_val| {
                    switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
                        .ty = elem_ty,
                        .val = sentinel_val,
                    }, code, debug_output)) {
                        .appended => {},
                        .externally_managed => |slice| {
                            code.appendSliceAssumeCapacity(slice);
                        },
                        .fail => |em| return Result{ .fail = em },
                    }
                }

                return Result{ .appended = {} };
            },
            .empty_array_sentinel => {
                // Zero-length array: only the sentinel is emitted.
                const elem_ty = typed_value.ty.childType();
                const sentinel_val = typed_value.ty.sentinel().?;
                switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
                    .ty = elem_ty,
                    .val = sentinel_val,
                }, code, debug_output)) {
                    .appended => {},
                    .externally_managed => |slice| {
                        code.appendSliceAssumeCapacity(slice);
                    },
                    .fail => |em| return Result{ .fail = em },
                }
                return Result{ .appended = {} };
            },
            else => return Result{
                .fail = try ErrorMsg.create(
                    bin_file.allocator,
                    src_loc,
                    "TODO implement generateSymbol for array type value: {s}",
                    .{@tagName(typed_value.val.tag())},
                ),
            },
        },
        .Pointer => switch (typed_value.val.tag()) {
            .variable => {
                const decl = typed_value.val.castTag(.variable).?.data.owner_decl;
                return lowerDeclRef(bin_file, parent_atom_index, src_loc, typed_value, decl, code, debug_output);
            },
            .decl_ref => {
                const decl = typed_value.val.castTag(.decl_ref).?.data;
                return lowerDeclRef(bin_file, parent_atom_index, src_loc, typed_value, decl, code, debug_output);
            },
            .slice => {
                const slice = typed_value.val.castTag(.slice).?.data;

                // generate ptr
                var buf: Type.SlicePtrFieldTypeBuffer = undefined;
                const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf);
                switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
                    .ty = slice_ptr_field_type,
                    .val = slice.ptr,
                }, code, debug_output)) {
                    .appended => {},
                    .externally_managed => |external_slice| {
                        code.appendSliceAssumeCapacity(external_slice);
                    },
                    .fail => |em| return Result{ .fail = em },
                }

                // generate length
                switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
                    .ty = Type.initTag(.usize),
                    .val = slice.len,
                }, code, debug_output)) {
                    .appended => {},
                    .externally_managed => |external_slice| {
                        code.appendSliceAssumeCapacity(external_slice);
                    },
                    .fail => |em| return Result{ .fail = em },
                }

                return Result{ .appended = {} };
            },
            else => return Result{
                .fail = try ErrorMsg.create(
                    bin_file.allocator,
                    src_loc,
                    "TODO implement generateSymbol for pointer type value: '{s}'",
                    .{@tagName(typed_value.val.tag())},
                ),
            },
        },
        .Int => {
            // TODO populate .debug_info for the integer
            const endian = bin_file.options.target.cpu.arch.endian();
            const info = typed_value.ty.intInfo(bin_file.options.target);
            if (info.bits <= 8) {
                const x = @intCast(u8, typed_value.val.toUnsignedInt());
                try code.append(x);
                return Result{ .appended = {} };
            }
            if (info.bits > 64) {
                return Result{
                    .fail = try ErrorMsg.create(
                        bin_file.allocator,
                        src_loc,
                        "TODO implement generateSymbol for big ints ('{}')",
                        .{typed_value.ty},
                    ),
                };
            }
            // 9..64-bit integers are widened to the next 16/32/64-bit store.
            switch (info.signedness) {
                .unsigned => {
                    if (info.bits <= 16) {
                        const x = @intCast(u16, typed_value.val.toUnsignedInt());
                        mem.writeInt(u16, try code.addManyAsArray(2), x, endian);
                    } else if (info.bits <= 32) {
                        const x = @intCast(u32, typed_value.val.toUnsignedInt());
                        mem.writeInt(u32, try code.addManyAsArray(4), x, endian);
                    } else {
                        const x = typed_value.val.toUnsignedInt();
                        mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
                    }
                },
                .signed => {
                    if (info.bits <= 16) {
                        const x = @intCast(i16, typed_value.val.toSignedInt());
                        mem.writeInt(i16, try code.addManyAsArray(2), x, endian);
                    } else if (info.bits <= 32) {
                        const x = @intCast(i32, typed_value.val.toSignedInt());
                        mem.writeInt(i32, try code.addManyAsArray(4), x, endian);
                    } else {
                        const x = typed_value.val.toSignedInt();
                        mem.writeInt(i64, try code.addManyAsArray(8), x, endian);
                    }
                },
            }
            return Result{ .appended = {} };
        },
        .Enum => {
            // TODO populate .debug_info for the enum
            // Enums are lowered as their integer tag value.
            var int_buffer: Value.Payload.U64 = undefined;
            const int_val = typed_value.enumToInt(&int_buffer);

            const target = bin_file.options.target;
            const info = typed_value.ty.intInfo(target);
            if (info.bits <= 8) {
                const x = @intCast(u8, int_val.toUnsignedInt());
                try code.append(x);
                return Result{ .appended = {} };
            }
            if (info.bits > 64) {
                return Result{
                    .fail = try ErrorMsg.create(
                        bin_file.allocator,
                        src_loc,
                        "TODO implement generateSymbol for big int enums ('{}')",
                        .{typed_value.ty},
                    ),
                };
            }
            const endian = target.cpu.arch.endian();
            switch (info.signedness) {
                .unsigned => {
                    if (info.bits <= 16) {
                        const x = @intCast(u16, int_val.toUnsignedInt());
                        mem.writeInt(u16, try code.addManyAsArray(2), x, endian);
                    } else if (info.bits <= 32) {
                        const x = @intCast(u32, int_val.toUnsignedInt());
                        mem.writeInt(u32, try code.addManyAsArray(4), x, endian);
                    } else {
                        const x = int_val.toUnsignedInt();
                        mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
                    }
                },
                .signed => {
                    if (info.bits <= 16) {
                        const x = @intCast(i16, int_val.toSignedInt());
                        mem.writeInt(i16, try code.addManyAsArray(2), x, endian);
                    } else if (info.bits <= 32) {
                        const x = @intCast(i32, int_val.toSignedInt());
                        mem.writeInt(i32, try code.addManyAsArray(4), x, endian);
                    } else {
                        const x = int_val.toSignedInt();
                        mem.writeInt(i64, try code.addManyAsArray(8), x, endian);
                    }
                },
            }
            return Result{ .appended = {} };
        },
        .Bool => {
            const x: u8 = @boolToInt(typed_value.val.toBool());
            try code.append(x);
            return Result{ .appended = {} };
        },
        .Struct => {
            const struct_obj = typed_value.ty.castTag(.@"struct").?.data;
            if (struct_obj.layout == .Packed) {
                return Result{
                    .fail = try ErrorMsg.create(
                        bin_file.allocator,
                        src_loc,
                        "TODO implement generateSymbol for packed struct",
                        .{},
                    ),
                };
            }

            const struct_begin = code.items.len;
            const field_vals = typed_value.val.castTag(.@"struct").?.data;
            for (field_vals) |field_val, index| {
                const field_ty = typed_value.ty.structFieldType(index);
                if (!field_ty.hasRuntimeBits()) continue;

                switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
                    .ty = field_ty,
                    .val = field_val,
                }, code, debug_output)) {
                    .appended => {},
                    .externally_managed => |external_slice| {
                        code.appendSliceAssumeCapacity(external_slice);
                    },
                    .fail => |em| return Result{ .fail = em },
                }
                const unpadded_field_end = code.items.len - struct_begin;

                // Pad struct members if required
                const target = bin_file.options.target;
                const padded_field_end = typed_value.ty.structFieldOffset(index + 1, target);
                const padding = try math.cast(usize, padded_field_end - unpadded_field_end);

                if (padding > 0) {
                    try code.writer().writeByteNTimes(0, padding);
                }
            }

            return Result{ .appended = {} };
        },
        .Union => {
            // TODO generateSymbol for unions
            // Placeholder: emit the 0xaa pattern over the union's full ABI size.
            const target = bin_file.options.target;
            const abi_size = try math.cast(usize, typed_value.ty.abiSize(target));
            try code.writer().writeByteNTimes(0xaa, abi_size);
            return Result{ .appended = {} };
        },
        .Optional => {
            // TODO generateSymbol for optionals
            // Placeholder: emit the 0xaa pattern over the optional's full ABI size.
            const target = bin_file.options.target;
            const abi_size = try math.cast(usize, typed_value.ty.abiSize(target));
            try code.writer().writeByteNTimes(0xaa, abi_size);
            return Result{ .appended = {} };
        },
        .ErrorUnion => {
            const error_ty = typed_value.ty.errorUnionSet();
            const payload_ty = typed_value.ty.errorUnionPayload();
            const is_payload = typed_value.val.errorUnionIsPayload();

            const target = bin_file.options.target;
            const abi_align = typed_value.ty.abiAlignment(target);

            // Error value first, padded out to the error union's ABI alignment.
            {
                const error_val = if (!is_payload) typed_value.val else Value.initTag(.zero);
                const begin = code.items.len;
                switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
                    .ty = error_ty,
                    .val = error_val,
                }, code, debug_output)) {
                    .appended => {},
                    .externally_managed => |external_slice| {
                        code.appendSliceAssumeCapacity(external_slice);
                    },
                    .fail => |em| return Result{ .fail = em },
                }
                const unpadded_end = code.items.len - begin;
                const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align);
                const padding = try math.cast(usize, padded_end - unpadded_end);

                if (padding > 0) {
                    try code.writer().writeByteNTimes(0, padding);
                }
            }

            // Then the payload (if it has any runtime representation), padded likewise.
            if (payload_ty.hasRuntimeBits()) {
                const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.initTag(.undef);
                const begin = code.items.len;
                switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
                    .ty = payload_ty,
                    .val = payload_val,
                }, code, debug_output)) {
                    .appended => {},
                    .externally_managed => |external_slice| {
                        code.appendSliceAssumeCapacity(external_slice);
                    },
                    .fail => |em| return Result{ .fail = em },
                }
                const unpadded_end = code.items.len - begin;
                const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align);
                const padding = try math.cast(usize, padded_end - unpadded_end);

                if (padding > 0) {
                    try code.writer().writeByteNTimes(0, padding);
                }
            }

            return Result{ .appended = {} };
        },
        .ErrorSet => {
            const target = bin_file.options.target;
            switch (typed_value.val.tag()) {
                .@"error" => {
                    // A concrete error is emitted as its module-wide u32 error value.
                    const name = typed_value.val.getError().?;
                    const kv = try bin_file.options.module.?.getErrorValue(name);
                    const endian = target.cpu.arch.endian();
                    try code.writer().writeInt(u32, kv.value, endian);
                },
                else => {
                    try code.writer().writeByteNTimes(0, @intCast(usize, typed_value.ty.abiSize(target)));
                },
            }
            return Result{ .appended = {} };
        },
        else => |t| {
            return Result{
                .fail = try ErrorMsg.create(
                    bin_file.allocator,
                    src_loc,
                    "TODO implement generateSymbol for type '{s}'",
                    .{@tagName(t)},
                ),
            };
        },
    }
}

/// Emits the address of `decl` (pointer, or pointer+length for slices) into
/// `code`, marking the decl alive and registering the relocation with the
/// linker via `getDeclVAddr`.
fn lowerDeclRef(
    bin_file: *link.File,
    parent_atom_index: u32,
    src_loc: Module.SrcLoc,
    typed_value: TypedValue,
    decl: *Module.Decl,
    code: *std.ArrayList(u8),
    debug_output: DebugInfoOutput,
) GenerateSymbolError!Result {
    if (typed_value.ty.isSlice()) {
        // generate ptr
        var buf: Type.SlicePtrFieldTypeBuffer = undefined;
        const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf);
        switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
            .ty = slice_ptr_field_type,
            .val = typed_value.val,
        }, code, debug_output)) {
            .appended => {},
            .externally_managed => |external_slice| {
                code.appendSliceAssumeCapacity(external_slice);
            },
            .fail => |em| return Result{ .fail = em },
        }

        // generate length
        var slice_len: Value.Payload.U64 = .{
            .base = .{ .tag = .int_u64 },
            .data = typed_value.val.sliceLen(),
        };
        switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
            .ty = Type.initTag(.usize),
            .val = Value.initPayload(&slice_len.base),
        }, code, debug_output)) {
            .appended => {},
            .externally_managed => |external_slice| {
                code.appendSliceAssumeCapacity(external_slice);
            },
            .fail => |em| return Result{ .fail = em },
        }

        return Result{ .appended = {} };
    }

    const target = bin_file.options.target;
    const ptr_width = target.cpu.arch.ptrBitWidth();
    const is_fn_body = decl.ty.zigTypeTag() == .Fn;
    if (!is_fn_body and !decl.ty.hasRuntimeBits()) {
        // Zero-bit decl: emit a pattern-filled pointer-sized placeholder.
        try code.writer().writeByteNTimes(0xaa, @divExact(ptr_width, 8));
        return Result{ .appended = {} };
    }

    decl.markAlive();
    const vaddr = try bin_file.getDeclVAddr(decl, parent_atom_index, code.items.len);
    const endian = target.cpu.arch.endian();
    switch (ptr_width) {
        16 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(u16, vaddr), endian),
        32 => mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, vaddr), endian),
        64 => mem.writeInt(u64, try code.addManyAsArray(8), vaddr, endian),
        else => unreachable,
    }

    return Result{ .appended = {} };
}
src/codegen.zig
const uefi = @import("std").os.uefi;
const Guid = uefi.Guid;
const Status = uefi.Status;

/// Random Number Generator protocol
/// Wrapper over the UEFI EFI_RNG_PROTOCOL vtable; the leading underscore
/// fields are the raw firmware function pointers and must stay in this
/// exact order (extern struct — ABI layout matters).
pub const RNGProtocol = extern struct {
    _get_info: extern fn (*const RNGProtocol, *usize, [*]align(8) Guid) Status,
    _get_rng: extern fn (*const RNGProtocol, ?*align(8) const Guid, usize, [*]u8) Status,

    /// Returns information about the random number generation implementation.
    /// `list_size` is in/out; `list` receives the supported algorithm GUIDs.
    pub fn getInfo(self: *const RNGProtocol, list_size: *usize, list: [*]align(8) Guid) Status {
        return self._get_info(self, list_size, list);
    }

    /// Produces and returns an RNG value using either the default or specified RNG algorithm.
    /// Pass `null` for `algo` to use the firmware's default algorithm;
    /// `value_length` bytes are written to `value`.
    pub fn getRNG(self: *const RNGProtocol, algo: ?*align(8) const Guid, value_length: usize, value: [*]u8) Status {
        return self._get_rng(self, algo, value_length, value);
    }

    /// GUID used to locate this protocol via the boot services.
    pub const guid align(8) = Guid{
        .time_low = 0x3152bca5,
        .time_mid = 0xeade,
        .time_high_and_version = 0x433d,
        .clock_seq_high_and_reserved = 0x86,
        .clock_seq_low = 0x2e,
        .node = [_]u8{ 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44 },
    };

    // Algorithm-identifier GUIDs that may be passed to `getRNG` / returned
    // by `getInfo`. Names follow the corresponding EFI_RNG_ALGORITHM_* values.
    pub const algorithm_sp800_90_hash_256 align(8) = Guid{
        .time_low = 0xa7af67cb,
        .time_mid = 0x603b,
        .time_high_and_version = 0x4d42,
        .clock_seq_high_and_reserved = 0xba,
        .clock_seq_low = 0x21,
        .node = [_]u8{ 0x70, 0xbf, 0xb6, 0x29, 0x3f, 0x96 },
    };
    pub const algorithm_sp800_90_hmac_256 align(8) = Guid{
        .time_low = 0xc5149b43,
        .time_mid = 0xae85,
        .time_high_and_version = 0x4f53,
        .clock_seq_high_and_reserved = 0x99,
        .clock_seq_low = 0x82,
        .node = [_]u8{ 0xb9, 0x43, 0x35, 0xd3, 0xa9, 0xe7 },
    };
    pub const algorithm_sp800_90_ctr_256 align(8) = Guid{
        .time_low = 0x44f0de6e,
        .time_mid = 0x4d8c,
        .time_high_and_version = 0x4045,
        .clock_seq_high_and_reserved = 0xa8,
        .clock_seq_low = 0xc7,
        .node = [_]u8{ 0x4d, 0xd1, 0x68, 0x85, 0x6b, 0x9e },
    };
    pub const algorithm_x9_31_3des align(8) = Guid{
        .time_low = 0x63c4785a,
        .time_mid = 0xca34,
        .time_high_and_version = 0x4012,
        .clock_seq_high_and_reserved = 0xa3,
        .clock_seq_low = 0xc8,
        .node = [_]u8{ 0x0b, 0x6a, 0x32, 0x4f, 0x55, 0x46 },
    };
    pub const algorithm_x9_31_aes align(8) = Guid{
        .time_low = 0xacd03321,
        .time_mid = 0x777e,
        .time_high_and_version = 0x4d3d,
        .clock_seq_high_and_reserved = 0xb1,
        .clock_seq_low = 0xc8,
        .node = [_]u8{ 0x20, 0xcf, 0xd8, 0x88, 0x20, 0xc9 },
    };
    /// Raw entropy source (no DRBG post-processing).
    pub const algorithm_raw align(8) = Guid{
        .time_low = 0xe43176d7,
        .time_mid = 0xb6e8,
        .time_high_and_version = 0x4827,
        .clock_seq_high_and_reserved = 0xb7,
        .clock_seq_low = 0x84,
        .node = [_]u8{ 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61 },
    };
};
lib/std/os/uefi/protocols/rng_protocol.zig
const sdl = @import("sdl2");
const std = @import("std");
const log = std.log.scoped(.Texture);

pub const TextureError = error{
    CreationFailure,
};

/// A loaded image: a GPU texture handle plus a CPU-side copy of the raw
/// pixel bytes (and the palette, when the image is palette-indexed).
const Texture = @This();

/// GPU-side SDL texture created from the loaded surface.
handle: sdl.Texture,
/// Heap copy of the surface's raw pixel bytes; owned by this Texture.
pixels: []const u8,
/// Bytes per pixel of the source surface.
stride: u8,
/// Heap copy of the surface palette, present only for indexed (stride == 1) images.
colors: ?[]sdl.Color,
width: usize,
height: usize,

/// Loads `filename` with SDL_image, uploads it as a texture on `render`,
/// and keeps a CPU copy of the pixel data. Caller must release with
/// `deinit` using the same allocator.
/// NOTE(review): the CPU copy assumes the surface has no row padding
/// (pitch == width * BytesPerPixel) — confirm against SDL surface pitch.
pub fn load_file(filename: [:0]const u8, render: sdl.Renderer, allocator: std.mem.Allocator) !Texture {
    const surf = try sdl.image.loadSurface(filename);
    defer surf.destroy();
    const stride = surf.ptr.format.*.BytesPerPixel;
    log.debug("Image stride (Bpp) {d}", .{stride});
    if (stride == 1) {
        // 1 byte per pixel: palette-indexed format; log details for debugging.
        log.debug("Low stride (bpp) {d}, {s}", .{
            surf.ptr.format.*.BitsPerPixel,
            sdl.c.SDL_GetPixelFormatName(surf.ptr.format.*.format),
        });
    }
    // Copy the raw pixel bytes out of the surface before it is destroyed.
    const pct = @ptrCast([*]const u8, surf.ptr.pixels);
    const pctlen = @intCast(usize, surf.ptr.w * surf.ptr.h * @intCast(c_int, stride));
    const pixels = try allocator.dupe(u8, pct[0..pctlen]);
    errdefer allocator.free(pixels);
    const texture = try sdl.createTextureFromSurface(render, surf);
    errdefer texture.destroy();
    // Duplicate the palette (if any) so it outlives the surface.
    var colors: ?[]sdl.Color = null;
    if (surf.ptr.format.*.palette) |palette| {
        const len = @intCast(usize, palette.*.ncolors);
        colors = try allocator.dupe(sdl.Color, @ptrCast([*]sdl.Color, palette.*.colors)[0..len]);
        log.debug("Found palette of {d} colors.", .{len});
    }
    return Texture{
        .width = @intCast(usize, surf.ptr.w),
        .height = @intCast(usize, surf.ptr.h),
        .stride = stride,
        .pixels = pixels,
        .handle = texture,
        .colors = colors,
    };
}

/// Releases the GPU texture and the CPU-side pixel/palette copies.
/// `allocator` must be the one passed to `load_file`.
pub fn deinit(self: Texture, allocator: std.mem.Allocator) void {
    self.handle.destroy();
    allocator.free(self.pixels);
    if (self.colors) |colors| {
        allocator.free(colors);
    }
}

/// Returns the color bytes for the pixel at flat index `i` (row-major).
/// For palette images the palette entry's bytes are returned; otherwise a
/// view into the raw pixel buffer.
/// NOTE(review): always returns 3 bytes in the non-palette branch, so for
/// stride 4 (RGBA) the fourth channel is dropped — confirm this is intended.
pub fn pixel_at_index(self: Texture, i: usize) []const u8 {
    if (self.stride == 1 and self.colors != null) {
        // palette index
        const ci = self.pixels[i];
        return std.mem.asBytes(&self.colors.?[ci]);
    } else {
        // true RGB888/RGBA8888
        const is = i * self.stride;
        return self.pixels[is .. is + 3];
    }
}
src/Texture.zig
const std = @import("std");
const mem = std.mem;
const fmt = std.fmt;
const print = std.debug.print;
const alloc = std.heap.page_allocator;
const ArrayList = std.ArrayList;
const eql = std.ascii.eqlIgnoreCase;
const ascii = std.ascii;

/// Advent-of-Code day 4: split the embedded input into passport records
/// (blank-line separated), count records with all 7 required fields
/// (part 1) and, of those, the ones whose field values also pass the
/// per-field validation rules (part 2).
pub fn main() !void {
    const file = @embedFile("../inputs/day_04");

    var passports = ArrayList([]const u8).init(alloc);
    var valid_passport_count: i32 = 0;
    var valid_passport_count_2: i32 = 0;
    // Space-separated list of the required field names ("cid" is optional).
    const required = "byr iyr eyr hgt hcl ecl pid";

    // Split the input into records on blank lines ("\n\n").
    {
        var cursor: usize = 0;
        for (file) |c, i| {
            // BUGFIX: guard i + 1 so a '\n' as the final byte cannot index
            // one past the end of the embedded file.
            if (c == '\n' and i + 1 < file.len and file[i + 1] == '\n') {
                try passports.append(file[cursor..i]);
                cursor = i;
            }
        }
        // BUGFIX: the last record has no blank line after it and was
        // silently dropped by the original loop.
        if (cursor < file.len) {
            try passports.append(file[cursor..]);
        }
    }

    for (passports.items) |passport| {
        var fields_count: u32 = 0;
        var is_valid = true;
        for (passport) |fields, i| {
            if (fields == ':') {
                // BUGFIX: a ':' within the first three bytes would make
                // `i - 3` underflow; such a colon cannot start a field anyway.
                if (i < 3) continue;
                // Field names are exactly the three characters before the ':'.
                const field = passport[i - 3 .. i];
                // Value runs from just past ':' to the next space/newline (or end).
                const value = blk: {
                    const slice = passport[i + 1 ..];
                    var len: usize = slice.len;
                    for (slice) |c, l| {
                        if (c == ' ' or c == '\n') {
                            len = l;
                            break;
                        }
                    }
                    break :blk slice[0..len];
                };
                if (mem.indexOf(u8, required, field) != null) {
                    fields_count += 1;
                }
                // Part-2 rules: each check only runs while the record is
                // still considered valid, mirroring the original logic.
                if (is_valid and eql(field, "byr")) {
                    const year = try fmt.parseInt(i32, value, 0);
                    is_valid = year >= 1920 and year <= 2002;
                }
                if (is_valid and eql(field, "iyr")) {
                    const year = try fmt.parseInt(i32, value, 0);
                    is_valid = year >= 2010 and year <= 2020;
                }
                if (is_valid and eql(field, "eyr")) {
                    const year = try fmt.parseInt(i32, value, 0);
                    is_valid = year >= 2020 and year <= 2030;
                }
                if (is_valid and eql(field, "hgt")) {
                    is_valid = blk: {
                        // BUGFIX: values shorter than 3 bytes made
                        // `value.len - 2` underflow and index out of bounds.
                        if (value.len < 3) break :blk false;
                        const size_i = value.len - 2;
                        if (value[size_i] == 'c') {
                            const size = try fmt.parseInt(i32, value[0..size_i], 0);
                            break :blk size >= 150 and size <= 193;
                        }
                        if (value[size_i] == 'i') {
                            const size = try fmt.parseInt(i32, value[0..size_i], 0);
                            break :blk size >= 59 and size <= 76;
                        }
                        break :blk false;
                    };
                }
                if (is_valid and eql(field, "hcl")) {
                    // '#' followed by exactly six hex digits.
                    // BUGFIX: the original accepted '#' at any position.
                    if (value.len != 7 or value[0] != '#') {
                        is_valid = false;
                        continue;
                    }
                    for (value[1..]) |c| {
                        // BUGFIX: the original overwrote is_valid on every
                        // iteration, so only the LAST character was checked.
                        switch (c) {
                            '0'...'9', 'a'...'f' => {},
                            else => is_valid = false,
                        }
                    }
                }
                if (is_valid and eql(field, "ecl")) {
                    const v = value;
                    is_valid = eql(v, "amb") or eql(v, "blu") or eql(v, "brn") or
                        eql(v, "gry") or eql(v, "grn") or eql(v, "hzl") or eql(v, "oth");
                }
                if (is_valid and eql(field, "pid")) {
                    if (value.len != 9) {
                        is_valid = false;
                        continue;
                    }
                    for (value) |c| {
                        // BUGFIX: same overwrite bug as "hcl" — every digit
                        // must be checked, not just the last one.
                        switch (c) {
                            '0'...'9' => {},
                            else => is_valid = false,
                        }
                    }
                }
            }
        }
        if (fields_count == 7) {
            valid_passport_count += 1;
            if (is_valid) {
                valid_passport_count_2 += 1;
            }
        }
    }

    print("ANSWER PART 1: {}\n", .{valid_passport_count});
    print("ANSWER PART 2: {}\n", .{valid_passport_count_2});
}
src/04.zig
//--------------------------------------------------------------------------------
// Section: Types (2)
//--------------------------------------------------------------------------------
// NOTE(review): this file appears machine-generated (zigwin32-style binding);
// layout of the extern structs below is COM ABI and must not be reordered.

/// How the DDE target process should be launched.
pub const CreateProcessMethod = enum(i32) {
    CreateProcess = 0,
    CreateProcessAsUser = 1,
    AicLaunchAdminProcess = 2,
};
// C-style aliases for the enum values above.
pub const CpCreateProcess = CreateProcessMethod.CreateProcess;
pub const CpCreateProcessAsUser = CreateProcessMethod.CreateProcessAsUser;
pub const CpAicLaunchAdminProcess = CreateProcessMethod.AicLaunchAdminProcess;

const IID_IDDEInitializer_Value = @import("../../zig.zig").Guid.initString("30dc931f-33fc-4ffd-a168-942258cf3ca4");
pub const IID_IDDEInitializer = &IID_IDDEInitializer_Value;
/// COM interface wrapper; calls are dispatched through the vtable pointer.
pub const IDDEInitializer = extern struct {
    pub const VTable = extern struct {
        // IUnknown slots (QueryInterface/AddRef/Release) come first, per COM.
        base: IUnknown.VTable,
        Initialize: fn (
            self: *const IDDEInitializer,
            fileExtensionOrProtocol: ?[*:0]const u16,
            method: CreateProcessMethod,
            currentDirectory: ?[*:0]const u16,
            execTarget: ?*IShellItem,
            site: ?*IUnknown,
            application: ?[*:0]const u16,
            targetFile: ?[*:0]const u16,
            arguments: ?[*:0]const u16,
            verb: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Mixin providing method wrappers (and the inherited IUnknown methods)
    /// for any type `T` that starts with a `vtable` field of this layout.
    pub fn MethodMixin(comptime T: type) type {
        return struct {
            pub usingnamespace IUnknown.MethodMixin(T);
            // NOTE: method is namespaced with interface name to avoid conflicts for now
            pub fn IDDEInitializer_Initialize(self: *const T, fileExtensionOrProtocol: ?[*:0]const u16, method: CreateProcessMethod, currentDirectory: ?[*:0]const u16, execTarget: ?*IShellItem, site: ?*IUnknown, application: ?[*:0]const u16, targetFile: ?[*:0]const u16, arguments: ?[*:0]const u16, verb: ?[*:0]const u16) callconv(.Inline) HRESULT {
                return @ptrCast(*const IDDEInitializer.VTable, self.vtable).Initialize(@ptrCast(*const IDDEInitializer, self), fileExtensionOrProtocol, method, currentDirectory, execTarget, site, application, targetFile, arguments, verb);
            }
        };
    }
    pub usingnamespace MethodMixin(@This());
};
//--------------------------------------------------------------------------------
// Section: Functions (0)
//--------------------------------------------------------------------------------

//--------------------------------------------------------------------------------
// Section: Unicode Aliases (0)
//--------------------------------------------------------------------------------
const thismodule = @This();
// This module defines no A/W function pairs, so every unicode-mode branch is
// intentionally an empty namespace.
pub usingnamespace switch (@import("../../zig.zig").unicode_mode) {
    .ansi => struct { },
    .wide => struct { },
    .unspecified => if (@import("builtin").is_test) struct { } else struct { },
};
//--------------------------------------------------------------------------------
// Section: Imports (4)
//--------------------------------------------------------------------------------
const HRESULT = @import("../../foundation.zig").HRESULT;
const IShellItem = @import("../../ui/shell.zig").IShellItem;
const IUnknown = @import("../../system/com.zig").IUnknown;
const PWSTR = @import("../../foundation.zig").PWSTR;

// Smoke test: force semantic analysis of every public declaration in this module.
test {
    // The eval branch quota scales with the number of declarations being walked.
    @setEvalBranchQuota(
        @import("std").meta.declarations(@This()).len * 3
    );
    // reference all the pub declarations
    if (!@import("builtin").is_test) return;
    inline for (@import("std").meta.declarations(@This())) |decl| {
        if (decl.is_pub) {
            _ = decl;
        }
    }
}
win32/system/win_rt/shell.zig
const builtin = @import("builtin");
const std = @import("std");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;

// Behavior tests for the @call builtin (old modifier-struct syntax).
// The zig_backend guards skip backends that had not yet implemented the feature.
test "super basic invocations" {
    const foo = struct {
        fn foo() i32 {
            return 1234;
        }
    }.foo;
    try expect(@call(.{}, foo, .{}) == 1234);
    comptime try expect(@call(.{ .modifier = .always_inline }, foo, .{}) == 1234);
    {
        // comptime call without comptime keyword
        const result = @call(.{ .modifier = .compile_time }, foo, .{}) == 1234;
        comptime try expect(result);
    }
}

test "basic invocations" {
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    const foo = struct {
        fn foo() i32 {
            return 1234;
        }
    }.foo;
    try expect(@call(.{}, foo, .{}) == 1234);
    comptime {
        // modifiers that allow comptime calls
        try expect(@call(.{}, foo, .{}) == 1234);
        try expect(@call(.{ .modifier = .no_async }, foo, .{}) == 1234);
        try expect(@call(.{ .modifier = .always_tail }, foo, .{}) == 1234);
        try expect(@call(.{ .modifier = .always_inline }, foo, .{}) == 1234);
    }
    {
        // comptime call without comptime keyword
        const result = @call(.{ .modifier = .compile_time }, foo, .{}) == 1234;
        comptime try expect(result);
    }
    {
        // call of non comptime-known function
        // (stage1 and stage2 differ in whether a function value or pointer is needed)
        var alias_foo = switch (builtin.zig_backend) {
            .stage1 => foo,
            else => &foo,
        };
        try expect(@call(.{ .modifier = .no_async }, alias_foo, .{}) == 1234);
        try expect(@call(.{ .modifier = .never_tail }, alias_foo, .{}) == 1234);
        try expect(@call(.{ .modifier = .never_inline }, alias_foo, .{}) == 1234);
    }
}

// Passing argument tuples to @call, mixing comptime-known and runtime values.
test "tuple parameters" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    const add = struct {
        fn add(a: i32, b: i32) i32 {
            return a + b;
        }
    }.add;
    var a: i32 = 12;
    var b: i32 = 34;
    try expect(@call(.{}, add, .{ a, 34 }) == 46);
    try expect(@call(.{}, add, .{ 12, b }) == 46);
    try expect(@call(.{}, add, .{ a, b }) == 46);
    try expect(@call(.{}, add, .{ 12, 34 }) == 46);
    if (builtin.zig_backend == .stage1) comptime try expect(@call(.{}, add, .{ 12, 34 }) == 46); // TODO
    try expect(comptime @call(.{}, add, .{ 12, 34 }) == 46);
    {
        // Tuples built ahead of time (not inline at the call site).
        const separate_args0 = .{ a, b };
        const separate_args1 = .{ a, 34 };
        const separate_args2 = .{ 12, 34 };
        const separate_args3 = .{ 12, b };
        try expect(@call(.{ .modifier = .always_inline }, add, separate_args0) == 46);
        try expect(@call(.{ .modifier = .always_inline }, add, separate_args1) == 46);
        try expect(@call(.{ .modifier = .always_inline }, add, separate_args2) == 46);
        try expect(@call(.{ .modifier = .always_inline }, add, separate_args3) == 46);
    }
}

test "result location of function call argument through runtime condition and struct init" {
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    const E = enum { a, b };
    const S = struct {
        e: E,
    };
    const namespace = struct {
        fn foo(s: S) !void {
            try expect(s.e == .b);
        }
    };
    // `runtime` being a var forces the `if` below to be evaluated at runtime,
    // exercising result-location propagation into the struct literal.
    var runtime = true;
    try namespace.foo(.{
        .e = if (!runtime) .a else .b,
    });
}

// Stress-tests argument passing with a large arity (sum of 0..40 == 820).
test "function call with 40 arguments" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    const S = struct {
        fn doTheTest(thirty_nine: i32) !void {
            const result = add(
                0,
                1,
                2,
                3,
                4,
                5,
                6,
                7,
                8,
                9,
                10,
                11,
                12,
                13,
                14,
                15,
                16,
                17,
                18,
                19,
                20,
                21,
                22,
                23,
                24,
                25,
                26,
                27,
                28,
                29,
                30,
                31,
                32,
                33,
                34,
                35,
                36,
                37,
                38,
                thirty_nine,
                40,
            );
            try expect(result == 820);
            try expect(thirty_nine == 39);
        }
        fn add(
            a0: i32,
            a1: i32,
            a2: i32,
            a3: i32,
            a4: i32,
            a5: i32,
            a6: i32,
            a7: i32,
            a8: i32,
            a9: i32,
            a10: i32,
            a11: i32,
            a12: i32,
            a13: i32,
            a14: i32,
            a15: i32,
            a16: i32,
            a17: i32,
            a18: i32,
            a19: i32,
            a20: i32,
            a21: i32,
            a22: i32,
            a23: i32,
            a24: i32,
            a25: i32,
            a26: i32,
            a27: i32,
            a28: i32,
            a29: i32,
            a30: i32,
            a31: i32,
            a32: i32,
            a33: i32,
            a34: i32,
            a35: i32,
            a36: i32,
            a37: i32,
            a38: i32,
            a39: i32,
            a40: i32,
        ) i32 {
            return a0 + a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8 + a9 + a10 + a11 + a12 + a13 + a14 + a15 + a16 + a17 + a18 + a19 + a20 + a21 + a22 + a23 + a24 + a25 + a26 + a27 + a28 + a29 + a30 + a31 + a32 + a33 + a34 + a35 + a36 + a37 + a38 + a39 + a40;
        }
    };
    try S.doTheTest(39);
}
test/behavior/call.zig
const Allocator = @import("std").mem.Allocator;
const ArrayList = @import("std").ArrayList;
const Header = @import("header.zig").Header;
const HeaderName = @import("name.zig").HeaderName;
const HeaderType = @import("name.zig").HeaderType;
const HeaderValue = @import("value.zig").HeaderValue;
const std = @import("std");

const AllocationError = error{OutOfMemory};

/// An ordered, append-only collection of HTTP headers backed by a dynamic array.
/// Lookups by name dispatch on HeaderName.type_of: standard headers compare by
/// enum tag, custom headers compare by raw name bytes.
pub const Headers = struct {
    allocator: *Allocator,
    _items: ArrayList(Header),

    pub const Error = error{ InvalidHeaderName, InvalidHeaderValue } || AllocationError;

    /// Create an empty collection; `allocator` backs the internal array.
    pub fn init(allocator: *Allocator) Headers {
        return Headers{ .allocator = allocator, ._items = ArrayList(Header).init(allocator) };
    }

    /// Release the backing storage.
    pub fn deinit(self: *Headers) void {
        self._items.deinit();
    }

    /// Validate `name` and `value`, then store them as a new header entry.
    /// Fails with InvalidHeaderName / InvalidHeaderValue on parse rejection.
    pub fn append(self: *Headers, name: []const u8, value: []const u8) Error!void {
        const checked_name = HeaderName.parse(name) catch return error.InvalidHeaderName;
        const checked_value = HeaderValue.parse(value) catch return error.InvalidHeaderValue;
        try self._items.append(Header{ .name = checked_name, .value = checked_value });
    }

    /// Number of stored headers.
    pub fn len(self: Headers) callconv(.Inline) usize {
        return self._items.items.len;
    }

    /// Live view of the stored headers (invalidated by append).
    pub fn items(self: Headers) callconv(.Inline) []Header {
        return self._items.items;
    }

    /// Return the first header matching `name`, or null when absent.
    pub fn get(self: Headers, name: []const u8) ?Header {
        const kind = HeaderName.type_of(name);
        if (kind == .Custom) {
            return self.get_custom_header(name);
        }
        return self.get_standard_header(kind);
    }

    /// Return every header matching `name`. Caller owns the returned slice.
    pub fn list(self: Headers, name: []const u8) AllocationError![]Header {
        const kind = HeaderName.type_of(name);
        if (kind == .Custom) {
            return self.get_custom_header_list(name);
        }
        return self.get_standard_header_list(kind);
    }

    // Collect all custom headers whose raw name equals `name`.
    fn get_custom_header_list(self: Headers, name: []const u8) callconv(.Inline) AllocationError![]Header {
        var matches = ArrayList(Header).init(self.allocator);
        for (self.items()) |candidate| {
            if (candidate.name.type != .Custom) continue;
            if (!std.mem.eql(u8, candidate.name.raw(), name)) continue;
            try matches.append(candidate);
        }
        return matches.toOwnedSlice();
    }

    // Collect all standard headers with the given type tag.
    fn get_standard_header_list(self: Headers, name: HeaderType) callconv(.Inline) AllocationError![]Header {
        var matches = ArrayList(Header).init(self.allocator);
        for (self.items()) |candidate| {
            if (candidate.name.type != name) continue;
            try matches.append(candidate);
        }
        return matches.toOwnedSlice();
    }

    // First custom header whose raw name equals `name`, or null.
    fn get_custom_header(self: Headers, name: []const u8) callconv(.Inline) ?Header {
        for (self.items()) |candidate| {
            if (candidate.name.type == .Custom and std.mem.eql(u8, candidate.name.raw(), name)) {
                return candidate;
            }
        }
        return null;
    }

    // First standard header with the given type tag, or null.
    fn get_standard_header(self: Headers, name: HeaderType) callconv(.Inline) ?Header {
        for (self.items()) |candidate| {
            if (candidate.name.type == name) {
                return candidate;
            }
        }
        return null;
    }
};

const expect = std.testing.expect;
const expectError = std.testing.expectError;

test "Append - Standard header" {
    var hdrs = Headers.init(std.testing.allocator);
    defer hdrs.deinit();
    try hdrs.append("Content-Length", "42");

    expect(hdrs.len() == 1);
    const entry = hdrs.items()[0];
    expect(entry.name.type == .ContentLength);
    expect(std.mem.eql(u8, entry.value, "42"));
}

test "Append - Custom header" {
    var hdrs = Headers.init(std.testing.allocator);
    defer hdrs.deinit();
    try hdrs.append("Gotta-Go", "Fast");

    expect(hdrs.len() == 1);
    const entry = hdrs.items()[0];
    expect(entry.name.type == .Custom);
    expect(std.mem.eql(u8, entry.name.raw(), "Gotta-Go"));
    expect(std.mem.eql(u8, entry.value, "Fast"));
}

test "Append - Invalid header name" {
    var hdrs = Headers.init(std.testing.allocator);
    defer hdrs.deinit();

    const outcome = hdrs.append("Invalid Header", "yeah");
    expectError(error.InvalidHeaderName, outcome);
}

test "Append - Invalid header value" {
    var hdrs = Headers.init(std.testing.allocator);
    defer hdrs.deinit();

    const outcome = hdrs.append("name", "I\nvalid");
    expectError(error.InvalidHeaderValue, outcome);
}

test "Append - Out of memory" {
    // One-byte arena: the first real allocation must fail.
    var buffer: [1]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const allocator = &fba.allocator;
    var hdrs = Headers.init(allocator);
    defer hdrs.deinit();

    const outcome = hdrs.append("Gotta-Go", "Fast");
    expectError(error.OutOfMemory, outcome);
}

test "Get - Missing header" {
    var hdrs = Headers.init(std.testing.allocator);
    defer hdrs.deinit();

    expect(hdrs.get("Content-Length") == null);
}

test "Get - Standard header" {
    var hdrs = Headers.init(std.testing.allocator);
    defer hdrs.deinit();
    try hdrs.append("Content-Length", "10");

    const found = hdrs.get("Content-Length").?;
    expect(std.mem.eql(u8, found.value, "10"));
}

test "Get - Custom header" {
    var hdrs = Headers.init(std.testing.allocator);
    defer hdrs.deinit();
    try hdrs.append("Gotta-Go", "Fast");

    const found = hdrs.get("Gotta-Go").?;
    expect(std.mem.eql(u8, found.value, "Fast"));
}

test "List - Missing header" {
    var hdrs = Headers.init(std.testing.allocator);
    defer hdrs.deinit();

    const listing = try hdrs.list("Content-Length");
    defer std.testing.allocator.free(listing);
    expect(listing.len == 0);
}

test "List - Standard header" {
    var hdrs = Headers.init(std.testing.allocator);
    defer hdrs.deinit();
    try hdrs.append("Content-Length", "10");
    try hdrs.append("Content-Length", "20");

    const listing = try hdrs.list("Content-Length");
    defer std.testing.allocator.free(listing);
    expect(listing.len == 2);
    expect(std.mem.eql(u8, listing[0].value, "10"));
    expect(std.mem.eql(u8, listing[1].value, "20"));
}

test "List - Custom header" {
    var hdrs = Headers.init(std.testing.allocator);
    defer hdrs.deinit();
    try hdrs.append("Gotta-Go", "Fast");
    try hdrs.append("Gotta-Go", "Very Fast");

    const listing = try hdrs.list("Gotta-Go");
    defer std.testing.allocator.free(listing);
    expect(listing.len == 2);
    expect(std.mem.eql(u8, listing[0].value, "Fast"));
    expect(std.mem.eql(u8, listing[1].value, "Very Fast"));
}
src/headers/headers.zig
const std = @import("std");
const CFO = @import("./CFO.zig");
const debug = std.debug;
const os = std.os;
const io = std.io;

// Currently-installed CFO, consulted by the signal handler to translate
// instruction pointers before dumping a stack trace. Null when not installed.
var the_cfo: ?*CFO = null;

/// Fatal-signal handler: prints a short register/address report to stderr and
/// then dumps a stack trace starting from the faulting frame.
/// NOTE: runs in async-signal context; it deliberately writes directly to
/// stderr and crashes via `catch unreachable` if even that fails.
fn sigHandler(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const anyopaque) callconv(.C) void {
    const s = io.getStdErr().writer();
    const addr = @ptrToInt(info.fields.sigfault.addr);

    // TODO: this reimplements quite a bit of os.debug
    // ideally we should be able to do some pre-processing
    // and then "chain" the default handler
    const desc = switch (sig) {
        os.SIG.SEGV => "Segmentation fault",
        os.SIG.ILL => "Illegal instruction",
        os.SIG.BUS => "Bus error",
        os.SIG.TRAP => "Trap",
        else => "???",
    };
    s.print("\n{s} at address 0x{x}\n\n", .{ desc, addr }) catch unreachable;
    s.print("Do not indulge in stunt driving or horseplay!\n", .{}) catch unreachable;

    // Recover the saved register state from the ucontext passed by the kernel.
    const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
    var ip = @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]);
    const bp = @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]);
    s.print("RAX {x}\n", .{ctx.mcontext.gregs[os.REG.RAX]}) catch unreachable;
    s.print("RCX {x}\n\n", .{ctx.mcontext.gregs[os.REG.RCX]}) catch unreachable;
    if (the_cfo) |c| {
        if (sig == os.SIG.TRAP) {
            // if we trapped, ip will refer to the next instruction after the trap
            ip -= 1;
        }
        // Translate ip through the CFO — presumably mapping a JIT'ed code
        // address to one the unwinder can resolve; see CFO.lookup (TODO confirm).
        ip = c.lookup(ip);
    }
    debug.dumpStackTraceFromBase(bp, ip);
}

/// Install the crash handler for TRAP/SEGV/ILL/BUS and remember `cfo` for
/// address translation. The fault signals use SA_RESETHAND so a second fault
/// falls through to the default action instead of looping; TRAP does not.
pub fn install(cfo: *CFO) void {
    the_cfo = cfo;
    var act = os.Sigaction{
        .handler = .{ .sigaction = sigHandler },
        .mask = os.empty_sigset,
        .flags = (os.SA.SIGINFO | os.SA.RESTART),
    };
    os.sigaction(os.SIG.TRAP, &act, null);
    act.flags |= os.SA.RESETHAND;
    os.sigaction(os.SIG.SEGV, &act, null);
    os.sigaction(os.SIG.ILL, &act, null);
    os.sigaction(os.SIG.BUS, &act, null);
}

/// Forget the installed CFO. Note this does not restore the previous signal
/// handlers; the handler simply stops translating addresses.
pub fn clear() void {
    the_cfo = null;
}
src/OSHA.zig
const mecha = @import("mecha");
const std = @import("std");
const event = std.event;
const fs = std.fs;
const heap = std.heap;
const log = std.log;

const Message = @import("../message.zig").Message;

/// Producer loop: once per second, parse per-core counters out of /proc/stat
/// and publish one `cpu` message per core onto `channel`. Never returns.
pub fn cpu(channel: *event.Channel(Message)) void {
    const loop = event.Loop.instance.?;
    const cwd = fs.cwd();

    // Read buffer for /proc/stat. Sized generously so machines with many
    // threads still fit. (NOTE(review): the original sizing rationale measured
    // /proc/meminfo — ~3.5 KiB on a 16-thread system — but this code reads
    // /proc/stat; the 1 MiB guess is ample either way.)
    var buf: [1024 * 1024]u8 = undefined;
    // NOTE(review): a zero-capacity allocator — the mecha combinators below are
    // presumably allocation-free (`.collect = false` / fixed-arity manyN), so
    // any allocation attempt would fail with OutOfMemory. TODO confirm against
    // the mecha version in use.
    var fba = heap.FixedBufferAllocator.init("");

    while (true) {
        var content: []const u8 = cwd.readFile("/proc/stat", &buf) catch |err| {
            // Best-effort producer: log and retry on the next iteration.
            // (NOTE(review): failure path skips the sleep, so errors retry immediately.)
            log.warn("Failed to read /proc/stat: {}", .{err});
            continue;
        };

        // Skip the aggregate "cpu " line; per-core lines follow.
        if (first_line(&fba.allocator, content)) |res| {
            content = res.rest;
        } else |_| {}

        // Emit one message per successfully parsed "cpuN ..." line; stop at the
        // first line that doesn't match (remaining /proc/stat entries).
        while (line(&fba.allocator, content)) |result| : (content = result.rest) {
            channel.put(.{
                .cpu = .{
                    .id = result.value.id,
                    .user = result.value.cpu.user,
                    .sys = result.value.cpu.sys,
                    .idle = result.value.cpu.idle,
                },
            });
        } else |_| {}

        loop.sleep(std.time.ns_per_s);
    }
}

/// One per-core sample: core index plus its counter columns.
pub const Cpu = struct {
    id: usize,
    cpu: CpuInfo,
};

/// The ten counter columns of a /proc/stat cpu line, in kernel order.
pub const CpuInfo = struct {
    user: usize,
    nice: usize,
    sys: usize,
    idle: usize,
    iowait: usize,
    hardirq: usize,
    softirq: usize,
    steal: usize,
    guest: usize,
    guest_nice: usize,
};

// Matches the aggregate header line: "cpu" followed by ten whitespace-separated
// integers and a newline. Parsed only to be discarded.
const first_line = mecha.combine(.{
    mecha.string("cpu"),
    mecha.manyN(mecha.combine(.{
        mecha.discard(mecha.many(mecha.ascii.char(' '), .{ .collect = false })),
        mecha.int(usize, .{}),
    }), 10, .{}),
    mecha.ascii.char('\n'),
});

// Matches one per-core line: "cpu<id>" then the ten counters, mapped into `Cpu`.
const line = mecha.map(Cpu, mecha.toStruct(Cpu), mecha.combine(.{
    mecha.string("cpu"),
    mecha.int(usize, .{}),
    mecha.map(CpuInfo, mecha.toStruct(CpuInfo), mecha.manyN(mecha.combine(.{
        mecha.discard(mecha.many(mecha.ascii.char(' '), .{ .collect = false })),
        mecha.int(usize, .{}),
    }), 10, .{})),
    mecha.ascii.char('\n'),
}));
src/producer/cpu.zig
const std = @import("std");
const builtin = @import("builtin");
const config = @import("./config.zig");
const os = std.os;
const assert = std.debug.assert;

const is_darwin = builtin.target.os.tag.isDarwin();
const is_windows = builtin.target.os.tag == .windows;

/// Cross-platform clock source providing a suspend-aware monotonic timestamp
/// and a wall-clock timestamp, both in nanoseconds.
pub const Time = struct {
    const Self = @This();

    /// Hardware and/or software bugs can mean that the monotonic clock may regress.
    /// One example (of many): https://bugzilla.redhat.com/show_bug.cgi?id=448449
    /// We crash the process for safety if this ever happens, to protect against infinite loops.
    /// It's better to crash and come back with a valid monotonic clock than get stuck forever.
    monotonic_guard: u64 = 0,

    /// A timestamp to measure elapsed time, meaningful only on the same system, not across reboots.
    /// Always use a monotonic timestamp if the goal is to measure elapsed time.
    /// This clock is not affected by discontinuous jumps in the system time, for example if the
    /// system administrator manually changes the clock.
    pub fn monotonic(self: *Self) u64 {
        const m = blk: {
            // Uses QueryPerformanceCounter() on windows due to it being the highest precision timer
            // available while also accounting for time spent suspended by default:
            // https://docs.microsoft.com/en-us/windows/win32/api/realtimeapiset/nf-realtimeapiset-queryunbiasedinterrupttime#remarks
            if (is_windows) {
                // QPF need not be globally cached either as it ends up being a load from read-only
                // memory mapped to all processed by the kernel called KUSER_SHARED_DATA (See "QpcFrequency")
                // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/ntddk/ns-ntddk-kuser_shared_data
                // https://www.geoffchappell.com/studies/windows/km/ntoskrnl/inc/api/ntexapi_x/kuser_shared_data/index.htm
                const qpc = os.windows.QueryPerformanceCounter();
                const qpf = os.windows.QueryPerformanceFrequency();

                // 10Mhz (1 qpc tick every 100ns) is a common QPF on modern systems.
                // We can optimize towards this by converting to ns via a single multiply.
                // https://github.com/microsoft/STL/blob/785143a0c73f030238ef618890fd4d6ae2b3a3a0/stl/inc/chrono#L694-L701
                const common_qpf = 10_000_000;
                if (qpf == common_qpf) break :blk qpc * (std.time.ns_per_s / common_qpf);

                // Convert qpc to nanos using fixed point to avoid expensive extra divs and overflow.
                const scale = (std.time.ns_per_s << 32) / qpf;
                break :blk @truncate(u64, (@as(u96, qpc) * scale) >> 32);
            }

            // Uses mach_continuous_time() instead of mach_absolute_time() as it counts while suspended.
            // https://developer.apple.com/documentation/kernel/1646199-mach_continuous_time
            // https://opensource.apple.com/source/Libc/Libc-1158.1.2/gen/clock_gettime.c.auto.html
            if (is_darwin) {
                const darwin = struct {
                    const mach_timebase_info_t = os.darwin.mach_timebase_info_data;
                    extern "c" fn mach_timebase_info(info: *mach_timebase_info_t) os.darwin.kern_return_t;
                    extern "c" fn mach_continuous_time() u64;
                };

                // mach_timebase_info() called through libc already does global caching for us
                // https://opensource.apple.com/source/xnu/xnu-7195.81.3/libsyscall/wrappers/mach_timebase_info.c.auto.html
                var info: darwin.mach_timebase_info_t = undefined;
                if (darwin.mach_timebase_info(&info) != 0) @panic("mach_timebase_info() failed");
                const now = darwin.mach_continuous_time();
                // NOTE(review): this returns directly, so on darwin the
                // monotonic_guard regression check below is bypassed — confirm
                // whether that is intentional.
                return (now * info.numer) / info.denom;
            }

            // The true monotonic clock on Linux is not in fact CLOCK_MONOTONIC:
            // CLOCK_MONOTONIC excludes elapsed time while the system is suspended (e.g. VM migration).
            // CLOCK_BOOTTIME is the same as CLOCK_MONOTONIC but includes elapsed time during a suspend.
            // For more detail and why CLOCK_MONOTONIC_RAW is even worse than CLOCK_MONOTONIC,
            // see https://github.com/ziglang/zig/pull/933#discussion_r656021295.
            var ts: os.timespec = undefined;
            os.clock_gettime(os.CLOCK.BOOTTIME, &ts) catch @panic("CLOCK_BOOTTIME required");
            break :blk @intCast(u64, ts.tv_sec) * std.time.ns_per_s + @intCast(u64, ts.tv_nsec);
        };

        // "Oops!...I Did It Again"
        // Crash rather than return a timestamp that went backwards (see monotonic_guard).
        if (m < self.monotonic_guard) @panic("a hardware/kernel bug regressed the monotonic clock");
        self.monotonic_guard = m;
        return m;
    }

    /// A timestamp to measure real (i.e. wall clock) time, meaningful across systems, and reboots.
    /// This clock is affected by discontinuous jumps in the system time.
    pub fn realtime(_: *Self) i64 {
        if (is_windows) {
            const kernel32 = struct {
                extern "kernel32" fn GetSystemTimePreciseAsFileTime(
                    lpFileTime: *os.windows.FILETIME,
                ) callconv(os.windows.WINAPI) void;
            };
            var ft: os.windows.FILETIME = undefined;
            kernel32.GetSystemTimePreciseAsFileTime(&ft);
            const ft64 = (@as(u64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime;

            // FileTime is in units of 100 nanoseconds
            // and uses the NTFS/Windows epoch of 1601-01-01 instead of Unix Epoch 1970-01-01.
            const epoch_adjust = std.time.epoch.windows * (std.time.ns_per_s / 100);
            return (@bitCast(i64, ft64) + epoch_adjust) * 100;
        }

        if (is_darwin) {
            // macos has supported clock_gettime() since 10.12:
            // https://opensource.apple.com/source/Libc/Libc-1158.1.2/gen/clock_gettime.3.auto.html
            // (so darwin intentionally falls through to the POSIX path below)
        }

        var ts: os.timespec = undefined;
        os.clock_gettime(os.CLOCK.REALTIME, &ts) catch unreachable;
        return @as(i64, ts.tv_sec) * std.time.ns_per_s + ts.tv_nsec;
    }

    // No-op hook; kept so Time satisfies the interface expected by callers
    // that drive a tick-based clock (TODO confirm against DeterministicTime).
    pub fn tick(_: *Self) void {}
};
src/time.zig
const std = @import("std");
const TestContext = @import("../../src/test.zig").TestContext;

// Golden tests for the C backend (ctx.c: Zig input vs expected generated C)
// and for C header generation (ctx.h: Zig input vs expected .h output).
// The expected-output strings must match the code generator byte for byte.

// These tests should work with all platforms, but we're using linux_x64 for
// now for consistency. Will be expanded eventually.
const linux_x64 = std.zig.CrossTarget{
    .cpu_arch = .x86_64,
    .os_tag = .linux,
};

pub fn addCases(ctx: *TestContext) !void {
    ctx.c("empty start function", linux_x64,
        \\export fn _start() noreturn {
        \\ unreachable;
        \\}
    ,
        \\zig_noreturn void _start(void) {
        \\ zig_unreachable();
        \\}
        \\
    );
    ctx.h("simple header", linux_x64,
        \\export fn start() void{}
    ,
        \\void start(void);
        \\
    );
    ctx.c("less empty start function", linux_x64,
        \\fn main() noreturn {
        \\ unreachable;
        \\}
        \\
        \\export fn _start() noreturn {
        \\ main();
        \\}
    ,
        \\zig_noreturn void main(void);
        \\
        \\zig_noreturn void _start(void) {
        \\ main();
        \\}
        \\
        \\zig_noreturn void main(void) {
        \\ zig_unreachable();
        \\}
        \\
    );
    // TODO: implement return values
    // TODO: figure out a way to prevent asm constants from being generated
    ctx.c("inline asm", linux_x64,
        \\fn exitGood() noreturn {
        \\ asm volatile ("syscall"
        \\ :
        \\ : [number] "{rax}" (231),
        \\ [arg1] "{rdi}" (0)
        \\ );
        \\ unreachable;
        \\}
        \\
        \\export fn _start() noreturn {
        \\ exitGood();
        \\}
    ,
        \\#include <stddef.h>
        \\
        \\zig_noreturn void exitGood(void);
        \\
        \\const char *const exitGood__anon_0 = "{rax}";
        \\const char *const exitGood__anon_1 = "{rdi}";
        \\const char *const exitGood__anon_2 = "syscall";
        \\
        \\zig_noreturn void _start(void) {
        \\ exitGood();
        \\}
        \\
        \\zig_noreturn void exitGood(void) {
        \\ register size_t rax_constant __asm__("rax") = 231;
        \\ register size_t rdi_constant __asm__("rdi") = 0;
        \\ __asm volatile ("syscall" :: ""(rax_constant), ""(rdi_constant));
        \\ zig_unreachable();
        \\}
        \\
    );
    ctx.c("exit with parameter", linux_x64,
        \\export fn _start() noreturn {
        \\ exit(0);
        \\}
        \\
        \\fn exit(code: usize) noreturn {
        \\ asm volatile ("syscall"
        \\ :
        \\ : [number] "{rax}" (231),
        \\ [arg1] "{rdi}" (code)
        \\ );
        \\ unreachable;
        \\}
        \\
    ,
        \\#include <stddef.h>
        \\
        \\zig_noreturn void exit(size_t arg0);
        \\
        \\const char *const exit__anon_0 = "{rax}";
        \\const char *const exit__anon_1 = "{rdi}";
        \\const char *const exit__anon_2 = "syscall";
        \\
        \\zig_noreturn void _start(void) {
        \\ exit(0);
        \\}
        \\
        \\zig_noreturn void exit(size_t arg0) {
        \\ register size_t rax_constant __asm__("rax") = 231;
        \\ register size_t rdi_constant __asm__("rdi") = arg0;
        \\ __asm volatile ("syscall" :: ""(rax_constant), ""(rdi_constant));
        \\ zig_unreachable();
        \\}
        \\
    );
    ctx.c("exit with u8 parameter", linux_x64,
        \\export fn _start() noreturn {
        \\ exit(0);
        \\}
        \\
        \\fn exit(code: u8) noreturn {
        \\ asm volatile ("syscall"
        \\ :
        \\ : [number] "{rax}" (231),
        \\ [arg1] "{rdi}" (code)
        \\ );
        \\ unreachable;
        \\}
        \\
    ,
        \\#include <stddef.h>
        \\#include <stdint.h>
        \\
        \\zig_noreturn void exit(uint8_t arg0);
        \\
        \\const char *const exit__anon_0 = "{rax}";
        \\const char *const exit__anon_1 = "{rdi}";
        \\const char *const exit__anon_2 = "syscall";
        \\
        \\zig_noreturn void _start(void) {
        \\ exit(0);
        \\}
        \\
        \\zig_noreturn void exit(uint8_t arg0) {
        \\ const size_t __temp_0 = (size_t)arg0;
        \\ register size_t rax_constant __asm__("rax") = 231;
        \\ register size_t rdi_constant __asm__("rdi") = __temp_0;
        \\ __asm volatile ("syscall" :: ""(rax_constant), ""(rdi_constant));
        \\ zig_unreachable();
        \\}
        \\
    );
    ctx.c("exit with u8 arithmetic", linux_x64,
        \\export fn _start() noreturn {
        \\ exitMath(1);
        \\}
        \\
        \\fn exitMath(a: u8) noreturn {
        \\ exit(0 + a - a);
        \\}
        \\
        \\fn exit(code: u8) noreturn {
        \\ asm volatile ("syscall"
        \\ :
        \\ : [number] "{rax}" (231),
        \\ [arg1] "{rdi}" (code)
        \\ );
        \\ unreachable;
        \\}
        \\
    ,
        \\#include <stddef.h>
        \\#include <stdint.h>
        \\
        \\zig_noreturn void exitMath(uint8_t arg0);
        \\zig_noreturn void exit(uint8_t arg0);
        \\
        \\const char *const exit__anon_0 = "{rax}";
        \\const char *const exit__anon_1 = "{rdi}";
        \\const char *const exit__anon_2 = "syscall";
        \\
        \\zig_noreturn void _start(void) {
        \\ exitMath(1);
        \\}
        \\
        \\zig_noreturn void exitMath(uint8_t arg0) {
        \\ const uint8_t __temp_0 = 0 + arg0;
        \\ const uint8_t __temp_1 = __temp_0 - arg0;
        \\ exit(__temp_1);
        \\}
        \\
        \\zig_noreturn void exit(uint8_t arg0) {
        \\ const size_t __temp_0 = (size_t)arg0;
        \\ register size_t rax_constant __asm__("rax") = 231;
        \\ register size_t rdi_constant __asm__("rdi") = __temp_0;
        \\ __asm volatile ("syscall" :: ""(rax_constant), ""(rdi_constant));
        \\ zig_unreachable();
        \\}
        \\
    );
    ctx.c("exit with u8 arithmetic inverted", linux_x64,
        \\export fn _start() noreturn {
        \\ exitMath(1);
        \\}
        \\
        \\fn exitMath(a: u8) noreturn {
        \\ exit(a + 0 - a);
        \\}
        \\
        \\fn exit(code: u8) noreturn {
        \\ asm volatile ("syscall"
        \\ :
        \\ : [number] "{rax}" (231),
        \\ [arg1] "{rdi}" (code)
        \\ );
        \\ unreachable;
        \\}
        \\
    ,
        \\#include <stddef.h>
        \\#include <stdint.h>
        \\
        \\zig_noreturn void exitMath(uint8_t arg0);
        \\zig_noreturn void exit(uint8_t arg0);
        \\
        \\const char *const exit__anon_0 = "{rax}";
        \\const char *const exit__anon_1 = "{rdi}";
        \\const char *const exit__anon_2 = "syscall";
        \\
        \\zig_noreturn void _start(void) {
        \\ exitMath(1);
        \\}
        \\
        \\zig_noreturn void exitMath(uint8_t arg0) {
        \\ const uint8_t __temp_0 = arg0 + 0;
        \\ const uint8_t __temp_1 = __temp_0 - arg0;
        \\ exit(__temp_1);
        \\}
        \\
        \\zig_noreturn void exit(uint8_t arg0) {
        \\ const size_t __temp_0 = (size_t)arg0;
        \\ register size_t rax_constant __asm__("rax") = 231;
        \\ register size_t rdi_constant __asm__("rdi") = __temp_0;
        \\ __asm volatile ("syscall" :: ""(rax_constant), ""(rdi_constant));
        \\ zig_unreachable();
        \\}
        \\
    );
    ctx.h("header with single param function", linux_x64,
        \\export fn start(a: u8) void{}
    ,
        \\#include <stdint.h>
        \\
        \\void start(uint8_t arg0);
        \\
    );
    ctx.h("header with multiple param function", linux_x64,
        \\export fn start(a: u8, b: u8, c: u8) void{}
    ,
        \\#include <stdint.h>
        \\
        \\void start(uint8_t arg0, uint8_t arg1, uint8_t arg2);
        \\
    );
    ctx.h("header with u32 param function", linux_x64,
        \\export fn start(a: u32) void{}
    ,
        \\#include <stdint.h>
        \\
        \\void start(uint32_t arg0);
        \\
    );
    ctx.h("header with usize param function", linux_x64,
        \\export fn start(a: usize) void{}
    ,
        \\#include <stddef.h>
        \\
        \\void start(size_t arg0);
        \\
    );
    ctx.h("header with bool param function", linux_x64,
        \\export fn start(a: bool) void{}
    ,
        \\void start(bool arg0);
        \\
    );
    ctx.h("header with noreturn function", linux_x64,
        \\export fn start() noreturn {
        \\ unreachable;
        \\}
    ,
        \\zig_noreturn void start(void);
        \\
    );
    ctx.h("header with multiple functions", linux_x64,
        \\export fn a() void{}
        \\export fn b() void{}
        \\export fn c() void{}
    ,
        \\void a(void);
        \\void b(void);
        \\void c(void);
        \\
    );
    ctx.h("header with multiple includes", linux_x64,
        \\export fn start(a: u32, b: usize) void{}
    ,
        \\#include <stddef.h>
        \\#include <stdint.h>
        \\
        \\void start(uint32_t arg0, size_t arg1);
        \\
    );
}
test/stage2/cbe.zig
const std = @import("std");
const zlm = @import("zlm");
const graphics = @import("didot-graphics");
const Mesh = graphics.Mesh;

const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;

const OBJError = error {};

// One face-vertex reference: indices (0-based, already adjusted) into the
// position / texture-coordinate / normal arrays parsed from the file.
const Element = struct { posIdx: usize, texIdx: usize, normalIdx: usize };

/// Parse a Wavefront OBJ file at `path` into a Mesh.
/// Supported statements: v, vt, vn, f (other lines are ignored).
/// Output layout per face-vertex: position(3) + normal(3) + texcoord(2) floats.
/// NOTE(review): the vertex buffer `final` is freed via `defer` before this
/// function returns — safe only if Mesh.create copies/uploads the data
/// synchronously; TODO confirm against didot-graphics.
pub fn read_obj(allocator: *Allocator, path: []const u8) !Mesh {
    const cwd = std.fs.cwd();
    const file = try cwd.openFile(path, .{ .read = true, .write = false });
    // NOTE(review): `.reader()` is taken from a temporary bufferedReader;
    // verify the buffered state outlives its use in this Zig version.
    const reader = std.io.bufferedReader(file.reader()).reader();

    var vertices = ArrayList(zlm.Vec3).init(allocator);
    var normals = ArrayList(zlm.Vec3).init(allocator);
    var texCoords = ArrayList(zlm.Vec2).init(allocator);
    var elements = ArrayList(Element).init(allocator);
    defer vertices.deinit();
    defer elements.deinit();
    defer normals.deinit();
    defer texCoords.deinit();

    // Slurp the whole file, then process it line by line.
    const text = try reader.readAllAlloc(allocator, std.math.maxInt(u64));
    defer allocator.free(text);
    var linesSplit = std.mem.split(text, "\n");

    while (true) {
        const line = if (linesSplit.next()) |s| s else break;
        var split = std.mem.split(line, " ");
        const command = split.next().?;
        if (std.mem.eql(u8, command, "v")) { // vertex (position)
            const xStr = split.next().?;
            const yStr = split.next().?;
            const zStr = split.next().?;
            // Optional 4th component (w) is ignored:
            //var wStr = "1.0";
            //if (split.next()) |w| {
            //    wStr = w;
            //}
            const x = try std.fmt.parseFloat(f32, xStr);
            const y = try std.fmt.parseFloat(f32, yStr);
            const z = try std.fmt.parseFloat(f32, zStr);
            try vertices.append(zlm.Vec3.new(x, y, z));
        } else if (std.mem.eql(u8, command, "vt")) { // vertex (texture coordinate)
            const uStr = split.next().?;
            const vStr = split.next().?;
            //const wStr = split.next().?;
            const u = try std.fmt.parseFloat(f32, uStr);
            const v = try std.fmt.parseFloat(f32, vStr);
            //const w = try std.fmt.parseFloat(f32, wStr);
            try texCoords.append(zlm.Vec2.new(u, v));
        } else if (std.mem.eql(u8, command, "vn")) { // vertex (normal)
            const xStr = split.next().?;
            const yStr = split.next().?;
            const zStr = split.next().?;
            const x = try std.fmt.parseFloat(f32, xStr);
            const y = try std.fmt.parseFloat(f32, yStr);
            const z = try std.fmt.parseFloat(f32, zStr);
            try normals.append(zlm.Vec3.new(x, y, z));
        } else if (std.mem.eql(u8, command, "f")) { // face
            // Each face entry is "pos/tex/normal"; tex and normal may be empty.
            while (true) {
                if (split.next()) |vertex| {
                    var faceSplit = std.mem.split(vertex, "/");
                    var posIdx = try std.fmt.parseInt(i32, faceSplit.next().?, 10);
                    const texIdxStr = faceSplit.next().?;
                    var texIdx = if (texIdxStr.len == 0) 0 else try std.fmt.parseInt(i32, texIdxStr, 10);
                    const normalIdxStr = faceSplit.next();
                    var normalIdx = if (normalIdxStr) |str| try std.fmt.parseInt(i32, str, 10) else 0;
                    // OBJ indices are 1-based; anything < 1 (missing or negative
                    // relative indices) is clamped to the first entry for now.
                    if (normalIdx < 1) {
                        normalIdx = 1; // TODO
                    }
                    if (texIdx < 1) {
                        texIdx = 1; // TODO
                    }
                    if (posIdx < 1) {
                        posIdx = 1; // TODO
                    }
                    try elements.append(.{
                        .posIdx = @intCast(usize, posIdx-1),
                        .texIdx = @intCast(usize, texIdx-1),
                        .normalIdx = @intCast(usize, normalIdx-1),
                    });
                } else {
                    break;
                }
            }
        } else {
            // Unknown/unsupported statement — silently skipped.
            //std.debug.warn("Unknown OBJ command: {}\n", .{command});
        }
    }

    // Flatten into an interleaved vertex buffer: 8 floats per face-vertex.
    var final = try allocator.alloc(f32, elements.items.len*8);
    defer allocator.free(final);
    var i: usize = 0;
    for (elements.items) |f| {
        const v = vertices.items[f.posIdx];
        // Files without vt/vn lines get zeroed texcoords/normals.
        const t = if (texCoords.items.len == 0) zlm.Vec2.zero else texCoords.items[f.texIdx];
        const n = if (normals.items.len == 0) zlm.Vec3.zero else normals.items[f.normalIdx];
        // position
        final[i] = v.x;
        final[i+1] = v.y;
        final[i+2] = v.z;
        // normal
        final[i+3] = n.x;
        final[i+4] = n.y;
        final[i+5] = n.z;
        // texture coordinate
        final[i+6] = t.x;
        final[i+7] = t.y;
        i = i + 8;
    }
    return Mesh.create(final, null); // TODO simplify
}
didot-models/obj.zig
const std = @import("std");
usingnamespace @import("parse_helpers.zig");
usingnamespace @import("../winmd.zig");
const mem = @This();
const meta = std.meta;
const trait = meta.trait;

/// DatabaseFile is the core of how TypeReader traverses the winmd file.
/// It iterates over the raw bytes and stores meta info about table locations
/// as well as other meta details like the strings, blobs, and guids heaps.
pub const DatabaseFile = struct {
    const Self = @This();

    // Raw winmd file contents; all offsets below index into this slice.
    bytes: []const u8,
    // Byte offsets (into `bytes`) of the #Blob, #GUID and #Strings heaps.
    blobs: u32 = 0,
    guids: u32 = 0,
    strings: u32 = 0,
    // Location/shape info for each table this projection cares about,
    // indexed by TableIndex.
    tables: [16]TableData = [1]TableData{TableData{}} ** 16,

    /// This is the recommended entry point of creating a DatabaseFile.
    /// fromBytes validates the bytes passed in as a valid winmd (PE + CLI
    /// metadata) file and parses them into the meta data fields above.
    /// NOTE: the statement order below is load-bearing — tables appear in the
    /// #~ stream in ascending table-id order, and column widths depend on row
    /// counts read earlier. Do not reorder.
    pub fn fromBytes(bytes: []const u8) !Self {
        var self = DatabaseFile{ .bytes = bytes };

        // --- PE headers: locate the CLI (COM descriptor) data directory ---
        const dos = viewAs(ImageDosHeader, self.bytes, 0);
        if (dos.signature != IMAGE_DOS_SIGNATURE) return error.InvalidDosHeader;
        const pe = viewAs(ImageNtHeader, self.bytes, dos.lfanew);
        var com_virtual_address: u32 = undefined;
        var sections: []const ImageSectionHeader = undefined;
        // NOTE(review): `stdO` is not declared in this file — presumably a
        // size helper provided via one of the `usingnamespace` imports
        // (packed-struct sizes may differ from @sizeOf). Confirm.
        switch (pe.optional_header.magic) {
            MAGIC_PE32 => {
                var optional_header = pe.optional_header;
                com_virtual_address = optional_header.data_directory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].virtual_address;
                const file_header = pe.file_header;
                sections = viewAsSliceOf(ImageSectionHeader, self.bytes, dos.lfanew + stdO.sizeOf(ImageNtHeader), file_header.number_of_sections);
            },
            MAGIC_PE32PLUS => {
                var pe_plus = viewAs(ImageNtHeaderPlus, self.bytes, dos.lfanew);
                com_virtual_address = pe_plus.optional_header.data_directory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].virtual_address;
                const file_header = pe_plus.file_header;
                sections = viewAsSliceOf(ImageSectionHeader, self.bytes, dos.lfanew + stdO.sizeOf(ImageNtHeaderPlus), file_header.number_of_sections);
            },
            else => {
                return error.InvalidMagic;
            },
        }

        // --- CLI (COR) header and metadata root ---
        const s_rva = try sectionFromRva(sections, com_virtual_address);
        const cli = viewAs(ImageCorHeader, self.bytes, offsetFromRva(s_rva, com_virtual_address));
        if (cli.cb != stdO.sizeOf(ImageCorHeader)) {
            return error.InvalidImageCorHeader;
        }
        var cli_offset = offsetFromRva(try sectionFromRva(sections, cli.meta_data.virtual_address), cli.meta_data.virtual_address);
        if (copyAs(u32, self.bytes, cli_offset) != STORAGE_MAGIC_SIG) {
            return error.InvalidStorageMagicSig;
        }

        // --- Stream directory: find #Strings, #Blob, #GUID and #~ ---
        // Stream count is a u16 at version_length + 18; entries start at +20.
        const version_length = copyAs(u32, self.bytes, cli_offset + 12);
        var view = cli_offset + version_length + 20;
        // tables_data = { offset of the #~ (tables) stream, its size }.
        var tables_data = [2]u32{ 0, 0 };
        var i: u16 = 0;
        while (i < copyAs(u16, self.bytes, cli_offset + version_length + 18)) : (i += 1) {
            const stream_offset = copyAs(u32, self.bytes, view);
            const stream_size = copyAs(u32, self.bytes, view + 4);
            const stream_name = viewAsStr(self.bytes, view + 8);
            if (std.mem.eql(u8, stream_name, "#Strings")) {
                self.strings = cli_offset + stream_offset;
            } else if (std.mem.eql(u8, stream_name, "#Blob")) {
                self.blobs = cli_offset + stream_offset;
            } else if (std.mem.eql(u8, stream_name, "#GUID")) {
                self.guids = cli_offset + stream_offset;
            } else if (std.mem.eql(u8, stream_name, "#~")) {
                tables_data = [2]u32{ cli_offset + stream_offset, stream_size };
            } else if (std.mem.eql(u8, stream_name, "#US")) {} else {
                return error.InvalidStreamName;
            }
            // Stream name is nul-terminated and padded to a 4-byte boundary;
            // `padding` covers the terminator plus alignment (always 1..4).
            var padding = 4 - stream_name.len % 4;
            if (padding == 0) {
                padding = 4;
            }
            view += @intCast(u32, (8 + stream_name.len + padding));
        }

        // --- #~ stream header: heap index widths and table presence bits ---
        const heap_sizes = self.bytes[tables_data[0] + 6];
        const string_index_size: u32 = if ((heap_sizes & 1) == 1) 4 else 2;
        const guid_index_size: u32 = if ((heap_sizes >> 1 & 1) == 1) 4 else 2;
        const blob_index_size: u32 = if ((heap_sizes >> 2 & 1) == 1) 4 else 2;
        const valid_bits = copyAs(u64, self.bytes, tables_data[0] + 8);
        view = tables_data[0] + 24;

        // some tables are not needed for our projection, but these are still
        // needed to help determine size offsets
        var unused_empty = TableData{};
        var unused_assembly = TableData{};
        var unused_assembly_os = TableData{};
        var unused_assembly_processor = TableData{};
        var unused_assembly_ref = TableData{};
        var unused_assembly_ref_os = TableData{};
        var unused_assembly_ref_processor = TableData{};
        var unused_class_layout = TableData{};
        var unused_decl_security = TableData{};
        var unused_event = TableData{};
        var unused_event_map = TableData{};
        var unused_exported_type = TableData{};
        var unused_field_layout = TableData{};
        var unused_field_marshal = TableData{};
        var unused_field_rva = TableData{};
        var unused_file = TableData{};
        var unused_generic_param_constraint = TableData{};
        var unused_impl_map = TableData{};
        var unused_manifest_resource = TableData{};
        var unused_method_impl = TableData{};
        var unused_method_semantics = TableData{};
        var unused_method_spec = TableData{};
        var unused_module = TableData{};
        var unused_module_ref = TableData{};
        var unused_nested_class = TableData{};
        var unused_property = TableData{};
        var unused_property_map = TableData{};
        var unused_standalone_sig = TableData{};

        // --- Row counts: one u32 per table whose bit is set in valid_bits,
        // in ascending ECMA-335 table-id order ---
        i = 0;
        while (i < 64) : (i += 1) {
            if ((valid_bits >> @intCast(u6, i) & 1) == 0) {
                continue;
            }
            var row_count = copyAs(u32, self.bytes, view);
            view += 4;
            switch (i) {
                0x00 => self.tables[@enumToInt(TableIndex.Module)].row_count = row_count,
                0x01 => self.tables[@enumToInt(TableIndex.TypeRef)].row_count = row_count,
                0x02 => self.tables[@enumToInt(TableIndex.TypeDef)].row_count = row_count,
                0x04 => self.tables[@enumToInt(TableIndex.Field)].row_count = row_count,
                0x06 => self.tables[@enumToInt(TableIndex.MethodDef)].row_count = row_count,
                0x08 => self.tables[@enumToInt(TableIndex.Param)].row_count = row_count,
                0x09 => self.tables[@enumToInt(TableIndex.InterfaceImpl)].row_count = row_count,
                0x0a => self.tables[@enumToInt(TableIndex.MemberRef)].row_count = row_count,
                0x0b => self.tables[@enumToInt(TableIndex.Constant)].row_count = row_count,
                0x0c => self.tables[@enumToInt(TableIndex.CustomAttribute)].row_count = row_count,
                0x0d => unused_field_marshal.row_count = row_count,
                0x0e => unused_decl_security.row_count = row_count,
                0x0f => unused_class_layout.row_count = row_count,
                0x10 => unused_field_layout.row_count = row_count,
                0x11 => unused_standalone_sig.row_count = row_count,
                0x12 => unused_event_map.row_count = row_count,
                0x14 => unused_event.row_count = row_count,
                0x15 => unused_property_map.row_count = row_count,
                0x17 => unused_property.row_count = row_count,
                0x18 => unused_method_semantics.row_count = row_count,
                0x19 => unused_method_impl.row_count = row_count,
                0x1a => self.tables[@enumToInt(TableIndex.ModuleRef)].row_count = row_count,
                0x1b => self.tables[@enumToInt(TableIndex.TypeSpec)].row_count = row_count,
                0x1c => self.tables[@enumToInt(TableIndex.ImplMap)].row_count = row_count,
                0x1d => unused_field_rva.row_count = row_count,
                0x20 => unused_assembly.row_count = row_count,
                0x21 => unused_assembly_processor.row_count = row_count,
                0x22 => unused_assembly_os.row_count = row_count,
                0x23 => self.tables[@enumToInt(TableIndex.AssemblyRef)].row_count = row_count,
                0x24 => unused_assembly_ref_processor.row_count = row_count,
                0x25 => unused_assembly_ref_os.row_count = row_count,
                0x26 => unused_file.row_count = row_count,
                0x27 => unused_exported_type.row_count = row_count,
                0x28 => unused_manifest_resource.row_count = row_count,
                0x29 => self.tables[@enumToInt(TableIndex.NestedClass)].row_count = row_count,
                0x2a => self.tables[@enumToInt(TableIndex.GenericParam)].row_count = row_count,
                0x2b => unused_method_spec.row_count = row_count,
                0x2c => unused_generic_param_constraint.row_count = row_count,
                else => unreachable,
            }
        }

        // define table layouts
        // Coded (composite) index widths: 2 or 4 bytes depending on the row
        // counts of every table the coded index can point into.
        var type_def_or_ref = compositeIndexSize(&[_]TableData{
            self.tables[@enumToInt(TableIndex.TypeDef)],
            self.tables[@enumToInt(TableIndex.TypeRef)],
            self.tables[@enumToInt(TableIndex.TypeSpec)],
        });
        var has_constant = compositeIndexSize(&[_]TableData{
            self.tables[@enumToInt(TableIndex.Field)],
            self.tables[@enumToInt(TableIndex.Param)],
            unused_property,
        });
        var has_custom_attribute = compositeIndexSize(&[_]TableData{
            self.tables[@enumToInt(TableIndex.MethodDef)],
            self.tables[@enumToInt(TableIndex.Field)],
            self.tables[@enumToInt(TableIndex.TypeRef)],
            self.tables[@enumToInt(TableIndex.TypeDef)],
            self.tables[@enumToInt(TableIndex.Param)],
            self.tables[@enumToInt(TableIndex.InterfaceImpl)],
            self.tables[@enumToInt(TableIndex.MemberRef)],
            unused_module,
            unused_property,
            unused_event,
            unused_standalone_sig,
            self.tables[@enumToInt(TableIndex.ModuleRef)],
            self.tables[@enumToInt(TableIndex.TypeSpec)],
            unused_assembly,
            self.tables[@enumToInt(TableIndex.AssemblyRef)],
            unused_file,
            unused_exported_type,
            unused_manifest_resource,
            self.tables[@enumToInt(TableIndex.GenericParam)],
            unused_generic_param_constraint,
            unused_method_spec,
        });
        var has_field_marshal = compositeIndexSize(&[_]TableData{
            self.tables[@enumToInt(TableIndex.Field)],
            self.tables[@enumToInt(TableIndex.Param)],
        });
        var has_decl_security = compositeIndexSize(&[_]TableData{
            self.tables[@enumToInt(TableIndex.TypeDef)],
            self.tables[@enumToInt(TableIndex.MethodDef)],
            unused_assembly,
        });
        var member_ref_parent = compositeIndexSize(&[_]TableData{
            self.tables[@enumToInt(TableIndex.TypeDef)],
            self.tables[@enumToInt(TableIndex.TypeRef)],
            self.tables[@enumToInt(TableIndex.ModuleRef)],
            self.tables[@enumToInt(TableIndex.MethodDef)],
            self.tables[@enumToInt(TableIndex.TypeSpec)],
        });
        var has_semantics = compositeIndexSize(&[_]TableData{ unused_event, unused_property });
        var method_def_or_ref = compositeIndexSize(&[_]TableData{
            self.tables[@enumToInt(TableIndex.MethodDef)],
            self.tables[@enumToInt(TableIndex.MemberRef)],
        });
        var member_forwarded = compositeIndexSize(&[_]TableData{
            self.tables[@enumToInt(TableIndex.Field)],
            self.tables[@enumToInt(TableIndex.MethodDef)],
        });
        var implementation = compositeIndexSize(&[_]TableData{
            unused_file,
            self.tables[@enumToInt(TableIndex.AssemblyRef)],
            unused_exported_type,
        });
        var custom_attribute_type = compositeIndexSize(&[_]TableData{
            self.tables[@enumToInt(TableIndex.MethodDef)],
            self.tables[@enumToInt(TableIndex.MemberRef)],
            unused_empty,
            unused_empty,
            unused_empty,
        });
        var resolution_scope = compositeIndexSize(&[_]TableData{
            self.tables[@enumToInt(TableIndex.Module)],
            self.tables[@enumToInt(TableIndex.ModuleRef)],
            self.tables[@enumToInt(TableIndex.AssemblyRef)],
            self.tables[@enumToInt(TableIndex.TypeRef)],
        });
        var type_or_method_def = compositeIndexSize(&[_]TableData{
            self.tables[@enumToInt(TableIndex.TypeDef)],
            self.tables[@enumToInt(TableIndex.MethodDef)],
        });

        // set columns of various tables
        unused_assembly.setColumns(
            4,
            8,
            4,
            blob_index_size,
            string_index_size,
            string_index_size,
        );
        unused_assembly_os.setColumns(4, 4, 4, 0, 0, 0);
        unused_assembly_processor.setColumns(4, 0, 0, 0, 0, 0);
        self.tables[@enumToInt(TableIndex.AssemblyRef)].setColumns(
            8,
            4,
            blob_index_size,
            string_index_size,
            string_index_size,
            blob_index_size,
        );
        unused_assembly_ref_os.setColumns(
            4,
            4,
            4,
            self.tables[@enumToInt(TableIndex.AssemblyRef)].indexSize(),
            0,
            0,
        );
        unused_assembly_ref_processor.setColumns(
            4,
            self.tables[@enumToInt(TableIndex.AssemblyRef)].indexSize(),
            0,
            0,
            0,
            0,
        );
        unused_class_layout.setColumns(
            2,
            4,
            self.tables[@enumToInt(TableIndex.TypeDef)].indexSize(),
            0,
            0,
            0,
        );
        self.tables[@enumToInt(TableIndex.Constant)].setColumns(
            2,
            has_constant,
            blob_index_size,
            0,
            0,
            0,
        );
        self.tables[@enumToInt(TableIndex.CustomAttribute)].setColumns(
            has_custom_attribute,
            custom_attribute_type,
            blob_index_size,
            0,
            0,
            0,
        );
        unused_decl_security.setColumns(2, has_decl_security, blob_index_size, 0, 0, 0);
        // indexSize() only depends on row_count, so referencing unused_event
        // here before its own setColumns call is fine.
        unused_event_map.setColumns(
            self.tables[@enumToInt(TableIndex.TypeDef)].indexSize(),
            unused_event.indexSize(),
            0,
            0,
            0,
            0,
        );
        unused_event.setColumns(2, string_index_size, type_def_or_ref, 0, 0, 0);
        unused_exported_type.setColumns(
            4,
            4,
            string_index_size,
            string_index_size,
            implementation,
            0,
        );
        self.tables[@enumToInt(TableIndex.Field)].setColumns(
            2,
            string_index_size,
            blob_index_size,
            0,
            0,
            0,
        );
        unused_field_layout.setColumns(
            4,
            self.tables[@enumToInt(TableIndex.Field)].indexSize(),
            0,
            0,
            0,
            0,
        );
        unused_field_marshal.setColumns(has_field_marshal, blob_index_size, 0, 0, 0, 0);
        unused_field_rva.setColumns(
            4,
            self.tables[@enumToInt(TableIndex.Field)].indexSize(),
            0,
            0,
            0,
            0,
        );
        unused_file.setColumns(4, string_index_size, blob_index_size, 0, 0, 0);
        self.tables[@enumToInt(TableIndex.GenericParam)].setColumns(
            2,
            2,
            type_or_method_def,
            string_index_size,
            0,
            0,
        );
        unused_generic_param_constraint.setColumns(
            self.tables[@enumToInt(TableIndex.GenericParam)].indexSize(),
            type_def_or_ref,
            0,
            0,
            0,
            0,
        );
        unused_impl_map.setColumns(
            2,
            member_forwarded,
            string_index_size,
            unused_module_ref.indexSize(),
            0,
            0,
        );
        self.tables[@enumToInt(TableIndex.ImplMap)].setColumns(
            2,
            member_forwarded,
            string_index_size,
            self.tables[@enumToInt(TableIndex.ModuleRef)].indexSize(),
            0,
            0,
        );
        self.tables[@enumToInt(TableIndex.InterfaceImpl)].setColumns(
            self.tables[@enumToInt(TableIndex.TypeDef)].indexSize(),
            type_def_or_ref,
            0,
            0,
            0,
            0,
        );
        unused_manifest_resource.setColumns(4, 4, string_index_size, implementation, 0, 0);
        self.tables[@enumToInt(TableIndex.MemberRef)].setColumns(
            member_ref_parent,
            string_index_size,
            blob_index_size,
            0,
            0,
            0,
        );
        self.tables[@enumToInt(TableIndex.MethodDef)].setColumns(
            4,
            2,
            2,
            string_index_size,
            blob_index_size,
            self.tables[@enumToInt(TableIndex.Param)].indexSize(),
        );
        unused_method_impl.setColumns(
            self.tables[@enumToInt(TableIndex.TypeDef)].indexSize(),
            method_def_or_ref,
            method_def_or_ref,
            0,
            0,
            0,
        );
        unused_method_semantics.setColumns(
            2,
            self.tables[@enumToInt(TableIndex.MethodDef)].indexSize(),
            has_semantics,
            0,
            0,
            0,
        );
        unused_method_spec.setColumns(method_def_or_ref, blob_index_size, 0, 0, 0, 0);
        self.tables[@enumToInt(TableIndex.Module)].setColumns(
            2,
            string_index_size,
            guid_index_size,
            guid_index_size,
            guid_index_size,
            0,
        );
        self.tables[@enumToInt(TableIndex.ModuleRef)].setColumns(string_index_size, 0, 0, 0, 0, 0);
        self.tables[@enumToInt(TableIndex.NestedClass)].setColumns(
            self.tables[@enumToInt(TableIndex.TypeDef)].indexSize(),
            self.tables[@enumToInt(TableIndex.TypeDef)].indexSize(),
            0,
            0,
            0,
            0,
        );
        self.tables[@enumToInt(TableIndex.Param)].setColumns(2, 2, string_index_size, 0, 0, 0);
        unused_property.setColumns(2, string_index_size, blob_index_size, 0, 0, 0);
        unused_property_map.setColumns(
            self.tables[@enumToInt(TableIndex.TypeDef)].indexSize(),
            unused_property.indexSize(),
            0,
            0,
            0,
            0,
        );
        unused_standalone_sig.setColumns(blob_index_size, 0, 0, 0, 0, 0);
        self.tables[@enumToInt(TableIndex.TypeDef)].setColumns(
            4,
            string_index_size,
            string_index_size,
            type_def_or_ref,
            self.tables[@enumToInt(TableIndex.Field)].indexSize(),
            self.tables[@enumToInt(TableIndex.MethodDef)].indexSize(),
        );
        self.tables[@enumToInt(TableIndex.TypeRef)].setColumns(
            resolution_scope,
            string_index_size,
            string_index_size,
            0,
            0,
            0,
        );
        self.tables[@enumToInt(TableIndex.TypeSpec)].setColumns(blob_index_size, 0, 0, 0, 0, 0);

        // set data of tables
        // setData advances `view` past each table's rows, so calls must stay
        // in ascending table-id order — including the unused tables.
        self.tables[@enumToInt(TableIndex.Module)].setData(&view);
        self.tables[@enumToInt(TableIndex.TypeRef)].setData(&view);
        self.tables[@enumToInt(TableIndex.TypeDef)].setData(&view);
        self.tables[@enumToInt(TableIndex.Field)].setData(&view);
        self.tables[@enumToInt(TableIndex.MethodDef)].setData(&view);
        self.tables[@enumToInt(TableIndex.Param)].setData(&view);
        self.tables[@enumToInt(TableIndex.InterfaceImpl)].setData(&view);
        self.tables[@enumToInt(TableIndex.MemberRef)].setData(&view);
        self.tables[@enumToInt(TableIndex.Constant)].setData(&view);
        self.tables[@enumToInt(TableIndex.CustomAttribute)].setData(&view);
        unused_field_marshal.setData(&view);
        unused_decl_security.setData(&view);
        unused_class_layout.setData(&view);
        unused_field_layout.setData(&view);
        unused_standalone_sig.setData(&view);
        unused_event_map.setData(&view);
        unused_event.setData(&view);
        unused_property_map.setData(&view);
        unused_property.setData(&view);
        unused_method_semantics.setData(&view);
        unused_method_impl.setData(&view);
        self.tables[@enumToInt(TableIndex.ModuleRef)].setData(&view);
        self.tables[@enumToInt(TableIndex.TypeSpec)].setData(&view);
        self.tables[@enumToInt(TableIndex.ImplMap)].setData(&view);
        unused_field_rva.setData(&view);
        unused_assembly.setData(&view);
        unused_assembly_processor.setData(&view);
        unused_assembly_os.setData(&view);
        self.tables[@enumToInt(TableIndex.AssemblyRef)].setData(&view);
        unused_assembly_ref_processor.setData(&view);
        unused_assembly_ref_os.setData(&view);
        unused_file.setData(&view);
        unused_exported_type.setData(&view);
        unused_manifest_resource.setData(&view);
        self.tables[@enumToInt(TableIndex.NestedClass)].setData(&view);
        self.tables[@enumToInt(TableIndex.GenericParam)].setData(&view);
        return self;
    }
};

/// Returns the section whose virtual address range contains `rva`.
fn sectionFromRva(sections: []const ImageSectionHeader, rva: u32) !ImageSectionHeader {
    for (sections) |s| {
        if (rva >= s.virtual_address and rva < s.virtual_address + s.physical_address_or_virtual_size) {
            return s;
        }
    }
    return error.InvalidFile;
}

/// Translates an RVA inside `section` to a file offset.
fn offsetFromRva(section: ImageSectionHeader, rva: u32) u32 {
    return rva - section.virtual_address + section.pointer_to_raw_data;
}

/// True when `row_count` fits in a 2-byte coded index that reserves `bits`
/// tag bits (i.e. row_count < 2^(16 - bits)).
fn small(row_count: u32, bits: u6) bool {
    return (@intCast(u64, row_count) < @as(u64, 1) << (16 - bits));
}

/// Number of tag bits needed to distinguish `bits_value` alternatives.
fn bitsNeeded(bits_value: usize) u6 {
    var value = bits_value - 1;
    var bits: u6 = 0;
    while (value != 0) : (bits += 1) {
        value >>= 1;
    }
    return bits;
}

/// Width in bytes (2 or 4) of a coded index that can point into any of
/// `tables`: 4 as soon as one candidate table is too large for 2 bytes.
fn compositeIndexSize(tables: []TableData) u32 {
    const vbits_needed = bitsNeeded(tables.len);
    for (tables) |table| {
        if (!small(table.row_count, vbits_needed)) return 4;
    }
    return 2;
}

// A set of validation consts that each DatabaseFile uses to validate winmd bytes
const IMAGE_DOS_SIGNATURE: u16 = 0x5A4D;
const MAGIC_PE32: u16 = 0x10B;
const MAGIC_PE32PLUS: u16 = 0x20B;
const IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR: usize = 14;
const STORAGE_MAGIC_SIG: u32 = 0x424A_5342;

// A set of packed structs to represent how bytes are laid out in winmd bytes
const ImageDosHeader = packed struct {
    signature: u16,
    cblp: u16,
    cp: u16,
    crlc: u16,
    cparhdr: u16,
    minalloc: u16,
    maxalloc: u16,
    ss: u16,
    sp: u16,
    csum: u16,
    ip: u16,
    cs: u16,
    lfarlc: u16,
    ovno: u16,
    res: [4]u16,
    oemid: u16,
    oeminfo: u16,
    res2: [10]u16,
    lfanew: u32,
};

const ImageFileHeader = packed struct {
    machine: u16,
    number_of_sections: u16,
    time_date_stamp: u32,
    pointer_to_symbol_table: u32,
    number_of_symbols: u32,
    size_of_optional_header: u16,
    characteristics: u16,
};

const ImageDataDirectory = packed struct {
    virtual_address: u32,
    size: u32,
};

const ImageOptionalHeader = packed struct {
    magic: u16,
    major_linker_version: u8,
    minor_linker_version: u8,
    size_of_code: u32,
    size_of_initialized_data: u32,
    size_of_uninitialized_data: u32,
    address_of_entry_point: u32,
    base_of_code: u32,
    base_of_data: u32,
    image_base: u32,
    section_alignment: u32,
    file_alignment: u32,
    major_operating_system_version: u16,
    minor_operating_system_version: u16,
    major_image_version: u16,
    minor_image_version: u16,
    major_subsystem_version: u16,
    minor_subsystem_version: u16,
    win32_version_value: u32,
    size_of_image: u32,
    size_of_headers: u32,
    check_sum: u32,
    subsystem: u16,
    dll_characteristics: u16,
    size_of_stack_reserve: u32,
    size_of_stack_commit: u32,
    size_of_heap_reserve: u32,
    size_of_heap_commit: u32,
    loader_flags: u32,
    number_of_rva_and_sizes: u32,
    data_directory: [16]ImageDataDirectory,
};

const ImageNtHeader = packed struct {
    signature: u32,
    file_header: ImageFileHeader,
    optional_header: ImageOptionalHeader,
};

const ImageOptionalHeaderPlus = packed struct {
    magic: u16,
    major_linker_version: u8,
    minor_linker_version: u8,
    size_of_code: u32,
    size_of_initialized_data: u32,
    size_of_uninitialized_data: u32,
    address_of_entry_point: u32,
    base_of_code: u32,
    image_base: u64,
    section_alignment: u32,
    file_alignment: u32,
    major_operating_system_version: u16,
    minor_operating_system_version: u16,
    major_image_version: u16,
    minor_image_version: u16,
    major_subsystem_version: u16,
    minor_subsystem_version: u16,
    win32_version_value: u32,
    size_of_image: u32,
    size_of_headers: u32,
    check_sum: u32,
    subsystem: u16,
    dll_characteristics: u16,
    size_of_stack_reserve: u64,
    size_of_stack_commit: u64,
    size_of_heap_reserve: u64,
    size_of_heap_commit: u64,
    loader_flags: u32,
    number_of_rva_and_sizes: u32,
    data_directory: [16]ImageDataDirectory,
};

const ImageNtHeaderPlus = packed struct {
    signature: u32,
    file_header: ImageFileHeader,
    optional_header: ImageOptionalHeaderPlus,
};

const ImageSectionHeader = packed struct {
    name: [8]u8,
    physical_address_or_virtual_size: u32,
    virtual_address: u32,
    size_of_raw_data: u32,
    pointer_to_raw_data: u32,
    pointer_to_relocations: u32,
    pointer_to_line_numbers: u32,
    number_of_relocations: u16,
    number_of_line_numbers: u16,
    characteristics: u32,
};

const ImageCorHeader = packed struct {
    cb: u32,
    major_runtime_version: u16,
    minor_runtime_version: u16,
    meta_data: ImageDataDirectory,
    flags: u32,
    entry_point_token_or_entry_point_rva: u32,
    resources: ImageDataDirectory,
    strong_name_signature: ImageDataDirectory,
    code_manager_table: ImageDataDirectory,
    vtable_fixups: ImageDataDirectory,
    export_address_table_jumps: ImageDataDirectory,
    managed_native_header: ImageDataDirectory,
};
src/winmd/database_file.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
const panic = std.debug.panic;
const vk = @import("vk.zig");
usingnamespace @import("device.zig");

/// A 2D Vulkan image bundled with its owning device, backing memory,
/// format/extent, and an (initially null) image view.
pub const Image = struct {
    const Self = @This();

    device: Device,
    handle: vk.Image,
    memory: vk.DeviceMemory,
    image_view: vk.ImageView,
    format: vk.Format,
    size: vk.Extent2D,

    /// Creates an optimally-tiled 2D image usable as a sampled texture and
    /// transfer destination, allocates device memory with the requested
    /// property flags, and binds the two together. The image view is left
    /// as null_handle until createImageView() is called.
    pub fn init(device: Device, format: vk.Format, size: vk.Extent2D, memory_type: vk.MemoryPropertyFlags) !Self {
        const img = try device.dispatch.createImage(
            device.handle,
            .{
                .flags = .{},
                .image_type = .@"2d",
                .format = format,
                .extent = .{
                    .width = size.width,
                    .height = size.height,
                    .depth = 1,
                },
                .mip_levels = 1,
                .array_layers = 1,
                .samples = .{ .@"1_bit" = true },
                .tiling = .optimal,
                .usage = .{
                    .sampled_bit = true,
                    .transfer_dst_bit = true,
                },
                .sharing_mode = .exclusive,
                .queue_family_index_count = 0,
                .p_queue_family_indices = undefined,
                .initial_layout = .@"undefined",
            },
            null,
        );

        // Query what the freshly created image needs, then allocate and bind.
        const requirements = device.dispatch.getImageMemoryRequirements(device.handle, img);
        const backing = try device.allocate_memory(requirements, memory_type);
        try device.dispatch.bindImageMemory(device.handle, img, backing, 0);

        return Self{
            .device = device,
            .handle = img,
            .memory = backing,
            .image_view = vk.ImageView.null_handle,
            .format = format,
            .size = size,
        };
    }

    /// Destroys the view and image, then frees the backing memory.
    /// Destroying a null_handle view is valid per the Vulkan spec.
    pub fn deinit(self: Self) void {
        self.device.dispatch.destroyImageView(self.device.handle, self.image_view, null);
        self.device.dispatch.destroyImage(self.device.handle, self.handle, null);
        self.device.free_memory(self.memory);
    }

    /// Builds a color view covering the whole image (one mip level, one
    /// array layer) and stores it in `image_view`.
    pub fn createImageView(self: *Self) !void {
        self.image_view = try self.device.dispatch.createImageView(
            self.device.handle,
            .{
                .flags = .{},
                .image = self.handle,
                .view_type = .@"2d",
                .format = self.format,
                .components = .{ .r = .r, .g = .g, .b = .b, .a = .a },
                .subresource_range = .{
                    .aspect_mask = .{ .color_bit = true },
                    .base_mip_level = 0,
                    .level_count = 1,
                    .base_array_layer = 0,
                    .layer_count = 1,
                },
            },
            null,
        );
    }
};
src/vulkan/image.zig
const sf = @import("../sfml.zig");
const std = @import("std");
const assert = std.debug.assert;

const TextureType = enum { ptr, const_ptr };

/// A CSFML texture handle, either mutable (`ptr`) or const (`const_ptr`).
pub const Texture = union(TextureType) {
    const Self = @This();

    // Constructor/destructor

    /// Creates a texture from nothing
    pub fn init(size: sf.Vector2u) !Self {
        var tex = sf.c.sfTexture_create(@intCast(c_uint, size.x), @intCast(c_uint, size.y));
        if (tex == null) return sf.Error.nullptrUnknownReason;
        return Self{ .ptr = tex.? };
    }

    /// Loads a texture from a file
    pub fn initFromFile(path: [:0]const u8) !Self {
        var tex = sf.c.sfTexture_createFromFile(path, null);
        if (tex == null) return sf.Error.resourceLoadingError;
        return Self{ .ptr = tex.? };
    }

    /// Creates a texture from an image (optionally only the `area` rect)
    pub fn initFromImage(image: sf.Image, area: ?sf.IntRect) !Self {
        var tex = if (area) |a|
            sf.c.sfTexture_createFromImage(image.ptr, &a.toCSFML())
        else
            sf.c.sfTexture_createFromImage(image.ptr, null);
        if (tex == null) return sf.Error.nullptrUnknownReason;
        return Self{ .ptr = tex.? };
    }

    /// Destroys a texture
    /// Be careful, you can only destroy non const textures
    pub fn deinit(self: Self) void {
        // TODO : is it possible to detect that comptime?
        // Should this panic?
        if (self == .const_ptr)
            @panic("Can't destroy a const texture pointer");
        sf.c.sfTexture_destroy(self.ptr);
    }

    // Getters/Setters

    /// Gets a const pointer to this texture
    pub fn get(self: Self) *const sf.c.sfTexture {
        return switch (self) {
            .ptr => self.ptr,
            .const_ptr => self.const_ptr,
        };
    }

    /// Clones this texture (the clone won't be const)
    pub fn copy(self: Self) !Self {
        var cpy = sf.c.sfTexture_copy(self.get());
        if (cpy == null) return sf.Error.nullptrUnknownReason;
        return Self{ .ptr = cpy.? };
    }

    /// Makes this texture constant (I don't know why you would do that)
    pub fn makeConst(self: *Self) void {
        // BUGFIX: removed an unused local (`var ptr = self.get();`) that was
        // never read.
        self.* = Self{ .const_ptr = self.get() };
    }

    /// Gets the size of this texture
    pub fn getSize(self: Self) sf.Vector2u {
        // This is a hack
        // NOTE(review): works around the small-struct return value being left
        // in rax by the C call; this is x86-64 specific — confirm before
        // building for other targets.
        _ = sf.c.sfTexture_getSize(self.get());
        // Register Rax holds the return val of function calls that can fit in a register
        const rax: usize = asm volatile (""
            : [ret] "={rax}" (-> usize)
        );
        var x: u32 = @truncate(u32, (rax & 0x00000000FFFFFFFF) >> 00);
        var y: u32 = @truncate(u32, (rax & 0xFFFFFFFF00000000) >> 32);
        return sf.Vector2u{ .x = x, .y = y };
    }

    /// Gets the pixel count of this texture (width * height)
    pub fn getPixelCount(self: Self) usize {
        var dim = self.getSize();
        return dim.x * dim.y;
    }

    /// Updates the pixels of the texture from an array of pixels (colors).
    /// `zone` must fit entirely inside the texture; null means the whole texture.
    pub fn updateFromPixels(self: Self, pixels: []const sf.Color, zone: ?sf.UintRect) !void {
        if (self == .const_ptr)
            @panic("Can't set pixels on a const texture");
        if (self.isSrgb())
            @panic("Updating an srgb from a pixel array isn't implemented");

        var real_zone: sf.UintRect = undefined;
        var size = self.getSize();

        if (zone) |z| {
            // Check if the given zone is fully inside the texture
            var intersection = z.intersects(sf.UintRect.init(0, 0, size.x, size.y));
            if (intersection) |i| {
                if (!i.equals(z))
                    return sf.Error.areaDoesNotFit;
            } else
                return sf.Error.areaDoesNotFit;
            real_zone = z;
        } else {
            real_zone.left = 0;
            real_zone.top = 0;
            real_zone.width = size.x;
            real_zone.height = size.y;
        }
        // Check if there is enough data
        if (pixels.len < real_zone.width * real_zone.height)
            return sf.Error.notEnoughData;

        sf.c.sfTexture_updateFromPixels(self.ptr, @ptrCast([*]const u8, pixels.ptr), real_zone.width, real_zone.height, real_zone.left, real_zone.top);
    }

    /// Updates the pixels of this texture from an other texture
    pub fn updateFromTexture(self: Self, other: Texture, copy_pos: ?sf.Vector2u) void {
        // Explicit const check, consistent with the other mutating methods
        // (previously this relied on the union tag safety check of self.ptr).
        if (self == .const_ptr)
            @panic("Can't update a const texture");
        var pos = if (copy_pos) |a| a else sf.Vector2u{ .x = 0, .y = 0 };
        var max = other.getSize().add(pos);
        var size = self.getSize();
        // BUGFIX: the copied region must fit inside the destination, i.e.
        // pos + other size <= own size. The previous `<` rejected legal
        // edge-aligned copies (e.g. a full-size copy at (0, 0)).
        assert(max.x <= size.x and max.y <= size.y);
        sf.c.sfTexture_updateFromTexture(self.ptr, other.get(), pos.x, pos.y);
    }

    /// Updates the pixels of this texture from an image
    pub fn updateFromImage(self: Self, image: sf.Image, copy_pos: ?sf.Vector2u) void {
        // Explicit const check, consistent with the other mutating methods.
        if (self == .const_ptr)
            @panic("Can't update a const texture");
        var pos = if (copy_pos) |a| a else sf.Vector2u{ .x = 0, .y = 0 };
        var max = image.getSize().add(pos);
        var size = self.getSize();
        // BUGFIX: same off-by-one as updateFromTexture — edge-aligned copies
        // are legal, so use <=.
        assert(max.x <= size.x and max.y <= size.y);
        sf.c.sfTexture_updateFromImage(self.ptr, image.ptr, pos.x, pos.y);
    }

    /// Tells whether or not this texture is to be smoothed
    pub fn isSmooth(self: Self) bool {
        // BUGFIX: use get() so this read-only query also works on const
        // textures (self.ptr on a const_ptr trips the union tag check).
        return sf.c.sfTexture_isSmooth(self.get()) != 0;
    }

    /// Enables or disables texture smoothing
    pub fn setSmooth(self: Self, smooth: bool) void {
        if (self == .const_ptr)
            @panic("Can't set properties on a const texture");
        sf.c.sfTexture_setSmooth(self.ptr, if (smooth) 1 else 0);
    }

    /// Tells whether or not this texture should repeat when rendering outside its bounds
    pub fn isRepeated(self: Self) bool {
        // BUGFIX: use get() — see isSmooth.
        return sf.c.sfTexture_isRepeated(self.get()) != 0;
    }

    /// Enables or disables texture repeating
    pub fn setRepeated(self: Self, repeated: bool) void {
        if (self == .const_ptr)
            @panic("Can't set properties on a const texture");
        sf.c.sfTexture_setRepeated(self.ptr, if (repeated) 1 else 0);
    }

    /// Tells whether or not this texture has colors in the SRGB format
    /// SRGB functions arent implemented yet
    pub fn isSrgb(self: Self) bool {
        // BUGFIX: use get() — see isSmooth.
        return sf.c.sfTexture_isSrgb(self.get()) != 0;
    }

    /// Enables or disables SRGB
    pub fn setSrgb(self: Self, srgb: bool) void {
        if (self == .const_ptr)
            @panic("Can't set properties on a const texture");
        sf.c.sfTexture_setSrgb(self.ptr, if (srgb) 1 else 0);
    }

    /// Swaps this texture's contents with an other texture
    pub fn swap(self: Self, other: Texture) void {
        if (self == .const_ptr or other == .const_ptr)
            @panic("Texture swapping must be done between two non const textures");
        sf.c.sfTexture_swap(self.ptr, other.ptr);
    }

    // TODO: many things

    /// Pointer to the csfml texture
    ptr: *sf.c.sfTexture,
    /// Const pointer to the csfml texture
    const_ptr: *const sf.c.sfTexture
};

test "texture: sane getters and setters" {
    const tst = std.testing;
    const allocator = std.heap.page_allocator;

    var tex = try sf.Texture.init(.{ .x = 12, .y = 10 });
    defer tex.deinit();
    var size = tex.getSize();

    tex.setSrgb(false);
    tex.setSmooth(true);
    tex.setRepeated(true);

    tst.expectEqual(@as(u32, 12), size.x);
    tst.expectEqual(@as(u32, 10), size.y);
    tst.expectEqual(@as(usize, 120), tex.getPixelCount());

    var pixel_data = try allocator.alloc(sf.Color, 120);
    defer allocator.free(pixel_data);

    // BUGFIX: discard the unused element capture (was `|c, i|`).
    for (pixel_data) |_, i| {
        pixel_data[i] = sf.Color.fromHSVA(@intToFloat(f32, i) / 144 * 360, 100, 100, 1);
    }

    try tex.updateFromPixels(pixel_data, null);

    tst.expect(!tex.isSrgb());
    tst.expect(tex.isSmooth());
    tst.expect(tex.isRepeated());

    var t = tex;
    t.makeConst();
    var copy = try t.copy();

    tst.expectEqual(@as(usize, 120), copy.getPixelCount());

    var tex2 = try sf.Texture.init(.{ .x = 100, .y = 100 });
    copy.swap(tex2);

    tst.expectEqual(@as(usize, 100 * 100), copy.getPixelCount());
    tst.expectEqual(@as(usize, 120), tex2.getPixelCount());
}
src/sfml/graphics/texture.zig
const std = @import("std");
const testing = std.testing;

/// Returns true for ASCII whitespace: space, tab, newline, carriage return,
/// vertical tab and form feed.
pub fn isWhitespace(c: u8) bool {
    return c == ' ' or c == '\t' or c == '\n' or c == '\r' or c == '\x0b' or c == '\x0c';
}

/// Returns true for line-ending characters ('\n' or '\r').
pub fn isNewline(c: u8) bool {
    return c == '\n' or c == '\r';
}

/// Whitespace that does not end a line (space, tab, vertical tab, form feed).
pub fn isWhitespaceNonNewline(c: u8) bool {
    return c == ' ' or c == '\t' or c == '\x0b' or c == '\x0c';
}

/// Returns true for decimal digits '0'-'9'.
pub fn isDigit(c: u8) bool {
    return c >= '0' and c <= '9';
}

/// Characters that may appear in a simple float literal: digits, '-' and '.'.
pub fn isFloatDigit(c: u8) bool {
    return (c >= '0' and c <= '9') or c == '-' or c == '.';
}

/// Returns true for hexadecimal digits: '0'-'9', 'A'-'F', 'a'-'f'.
/// (Previously accepted every ASCII letter, wrongly classifying
/// 'G'-'Z' / 'g'-'z' as hex digits.)
pub fn isHexDigit(c: u8) bool {
    return (c >= '0' and c <= '9') or (c >= 'A' and c <= 'F') or (c >= 'a' and c <= 'f');
}

/// Returns true for ASCII letters.
pub fn isASCIIAlphabet(c: u8) bool {
    return (c >= 'a' and c <= 'z') or (c >= 'A' and c <= 'Z');
}

/// Counts the length of the leading run of characters for which
/// `filter(c) == match` holds.
pub fn count(s: []const u8, filter: fn (u8) bool, match: bool) usize {
    var i: usize = 0;
    while (i < s.len and filter(s[i]) == match) : (i += 1) {}
    return i;
}

/// Skips the leading run matched by `count` and returns the rest of `s`.
pub fn skip(s: []const u8, filter: fn (u8) bool, match: bool) []const u8 {
    const i = count(s, filter, match);
    if (i == s.len) {
        return &[0]u8{};
    }
    return s[i..];
}

/// Returns the leading run matched by `count` as a slice of `s`.
pub fn getStart(s: []const u8, filter: fn (u8) bool, match: bool) []const u8 {
    const i = count(s, filter, match);
    return s[0..i];
}

/// Pops the next whitespace-separated token off `s`, advancing `s` past it.
/// Returns null when only whitespace (or nothing) remains.
pub fn nextToken(s: *[]const u8) ?[]const u8 {
    s.* = skip(s.*, isWhitespace, true);
    if (s.*.len == 0) {
        return null;
    }
    const t = getStart(s.*, isWhitespace, false);
    std.debug.assert(t.len > 0);
    // Advance past the token. (The original compared `t.len == s.len` and
    // assigned `[_]u8{}` / sliced the pointer itself, none of which compiles
    // for `s: *[]const u8`; slicing the dereferenced slice also covers the
    // token-reaches-end case with an empty remainder.)
    s.* = s.*[t.len..];
    return t;
}

/// Returns `s` with trailing whitespace removed.
pub fn trimEnd(s: []const u8) []const u8 {
    // Scan backwards from the end; running down to 0 makes an all-whitespace
    // string yield the empty slice (the old index-based loop kept one char).
    var end = s.len;
    while (end > 0 and isWhitespace(s[end - 1])) : (end -= 1) {}
    return s[0..end];
}

/// Skips ahead to the first occurrence of `c` that is not inside a
/// double-quoted section; backslash escapes are honored inside quotes.
/// e.g. ("abc\"d\"def", 'd') returns "def".
/// Returns the empty slice when no such occurrence exists.
/// `c` itself must not be '\\' or '"'.
pub fn skipToCharacterNotInQuotes(s: []const u8, c: u8) []const u8 {
    std.debug.assert(c != '\\' and c != '"');
    var in_quotes = false;
    var i: usize = 0;
    while (i < s.len) : (i += 1) {
        if (in_quotes and s[i] == '\\') {
            // Skip the escaped character so an escaped quote doesn't
            // toggle the in-quotes state.
            i += 1;
        } else if (s[i] == '"') {
            in_quotes = !in_quotes;
        } else if (s[i] == c and !in_quotes) {
            return s[i..];
        }
    }
    return &[_]u8{};
}

/// Finds the first occurrence of `x` in `s` and returns the suffix of `s`
/// starting there, or the empty slice when absent.
// Simple algorithm O(NM)
pub fn find(s: []const u8, x: []const u8) []const u8 {
    if (s.len >= x.len) {
        var i: usize = 0;
        while (i <= s.len - x.len) : (i += 1) {
            if (std.mem.eql(u8, s[i .. i + x.len], x)) {
                return s[i..];
            }
        }
    }
    return &[_]u8{};
}

/// Like `find`, but the returned suffix starts just after the match.
pub fn findAndSkip(s: []const u8, x: []const u8) []const u8 {
    const s2 = find(s, x);
    if (s2.len > 0) {
        return s2[x.len..];
    }
    return s2;
}

/// Returns true when `s` begins with `prefix`.
pub fn startsWith(s: []const u8, prefix: []const u8) bool {
    return s.len >= prefix.len and std.mem.eql(u8, s[0..prefix.len], prefix);
}

test "count/skip/get" {
    {
        var s: []const u8 = "  \nabc  ";
        try testing.expectEqual(count(s, isWhitespace, true), 3);
        s = skip(s, isWhitespace, true);
        try testing.expectEqualSlices(u8, s, "abc  ");
    }
    {
        var s: []const u8 = "\n\n :)";
        try testing.expectEqual(count(s, isNewline, true), 2);
        s = skip(s, isNewline, true);
        try testing.expectEqualSlices(u8, s, " :)");
    }
    {
        var s: []const u8 = "\n\n :)";
        s = getStart(s, isNewline, true);
        try testing.expectEqualSlices(u8, s, "\n\n");
    }
    {
        var s: []const u8 = "123  456";
        try testing.expectEqual(count(s, isWhitespace, false), 3);
        s = skip(s, isWhitespace, false);
        try testing.expectEqual(count(s, isWhitespace, true), 2);
        s = skip(s, isWhitespace, true);
        try testing.expectEqualSlices(u8, s, "456");

        try testing.expectEqualSlices(u8, trimEnd("123 "), "123");
        try testing.expectEqualSlices(u8, trimEnd(" 123 "), " 123");
        try testing.expectEqualSlices(u8, trimEnd("abc"), "abc");
        try testing.expectEqualSlices(u8, trimEnd("\tabc\n\n\t "), "\tabc");
        // All-whitespace input must trim to nothing.
        try testing.expectEqualSlices(u8, trimEnd("  "), "");
    }
    // Hex digits are exactly 0-9, a-f, A-F.
    try testing.expect(isHexDigit('0') and isHexDigit('9') and isHexDigit('a') and isHexDigit('F'));
    try testing.expect(!isHexDigit('g') and !isHexDigit('G') and !isHexDigit(' '));
    {
        var s: []const u8 = " one two ";
        try testing.expectEqualSlices(u8, nextToken(&s).?, "one");
        try testing.expectEqualSlices(u8, nextToken(&s).?, "two");
        try testing.expect(nextToken(&s) == null);
    }
    try testing.expectEqualSlices(u8, skipToCharacterNotInQuotes("123 \"a\\\"bc\" 4bd", 'b'), "bd");
    try testing.expectEqualSlices(u8, skipToCharacterNotInQuotes("\"\\\\\"1", '1'), "1");
    try testing.expectEqualSlices(u8, find("abdecdefghi", "def"), "defghi");
    try testing.expect(startsWith("abcdef", "ab"));
    try testing.expect(!startsWith("abcdef", "abd"));
    try testing.expect(!startsWith("abcdef", "abcdefg"));
}
src/String.zig
fn bgfx_attachment_init(bgfx_attachment_t* _this, bgfx_texture_handle_t _handle, bgfx_access_t _access, uint16_t _layer, uint16_t _mip, uint8_t _resolve); = enum {}; /// Decode attribute. /// @param[in] _attrib Attribute semantics. See: `bgfx::Attrib` /// @param[out] _num Number of elements. /// @param[out] _type Element type. /// @param[out] _normalized Attribute is normalized. /// @param[out] _asInt Attribute is packed as int. fn bgfx_vertex_layout_decode(const bgfx_vertex_layout_t* _this, bgfx_attrib_t _attrib, uint8_t * _num, bgfx_attrib_type_t * _type, bool * _normalized, bool * _asInt); = enum {}; /// End VertexLayout. fn bgfx_vertex_layout_end(bgfx_vertex_layout_t* _this); = enum {}; /// Pack vertex attribute into vertex stream format. /// @param[in] _input Value to be packed into vertex stream. /// @param[in] _inputNormalized `true` if input value is already normalized. /// @param[in] _attr Attribute to pack. /// @param[in] _layout Vertex stream layout. /// @param[in] _data Destination vertex stream where data will be packed. /// @param[in] _index Vertex index that will be modified. fn bgfx_vertex_pack(const float _input[4], bool _inputNormalized, bgfx_attrib_t _attr, const bgfx_vertex_layout_t * _layout, void* _data, uint32_t _index); = enum {}; /// Unpack vertex attribute from vertex stream format. /// @param[out] _output Result of unpacking. /// @param[in] _attr Attribute to unpack. /// @param[in] _layout Vertex stream layout. /// @param[in] _data Source vertex stream from where data will be unpacked. /// @param[in] _index Vertex index that will be unpacked. fn bgfx_vertex_unpack(float _output[4], bgfx_attrib_t _attr, const bgfx_vertex_layout_t * _layout, const void* _data, uint32_t _index); = enum {}; /// Converts vertex stream data from one vertex stream format to another. /// @param[in] _dstLayout Destination vertex stream layout. /// @param[in] _dstData Destination vertex stream. /// @param[in] _srcLayout Source vertex stream layout. 
/// @param[in] _srcData Source vertex stream data. /// @param[in] _num Number of vertices to convert from source to destination. fn bgfx_vertex_convert(const bgfx_vertex_layout_t * _dstLayout, void* _dstData, const bgfx_vertex_layout_t * _srcLayout, const void* _srcData, uint32_t _num); = enum {}; /// Sort indices. /// @param[in] _sort Sort order, see `TopologySort::Enum`. /// @param[out] _dst Destination index buffer. /// @param[in] _dstSize Destination index buffer in bytes. It must be /// large enough to contain output indices. If destination size is /// insufficient index buffer will be truncated. /// @param[in] _dir Direction (vector must be normalized). /// @param[in] _pos Position. /// @param[in] _vertices Pointer to first vertex represented as /// float x, y, z. Must contain at least number of vertices /// referencende by index buffer. /// @param[in] _stride Vertex stride. /// @param[in] _indices Source indices. /// @param[in] _numIndices Number of input indices. /// @param[in] _index32 Set to `true` if input indices are 32-bit. fn bgfx_topology_sort_tri_list(bgfx_topology_sort_t _sort, void* _dst, uint32_t _dstSize, const float _dir[3], const float _pos[3], const void* _vertices, uint32_t _stride, const void* _indices, uint32_t _numIndices, bool _index32); = enum {}; /// Returns name of renderer. /// @param[in] _type Renderer backend type. See: `bgfx::RendererType` /// @returns Name of renderer. fn bgfx_init_ctor(bgfx_init_t* _init); = enum {}; /// Shutdown bgfx library. fn bgfx_shutdown(void); = enum {}; /// Reset graphic settings and back-buffer size. /// @attention This call doesn't actually change window size, it just /// resizes back-buffer. Windowing code has to change window size. /// @param[in] _width Back-buffer width. /// @param[in] _height Back-buffer height. /// @param[in] _flags See: `BGFX_RESET_*` for more info. /// - `BGFX_RESET_NONE` - No reset flags. /// - `BGFX_RESET_FULLSCREEN` - Not supported yet. 
/// - `BGFX_RESET_MSAA_X[2/4/8/16]` - Enable 2, 4, 8 or 16 x MSAA. /// - `BGFX_RESET_VSYNC` - Enable V-Sync. /// - `BGFX_RESET_MAXANISOTROPY` - Turn on/off max anisotropy. /// - `BGFX_RESET_CAPTURE` - Begin screen capture. /// - `BGFX_RESET_FLUSH_AFTER_RENDER` - Flush rendering after submitting to GPU. /// - `BGFX_RESET_FLIP_AFTER_RENDER` - This flag specifies where flip /// occurs. Default behaviour is that flip occurs before rendering new /// frame. This flag only has effect when `BGFX_CONFIG_MULTITHREADED=0`. /// - `BGFX_RESET_SRGB_BACKBUFFER` - Enable sRGB backbuffer. /// @param[in] _format Texture format. See: `TextureFormat::Enum`. fn bgfx_reset(uint32_t _width, uint32_t _height, uint32_t _flags, bgfx_texture_format_t _format); = enum {}; /// Set debug flags. /// @param[in] _debug Available flags: /// - `BGFX_DEBUG_IFH` - Infinitely fast hardware. When this flag is set /// all rendering calls will be skipped. This is useful when profiling /// to quickly assess potential bottlenecks between CPU and GPU. /// - `BGFX_DEBUG_PROFILER` - Enable profiler. /// - `BGFX_DEBUG_STATS` - Display internal statistics. /// - `BGFX_DEBUG_TEXT` - Display debug text. /// - `BGFX_DEBUG_WIREFRAME` - Wireframe rendering. All rendering /// primitives will be rendered as lines. fn bgfx_set_debug(uint32_t _debug); = enum {}; /// Clear internal debug text buffer. /// @param[in] _attr Background color. /// @param[in] _small Default 8x16 or 8x8 font. fn bgfx_dbg_text_clear(uint8_t _attr, bool _small); = enum {}; /// Print formatted data to internal debug text character-buffer (VGA-compatible text mode). /// @param[in] _x Position x from the left corner of the window. /// @param[in] _y Position y from the top corner of the window. /// @param[in] _attr Color palette. Where top 4-bits represent index of background, and bottom /// 4-bits represent foreground color from standard VGA text palette (ANSI escape codes). /// @param[in] _format `printf` style format. 
/// @param[in] fn bgfx_dbg_text_printf(uint16_t _x, uint16_t _y, uint8_t _attr, const char* _format, ... ); = enum {}; /// Print formatted data from variable argument list to internal debug text character-buffer (VGA-compatible text mode). /// @param[in] _x Position x from the left corner of the window. /// @param[in] _y Position y from the top corner of the window. /// @param[in] _attr Color palette. Where top 4-bits represent index of background, and bottom /// 4-bits represent foreground color from standard VGA text palette (ANSI escape codes). /// @param[in] _format `printf` style format. /// @param[in] _argList Variable arguments list for format string. fn bgfx_dbg_text_vprintf(uint16_t _x, uint16_t _y, uint8_t _attr, const char* _format, va_list _argList); = enum {}; /// Draw image into internal debug text buffer. /// @param[in] _x Position x from the left corner of the window. /// @param[in] _y Position y from the top corner of the window. /// @param[in] _width Image width. /// @param[in] _height Image height. /// @param[in] _data Raw image data (character/attribute raw encoding). /// @param[in] _pitch Image pitch in bytes. fn bgfx_dbg_text_image(uint16_t _x, uint16_t _y, uint16_t _width, uint16_t _height, const void* _data, uint16_t _pitch); = enum {}; /// Set static index buffer debug name. /// @param[in] _handle Static index buffer handle. /// @param[in] _name Static index buffer name. /// @param[in] _len Static index buffer name length (if length is INT32_MAX, it's expected /// that _name is zero terminated string. fn bgfx_set_index_buffer_name(bgfx_index_buffer_handle_t _handle, const char* _name, int32_t _len); = enum {}; /// Destroy static index buffer. /// @param[in] _handle Static index buffer handle. fn bgfx_destroy_index_buffer(bgfx_index_buffer_handle_t _handle); = enum {}; /// Destroy vertex layout. /// @param[in] _layoutHandle Vertex layout handle. 
fn bgfx_destroy_vertex_layout(bgfx_vertex_layout_handle_t _layoutHandle); = enum {}; /// Set static vertex buffer debug name. /// @param[in] _handle Static vertex buffer handle. /// @param[in] _name Static vertex buffer name. /// @param[in] _len Static vertex buffer name length (if length is INT32_MAX, it's expected /// that _name is zero terminated string. fn bgfx_set_vertex_buffer_name(bgfx_vertex_buffer_handle_t _handle, const char* _name, int32_t _len); = enum {}; /// Destroy static vertex buffer. /// @param[in] _handle Static vertex buffer handle. fn bgfx_destroy_vertex_buffer(bgfx_vertex_buffer_handle_t _handle); = enum {}; /// Update dynamic index buffer. /// @param[in] _handle Dynamic index buffer handle. /// @param[in] _startIndex Start index. /// @param[in] _mem Index buffer data. fn bgfx_update_dynamic_index_buffer(bgfx_dynamic_index_buffer_handle_t _handle, uint32_t _startIndex, const bgfx_memory_t* _mem); = enum {}; /// Destroy dynamic index buffer. /// @param[in] _handle Dynamic index buffer handle. fn bgfx_destroy_dynamic_index_buffer(bgfx_dynamic_index_buffer_handle_t _handle); = enum {}; /// Update dynamic vertex buffer. /// @param[in] _handle Dynamic vertex buffer handle. /// @param[in] _startVertex Start vertex. /// @param[in] _mem Vertex buffer data. fn bgfx_update_dynamic_vertex_buffer(bgfx_dynamic_vertex_buffer_handle_t _handle, uint32_t _startVertex, const bgfx_memory_t* _mem); = enum {}; /// Destroy dynamic vertex buffer. /// @param[in] _handle Dynamic vertex buffer handle. fn bgfx_destroy_dynamic_vertex_buffer(bgfx_dynamic_vertex_buffer_handle_t _handle); = enum {}; /// Allocate transient index buffer. /// @remarks /// Only 16-bit index buffer is supported. /// @param[out] _tib TransientIndexBuffer structure is filled and is valid /// for the duration of frame, and it can be reused for multiple draw /// calls. /// @param[in] _num Number of indices to allocate. 
fn bgfx_alloc_transient_index_buffer(bgfx_transient_index_buffer_t* _tib, uint32_t _num); = enum {}; /// Allocate transient vertex buffer. /// @param[out] _tvb TransientVertexBuffer structure is filled and is valid /// for the duration of frame, and it can be reused for multiple draw /// calls. /// @param[in] _num Number of vertices to allocate. /// @param[in] _layout Vertex layout. fn bgfx_alloc_transient_vertex_buffer(bgfx_transient_vertex_buffer_t* _tvb, uint32_t _num, const bgfx_vertex_layout_t * _layout); = enum {}; /// Allocate instance data buffer. /// @param[out] _idb InstanceDataBuffer structure is filled and is valid /// for duration of frame, and it can be reused for multiple draw /// calls. /// @param[in] _num Number of instances. /// @param[in] _stride Instance stride. Must be multiple of 16. fn bgfx_alloc_instance_data_buffer(bgfx_instance_data_buffer_t* _idb, uint32_t _num, uint16_t _stride); = enum {}; /// Destroy draw indirect buffer. /// @param[in] _handle Indirect buffer handle. fn bgfx_destroy_indirect_buffer(bgfx_indirect_buffer_handle_t _handle); = enum {}; /// Set shader debug name. /// @param[in] _handle Shader handle. /// @param[in] _name Shader name. /// @param[in] _len Shader name length (if length is INT32_MAX, it's expected /// that _name is zero terminated string). fn bgfx_set_shader_name(bgfx_shader_handle_t _handle, const char* _name, int32_t _len); = enum {}; /// Destroy shader. /// @remark Once a shader program is created with _handle, /// it is safe to destroy that shader. /// @param[in] _handle Shader handle. fn bgfx_destroy_shader(bgfx_shader_handle_t _handle); = enum {}; /// Destroy program. /// @param[in] _handle Program handle. fn bgfx_destroy_program(bgfx_program_handle_t _handle); = enum {}; /// Calculate amount of memory required for texture. /// @param[out] _info Resulting texture info structure. See: `TextureInfo`. /// @param[in] _width Width. /// @param[in] _height Height. 
/// @param[in] _depth Depth dimension of volume texture. /// @param[in] _cubeMap Indicates that texture contains cubemap. /// @param[in] _hasMips Indicates that texture contains full mip-map chain. /// @param[in] _numLayers Number of layers in texture array. /// @param[in] _format Texture format. See: `TextureFormat::Enum`. fn bgfx_calc_texture_size(bgfx_texture_info_t * _info, uint16_t _width, uint16_t _height, uint16_t _depth, bool _cubeMap, bool _hasMips, uint16_t _numLayers, bgfx_texture_format_t _format); = enum {}; /// Update 2D texture. /// @attention It's valid to update only mutable texture. See `bgfx::createTexture2D` for more info. /// @param[in] _handle Texture handle. /// @param[in] _layer Layer in texture array. /// @param[in] _mip Mip level. /// @param[in] _x X offset in texture. /// @param[in] _y Y offset in texture. /// @param[in] _width Width of texture block. /// @param[in] _height Height of texture block. /// @param[in] _mem Texture update data. /// @param[in] _pitch Pitch of input image (bytes). When _pitch is set to /// UINT16_MAX, it will be calculated internally based on _width. fn bgfx_update_texture_2d(bgfx_texture_handle_t _handle, uint16_t _layer, uint8_t _mip, uint16_t _x, uint16_t _y, uint16_t _width, uint16_t _height, const bgfx_memory_t* _mem, uint16_t _pitch); = enum {}; /// Update 3D texture. /// @attention It's valid to update only mutable texture. See `bgfx::createTexture3D` for more info. /// @param[in] _handle Texture handle. /// @param[in] _mip Mip level. /// @param[in] _x X offset in texture. /// @param[in] _y Y offset in texture. /// @param[in] _z Z offset in texture. /// @param[in] _width Width of texture block. /// @param[in] _height Height of texture block. /// @param[in] _depth Depth of texture block. /// @param[in] _mem Texture update data. 
fn bgfx_update_texture_3d(bgfx_texture_handle_t _handle, uint8_t _mip, uint16_t _x, uint16_t _y, uint16_t _z, uint16_t _width, uint16_t _height, uint16_t _depth, const bgfx_memory_t* _mem); = enum {}; /// Update Cube texture. /// @attention It's valid to update only mutable texture. See `bgfx::createTextureCube` for more info. /// @param[in] _handle Texture handle. /// @param[in] _layer Layer in texture array. /// @param[in] _side Cubemap side `BGFX_CUBE_MAP_<POSITIVE or NEGATIVE>_<X, Y or Z>`, /// where 0 is +X, 1 is -X, 2 is +Y, 3 is -Y, 4 is +Z, and 5 is -Z. /// +----------+ /// |-z 2| /// | ^ +y | /// | | | Unfolded cube: /// | +---->+x | /// +----------+----------+----------+----------+ /// |+y 1|+y 4|+y 0|+y 5| /// | ^ -x | ^ +z | ^ +x | ^ -z | /// | | | | | | | | | /// | +---->+z | +---->+x | +---->-z | +---->-x | /// +----------+----------+----------+----------+ /// |+z 3| /// | ^ -y | /// | | | /// | +---->+x | /// +----------+ /// @param[in] _mip Mip level. /// @param[in] _x X offset in texture. /// @param[in] _y Y offset in texture. /// @param[in] _width Width of texture block. /// @param[in] _height Height of texture block. /// @param[in] _mem Texture update data. /// @param[in] _pitch Pitch of input image (bytes). When _pitch is set to /// UINT16_MAX, it will be calculated internally based on _width. fn bgfx_update_texture_cube(bgfx_texture_handle_t _handle, uint16_t _layer, uint8_t _side, uint8_t _mip, uint16_t _x, uint16_t _y, uint16_t _width, uint16_t _height, const bgfx_memory_t* _mem, uint16_t _pitch); = enum {}; /// Set texture debug name. /// @param[in] _handle Texture handle. /// @param[in] _name Texture name. /// @param[in] _len Texture name length (if length is INT32_MAX, it's expected /// that _name is zero terminated string. fn bgfx_set_texture_name(bgfx_texture_handle_t _handle, const char* _name, int32_t _len); = enum {}; /// Returns texture direct access pointer. /// @attention Availability depends on: `BGFX_CAPS_TEXTURE_DIRECT_ACCESS`. 
This feature /// is available on GPUs that have unified memory architecture (UMA) support. /// @param[in] _handle Texture handle. /// @returns Pointer to texture memory. If returned pointer is `NULL` direct access /// is not available for this texture. If pointer is `UINTPTR_MAX` sentinel value /// it means texture is pending creation. Pointer returned can be cached and it /// will be valid until texture is destroyed. fn bgfx_get_direct_access_ptr(bgfx_texture_handle_t _handle); = enum {}; /// Destroy texture. /// @param[in] _handle Texture handle. fn bgfx_destroy_texture(bgfx_texture_handle_t _handle); = enum {}; /// Set frame buffer debug name. /// @param[in] _handle Frame buffer handle. /// @param[in] _name Frame buffer name. /// @param[in] _len Frame buffer name length (if length is INT32_MAX, it's expected /// that _name is zero terminated string. fn bgfx_set_frame_buffer_name(bgfx_frame_buffer_handle_t _handle, const char* _name, int32_t _len); = enum {}; /// Destroy frame buffer. /// @param[in] _handle Frame buffer handle. fn bgfx_destroy_frame_buffer(bgfx_frame_buffer_handle_t _handle); = enum {}; /// Retrieve uniform info. /// @param[in] _handle Handle to uniform object. /// @param[out] _info Uniform info. fn bgfx_get_uniform_info(bgfx_uniform_handle_t _handle, bgfx_uniform_info_t * _info); = enum {}; /// Destroy shader uniform parameter. /// @param[in] _handle Handle to uniform object. fn bgfx_destroy_uniform(bgfx_uniform_handle_t _handle); = enum {}; /// Destroy occlusion query. /// @param[in] _handle Handle to occlusion query object. fn bgfx_destroy_occlusion_query(bgfx_occlusion_query_handle_t _handle); = enum {}; /// Set palette color value. /// @param[in] _index Index into palette. /// @param[in] _rgba RGBA floating point values. fn bgfx_set_palette_color(uint8_t _index, const float _rgba[4]); = enum {}; /// Set palette color value. /// @param[in] _index Index into palette. /// @param[in] _rgba Packed 32-bit RGBA value. 
fn bgfx_set_palette_color_rgba8(uint8_t _index, uint32_t _rgba); = enum {}; /// Set view name. /// @remarks /// This is debug only feature. /// In graphics debugger view name will appear as: /// "nnnc <view name>" /// ^ ^ ^ /// | +--- compute (C) /// +------ view id /// @param[in] _id View id. /// @param[in] _name View name. fn bgfx_set_view_name(bgfx_view_id_t _id, const char* _name); = enum {}; /// Set view rectangle. Draw primitive outside view will be clipped. /// @param[in] _id View id. /// @param[in] _x Position x from the left corner of the window. /// @param[in] _y Position y from the top corner of the window. /// @param[in] _width Width of view port region. /// @param[in] _height Height of view port region. fn bgfx_set_view_rect(bgfx_view_id_t _id, uint16_t _x, uint16_t _y, uint16_t _width, uint16_t _height); = enum {}; /// Set view rectangle. Draw primitive outside view will be clipped. /// @param[in] _id View id. /// @param[in] _x Position x from the left corner of the window. /// @param[in] _y Position y from the top corner of the window. /// @param[in] _ratio Width and height will be set in respect to back-buffer size. /// See: `BackbufferRatio::Enum`. fn bgfx_set_view_rect_ratio(bgfx_view_id_t _id, uint16_t _x, uint16_t _y, bgfx_backbuffer_ratio_t _ratio); = enum {}; /// Set view scissor. Draw primitive outside view will be clipped. When /// _x, _y, _width and _height are set to 0, scissor will be disabled. /// @param[in] _id View id. /// @param[in] _x Position x from the left corner of the window. /// @param[in] _y Position y from the top corner of the window. /// @param[in] _width Width of view scissor region. /// @param[in] _height Height of view scissor region. fn bgfx_set_view_scissor(bgfx_view_id_t _id, uint16_t _x, uint16_t _y, uint16_t _width, uint16_t _height); = enum {}; /// Set view clear flags. /// @param[in] _id View id. /// @param[in] _flags Clear flags. Use `BGFX_CLEAR_NONE` to remove any clear /// operation. See: `BGFX_CLEAR_*`. 
/// @param[in] _rgba Color clear value. /// @param[in] _depth Depth clear value. /// @param[in] _stencil Stencil clear value. fn bgfx_set_view_clear(bgfx_view_id_t _id, uint16_t _flags, uint32_t _rgba, float _depth, uint8_t _stencil); = enum {}; /// Set view clear flags with different clear color for each /// frame buffer texture. Must use `bgfx::setPaletteColor` to setup clear color /// palette. /// @param[in] _id View id. /// @param[in] _flags Clear flags. Use `BGFX_CLEAR_NONE` to remove any clear /// operation. See: `BGFX_CLEAR_*`. /// @param[in] _depth Depth clear value. /// @param[in] _stencil Stencil clear value. /// @param[in] _c0 Palette index for frame buffer attachment 0. /// @param[in] _c1 Palette index for frame buffer attachment 1. /// @param[in] _c2 Palette index for frame buffer attachment 2. /// @param[in] _c3 Palette index for frame buffer attachment 3. /// @param[in] _c4 Palette index for frame buffer attachment 4. /// @param[in] _c5 Palette index for frame buffer attachment 5. /// @param[in] _c6 Palette index for frame buffer attachment 6. /// @param[in] _c7 Palette index for frame buffer attachment 7. fn bgfx_set_view_clear_mrt(bgfx_view_id_t _id, uint16_t _flags, float _depth, uint8_t _stencil, uint8_t _c0, uint8_t _c1, uint8_t _c2, uint8_t _c3, uint8_t _c4, uint8_t _c5, uint8_t _c6, uint8_t _c7); = enum {}; /// Set view sorting mode. /// @remarks /// View mode must be set prior calling `bgfx::submit` for the view. /// @param[in] _id View id. /// @param[in] _mode View sort mode. See `ViewMode::Enum`. fn bgfx_set_view_mode(bgfx_view_id_t _id, bgfx_view_mode_t _mode); = enum {}; /// Set view frame buffer. /// @remarks /// Not persistent after `bgfx::reset` call. /// @param[in] _id View id. /// @param[in] _handle Frame buffer handle. Passing `BGFX_INVALID_HANDLE` as /// frame buffer handle will draw primitives from this view into /// default back buffer. 
fn bgfx_set_view_frame_buffer(bgfx_view_id_t _id, bgfx_frame_buffer_handle_t _handle); = enum {}; /// Set view view and projection matrices, all draw primitives in this /// view will use these matrices. /// @param[in] _id View id. /// @param[in] _view View matrix. /// @param[in] _proj Projection matrix. fn bgfx_set_view_transform(bgfx_view_id_t _id, const void* _view, const void* _proj); = enum {}; /// Post submit view reordering. /// @param[in] _id First view id. /// @param[in] _num Number of views to remap. /// @param[in] _order View remap id table. Passing `NULL` will reset view ids /// to default state. fn bgfx_set_view_order(bgfx_view_id_t _id, uint16_t _num, const bgfx_view_id_t* _order); = enum {}; /// Reset all view settings to default. /// @param[in] _id fn bgfx_reset_view(bgfx_view_id_t _id); = enum {}; /// End submitting draw calls from thread. /// @param[in] _encoder Encoder. fn bgfx_encoder_end(bgfx_encoder_t* _encoder); = enum {}; /// Sets a debug marker. This allows you to group graphics calls together for easy browsing in /// graphics debugging tools. /// @param[in] _marker Marker string. fn bgfx_encoder_set_marker(bgfx_encoder_t* _this, const char* _marker); = enum {}; /// Set render states for draw primitive. /// @remarks /// 1. To setup more complex states use: /// `BGFX_STATE_ALPHA_REF(_ref)`, /// `BGFX_STATE_POINT_SIZE(_size)`, /// `BGFX_STATE_BLEND_FUNC(_src, _dst)`, /// `BGFX_STATE_BLEND_FUNC_SEPARATE(_srcRGB, _dstRGB, _srcA, _dstA)`, /// `BGFX_STATE_BLEND_EQUATION(_equation)`, /// `BGFX_STATE_BLEND_EQUATION_SEPARATE(_equationRGB, _equationA)` /// 2. `BGFX_STATE_BLEND_EQUATION_ADD` is set when no other blend /// equation is specified. /// @param[in] _state State flags. Default state for primitive type is /// triangles. See: `BGFX_STATE_DEFAULT`. /// - `BGFX_STATE_DEPTH_TEST_*` - Depth test function. /// - `BGFX_STATE_BLEND_*` - See remark 1 about BGFX_STATE_BLEND_FUNC. /// - `BGFX_STATE_BLEND_EQUATION_*` - See remark 2. 
/// - `BGFX_STATE_CULL_*` - Backface culling mode. /// - `BGFX_STATE_WRITE_*` - Enable R, G, B, A or Z write. /// - `BGFX_STATE_MSAA` - Enable hardware multisample antialiasing. /// - `BGFX_STATE_PT_[TRISTRIP/LINES/POINTS]` - Primitive type. /// @param[in] _rgba Sets blend factor used by `BGFX_STATE_BLEND_FACTOR` and /// `BGFX_STATE_BLEND_INV_FACTOR` blend modes. fn bgfx_encoder_set_state(bgfx_encoder_t* _this, uint64_t _state, uint32_t _rgba); = enum {}; /// Set condition for rendering. /// @param[in] _handle Occlusion query handle. /// @param[in] _visible Render if occlusion query is visible. fn bgfx_encoder_set_condition(bgfx_encoder_t* _this, bgfx_occlusion_query_handle_t _handle, bool _visible); = enum {}; /// Set stencil test state. /// @param[in] _fstencil Front stencil state. /// @param[in] _bstencil Back stencil state. If back is set to `BGFX_STENCIL_NONE` /// _fstencil is applied to both front and back facing primitives. fn bgfx_encoder_set_stencil(bgfx_encoder_t* _this, uint32_t _fstencil, uint32_t _bstencil); = enum {}; /// Set scissor from cache for draw primitive. /// @remark /// To scissor for all primitives in view see `bgfx::setViewScissor`. /// @param[in] _cache Index in scissor cache. fn bgfx_encoder_set_scissor_cached(bgfx_encoder_t* _this, uint16_t _cache); = enum {}; /// Set model matrix from matrix cache for draw primitive. /// @param[in] _cache Index in matrix cache. /// @param[in] _num Number of matrices from cache. fn bgfx_encoder_set_transform_cached(bgfx_encoder_t* _this, uint32_t _cache, uint16_t _num); = enum {}; /// Set shader uniform parameter for draw primitive. /// @param[in] _handle Uniform. /// @param[in] _value Pointer to uniform data. /// @param[in] _num Number of elements. Passing `UINT16_MAX` will /// use the _num passed on uniform creation. fn bgfx_encoder_set_uniform(bgfx_encoder_t* _this, bgfx_uniform_handle_t _handle, const void* _value, uint16_t _num); = enum {}; /// Set index buffer for draw primitive. 
/// @param[in] _handle Index buffer. /// @param[in] _firstIndex First index to render. /// @param[in] _numIndices Number of indices to render. fn bgfx_encoder_set_index_buffer(bgfx_encoder_t* _this, bgfx_index_buffer_handle_t _handle, uint32_t _firstIndex, uint32_t _numIndices); = enum {}; /// Set index buffer for draw primitive. /// @param[in] _handle Dynamic index buffer. /// @param[in] _firstIndex First index to render. /// @param[in] _numIndices Number of indices to render. fn bgfx_encoder_set_dynamic_index_buffer(bgfx_encoder_t* _this, bgfx_dynamic_index_buffer_handle_t _handle, uint32_t _firstIndex, uint32_t _numIndices); = enum {}; /// Set index buffer for draw primitive. /// @param[in] _tib Transient index buffer. /// @param[in] _firstIndex First index to render. /// @param[in] _numIndices Number of indices to render. fn bgfx_encoder_set_transient_index_buffer(bgfx_encoder_t* _this, const bgfx_transient_index_buffer_t* _tib, uint32_t _firstIndex, uint32_t _numIndices); = enum {}; /// Set vertex buffer for draw primitive. /// @param[in] _stream Vertex stream. /// @param[in] _handle Vertex buffer. /// @param[in] _startVertex First vertex to render. /// @param[in] _numVertices Number of vertices to render. fn bgfx_encoder_set_vertex_buffer(bgfx_encoder_t* _this, uint8_t _stream, bgfx_vertex_buffer_handle_t _handle, uint32_t _startVertex, uint32_t _numVertices); = enum {}; /// Set vertex buffer for draw primitive. /// @param[in] _stream Vertex stream. /// @param[in] _handle Vertex buffer. /// @param[in] _startVertex First vertex to render. /// @param[in] _numVertices Number of vertices to render. /// @param[in] _layoutHandle Vertex layout for aliasing vertex buffer. If invalid /// handle is used, vertex layout used for creation /// of vertex buffer will be used. 
fn bgfx_encoder_set_vertex_buffer_with_layout(bgfx_encoder_t* _this, uint8_t _stream, bgfx_vertex_buffer_handle_t _handle, uint32_t _startVertex, uint32_t _numVertices, bgfx_vertex_layout_handle_t _layoutHandle); = enum {}; /// Set vertex buffer for draw primitive. /// @param[in] _stream Vertex stream. /// @param[in] _handle Dynamic vertex buffer. /// @param[in] _startVertex First vertex to render. /// @param[in] _numVertices Number of vertices to render. fn bgfx_encoder_set_dynamic_vertex_buffer(bgfx_encoder_t* _this, uint8_t _stream, bgfx_dynamic_vertex_buffer_handle_t _handle, uint32_t _startVertex, uint32_t _numVertices); = enum {}; /// Set vertex buffer for draw primitive. /// @param[in] _stream Vertex stream. /// @param[in] _handle Dynamic vertex buffer. /// @param[in] _startVertex First vertex to render. /// @param[in] _numVertices Number of vertices to render. fn bgfx_encoder_set_dynamic_vertex_buffer_with_layout(bgfx_encoder_t* _this, uint8_t _stream, bgfx_dynamic_vertex_buffer_handle_t _handle, uint32_t _startVertex, uint32_t _numVertices, bgfx_vertex_layout_handle_t _layoutHandle); = enum {}; /// Set vertex buffer for draw primitive. /// @param[in] _stream Vertex stream. /// @param[in] _tvb Transient vertex buffer. /// @param[in] _startVertex First vertex to render. /// @param[in] _numVertices Number of vertices to render. fn bgfx_encoder_set_transient_vertex_buffer(bgfx_encoder_t* _this, uint8_t _stream, const bgfx_transient_vertex_buffer_t* _tvb, uint32_t _startVertex, uint32_t _numVertices); = enum {}; /// Set vertex buffer for draw primitive. /// @param[in] _stream Vertex stream. /// @param[in] _tvb Transient vertex buffer. /// @param[in] _startVertex First vertex to render. /// @param[in] _numVertices Number of vertices to render. /// @param[in] _layoutHandle Vertex layout for aliasing vertex buffer. If invalid /// handle is used, vertex layout used for creation /// of vertex buffer will be used. 
fn bgfx_encoder_set_transient_vertex_buffer_with_layout(bgfx_encoder_t* _this, uint8_t _stream, const bgfx_transient_vertex_buffer_t* _tvb, uint32_t _startVertex, uint32_t _numVertices, bgfx_vertex_layout_handle_t _layoutHandle); = enum {}; /// Set number of vertices for auto generated vertices use in conjuction /// with gl_VertexID. /// @attention Availability depends on: `BGFX_CAPS_VERTEX_ID`. /// @param[in] _numVertices Number of vertices. fn bgfx_encoder_set_vertex_count(bgfx_encoder_t* _this, uint32_t _numVertices); = enum {}; /// Set instance data buffer for draw primitive. /// @param[in] _idb Transient instance data buffer. /// @param[in] _start First instance data. /// @param[in] _num Number of data instances. fn bgfx_encoder_set_instance_data_buffer(bgfx_encoder_t* _this, const bgfx_instance_data_buffer_t* _idb, uint32_t _start, uint32_t _num); = enum {}; /// Set instance data buffer for draw primitive. /// @param[in] _handle Vertex buffer. /// @param[in] _startVertex First instance data. /// @param[in] _num Number of data instances. /// Set instance data buffer for draw primitive. fn bgfx_encoder_set_instance_data_from_vertex_buffer(bgfx_encoder_t* _this, bgfx_vertex_buffer_handle_t _handle, uint32_t _startVertex, uint32_t _num); = enum {}; /// Set instance data buffer for draw primitive. /// @param[in] _handle Dynamic vertex buffer. /// @param[in] _startVertex First instance data. /// @param[in] _num Number of data instances. fn bgfx_encoder_set_instance_data_from_dynamic_vertex_buffer(bgfx_encoder_t* _this, bgfx_dynamic_vertex_buffer_handle_t _handle, uint32_t _startVertex, uint32_t _num); = enum {}; /// Set number of instances for auto generated instances use in conjuction /// with gl_InstanceID. /// @attention Availability depends on: `BGFX_CAPS_VERTEX_ID`. /// @param[in] _numInstances fn bgfx_encoder_set_instance_count(bgfx_encoder_t* _this, uint32_t _numInstances); = enum {}; /// Set texture stage for draw primitive. 
/// @param[in] _stage Texture unit. /// @param[in] _sampler Program sampler. /// @param[in] _handle Texture handle. /// @param[in] _flags Texture sampling mode. Default value UINT32_MAX uses /// texture sampling settings from the texture. /// - `BGFX_SAMPLER_[U/V/W]_[MIRROR/CLAMP]` - Mirror or clamp to edge wrap /// mode. /// - `BGFX_SAMPLER_[MIN/MAG/MIP]_[POINT/ANISOTROPIC]` - Point or anisotropic /// sampling. fn bgfx_encoder_set_texture(bgfx_encoder_t* _this, uint8_t _stage, bgfx_uniform_handle_t _sampler, bgfx_texture_handle_t _handle, uint32_t _flags); = enum {}; /// Submit an empty primitive for rendering. Uniforms and draw state /// will be applied but no geometry will be submitted. Useful in cases /// when no other draw/compute primitive is submitted to view, but it's /// desired to execute clear view. /// @remark /// These empty draw calls will sort before ordinary draw calls. /// @param[in] _id View id. fn bgfx_encoder_touch(bgfx_encoder_t* _this, bgfx_view_id_t _id); = enum {}; /// Submit primitive for rendering. /// @param[in] _id View id. /// @param[in] _program Program. /// @param[in] _depth Depth for sorting. /// @param[in] _flags Discard or preserve states. See `BGFX_DISCARD_*`. fn bgfx_encoder_submit(bgfx_encoder_t* _this, bgfx_view_id_t _id, bgfx_program_handle_t _program, uint32_t _depth, uint8_t _flags); = enum {}; /// Submit primitive with occlusion query for rendering. /// @param[in] _id View id. /// @param[in] _program Program. /// @param[in] _occlusionQuery Occlusion query. /// @param[in] _depth Depth for sorting. /// @param[in] _flags Discard or preserve states. See `BGFX_DISCARD_*`. fn bgfx_encoder_submit_occlusion_query(bgfx_encoder_t* _this, bgfx_view_id_t _id, bgfx_program_handle_t _program, bgfx_occlusion_query_handle_t _occlusionQuery, uint32_t _depth, uint8_t _flags); = enum {}; /// Submit primitive for rendering with index and instance data info from /// indirect buffer. /// @param[in] _id View id. /// @param[in] _program Program. 
/// @param[in] _indirectHandle Indirect buffer. /// @param[in] _start First element in indirect buffer. /// @param[in] _num Number of dispatches. /// @param[in] _depth Depth for sorting. /// @param[in] _flags Discard or preserve states. See `BGFX_DISCARD_*`. fn bgfx_encoder_submit_indirect(bgfx_encoder_t* _this, bgfx_view_id_t _id, bgfx_program_handle_t _program, bgfx_indirect_buffer_handle_t _indirectHandle, uint16_t _start, uint16_t _num, uint32_t _depth, uint8_t _flags); = enum {}; /// Set compute index buffer. /// @param[in] _stage Compute stage. /// @param[in] _handle Index buffer handle. /// @param[in] _access Buffer access. See `Access::Enum`. fn bgfx_encoder_set_compute_index_buffer(bgfx_encoder_t* _this, uint8_t _stage, bgfx_index_buffer_handle_t _handle, bgfx_access_t _access); = enum {}; /// Set compute vertex buffer. /// @param[in] _stage Compute stage. /// @param[in] _handle Vertex buffer handle. /// @param[in] _access Buffer access. See `Access::Enum`. fn bgfx_encoder_set_compute_vertex_buffer(bgfx_encoder_t* _this, uint8_t _stage, bgfx_vertex_buffer_handle_t _handle, bgfx_access_t _access); = enum {}; /// Set compute dynamic index buffer. /// @param[in] _stage Compute stage. /// @param[in] _handle Dynamic index buffer handle. /// @param[in] _access Buffer access. See `Access::Enum`. fn bgfx_encoder_set_compute_dynamic_index_buffer(bgfx_encoder_t* _this, uint8_t _stage, bgfx_dynamic_index_buffer_handle_t _handle, bgfx_access_t _access); = enum {}; /// Set compute dynamic vertex buffer. /// @param[in] _stage Compute stage. /// @param[in] _handle Dynamic vertex buffer handle. /// @param[in] _access Buffer access. See `Access::Enum`. fn bgfx_encoder_set_compute_dynamic_vertex_buffer(bgfx_encoder_t* _this, uint8_t _stage, bgfx_dynamic_vertex_buffer_handle_t _handle, bgfx_access_t _access); = enum {}; /// Set compute indirect buffer. /// @param[in] _stage Compute stage. /// @param[in] _handle Indirect buffer handle. /// @param[in] _access Buffer access. 
See `Access::Enum`. fn bgfx_encoder_set_compute_indirect_buffer(bgfx_encoder_t* _this, uint8_t _stage, bgfx_indirect_buffer_handle_t _handle, bgfx_access_t _access); = enum {}; /// Set compute image from texture. /// @param[in] _stage Compute stage. /// @param[in] _handle Texture handle. /// @param[in] _mip Mip level. /// @param[in] _access Image access. See `Access::Enum`. /// @param[in] _format Texture format. See: `TextureFormat::Enum`. fn bgfx_encoder_set_image(bgfx_encoder_t* _this, uint8_t _stage, bgfx_texture_handle_t _handle, uint8_t _mip, bgfx_access_t _access, bgfx_texture_format_t _format); = enum {}; /// Dispatch compute. /// @param[in] _id View id. /// @param[in] _program Compute program. /// @param[in] _numX Number of groups X. /// @param[in] _numY Number of groups Y. /// @param[in] _numZ Number of groups Z. /// @param[in] _flags Discard or preserve states. See `BGFX_DISCARD_*`. fn bgfx_encoder_dispatch(bgfx_encoder_t* _this, bgfx_view_id_t _id, bgfx_program_handle_t _program, uint32_t _numX, uint32_t _numY, uint32_t _numZ, uint8_t _flags); = enum {}; /// Dispatch compute indirect. /// @param[in] _id View id. /// @param[in] _program Compute program. /// @param[in] _indirectHandle Indirect buffer. /// @param[in] _start First element in indirect buffer. /// @param[in] _num Number of dispatches. /// @param[in] _flags Discard or preserve states. See `BGFX_DISCARD_*`. fn bgfx_encoder_dispatch_indirect(bgfx_encoder_t* _this, bgfx_view_id_t _id, bgfx_program_handle_t _program, bgfx_indirect_buffer_handle_t _indirectHandle, uint16_t _start, uint16_t _num, uint8_t _flags); = enum {}; /// Discard previously set state for draw or compute call. /// @param[in] _flags Discard or preserve states. See `BGFX_DISCARD_*`. fn bgfx_encoder_discard(bgfx_encoder_t* _this, uint8_t _flags); = enum {}; /// Blit 2D texture region between two 2D textures. /// @attention Destination texture must be created with `BGFX_TEXTURE_BLIT_DST` flag. 
/// @attention Availability depends on: `BGFX_CAPS_TEXTURE_BLIT`. /// @param[in] _id View id. /// @param[in] _dst Destination texture handle. /// @param[in] _dstMip Destination texture mip level. /// @param[in] _dstX Destination texture X position. /// @param[in] _dstY Destination texture Y position. /// @param[in] _dstZ If texture is 2D this argument should be 0. If destination texture is cube /// this argument represents destination texture cube face. For 3D texture this argument /// represents destination texture Z position. /// @param[in] _src Source texture handle. /// @param[in] _srcMip Source texture mip level. /// @param[in] _srcX Source texture X position. /// @param[in] _srcY Source texture Y position. /// @param[in] _srcZ If texture is 2D this argument should be 0. If source texture is cube /// this argument represents source texture cube face. For 3D texture this argument /// represents source texture Z position. /// @param[in] _width Width of region. /// @param[in] _height Height of region. /// @param[in] _depth If texture is 3D this argument represents depth of region, otherwise it's /// unused. fn bgfx_encoder_blit(bgfx_encoder_t* _this, bgfx_view_id_t _id, bgfx_texture_handle_t _dst, uint8_t _dstMip, uint16_t _dstX, uint16_t _dstY, uint16_t _dstZ, bgfx_texture_handle_t _src, uint8_t _srcMip, uint16_t _srcX, uint16_t _srcY, uint16_t _srcZ, uint16_t _width, uint16_t _height, uint16_t _depth); = enum {}; /// Request screen shot of window back buffer. /// @remarks /// `bgfx::CallbackI::screenShot` must be implemented. /// @attention Frame buffer handle must be created with OS' target native window handle. /// @param[in] _handle Frame buffer handle. If handle is `BGFX_INVALID_HANDLE` request will be /// made for main window back buffer. /// @param[in] _filePath Will be passed to `bgfx::CallbackI::screenShot` callback. fn bgfx_request_screen_shot(bgfx_frame_buffer_handle_t _handle, const char* _filePath); = enum {}; /// Set platform data. 
/// @warning Must be called before `bgfx::init`. /// @param[in] _data Platform data. fn bgfx_set_platform_data(const bgfx_platform_data_t * _data); = enum {}; /// Sets a debug marker. This allows you to group graphics calls together for easy browsing in /// graphics debugging tools. /// @param[in] _marker Marker string. fn bgfx_set_marker(const char* _marker); = enum {}; /// Set render states for draw primitive. /// @remarks /// 1. To setup more complex states use: /// `BGFX_STATE_ALPHA_REF(_ref)`, /// `BGFX_STATE_POINT_SIZE(_size)`, /// `BGFX_STATE_BLEND_FUNC(_src, _dst)`, /// `BGFX_STATE_BLEND_FUNC_SEPARATE(_srcRGB, _dstRGB, _srcA, _dstA)`, /// `BGFX_STATE_BLEND_EQUATION(_equation)`, /// `BGFX_STATE_BLEND_EQUATION_SEPARATE(_equationRGB, _equationA)` /// 2. `BGFX_STATE_BLEND_EQUATION_ADD` is set when no other blend /// equation is specified. /// @param[in] _state State flags. Default state for primitive type is /// triangles. See: `BGFX_STATE_DEFAULT`. /// - `BGFX_STATE_DEPTH_TEST_*` - Depth test function. /// - `BGFX_STATE_BLEND_*` - See remark 1 about BGFX_STATE_BLEND_FUNC. /// - `BGFX_STATE_BLEND_EQUATION_*` - See remark 2. /// - `BGFX_STATE_CULL_*` - Backface culling mode. /// - `BGFX_STATE_WRITE_*` - Enable R, G, B, A or Z write. /// - `BGFX_STATE_MSAA` - Enable hardware multisample antialiasing. /// - `BGFX_STATE_PT_[TRISTRIP/LINES/POINTS]` - Primitive type. /// @param[in] _rgba Sets blend factor used by `BGFX_STATE_BLEND_FACTOR` and /// `BGFX_STATE_BLEND_INV_FACTOR` blend modes. fn bgfx_set_state(uint64_t _state, uint32_t _rgba); = enum {}; /// Set condition for rendering. /// @param[in] _handle Occlusion query handle. /// @param[in] _visible Render if occlusion query is visible. fn bgfx_set_condition(bgfx_occlusion_query_handle_t _handle, bool _visible); = enum {}; /// Set stencil test state. /// @param[in] _fstencil Front stencil state. /// @param[in] _bstencil Back stencil state. 
If back is set to `BGFX_STENCIL_NONE` /// _fstencil is applied to both front and back facing primitives. fn bgfx_set_stencil(uint32_t _fstencil, uint32_t _bstencil); = enum {}; /// Set scissor from cache for draw primitive. /// @remark /// To scissor for all primitives in view see `bgfx::setViewScissor`. /// @param[in] _cache Index in scissor cache. fn bgfx_set_scissor_cached(uint16_t _cache); = enum {}; /// Set model matrix from matrix cache for draw primitive. /// @param[in] _cache Index in matrix cache. /// @param[in] _num Number of matrices from cache. fn bgfx_set_transform_cached(uint32_t _cache, uint16_t _num); = enum {}; /// Set shader uniform parameter for draw primitive. /// @param[in] _handle Uniform. /// @param[in] _value Pointer to uniform data. /// @param[in] _num Number of elements. Passing `UINT16_MAX` will /// use the _num passed on uniform creation. fn bgfx_set_uniform(bgfx_uniform_handle_t _handle, const void* _value, uint16_t _num); = enum {}; /// Set index buffer for draw primitive. /// @param[in] _handle Index buffer. /// @param[in] _firstIndex First index to render. /// @param[in] _numIndices Number of indices to render. fn bgfx_set_index_buffer(bgfx_index_buffer_handle_t _handle, uint32_t _firstIndex, uint32_t _numIndices); = enum {}; /// Set index buffer for draw primitive. /// @param[in] _handle Dynamic index buffer. /// @param[in] _firstIndex First index to render. /// @param[in] _numIndices Number of indices to render. fn bgfx_set_dynamic_index_buffer(bgfx_dynamic_index_buffer_handle_t _handle, uint32_t _firstIndex, uint32_t _numIndices); = enum {}; /// Set index buffer for draw primitive. /// @param[in] _tib Transient index buffer. /// @param[in] _firstIndex First index to render. /// @param[in] _numIndices Number of indices to render. fn bgfx_set_transient_index_buffer(const bgfx_transient_index_buffer_t* _tib, uint32_t _firstIndex, uint32_t _numIndices); = enum {}; /// Set vertex buffer for draw primitive. 
/// @param[in] _stream Vertex stream. /// @param[in] _handle Vertex buffer. /// @param[in] _startVertex First vertex to render. /// @param[in] _numVertices Number of vertices to render. fn bgfx_set_vertex_buffer(uint8_t _stream, bgfx_vertex_buffer_handle_t _handle, uint32_t _startVertex, uint32_t _numVertices); = enum {}; /// Set vertex buffer for draw primitive. /// @param[in] _stream Vertex stream. /// @param[in] _handle Vertex buffer. /// @param[in] _startVertex First vertex to render. /// @param[in] _numVertices Number of vertices to render. /// @param[in] _layoutHandle Vertex layout for aliasing vertex buffer. If invalid /// handle is used, vertex layout used for creation /// of vertex buffer will be used. fn bgfx_set_vertex_buffer_with_layout(uint8_t _stream, bgfx_vertex_buffer_handle_t _handle, uint32_t _startVertex, uint32_t _numVertices, bgfx_vertex_layout_handle_t _layoutHandle); = enum {}; /// Set vertex buffer for draw primitive. /// @param[in] _stream Vertex stream. /// @param[in] _handle Dynamic vertex buffer. /// @param[in] _startVertex First vertex to render. /// @param[in] _numVertices Number of vertices to render. fn bgfx_set_dynamic_vertex_buffer(uint8_t _stream, bgfx_dynamic_vertex_buffer_handle_t _handle, uint32_t _startVertex, uint32_t _numVertices); = enum {}; /// Set vertex buffer for draw primitive. /// @param[in] _stream Vertex stream. /// @param[in] _handle Dynamic vertex buffer. /// @param[in] _startVertex First vertex to render. /// @param[in] _numVertices Number of vertices to render. /// @param[in] _layoutHandle Vertex layout for aliasing vertex buffer. If invalid /// handle is used, vertex layout used for creation /// of vertex buffer will be used. fn bgfx_set_dynamic_vertex_buffer_with_layout(uint8_t _stream, bgfx_dynamic_vertex_buffer_handle_t _handle, uint32_t _startVertex, uint32_t _numVertices, bgfx_vertex_layout_handle_t _layoutHandle); = enum {}; /// Set vertex buffer for draw primitive. /// @param[in] _stream Vertex stream. 
/// @param[in] _tvb Transient vertex buffer. /// @param[in] _startVertex First vertex to render. /// @param[in] _numVertices Number of vertices to render. fn bgfx_set_transient_vertex_buffer(uint8_t _stream, const bgfx_transient_vertex_buffer_t* _tvb, uint32_t _startVertex, uint32_t _numVertices); = enum {}; /// Set vertex buffer for draw primitive. /// @param[in] _stream Vertex stream. /// @param[in] _tvb Transient vertex buffer. /// @param[in] _startVertex First vertex to render. /// @param[in] _numVertices Number of vertices to render. /// @param[in] _layoutHandle Vertex layout for aliasing vertex buffer. If invalid /// handle is used, vertex layout used for creation /// of vertex buffer will be used. fn bgfx_set_transient_vertex_buffer_with_layout(uint8_t _stream, const bgfx_transient_vertex_buffer_t* _tvb, uint32_t _startVertex, uint32_t _numVertices, bgfx_vertex_layout_handle_t _layoutHandle); = enum {}; /// Set number of vertices for auto generated vertices use in conjuction /// with gl_VertexID. /// @attention Availability depends on: `BGFX_CAPS_VERTEX_ID`. /// @param[in] _numVertices Number of vertices. fn bgfx_set_vertex_count(uint32_t _numVertices); = enum {}; /// Set instance data buffer for draw primitive. /// @param[in] _idb Transient instance data buffer. /// @param[in] _start First instance data. /// @param[in] _num Number of data instances. fn bgfx_set_instance_data_buffer(const bgfx_instance_data_buffer_t* _idb, uint32_t _start, uint32_t _num); = enum {}; /// Set instance data buffer for draw primitive. /// @param[in] _handle Vertex buffer. /// @param[in] _startVertex First instance data. /// @param[in] _num Number of data instances. /// Set instance data buffer for draw primitive. fn bgfx_set_instance_data_from_vertex_buffer(bgfx_vertex_buffer_handle_t _handle, uint32_t _startVertex, uint32_t _num); = enum {}; /// Set instance data buffer for draw primitive. /// @param[in] _handle Dynamic vertex buffer. 
/// @param[in] _startVertex First instance data. /// @param[in] _num Number of data instances. fn bgfx_set_instance_data_from_dynamic_vertex_buffer(bgfx_dynamic_vertex_buffer_handle_t _handle, uint32_t _startVertex, uint32_t _num); = enum {}; /// Set number of instances for auto generated instances use in conjuction /// with gl_InstanceID. /// @attention Availability depends on: `BGFX_CAPS_VERTEX_ID`. /// @param[in] _numInstances fn bgfx_set_instance_count(uint32_t _numInstances); = enum {}; /// Set texture stage for draw primitive. /// @param[in] _stage Texture unit. /// @param[in] _sampler Program sampler. /// @param[in] _handle Texture handle. /// @param[in] _flags Texture sampling mode. Default value UINT32_MAX uses /// texture sampling settings from the texture. /// - `BGFX_SAMPLER_[U/V/W]_[MIRROR/CLAMP]` - Mirror or clamp to edge wrap /// mode. /// - `BGFX_SAMPLER_[MIN/MAG/MIP]_[POINT/ANISOTROPIC]` - Point or anisotropic /// sampling. fn bgfx_set_texture(uint8_t _stage, bgfx_uniform_handle_t _sampler, bgfx_texture_handle_t _handle, uint32_t _flags); = enum {}; /// Submit an empty primitive for rendering. Uniforms and draw state /// will be applied but no geometry will be submitted. /// @remark /// These empty draw calls will sort before ordinary draw calls. /// @param[in] _id View id. fn bgfx_touch(bgfx_view_id_t _id); = enum {}; /// Submit primitive for rendering. /// @param[in] _id View id. /// @param[in] _program Program. /// @param[in] _depth Depth for sorting. /// @param[in] _flags Which states to discard for next draw. See BGFX_DISCARD_ fn bgfx_submit(bgfx_view_id_t _id, bgfx_program_handle_t _program, uint32_t _depth, uint8_t _flags); = enum {}; /// Submit primitive with occlusion query for rendering. /// @param[in] _id View id. /// @param[in] _program Program. /// @param[in] _occlusionQuery Occlusion query. /// @param[in] _depth Depth for sorting. /// @param[in] _flags Which states to discard for next draw. 
See BGFX_DISCARD_ fn bgfx_submit_occlusion_query(bgfx_view_id_t _id, bgfx_program_handle_t _program, bgfx_occlusion_query_handle_t _occlusionQuery, uint32_t _depth, uint8_t _flags); = enum {}; /// Submit primitive for rendering with index and instance data info from /// indirect buffer. /// @param[in] _id View id. /// @param[in] _program Program. /// @param[in] _indirectHandle Indirect buffer. /// @param[in] _start First element in indirect buffer. /// @param[in] _num Number of dispatches. /// @param[in] _depth Depth for sorting. /// @param[in] _flags Which states to discard for next draw. See BGFX_DISCARD_ fn bgfx_submit_indirect(bgfx_view_id_t _id, bgfx_program_handle_t _program, bgfx_indirect_buffer_handle_t _indirectHandle, uint16_t _start, uint16_t _num, uint32_t _depth, uint8_t _flags); = enum {}; /// Set compute index buffer. /// @param[in] _stage Compute stage. /// @param[in] _handle Index buffer handle. /// @param[in] _access Buffer access. See `Access::Enum`. fn bgfx_set_compute_index_buffer(uint8_t _stage, bgfx_index_buffer_handle_t _handle, bgfx_access_t _access); = enum {}; /// Set compute vertex buffer. /// @param[in] _stage Compute stage. /// @param[in] _handle Vertex buffer handle. /// @param[in] _access Buffer access. See `Access::Enum`. fn bgfx_set_compute_vertex_buffer(uint8_t _stage, bgfx_vertex_buffer_handle_t _handle, bgfx_access_t _access); = enum {}; /// Set compute dynamic index buffer. /// @param[in] _stage Compute stage. /// @param[in] _handle Dynamic index buffer handle. /// @param[in] _access Buffer access. See `Access::Enum`. fn bgfx_set_compute_dynamic_index_buffer(uint8_t _stage, bgfx_dynamic_index_buffer_handle_t _handle, bgfx_access_t _access); = enum {}; /// Set compute dynamic vertex buffer. /// @param[in] _stage Compute stage. /// @param[in] _handle Dynamic vertex buffer handle. /// @param[in] _access Buffer access. See `Access::Enum`. 
fn bgfx_set_compute_dynamic_vertex_buffer(uint8_t _stage, bgfx_dynamic_vertex_buffer_handle_t _handle, bgfx_access_t _access); = enum {}; /// Set compute indirect buffer. /// @param[in] _stage Compute stage. /// @param[in] _handle Indirect buffer handle. /// @param[in] _access Buffer access. See `Access::Enum`. fn bgfx_set_compute_indirect_buffer(uint8_t _stage, bgfx_indirect_buffer_handle_t _handle, bgfx_access_t _access); = enum {}; /// Set compute image from texture. /// @param[in] _stage Compute stage. /// @param[in] _handle Texture handle. /// @param[in] _mip Mip level. /// @param[in] _access Image access. See `Access::Enum`. /// @param[in] _format Texture format. See: `TextureFormat::Enum`. fn bgfx_set_image(uint8_t _stage, bgfx_texture_handle_t _handle, uint8_t _mip, bgfx_access_t _access, bgfx_texture_format_t _format); = enum {}; /// Dispatch compute. /// @param[in] _id View id. /// @param[in] _program Compute program. /// @param[in] _numX Number of groups X. /// @param[in] _numY Number of groups Y. /// @param[in] _numZ Number of groups Z. /// @param[in] _flags Discard or preserve states. See `BGFX_DISCARD_*`. fn bgfx_dispatch(bgfx_view_id_t _id, bgfx_program_handle_t _program, uint32_t _numX, uint32_t _numY, uint32_t _numZ, uint8_t _flags); = enum {}; /// Dispatch compute indirect. /// @param[in] _id View id. /// @param[in] _program Compute program. /// @param[in] _indirectHandle Indirect buffer. /// @param[in] _start First element in indirect buffer. /// @param[in] _num Number of dispatches. /// @param[in] _flags Discard or preserve states. See `BGFX_DISCARD_*`. fn bgfx_dispatch_indirect(bgfx_view_id_t _id, bgfx_program_handle_t _program, bgfx_indirect_buffer_handle_t _indirectHandle, uint16_t _start, uint16_t _num, uint8_t _flags); = enum {}; /// Discard previously set state for draw or compute call. /// @param[in] _flags Draw/compute states to discard. fn bgfx_discard(uint8_t _flags); = enum {}; /// Blit 2D texture region between two 2D textures. 
/// @attention Destination texture must be created with `BGFX_TEXTURE_BLIT_DST` flag. /// @attention Availability depends on: `BGFX_CAPS_TEXTURE_BLIT`. /// @param[in] _id View id. /// @param[in] _dst Destination texture handle. /// @param[in] _dstMip Destination texture mip level. /// @param[in] _dstX Destination texture X position. /// @param[in] _dstY Destination texture Y position. /// @param[in] _dstZ If texture is 2D this argument should be 0. If destination texture is cube /// this argument represents destination texture cube face. For 3D texture this argument /// represents destination texture Z position. /// @param[in] _src Source texture handle. /// @param[in] _srcMip Source texture mip level. /// @param[in] _srcX Source texture X position. /// @param[in] _srcY Source texture Y position. /// @param[in] _srcZ If texture is 2D this argument should be 0. If source texture is cube /// this argument represents source texture cube face. For 3D texture this argument /// represents source texture Z position. /// @param[in] _width Width of region. /// @param[in] _height Height of region. /// @param[in] _depth If texture is 3D this argument represents depth of region, otherwise it's /// unused. fn bgfx_blit(bgfx_view_id_t _id, bgfx_texture_handle_t _dst, uint8_t _dstMip, uint16_t _dstX, uint16_t _dstY, uint16_t _dstZ, bgfx_texture_handle_t _src, uint8_t _srcMip, uint16_t _srcX, uint16_t _srcY, uint16_t _srcZ, uint16_t _width, uint16_t _height, uint16_t _depth); = enum {};
lib/bgfx.zig
const std = @import("std");
const mem = std.mem;
const net = std.net;

const PrimitiveReader = @import("primitive/reader.zig").PrimitiveReader;
const PrimitiveWriter = @import("primitive/writer.zig").PrimitiveWriter;

const testing = @import("testing.zig");

/// Kind of cluster-topology change reported by the server.
pub const TopologyChangeType = enum {
    NEW_NODE,
    REMOVED_NODE,
};

/// Node liveness transition reported by the server.
pub const StatusChangeType = enum {
    UP,
    DOWN,
};

/// What happened to a schema object.
pub const SchemaChangeType = enum {
    CREATED,
    UPDATED,
    DROPPED,
};

/// Which kind of schema object a schema-change event refers to.
/// The variant determines how many fields follow on the wire (see `SchemaChange.read`).
pub const SchemaChangeTarget = enum {
    KEYSPACE,
    TABLE,
    TYPE,
    FUNCTION,
    AGGREGATE,
};

/// Payload of a schema-change event. Which fields are meaningful depends
/// on the event's `SchemaChangeTarget`:
/// - KEYSPACE: only `keyspace`.
/// - TABLE / TYPE: `keyspace` and `object_name`.
/// - FUNCTION / AGGREGATE: all three, `arguments` being the signature's argument types.
pub const SchemaChangeOptions = struct {
    keyspace: []const u8,
    object_name: []const u8,
    // Null unless the target is FUNCTION or AGGREGATE.
    arguments: ?[]const []const u8,

    /// Returns options with empty strings and no arguments; fields are
    /// filled in afterwards by the caller.
    pub fn init() SchemaChangeOptions {
        return SchemaChangeOptions{
            .keyspace = &[_]u8{},
            .object_name = &[_]u8{},
            .arguments = null,
        };
    }
};

/// A node joined or left the cluster.
pub const TopologyChange = struct {
    type: TopologyChangeType,
    node_address: net.Address,
};

/// A node went up or down.
pub const StatusChange = struct {
    type: StatusChangeType,
    node_address: net.Address,
};

/// A schema object was created, updated or dropped.
pub const SchemaChange = struct {
    const Self = @This();

    type: SchemaChangeType,
    target: SchemaChangeTarget,
    options: SchemaChangeOptions,

    /// Decodes a schema-change event body from the reader.
    ///
    /// Wire layout (read strictly in this order): change-type string,
    /// target string, then target-dependent fields — keyspace, optionally
    /// object name, and for functions/aggregates an argument-type list.
    /// All strings are allocated with `allocator`; callers typically pass
    /// an arena so no per-field frees are needed here.
    /// NOTE(review): strings read for the two enums are not freed on the
    /// error paths below — fine with an arena, leaks otherwise; confirm
    /// against caller conventions.
    ///
    /// Errors: `InvalidSchemaChangeType` / `InvalidSchemaChangeTarget` on
    /// unknown enum strings, plus whatever `pr` can return.
    pub fn read(allocator: *mem.Allocator, pr: *PrimitiveReader) !Self {
        var change = Self{
            .type = undefined,
            .target = undefined,
            .options = undefined,
        };

        change.type = std.meta.stringToEnum(SchemaChangeType, (try pr.readString(allocator))) orelse return error.InvalidSchemaChangeType;
        change.target = std.meta.stringToEnum(SchemaChangeTarget, (try pr.readString(allocator))) orelse return error.InvalidSchemaChangeTarget;

        change.options = SchemaChangeOptions.init();

        // The target dictates how many more fields are present on the wire.
        switch (change.target) {
            .KEYSPACE => {
                change.options.keyspace = try pr.readString(allocator);
            },
            .TABLE, .TYPE => {
                change.options.keyspace = try pr.readString(allocator);
                change.options.object_name = try pr.readString(allocator);
            },
            .FUNCTION, .AGGREGATE => {
                change.options.keyspace = try pr.readString(allocator);
                change.options.object_name = try pr.readString(allocator);
                change.options.arguments = try pr.readStringList(allocator);
            },
        }

        return change;
    }
};

/// Tag for the `Event` union; mirrors the server-side event names.
pub const EventType = enum {
    TOPOLOGY_CHANGE,
    STATUS_CHANGE,
    SCHEMA_CHANGE,
};

/// A server-pushed event, tagged by `EventType`.
pub const Event = union(EventType) {
    TOPOLOGY_CHANGE: TopologyChange,
    STATUS_CHANGE: StatusChange,
    SCHEMA_CHANGE: SchemaChange,
};

test "schema change options" {
    var arena = testing.arenaAllocator();
    defer arena.deinit();

    var options = SchemaChangeOptions.init();

    options.keyspace = try mem.dupe(&arena.allocator, u8, "foobar");
    options.object_name = try mem.dupe(&arena.allocator, u8, "barbaz");
    var arguments = try arena.allocator.alloc([]const u8, 4);
    var i: usize = 0;
    while (i < arguments.len) : (i += 1) {
        arguments[i] = try mem.dupe(&arena.allocator, u8, "hello");
    }
    options.arguments = arguments;
}
src/event.zig
//! Triangle-mesh container: CPU-side vertex attribute lists plus the GPU
//! vertex array they are uploaded into. The file itself is the struct
//! (`Self = @This()`), zplay's `TitleCase.zig` convention.

const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const zp = @import("../../zplay.zig");
const drawcall = zp.graphics.common.drawcall;
const VertexArray = zp.graphics.common.VertexArray;
const alg = zp.deps.alg;
const Vec2 = alg.Vec2;
const Vec3 = alg.Vec3;
const Vec4 = alg.Vec4;
const Self = @This();

// Fixed buffer-slot indices inside the vertex array (vbo_num slots total).
pub const vbo_positions = 0;
pub const vbo_normals = 1;
pub const vbo_texcoords = 2;
pub const vbo_colors = 3;
pub const vbo_tangents = 4;
pub const vbo_indices = 5;
pub const vbo_num = 6;

/// vertex array
/// each vertex has multiple properties (see VertexAttribute)
vertex_array: VertexArray = undefined,

/// primitive type
primitive_type: drawcall.PrimitiveType = undefined,

/// vertex attributes; only `positions` is mandatory, the rest are optional
positions: std.ArrayList(Vec3) = undefined,
indices: ?std.ArrayList(u32) = null,
normals: ?std.ArrayList(Vec3) = null,
texcoords: ?std.ArrayList(Vec2) = null,
colors: ?std.ArrayList(Vec4) = null,
tangents: ?std.ArrayList(Vec4) = null,
// whether deinit() should free the attribute lists
owns_data: bool = undefined,

/// allocate and initialize Mesh instance
/// Copies every given slice into owned ArrayLists (`owns_data = true`).
/// Does NOT upload to the GPU; call `setup` afterwards.
/// NOTE(review): if a later initCapacity fails, the lists (and the
/// VertexArray) created before it are leaked — no errdefer here. Acceptable
/// only if callers treat allocation failure as fatal; confirm.
pub fn init(
    allocator: std.mem.Allocator,
    primitive_type: drawcall.PrimitiveType,
    positions: []const Vec3,
    indices: ?[]const u32,
    normals: ?[]const Vec3,
    texcoords: ?[]const Vec2,
    colors: ?[]const Vec4,
    tangents: ?[]const Vec4,
) !Self {
    var self: Self = .{
        .primitive_type = primitive_type,
        .vertex_array = VertexArray.init(vbo_num),
        .positions = try std.ArrayList(Vec3).initCapacity(allocator, positions.len),
        .owns_data = true,
    };
    self.positions.appendSliceAssumeCapacity(positions);
    if (indices) |ids| {
        self.indices = try std.ArrayList(u32).initCapacity(allocator, ids.len);
        self.indices.?.appendSliceAssumeCapacity(ids);
    }
    if (normals) |ns| {
        self.normals = try std.ArrayList(Vec3).initCapacity(allocator, ns.len);
        self.normals.?.appendSliceAssumeCapacity(ns);
    }
    if (texcoords) |ts| {
        self.texcoords = try std.ArrayList(Vec2).initCapacity(allocator, ts.len);
        self.texcoords.?.appendSliceAssumeCapacity(ts);
    }
    if (colors) |cs| {
        self.colors = try std.ArrayList(Vec4).initCapacity(allocator, cs.len);
        self.colors.?.appendSliceAssumeCapacity(cs);
    }
    if (tangents) |ts| {
        self.tangents = try std.ArrayList(Vec4).initCapacity(allocator, ts.len);
        self.tangents.?.appendSliceAssumeCapacity(ts);
    }
    return self;
}

/// create Mesh, maybe taking ownership of given arrays
/// The lists are adopted as-is (no copy); `take_ownership` decides whether
/// `deinit` frees them. GPU upload still requires a `setup` call.
pub fn fromArrayLists(
    primitive_type: drawcall.PrimitiveType,
    positions: std.ArrayList(Vec3),
    indices: ?std.ArrayList(u32),
    normals: ?std.ArrayList(Vec3),
    texcoords: ?std.ArrayList(Vec2),
    colors: ?std.ArrayList(Vec4),
    tangents: ?std.ArrayList(Vec4),
    take_ownership: bool,
) Self {
    var mesh: Self = .{
        .primitive_type = primitive_type,
        .vertex_array = VertexArray.init(vbo_num),
        .positions = positions,
        .normals = normals,
        .texcoords = texcoords,
        .colors = colors,
        .tangents = tangents,
        .indices = indices,
        .owns_data = take_ownership,
    };
    return mesh;
}

/// setup vertex array's data
/// Uploads every present attribute list into its fixed vbo slot
/// (static_draw). NOTE(review): presumably requires a current GL context —
/// confirm against VertexArray's contract.
pub fn setup(self: Self) void {
    self.vertex_array.use();
    defer self.vertex_array.disuse();
    self.vertex_array.bufferData(vbo_positions, Vec3, self.positions.items, .array_buffer, .static_draw);
    if (self.indices) |ids| {
        self.vertex_array.bufferData(vbo_indices, u32, ids.items, .element_array_buffer, .static_draw);
    }
    if (self.normals) |ns| {
        self.vertex_array.bufferData(vbo_normals, Vec3, ns.items, .array_buffer, .static_draw);
    }
    if (self.texcoords) |ts| {
        self.vertex_array.bufferData(vbo_texcoords, Vec2, ts.items, .array_buffer, .static_draw);
    }
    if (self.colors) |cs| {
        self.vertex_array.bufferData(vbo_colors, Vec4, cs.items, .array_buffer, .static_draw);
    }
    if (self.tangents) |ts| {
        self.vertex_array.bufferData(vbo_tangents, Vec4, ts.items, .array_buffer, .static_draw);
    }
}

/// free resources
/// Always releases the GPU vertex array; frees the CPU lists only when this
/// mesh owns them (see `init` / `fromArrayLists`).
pub fn deinit(self: Self) void {
    self.vertex_array.deinit();
    if (self.owns_data) {
        self.positions.deinit();
        if (self.indices) |ids| ids.deinit();
        if (self.normals) |ns| ns.deinit();
        if (self.texcoords) |ts| ts.deinit();
        if (self.colors) |cs| cs.deinit();
        if (self.tangents) |ts| ts.deinit();
    }
}

// generate a quad: w*h rectangle in the z=0 plane, centered on the origin,
// two indexed triangles, normals facing Vec3.forward(). Calls setup().
pub fn genQuad(
    allocator: std.mem.Allocator,
    w: f32,
    h: f32,
) !Self {
    const w2 = w / 2;
    const h2 = h / 2;
    const positions: [4]Vec3 = .{
        Vec3.new(-w2, -h2, 0),
        Vec3.new(w2, -h2, 0),
        Vec3.new(w2, h2, 0),
        Vec3.new(-w2, h2, 0),
    };
    const normals: [4]Vec3 = .{
        Vec3.forward(),
        Vec3.forward(),
        Vec3.forward(),
        Vec3.forward(),
    };
    const texcoords: [4]Vec2 = .{
        Vec2.new(0, 0),
        Vec2.new(1, 0),
        Vec2.new(1, 1),
        Vec2.new(0, 1),
    };
    const indices: [6]u32 = .{
        0, 1, 2, 0, 2, 3,
    };
    const mesh = try init(
        allocator,
        .triangles,
        &positions,
        &indices,
        &normals,
        &texcoords,
        null,
        null,
    );
    mesh.setup();
    return mesh;
}

// generate a cube (actually a w*d*h box centered on the origin);
// 36 non-indexed vertices so each face gets flat normals. Calls setup().
pub fn genCube(
    allocator: std.mem.Allocator,
    w: f32,
    d: f32,
    h: f32,
) !Self {
    assert(w > 0 and d > 0 and h > 0);
    const w2 = w / 2;
    const d2 = d / 2;
    const h2 = h / 2;
    // the 8 corners; faces below index into this table
    const vs: [8]Vec3 = .{
        Vec3.new(w2, h2, d2),
        Vec3.new(w2, h2, -d2),
        Vec3.new(-w2, h2, -d2),
        Vec3.new(-w2, h2, d2),
        Vec3.new(w2, -h2, d2),
        Vec3.new(w2, -h2, -d2),
        Vec3.new(-w2, -h2, -d2),
        Vec3.new(-w2, -h2, d2),
    };
    const positions: [36]Vec3 = .{
        vs[0], vs[1], vs[2], vs[0], vs[2], vs[3], // top
        vs[4], vs[7], vs[6], vs[4], vs[6], vs[5], // bottom
        vs[6], vs[7], vs[3], vs[6], vs[3], vs[2], // left
        vs[4], vs[5], vs[1], vs[4], vs[1], vs[0], // right
        vs[7], vs[4], vs[0], vs[7], vs[0], vs[3], // front
        vs[5], vs[6], vs[2], vs[5], vs[2], vs[1], // back
    };
    const up = Vec3.up();
    const down = Vec3.down();
    const left = Vec3.left();
    const right = Vec3.right();
    const forward = Vec3.forward();
    const back = Vec3.back();
    const normals: [36]Vec3 = .{
        up, up, up, up, up, up, // top
        down, down, down, down, down, down, // bottom
        left, left, left, left, left, left, // left
        right, right, right, right, right, right, // right
        forward, forward, forward, forward, forward, forward, // front
        back, back, back, back, back, back, // back
    };
    // same uv layout reused on every face
    const cs: [4]Vec2 = .{
        Vec2.new(0, 0),
        Vec2.new(1, 0),
        Vec2.new(1, 1),
        Vec2.new(0, 1),
    };
    var texcoords: [36]Vec2 = .{
        cs[0], cs[1], cs[2], cs[0], cs[2], cs[3], // top
        cs[0], cs[1], cs[2], cs[0], cs[2], cs[3], // bottom
        cs[0], cs[1], cs[2], cs[0], cs[2], cs[3], // left
        cs[0], cs[1], cs[2], cs[0], cs[2], cs[3], // right
        cs[0], cs[1], cs[2], cs[0], cs[2], cs[3], // front
        cs[0], cs[1], cs[2], cs[0], cs[2], cs[3], // back
    };
    const mesh = try init(
        allocator,
        .triangles,
        &positions,
        null,
        &normals,
        &texcoords,
        null,
        null,
    );
    mesh.setup();
    return mesh;
}

// generate a sphere: (stack_count+1)*(sector_count+1) vertices laid out in
// latitude/longitude order, indexed triangles (poles use one triangle per
// sector). The mesh takes ownership of the lists. Calls setup().
pub fn genSphere(
    allocator: std.mem.Allocator,
    radius: f32,
    sector_count: u32,
    stack_count: u32,
) !Self {
    assert(radius > 0 and sector_count > 0 and stack_count > 0);
    const attrib_count = (stack_count + 1) * (sector_count + 1);
    var positions = try std.ArrayList(Vec3).initCapacity(
        allocator,
        attrib_count,
    );
    var normals = try std.ArrayList(Vec3).initCapacity(
        allocator,
        attrib_count,
    );
    var texcoords = try std.ArrayList(Vec2).initCapacity(
        allocator,
        attrib_count,
    );
    // (stack_count-2) full stacks * 6 + 2 pole stacks * 3 per sector
    var indices = try std.ArrayList(u32).initCapacity(
        allocator,
        (stack_count - 1) * sector_count * 6,
    );
    var sector_step = math.pi * 2.0 / @intToFloat(f32, sector_count);
    var stack_step = math.pi / @intToFloat(f32, stack_count);
    var radius_inv = 1.0 / radius;

    // generate vertex attributes
    var i: u32 = 0;
    while (i <= stack_count) : (i += 1) {
        // starting from pi/2 to -pi/2
        var stack_angle = math.pi / 2.0 - @intToFloat(f32, i) * stack_step;
        var xy = radius * math.cos(stack_angle);
        var z = radius * math.sin(stack_angle);

        var j: u32 = 0;
        while (j <= sector_count) : (j += 1) {
            // starting from 0 to 2pi
            var sector_angle = @intToFloat(f32, j) * sector_step;

            // position
            var x = xy * math.cos(sector_angle);
            var y = xy * math.sin(sector_angle);
            positions.appendAssumeCapacity(Vec3.new(x, y, z));

            // normal (unit vector pointing away from the center)
            normals.appendAssumeCapacity(Vec3.new(
                x * radius_inv,
                y * radius_inv,
                z * radius_inv,
            ));

            // tex coords
            var s = @intToFloat(f32, j) / @intToFloat(f32, sector_count);
            var t = @intToFloat(f32, i) / @intToFloat(f32, stack_count);
            texcoords.appendAssumeCapacity(Vec2.new(s, t));
        }
    }

    // generate vertex indices
    //  k1--k1+1
    //  |  / |
    //  | /  |
    //  k2--k2+1
    i = 0;
    while (i < stack_count) : (i += 1) {
        var k1 = i * (sector_count + 1); // beginning of current stack
        var k2 = k1 + sector_count + 1; // beginning of next stack

        var j: u32 = 0;
        while (j < sector_count) : ({
            j += 1;
            k1 += 1;
            k2 += 1;
        }) {
            // 2 triangles per sector excluding first and last stacks
            // k1 => k2 => k1+1
            if (i != 0) {
                indices.appendSliceAssumeCapacity(&.{ k1, k2, k1 + 1 });
            }

            // k1+1 => k2 => k2+1
            if (i != (stack_count - 1)) {
                indices.appendSliceAssumeCapacity(&.{ k1 + 1, k2, k2 + 1 });
            }
        }
    }

    var mesh = fromArrayLists(
        .triangles,
        positions,
        indices,
        normals,
        texcoords,
        null,
        null,
        true,
    );
    mesh.setup();
    return mesh;
}

// generate a cylinder (a cone/frustum when the radii differ) with its axis
// along z, centered on the origin: side stacks plus two triangle-fan caps.
// The mesh takes ownership of the lists. Calls setup().
pub fn genCylinder(
    allocator: std.mem.Allocator,
    height: f32,
    bottom_radius: f32,
    top_radius: f32,
    stack_count: u32,
    sector_count: u32,
) !Self {
    assert(height > 0 and (bottom_radius > 0 or top_radius > 0) and sector_count > 0 and stack_count > 0);
    // (stack_count+1) side rings + 2 cap rings, plus the 2 cap centers
    const attrib_count = (stack_count + 3) * (sector_count + 1) + 2;
    var positions = try std.ArrayList(Vec3).initCapacity(
        allocator,
        attrib_count,
    );
    var normals = try std.ArrayList(Vec3).initCapacity(
        allocator,
        attrib_count,
    );
    var texcoords = try std.ArrayList(Vec2).initCapacity(
        allocator,
        attrib_count,
    );
    // sides: stack_count*sector_count*6, caps: 2*sector_count*3
    var indices = try std.ArrayList(u32).initCapacity(
        allocator,
        (stack_count + 1) * sector_count * 6,
    );
    var sector_step = math.pi * 2.0 / @intToFloat(f32, sector_count);

    // unit circle positions (shared by every ring)
    var unit_circle = try std.ArrayList(Vec2).initCapacity(
        allocator,
        sector_count + 1,
    );
    defer unit_circle.deinit();
    var i: u32 = 0;
    while (i <= sector_count) : (i += 1) {
        var sector_angle = @intToFloat(f32, i) * sector_step;
        unit_circle.appendAssumeCapacity(Vec2.new(
            math.cos(sector_angle),
            math.sin(sector_angle),
        ));
    }

    // compute normals of side (tilted by the radius difference)
    var side_normals = try std.ArrayList(Vec3).initCapacity(
        allocator,
        sector_count + 1,
    );
    defer side_normals.deinit();
    var zangle = math.atan2(f32, bottom_radius - top_radius, height);
    i = 0;
    while (i <= sector_count) : (i += 1) {
        var sector_angle = @intToFloat(f32, i) * sector_step;
        side_normals.appendAssumeCapacity(Vec3.new(
            math.cos(zangle) * math.cos(sector_angle),
            math.cos(zangle) * math.sin(sector_angle),
            math.sin(zangle),
        ));
    }

    // sides: interpolate z and radius stack by stack
    i = 0;
    while (i <= stack_count) : (i += 1) {
        var step = @intToFloat(f32, i) / @intToFloat(f32, stack_count);
        var z = -(height * 0.5) + step * height;
        var radius = bottom_radius + step * (top_radius - bottom_radius);
        var t = 1.0 - step;
        var j: u32 = 0;
        while (j <= sector_count) : (j += 1) {
            positions.appendAssumeCapacity(Vec3.new(
                unit_circle.items[j].x * radius,
                unit_circle.items[j].y * radius,
                z,
            ));
            normals.appendAssumeCapacity(side_normals.items[j]);
            texcoords.appendAssumeCapacity(Vec2.new(
                @intToFloat(f32, j) / @intToFloat(f32, sector_count),
                t,
            ));
        }
    }

    // bottom cap: center vertex then the rim ring, normals pointing -z
    var bottom_index_offset = @intCast(u32, positions.items.len);
    var z = -height * 0.5;
    positions.appendAssumeCapacity(Vec3.new(0, 0, z));
    normals.appendAssumeCapacity(Vec3.new(0, 0, -1));
    texcoords.appendAssumeCapacity(Vec2.new(0.5, 0.5));
    i = 0;
    while (i <= sector_count) : (i += 1) {
        var x = unit_circle.items[i].x;
        var y = unit_circle.items[i].y;
        positions.appendAssumeCapacity(Vec3.new(x * bottom_radius, y * bottom_radius, z));
        normals.appendAssumeCapacity(Vec3.new(0, 0, -1));
        texcoords.appendAssumeCapacity(Vec2.new(-x * 0.5 + 0.5, -y * 0.5 + 0.5));
    }

    // top cap: same layout, normals pointing +z
    var top_index_offset = @intCast(u32, positions.items.len);
    z = height * 0.5;
    positions.appendAssumeCapacity(Vec3.new(0, 0, z));
    normals.appendAssumeCapacity(Vec3.new(0, 0, 1));
    texcoords.appendAssumeCapacity(Vec2.new(0.5, 0.5));
    i = 0;
    while (i <= sector_count) : (i += 1) {
        var x = unit_circle.items[i].x;
        var y = unit_circle.items[i].y;
        positions.appendAssumeCapacity(Vec3.new(x * top_radius, y * top_radius, z));
        normals.appendAssumeCapacity(Vec3.new(0, 0, 1));
        texcoords.appendAssumeCapacity(Vec2.new(x * 0.5 + 0.5, y * 0.5 + 0.5));
    }

    // indices: two triangles per side quad
    i = 0;
    while (i < stack_count) : (i += 1) {
        var k1 = i * (sector_count + 1);
        var k2 = k1 + sector_count + 1;
        var j: u32 = 0;
        while (j < sector_count) : ({
            j += 1;
            k1 += 1;
            k2 += 1;
        }) {
            indices.appendSliceAssumeCapacity(&.{ k1, k1 + 1, k2 });
            indices.appendSliceAssumeCapacity(&.{ k2, k1 + 1, k2 + 1 });
        }
    }
    // cap triangle fans (winding differs so both face outward)
    i = 0;
    while (i < sector_count) : (i += 1) {
        indices.appendSliceAssumeCapacity(&.{
            bottom_index_offset,
            bottom_index_offset + i + 2,
            bottom_index_offset + i + 1,
        });
        indices.appendSliceAssumeCapacity(&.{
            top_index_offset,
            top_index_offset + i + 1,
            top_index_offset + i + 2,
        });
    }

    var mesh = fromArrayLists(
        .triangles,
        positions,
        indices,
        normals,
        texcoords,
        null,
        null,
        true,
    );
    mesh.setup();
    return mesh;
}
src/graphics/3d/Mesh.zig
const buffer = @import("buffer.zig");
const std = @import("std");

/// Generated wire-format schema for the package-manager protocol.
/// Scalars use native-endian `writeIntNative`/`readIntNative`; enums are
/// written as their integer tag. Strings/arrays are length-prefixed (u32).
/// Optional fields use a byte tag per field, terminated by a 0 byte.
pub const TestSchema = struct {
    pub const PackageProvider = enum(u8) {
        _none,
        /// npm
        npm,
        /// git
        git,
        /// https
        https,
        /// tgz
        tgz,
        /// other
        other,
        _,

        /// Serialize as the tag name instead of the integer value.
        pub fn jsonStringify(self: *const @This(), opts: anytype, o: anytype) !void {
            return try std.json.stringify(@tagName(self), opts, o);
        }
    };

    pub const ExportsType = enum(u8) {
        _none,
        /// commonJs
        common_js,
        /// esModule
        es_module,
        /// browser
        browser,
        _,

        pub fn jsonStringify(self: *const @This(), opts: anytype, o: anytype) !void {
            return try std.json.stringify(@tagName(self), opts, o);
        }
    };

    pub const ExportsManifest = struct {
        /// source
        source: [][]u8,
        /// destination
        destination: [][]u8,
        /// exportType
        export_type: []ExportsType,

        /// Allocate a zeroed manifest and fill it from `reader`.
        pub fn decode(allocator: *std.mem.Allocator, reader: anytype) anyerror!ExportsManifest {
            var obj = std.mem.zeroes(ExportsManifest);
            try update(&obj, allocator, reader);
            return obj;
        }

        /// Read into an existing manifest, reusing element buffers whose
        /// length already matches.
        pub fn update(result: *ExportsManifest, allocator: *std.mem.Allocator, reader: anytype) anyerror!void {
            var length: usize = 0;
            {
                var array_count = try reader.readIntNative(u32);
                if (array_count != result.source.len) {
                    result.source = try allocator.alloc([]u8, array_count);
                    // Freshly allocated element slices are undefined; make them
                    // empty so the reuse check below reads a defined `.len`.
                    std.mem.set([]u8, result.source, std.mem.zeroes([]u8));
                }
                for (result.source) |_, j| {
                    // BUGFIX: encode() writes a length prefix before *every*
                    // element, so it must be read per element — previously it
                    // was read once for the whole array, corrupting any array
                    // whose elements differ in length.
                    length = try reader.readIntNative(u32);
                    if (result.source[j].len != length) {
                        result.source[j] = try allocator.alloc(u8, length);
                    }
                    _ = try reader.readAll(result.source[j]);
                }
            }
            {
                var array_count = try reader.readIntNative(u32);
                if (array_count != result.destination.len) {
                    result.destination = try allocator.alloc([]u8, array_count);
                    std.mem.set([]u8, result.destination, std.mem.zeroes([]u8));
                }
                for (result.destination) |_, j| {
                    // BUGFIX: per-element length prefix (see `source` above).
                    length = try reader.readIntNative(u32);
                    if (result.destination[j].len != length) {
                        result.destination[j] = try allocator.alloc(u8, length);
                    }
                    _ = try reader.readAll(result.destination[j]);
                }
            }
            length = try reader.readIntNative(u32);
            result.export_type = try allocator.alloc(ExportsType, length);
            {
                var j: usize = 0;
                while (j < length) : (j += 1) {
                    result.export_type[j] = try reader.readEnum(ExportsType, .Little);
                }
            }
            return;
        }

        /// Write the manifest in the format `update` expects.
        pub fn encode(result: *const @This(), writer: anytype) anyerror!void {
            var n: usize = 0;
            n = result.source.len;
            _ = try writer.writeIntNative(u32, @intCast(u32, n));
            {
                var j: usize = 0;
                while (j < n) : (j += 1) {
                    _ = try writer.writeIntNative(u32, @intCast(u32, result.source[j].len));
                    try writer.writeAll(result.source[j]);
                }
            }
            n = result.destination.len;
            _ = try writer.writeIntNative(u32, @intCast(u32, n));
            {
                var j: usize = 0;
                while (j < n) : (j += 1) {
                    _ = try writer.writeIntNative(u32, @intCast(u32, result.destination[j].len));
                    try writer.writeAll(result.destination[j]);
                }
            }
            n = result.export_type.len;
            _ = try writer.writeIntNative(u32, @intCast(u32, n));
            {
                var j: usize = 0;
                while (j < n) : (j += 1) {
                    try writer.writeByte(@enumToInt(result.export_type[j]));
                }
            }
            return;
        }
    };

    pub const Version = struct {
        /// major
        major: i32 = 0,
        /// minor
        minor: i32 = 0,
        /// patch
        patch: i32 = 0,
        /// pre
        pre: []u8,
        /// build
        build: []u8,

        /// Allocate a zeroed version and fill it from `reader`.
        pub fn decode(allocator: *std.mem.Allocator, reader: anytype) anyerror!Version {
            var obj = std.mem.zeroes(Version);
            try update(&obj, allocator, reader);
            return obj;
        }

        pub fn update(result: *Version, allocator: *std.mem.Allocator, reader: anytype) anyerror!void {
            var length: usize = 0;
            _ = try reader.readAll(std.mem.asBytes(&result.major));
            _ = try reader.readAll(std.mem.asBytes(&result.minor));
            _ = try reader.readAll(std.mem.asBytes(&result.patch));
            length = try reader.readIntNative(u32);
            if (result.pre.len != length) {
                result.pre = try allocator.alloc(u8, length);
            }
            _ = try reader.readAll(result.pre);
            length = try reader.readIntNative(u32);
            if (result.build.len != length) {
                result.build = try allocator.alloc(u8, length);
            }
            _ = try reader.readAll(result.build);
            return;
        }

        pub fn encode(result: *const @This(), writer: anytype) anyerror!void {
            try writer.writeIntNative(i32, result.major);
            try writer.writeIntNative(i32, result.minor);
            try writer.writeIntNative(i32, result.patch);
            try writer.writeIntNative(u32, @intCast(u32, result.pre.len));
            try writer.writeAll(std.mem.sliceAsBytes(result.pre));
            try writer.writeIntNative(u32, @intCast(u32, result.build.len));
            try writer.writeAll(std.mem.sliceAsBytes(result.build));
            return;
        }
    };

    pub const JavascriptPackageInput = struct {
        /// name
        name: ?[]u8 = null,
        /// version
        version: ?[]u8 = null,
        /// dependencies
        dependencies: ?RawDependencyList = null,

        pub fn decode(allocator: *std.mem.Allocator, reader: anytype) anyerror!JavascriptPackageInput {
            var obj = std.mem.zeroes(JavascriptPackageInput);
            try update(&obj, allocator, reader);
            return obj;
        }

        /// Read tagged optional fields until the 0 terminator byte.
        pub fn update(result: *JavascriptPackageInput, allocator: *std.mem.Allocator, reader: anytype) anyerror!void {
            var length: usize = 0;
            while (true) {
                const field_type: u8 = try reader.readByte();
                switch (field_type) {
                    0 => {
                        return;
                    },
                    1 => {
                        length = try reader.readIntNative(u32);
                        if ((result.name orelse &([_]u8{})).len != length) {
                            result.name = try allocator.alloc(u8, length);
                        }
                        _ = try reader.readAll(result.name.?);
                    },
                    2 => {
                        length = try reader.readIntNative(u32);
                        if ((result.version orelse &([_]u8{})).len != length) {
                            result.version = try allocator.alloc(u8, length);
                        }
                        _ = try reader.readAll(result.version.?);
                    },
                    3 => {
                        result.dependencies = try RawDependencyList.decode(allocator, reader);
                    },
                    else => {
                        return error.InvalidMessage;
                    },
                }
            }
        }

        pub fn encode(result: *const @This(), writer: anytype) anyerror!void {
            if (result.name) |name| {
                try writer.writeByte(1);
                try writer.writeIntNative(u32, @intCast(u32, name.len));
                try writer.writeAll(name);
            }
            if (result.version) |version| {
                try writer.writeByte(2);
                try writer.writeIntNative(u32, @intCast(u32, version.len));
                try writer.writeAll(std.mem.sliceAsBytes(version));
            }
            if (result.dependencies) |dependencies| {
                try writer.writeByte(3);
                try dependencies.encode(writer);
            }
            try writer.writeByte(0);
            return;
        }
    };

    pub const RawDependencyList = struct {
        /// count
        count: u32 = 0,
        /// names
        names: [][]u8,
        /// versions
        versions: [][]u8,

        pub fn decode(allocator: *std.mem.Allocator, reader: anytype) anyerror!RawDependencyList {
            var obj = std.mem.zeroes(RawDependencyList);
            try update(&obj, allocator, reader);
            return obj;
        }

        pub fn update(result: *RawDependencyList, allocator: *std.mem.Allocator, reader: anytype) anyerror!void {
            var length: usize = 0;
            _ = try reader.readAll(std.mem.asBytes(&result.count));
            {
                var array_count = try reader.readIntNative(u32);
                if (array_count != result.names.len) {
                    result.names = try allocator.alloc([]u8, array_count);
                    std.mem.set([]u8, result.names, std.mem.zeroes([]u8));
                }
                for (result.names) |_, j| {
                    // BUGFIX: per-element length prefix, matching encode().
                    length = try reader.readIntNative(u32);
                    if (result.names[j].len != length) {
                        result.names[j] = try allocator.alloc(u8, length);
                    }
                    _ = try reader.readAll(result.names[j]);
                }
            }
            {
                var array_count = try reader.readIntNative(u32);
                if (array_count != result.versions.len) {
                    result.versions = try allocator.alloc([]u8, array_count);
                    std.mem.set([]u8, result.versions, std.mem.zeroes([]u8));
                }
                for (result.versions) |_, j| {
                    // BUGFIX: per-element length prefix, matching encode().
                    length = try reader.readIntNative(u32);
                    if (result.versions[j].len != length) {
                        result.versions[j] = try allocator.alloc(u8, length);
                    }
                    _ = try reader.readAll(result.versions[j]);
                }
            }
            return;
        }

        pub fn encode(result: *const @This(), writer: anytype) anyerror!void {
            var n: usize = 0;
            try writer.writeIntNative(u32, result.count);
            n = result.names.len;
            _ = try writer.writeIntNative(u32, @intCast(u32, n));
            {
                var j: usize = 0;
                while (j < n) : (j += 1) {
                    _ = try writer.writeIntNative(u32, @intCast(u32, result.names[j].len));
                    try writer.writeAll(result.names[j]);
                }
            }
            n = result.versions.len;
            _ = try writer.writeIntNative(u32, @intCast(u32, n));
            {
                var j: usize = 0;
                while (j < n) : (j += 1) {
                    _ = try writer.writeIntNative(u32, @intCast(u32, result.versions[j].len));
                    try writer.writeAll(std.mem.sliceAsBytes(result.versions[j]));
                }
            }
            return;
        }
    };

    pub const JavascriptPackageManifest = struct {
        /// count
        count: u32 = 0,
        /// name
        name: [][]u8,
        /// version
        version: []Version,
        /// providers
        providers: []PackageProvider,
        /// dependencies
        dependencies: []u32,
        /// dependenciesIndex
        dependencies_index: []u32,
        /// exportsManifest
        exports_manifest: ExportsManifest,
        /// exportsManifestIndex
        exports_manifest_index: []u32,

        pub fn decode(allocator: *std.mem.Allocator, reader: anytype) anyerror!JavascriptPackageManifest {
            var obj = std.mem.zeroes(JavascriptPackageManifest);
            try update(&obj, allocator, reader);
            return obj;
        }

        pub fn update(result: *JavascriptPackageManifest, allocator: *std.mem.Allocator, reader: anytype) anyerror!void {
            var length: usize = 0;
            _ = try reader.readAll(std.mem.asBytes(&result.count));
            {
                var array_count = try reader.readIntNative(u32);
                if (array_count != result.name.len) {
                    result.name = try allocator.alloc([]u8, array_count);
                    std.mem.set([]u8, result.name, std.mem.zeroes([]u8));
                }
                for (result.name) |_, j| {
                    // BUGFIX: per-element length prefix, matching encode().
                    length = try reader.readIntNative(u32);
                    if (result.name[j].len != length) {
                        result.name[j] = try allocator.alloc(u8, length);
                    }
                    _ = try reader.readAll(result.name[j]);
                }
            }
            length = try reader.readIntNative(u32);
            result.version = try allocator.alloc(Version, length);
            {
                var j: usize = 0;
                while (j < length) : (j += 1) {
                    result.version[j] = try Version.decode(allocator, reader);
                }
            }
            length = try reader.readIntNative(u32);
            result.providers = try allocator.alloc(PackageProvider, length);
            {
                var j: usize = 0;
                while (j < length) : (j += 1) {
                    result.providers[j] = try reader.readEnum(PackageProvider, .Little);
                }
            }
            length = try reader.readIntNative(u32);
            result.dependencies = try allocator.alloc(u32, length);
            {
                var j: usize = 0;
                while (j < length) : (j += 1) {
                    _ = try reader.readAll(std.mem.asBytes(&result.dependencies[j]));
                }
            }
            length = try reader.readIntNative(u32);
            result.dependencies_index = try allocator.alloc(u32, length);
            {
                var j: usize = 0;
                while (j < length) : (j += 1) {
                    _ = try reader.readAll(std.mem.asBytes(&result.dependencies_index[j]));
                }
            }
            result.exports_manifest = try ExportsManifest.decode(allocator, reader);
            length = try reader.readIntNative(u32);
            result.exports_manifest_index = try allocator.alloc(u32, length);
            {
                var j: usize = 0;
                while (j < length) : (j += 1) {
                    _ = try reader.readAll(std.mem.asBytes(&result.exports_manifest_index[j]));
                }
            }
            return;
        }

        pub fn encode(result: *const @This(), writer: anytype) anyerror!void {
            var n: usize = 0;
            try writer.writeIntNative(u32, result.count);
            n = result.name.len;
            _ = try writer.writeIntNative(u32, @intCast(u32, n));
            {
                var j: usize = 0;
                while (j < n) : (j += 1) {
                    _ = try writer.writeIntNative(u32, @intCast(u32, result.name[j].len));
                    try writer.writeAll(result.name[j]);
                }
            }
            n = result.version.len;
            _ = try writer.writeIntNative(u32, @intCast(u32, n));
            {
                var j: usize = 0;
                while (j < n) : (j += 1) {
                    try result.version[j].encode(writer);
                }
            }
            n = result.providers.len;
            _ = try writer.writeIntNative(u32, @intCast(u32, n));
            {
                var j: usize = 0;
                while (j < n) : (j += 1) {
                    try writer.writeByte(@enumToInt(result.providers[j]));
                }
            }
            n = result.dependencies.len;
            _ = try writer.writeIntNative(u32, @intCast(u32, n));
            {
                var j: usize = 0;
                while (j < n) : (j += 1) {
                    try writer.writeIntNative(u32, result.dependencies[j]);
                }
            }
            n = result.dependencies_index.len;
            _ = try writer.writeIntNative(u32, @intCast(u32, n));
            {
                var j: usize = 0;
                while (j < n) : (j += 1) {
                    try writer.writeIntNative(u32, result.dependencies_index[j]);
                }
            }
            try result.exports_manifest.encode(writer);
            n = result.exports_manifest_index.len;
            _ = try writer.writeIntNative(u32, @intCast(u32, n));
            {
                var j: usize = 0;
                while (j < n) : (j += 1) {
                    try writer.writeIntNative(u32, result.exports_manifest_index[j]);
                }
            }
            return;
        }
    };

    pub const JavascriptPackageRequest = struct {
        /// clientVersion
        client_version: ?[]u8 = null,
        /// name
        name: ?[]u8 = null,
        /// dependencies
        dependencies: ?RawDependencyList = null,
        /// optionalDependencies
        optional_dependencies: ?RawDependencyList = null,
        /// devDependencies
        dev_dependencies: ?RawDependencyList = null,
        /// peerDependencies
        peer_dependencies: ?RawDependencyList = null,

        pub fn decode(allocator: *std.mem.Allocator, reader: anytype) anyerror!JavascriptPackageRequest {
            var obj = std.mem.zeroes(JavascriptPackageRequest);
            try update(&obj, allocator, reader);
            return obj;
        }

        /// Read tagged optional fields until the 0 terminator byte.
        pub fn update(result: *JavascriptPackageRequest, allocator: *std.mem.Allocator, reader: anytype) anyerror!void {
            var length: usize = 0;
            while (true) {
                const field_type: u8 = try reader.readByte();
                switch (field_type) {
                    0 => {
                        return;
                    },
                    1 => {
                        length = try reader.readIntNative(u32);
                        if ((result.client_version orelse &([_]u8{})).len != length) {
                            result.client_version = try allocator.alloc(u8, length);
                        }
                        _ = try reader.readAll(result.client_version.?);
                    },
                    2 => {
                        length = try reader.readIntNative(u32);
                        if ((result.name orelse &([_]u8{})).len != length) {
                            result.name = try allocator.alloc(u8, length);
                        }
                        _ = try reader.readAll(result.name.?);
                    },
                    3 => {
                        result.dependencies = try RawDependencyList.decode(allocator, reader);
                    },
                    4 => {
                        result.optional_dependencies = try RawDependencyList.decode(allocator, reader);
                    },
                    5 => {
                        result.dev_dependencies = try RawDependencyList.decode(allocator, reader);
                    },
                    6 => {
                        result.peer_dependencies = try RawDependencyList.decode(allocator, reader);
                    },
                    else => {
                        return error.InvalidMessage;
                    },
                }
            }
        }

        pub fn encode(result: *const @This(), writer: anytype) anyerror!void {
            if (result.client_version) |client_version| {
                try writer.writeByte(1);
                try writer.writeIntNative(u32, @intCast(u32, client_version.len));
                try writer.writeAll(std.mem.sliceAsBytes(client_version));
            }
            if (result.name) |name| {
                try writer.writeByte(2);
                try writer.writeIntNative(u32, @intCast(u32, name.len));
                try writer.writeAll(name);
            }
            if (result.dependencies) |dependencies| {
                try writer.writeByte(3);
                try dependencies.encode(writer);
            }
            if (result.optional_dependencies) |optional_dependencies| {
                try writer.writeByte(4);
                try optional_dependencies.encode(writer);
            }
            if (result.dev_dependencies) |dev_dependencies| {
                try writer.writeByte(5);
                try dev_dependencies.encode(writer);
            }
            if (result.peer_dependencies) |peer_dependencies| {
                try writer.writeByte(6);
                try peer_dependencies.encode(writer);
            }
            try writer.writeByte(0);
            return;
        }
    };

    pub const ErrorCode = enum(u32) {
        _none,
        /// generic
        generic,
        /// missingPackageName
        missing_package_name,
        /// serverDown
        server_down,
        /// versionDoesntExit
        version_doesnt_exit,
        _,

        pub fn jsonStringify(self: *const @This(), opts: anytype, o: anytype) !void {
            return try std.json.stringify(@tagName(self), opts, o);
        }
    };

    pub const JavascriptPackageResponse = struct {
        /// name
        name: ?[]u8 = null,
        /// result
        result: ?JavascriptPackageManifest = null,
        /// errorCode
        error_code: ?ErrorCode = null,
        /// message
        message: ?[]u8 = null,

        pub fn decode(allocator: *std.mem.Allocator, reader: anytype) anyerror!JavascriptPackageResponse {
            var obj = std.mem.zeroes(JavascriptPackageResponse);
            try update(&obj, allocator, reader);
            return obj;
        }

        /// Read tagged optional fields until the 0 terminator byte.
        pub fn update(result: *JavascriptPackageResponse, allocator: *std.mem.Allocator, reader: anytype) anyerror!void {
            var length: usize = 0;
            while (true) {
                const field_type: u8 = try reader.readByte();
                switch (field_type) {
                    0 => {
                        return;
                    },
                    1 => {
                        length = try reader.readIntNative(u32);
                        if ((result.name orelse &([_]u8{})).len != length) {
                            result.name = try allocator.alloc(u8, length);
                        }
                        _ = try reader.readAll(result.name.?);
                    },
                    2 => {
                        result.result = try JavascriptPackageManifest.decode(allocator, reader);
                    },
                    3 => {
                        result.error_code = try reader.readEnum(ErrorCode, .Little);
                    },
                    4 => {
                        length = try reader.readIntNative(u32);
                        if ((result.message orelse &([_]u8{})).len != length) {
                            result.message = try allocator.alloc(u8, length);
                        }
                        _ = try reader.readAll(result.message.?);
                    },
                    else => {
                        return error.InvalidMessage;
                    },
                }
            }
        }

        pub fn encode(result: *const @This(), writer: anytype) anyerror!void {
            if (result.name) |name| {
                try writer.writeByte(1);
                try writer.writeIntNative(u32, @intCast(u32, name.len));
                try writer.writeAll(name);
            }
            // BUGFIX: the capture used to be named `result`, shadowing the
            // `result` parameter (shadowing is a compile error in Zig).
            if (result.result) |res| {
                try writer.writeByte(2);
                try res.encode(writer);
            }
            // BUGFIX: previously `writer.writeAll(@intCast(u8, ...))` — an
            // integer where a slice is required, and a single byte where
            // update() reads a little-endian u32 via readEnum(ErrorCode, .Little).
            if (result.error_code) |error_code| {
                try writer.writeByte(3);
                try writer.writeIntLittle(u32, @enumToInt(error_code));
            }
            if (result.message) |message| {
                try writer.writeByte(4);
                try writer.writeIntNative(u32, @intCast(u32, message.len));
                try writer.writeAll(std.mem.sliceAsBytes(message));
            }
            try writer.writeByte(0);
            return;
        }
    };
};
js/lockfile.zig
//! Document: the open image being edited — pixel data, palette, selection,
//! undo history, and the GPU textures used to display it.
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const nvg = @import("nanovg");
const s2s = @import("s2s");
const gui = @import("gui");
const geometry = gui.geometry;
const Point = geometry.Point;
const Pointi = Point(i32);
const Pointu = Point(u32);
const Rect = geometry.Rect;
const Recti = Rect(i32);
const CanvasWidget = @import("CanvasWidget.zig");
const ColorBitmap = @import("ColorBitmap.zig");
const IndexedBitmap = @import("IndexedBitmap.zig");
const col = @import("color.zig");
const Color = col.Color;
const ColorLayer = col.ColorLayer;
const BlendMode = col.BlendMode;
const Image = @import("Image.zig");
const Clipboard = @import("Clipboard.zig");
const HistoryBuffer = @import("history.zig").Buffer;
const HistorySnapshot = @import("history.zig").Snapshot;

const Document = @This();

pub const BitmapType = enum(u8) {
    color,
    indexed,
};

/// Pixel storage: either truecolor RGBA (ColorBitmap) or palette indices
/// (IndexedBitmap). Dispatches the common operations to the active variant.
const Bitmap = union(BitmapType) {
    color: ColorBitmap,
    indexed: IndexedBitmap,

    fn init(bitmap_type: BitmapType, allocator: Allocator, width: u32, height: u32) !Bitmap {
        return switch (bitmap_type) {
            .color => .{ .color = try ColorBitmap.init(allocator, width, height) },
            .indexed => .{ .indexed = try IndexedBitmap.init(allocator, width, height) },
        };
    }

    /// Wraps the image's pixel buffer without copying; the presence of a
    /// colormap decides indexed vs. truecolor.
    fn initFromImage(image: Image) Bitmap {
        return if (image.colormap != null) .{ .indexed = IndexedBitmap{
            .width = image.width,
            .height = image.height,
            .indices = image.pixels,
        } } else .{ .color = ColorBitmap{
            .width = image.width,
            .height = image.height,
            .pixels = image.pixels,
        } };
    }

    fn deinit(self: Bitmap, allocator: Allocator) void {
        switch (self) {
            .color => |color_bitmap| color_bitmap.deinit(allocator),
            .indexed => |indexed_bitmap| indexed_bitmap.deinit(allocator),
        }
    }

    fn clone(self: Bitmap, allocator: Allocator) !Bitmap {
        return switch (self) {
            .color => |color_bitmap| Bitmap{ .color = try color_bitmap.clone(allocator) },
            .indexed => |indexed_bitmap| Bitmap{ .indexed = try indexed_bitmap.clone(allocator) },
        };
    }

    /// Creates a fresh nanovg texture from the pixel data (RGBA for color,
    /// single-channel alpha for indexed; palette lookup happens at draw time).
    fn createTexture(self: Bitmap, vg: nvg) nvg.Image {
        return switch (self) {
            .color => |color_bitmap| nvg.createImageRGBA(
                vg,
                color_bitmap.width,
                color_bitmap.height,
                .{ .nearest = true },
                color_bitmap.pixels,
            ),
            .indexed => |indexed_bitmap| nvg.createImageAlpha(
                vg,
                indexed_bitmap.width,
                indexed_bitmap.height,
                .{ .nearest = true },
                indexed_bitmap.indices,
            ),
        };
    }

    /// Wraps the pixel buffer as an Image without copying — the Image shares
    /// (does not own a copy of) this bitmap's memory.
    fn toImage(self: *Bitmap, allocator: Allocator) Image {
        return switch (self.*) {
            .color => |color_bitmap| Image{
                .allocator = allocator,
                .width = color_bitmap.width,
                .height = color_bitmap.height,
                .pixels = color_bitmap.pixels,
            },
            .indexed => |*indexed_bitmap| Image{
                .allocator = allocator,
                .width = indexed_bitmap.width,
                .height = indexed_bitmap.height,
                .pixels = indexed_bitmap.indices,
            },
        };
    }

    fn mirrorHorizontally(self: Bitmap) void {
        switch (self) {
            .color => |color_bitmap| color_bitmap.mirrorHorizontally(),
            .indexed => |indexed_bitmap| indexed_bitmap.mirrorHorizontally(),
        }
    }

    fn mirrorVertically(self: Bitmap) void {
        switch (self) {
            .color => |color_bitmap| color_bitmap.mirrorVertically(),
            .indexed => |indexed_bitmap| indexed_bitmap.mirrorVertically(),
        }
    }

    fn rotate(self: *Bitmap, allocator: Allocator, clockwise: bool) !void {
        try switch (self.*) {
            .color => |*color_bitmap| color_bitmap.rotate(allocator, clockwise),
            .indexed => |*indexed_bitmap| indexed_bitmap.rotate(allocator, clockwise),
        };
    }
};

/// A floating rectangle of pixels lifted out of the document, drawn with its
/// own texture until it is anchored back down (clearSelection) or discarded.
pub const Selection = struct {
    rect: Recti,
    bitmap: Bitmap,
    // texture: nvg.Image,
    // need_texture_recreation: bool = false,
    // fn updateTexture(self: Selection) void {
    //     switch (self.bitmap) {
    //         .color => |color_bitmap| nvg.updateImage(self.texture, color_bitmap.pixels),
    //         .indexed => |indexed_bitmap| nvg.updateImage(self.texture, indexed_bitmap.indices),
    //     }
    // }
};

const PrimitiveTag = enum {
    none,
    brush,
    line,
    full,
};

/// Records what was last drawn into preview_bitmap so clearPreview can undo
/// exactly that region (or everything, for .full).
const PrimitivePreview = union(PrimitiveTag) {
    none: void,
    brush: struct {
        x: u32,
        y: u32,
    },
    line: struct {
        x0: i32,
        y0: i32,
        x1: i32,
        y1: i32,
    },
    full: void,
};

allocator: Allocator,
// For tracking offset after cropping operation
x: i32 = 0,
y: i32 = 0,
texture: nvg.Image, // image for display using nvg
texture_palette: nvg.Image,
bitmap: Bitmap,
colormap: []u8,
preview_bitmap: Bitmap, // preview brush and lines
last_preview: PrimitivePreview = .none,
need_texture_update: bool = false, // bitmap needs to be uploaded to gpu on next draw call
need_texture_recreation: bool = false,
selection: ?Selection = null,
copy_location: ?Pointi = null, // where the source was copied from
selection_texture: nvg.Image,
need_selection_texture_update: bool = false,
need_selection_texture_recreation: bool = false,
history: *HistoryBuffer,
foreground_color: [4]u8 = [_]u8{ 0, 0, 0, 0xff },
background_color: [4]u8 = [_]u8{ 0xff, 0xff, 0xff, 0xff },
foreground_index: u8 = 0,
background_index: u8 = 1,
blend_mode: BlendMode = .replace,
canvas: *CanvasWidget = undefined,

const Self = @This();

/// Allocates a new 32x32 truecolor document filled with the background
/// color and uploads the initial textures. Caller frees with deinit().
pub fn init(allocator: Allocator, vg: nvg) !*Self {
    var self = try allocator.create(Self);
    self.* = Self{
        .allocator = allocator,
        .texture = undefined,
        .texture_palette = undefined,
        .selection_texture = undefined,
        .bitmap = try Bitmap.init(.color, allocator, 32, 32),
        .colormap = try allocator.alloc(u8, 4 * 256),
        .preview_bitmap = undefined,
        .history = try HistoryBuffer.init(allocator),
    };
    self.bitmap.color.fill(self.background_color);
    self.preview_bitmap = try self.bitmap.clone(self.allocator);
    self.texture = self.bitmap.createTexture(vg);
    self.texture_palette = vg.createImageRGBA(256, 1, .{ .nearest = true }, self.colormap);
    self.selection_texture = vg.createImageRGBA(0, 0, .{}, &.{}); // dummy
    try self.history.reset(self);
    return self;
}

/// Releases all CPU- and GPU-side resources and destroys self.
/// NOTE(review): selection_texture is never deleted here (only replaced in
/// drawSelection) — looks like a GPU resource leak; confirm.
pub fn deinit(self: *Self, vg: nvg) void {
    self.history.deinit();
    self.bitmap.deinit(self.allocator);
    self.preview_bitmap.deinit(self.allocator);
    self.allocator.free(self.colormap);
    vg.deleteImage(self.texture);
    vg.deleteImage(self.texture_palette);
    self.freeSelection();
    self.allocator.destroy(self);
}

/// Replaces the document with a blank bitmap of the given size and type,
/// filled with the background color/index, and resets history and offset.
pub fn createNew(self: *Self, width: u32,
height: u32, bitmap_type: BitmapType) !void {
    self.bitmap.deinit(self.allocator);
    self.preview_bitmap.deinit(self.allocator);
    self.freeSelection();
    self.bitmap = try Bitmap.init(bitmap_type, self.allocator, width, height);
    switch (bitmap_type) {
        .color => self.bitmap.color.fill(self.background_color),
        .indexed => self.bitmap.indexed.fill(self.background_index),
    }
    self.preview_bitmap = try self.bitmap.clone(self.allocator);
    self.need_texture_recreation = true;
    self.x = 0;
    self.y = 0;
    try self.history.reset(self);
}

/// Loads an image file, adopting its colormap (if any) and pixel buffer,
/// then resets offset, selection and history.
pub fn load(self: *Self, file_path: []const u8) !void {
    const image = try Image.initFromFile(self.allocator, file_path);
    if (image.colormap) |colormap| {
        if (colormap.len != self.colormap.len) return error.UnexpectedColormapLen;
        self.allocator.free(self.colormap);
        self.colormap = colormap;
    }
    self.bitmap.deinit(self.allocator);
    self.bitmap = Bitmap.initFromImage(image);
    self.preview_bitmap.deinit(self.allocator);
    self.preview_bitmap = try self.bitmap.clone(self.allocator);
    self.need_texture_recreation = true;
    self.x = 0;
    self.y = 0;
    self.freeSelection();
    try self.history.reset(self);
}

/// Writes the document to a file; indexed images carry a trimmed colormap.
pub fn save(self: *Self, file_path: []const u8) !void {
    var image = self.bitmap.toImage(self.allocator);
    if (self.getBitmapType() == .indexed) {
        image.colormap = col.trimBlackColorsRight(self.colormap);
    }
    try image.writeToFile(file_path);
}

/// What serialize/deserialize persist for undo history.
const Snapshot = struct {
    x: i32,
    y: i32,
    bitmap: Bitmap,
    colormap: []u8,
    selection: ?struct {
        rect: Recti,
        bitmap: Bitmap,
    },
};

/// Serializes the current state (deflate-compressed s2s Snapshot).
/// NOTE(review): returns output.items without toOwnedSlice — ownership of
/// the ArrayList's buffer passes to the caller implicitly; confirm callers.
pub fn serialize(self: Document) ![]u8 {
    var output = ArrayList(u8).init(self.allocator);
    var comp = try std.compress.deflate.compressor(self.allocator, output.writer(), .{});
    defer comp.deinit();
    var snapshot = Snapshot{
        .x = self.x,
        .y = self.y,
        .bitmap = self.bitmap,
        .colormap = self.colormap,
        .selection = if (self.selection) |selection| .{
            .rect = selection.rect,
            .bitmap = selection.bitmap,
        } else null,
    };
    try s2s.serialize(comp.writer(), Snapshot, snapshot);
    try comp.close();
    return output.items;
}

/// Restores document state from serialize()'s output, adopting the
/// deserialized buffers and translating the canvas if the offset moved.
pub fn deserialize(self: *Document, data: []u8) !void {
    self.freeSelection();
    var input = std.io.fixedBufferStream(data);
    var decomp = try std.compress.deflate.decompressor(self.allocator, input.reader(), null);
    defer decomp.deinit();
    var snapshot = try s2s.deserializeAlloc(decomp.reader(), Snapshot, self.allocator);
    self.bitmap.deinit(self.allocator);
    self.bitmap = snapshot.bitmap;
    self.allocator.free(self.colormap);
    self.colormap = snapshot.colormap;
    self.freeSelection();
    if (snapshot.selection) |selection| {
        self.selection = Selection{
            .rect = selection.rect,
            .bitmap = selection.bitmap,
        };
        self.need_selection_texture_recreation = true;
    }
    _ = decomp.close();
    self.preview_bitmap.deinit(self.allocator);
    self.preview_bitmap = try self.bitmap.clone(self.allocator);
    self.need_texture_recreation = true;
    if (self.x != snapshot.x or self.y != snapshot.y) {
        const dx = snapshot.x - self.x;
        const dy = snapshot.y - self.y;
        self.x += dx;
        self.y += dy;
        self.canvas.translateByPixel(dx, dy);
    }
    self.last_preview = .full;
    self.clearPreview();
}

/// Converts an indexed document (and its selection) to truecolor in place.
/// No-op if already truecolor. Pushes a history frame.
pub fn convertToTruecolor(self: *Self) !void {
    switch (self.bitmap) {
        .color => {},
        .indexed => |indexed_bitmap| {
            const color_bitmap = try indexed_bitmap.convertToTruecolor(self.allocator, self.colormap);
            if (self.selection) |*selection| {
                const selection_bitmap = try selection.bitmap.indexed.convertToTruecolor(self.allocator, self.colormap);
                selection.bitmap.deinit(self.allocator);
                selection.bitmap = .{ .color = selection_bitmap };
                self.need_selection_texture_recreation = true;
            }
            self.bitmap.deinit(self.allocator);
            self.bitmap = .{ .color = color_bitmap };
            self.preview_bitmap.deinit(self.allocator);
            self.preview_bitmap = try self.bitmap.clone(self.allocator);
            self.need_texture_recreation = true;
            try self.history.pushFrame(self);
        },
    }
}

/// True if every color (document and selection) exists in the colormap.
pub fn canLosslesslyConvertToIndexed(self: Self) !bool {
    switch (self.bitmap) {
        .indexed => return true,
        .color => |color_bitmap| {
            if (self.selection) |selection| {
                if (!try selection.bitmap.color.canLosslesslyConvertToIndexed(self.allocator, self.colormap)) {
                    return false;
                }
            }
            return try color_bitmap.canLosslesslyConvertToIndexed(self.allocator, self.colormap);
        },
    }
}

/// Converts a truecolor document (and its selection) to indexed in place.
/// No-op if already indexed. Pushes a history frame.
pub fn convertToIndexed(self: *Self) !void {
    switch (self.bitmap) {
        .color => |color_bitmap| {
            const indexed_bitmap = try color_bitmap.convertToIndexed(self.allocator, self.colormap);
            if (self.selection) |*selection| {
                const selection_bitmap = try selection.bitmap.color.convertToIndexed(self.allocator, self.colormap);
                selection.bitmap.deinit(self.allocator);
                selection.bitmap = .{ .indexed = selection_bitmap };
                self.need_selection_texture_recreation = true;
            }
            self.bitmap.deinit(self.allocator);
            self.bitmap = .{ .indexed = indexed_bitmap };
            self.preview_bitmap.deinit(self.allocator);
            self.preview_bitmap = try self.bitmap.clone(self.allocator);
            self.need_texture_recreation = true;
            try self.history.pushFrame(self);
        },
        .indexed => {},
    }
}

pub const PaletteUpdateMode = enum {
    replace,
    map,
};

/// Installs a new palette. In .map mode, indexed pixels are first remapped
/// to the nearest entry of the new palette. Pushes a history frame.
pub fn applyPalette(self: *Self, palette: []u8, mode: PaletteUpdateMode) !void {
    if (mode == .map) {
        switch (self.bitmap) {
            .indexed => |indexed_bitmap| {
                var map: [256]u8 = undefined; // colormap -> palette
                for (map) |*m, i| {
                    m.* = @truncate(u8, col.findNearest(palette, self.colormap[4 * i ..]));
                }
                const pixel_count = indexed_bitmap.width * indexed_bitmap.height;
                var i: usize = 0;
                while (i < pixel_count) : (i += 1) {
                    indexed_bitmap.indices[i] = map[indexed_bitmap.indices[i]];
                }
                self.last_preview = .full;
                self.clearPreview();
            },
            .color => {},
        }
    }
    std.mem.copy(u8, self.colormap, palette);
    self.need_texture_update = true;
    try self.history.pushFrame(self);
}

pub fn getWidth(self: Self) u32 {
    return switch (self.bitmap) {
        .color => |color_bitmap| color_bitmap.width,
        .indexed => |indexed_bitmap| indexed_bitmap.width,
    };
}

pub fn getHeight(self: Self) u32 {
    return switch (self.bitmap) {
        .color => |color_bitmap| color_bitmap.height,
        .indexed => |indexed_bitmap| indexed_bitmap.height,
    };
}

pub
fn getBitmapType(self: Self) BitmapType {
    return std.meta.activeTag(self.bitmap);
}

/// Bits per pixel of the active storage (32 RGBA / 8 indexed).
pub fn getColorDepth(self: Self) u32 {
    return switch (self.bitmap) {
        .color => 32,
        .indexed => 8,
    };
}

pub fn canUndo(self: Self) bool {
    return self.history.canUndo();
}

pub fn undo(self: *Self) !void {
    try self.history.undo(self);
}

pub fn canRedo(self: Self) bool {
    return self.history.canRedo();
}

pub fn redo(self: *Self) !void {
    try self.history.redo(self);
}

/// Copy to clipboard, then remove: the selection if one exists, otherwise
/// the whole bitmap is filled with the background. Pushes a history frame.
pub fn cut(self: *Self) !void {
    try self.copy();
    if (self.selection != null) {
        self.freeSelection();
    } else {
        switch (self.bitmap) {
            .color => |color_bitmap| color_bitmap.fill(self.background_color),
            .indexed => |indexed_bitmap| indexed_bitmap.fill(self.background_index),
        }
        self.last_preview = .full;
        self.clearPreview();
    }
    try self.history.pushFrame(self);
}

/// Copies the selection (or the whole document) to the clipboard and
/// remembers where it came from so paste can restore the position.
pub fn copy(self: *Self) !void {
    if (self.selection) |*selection| {
        var image = selection.bitmap.toImage(self.allocator);
        if (self.getBitmapType() == .indexed) {
            image.colormap = col.trimBlackColorsRight(self.colormap);
        }
        try Clipboard.setImage(self.allocator, image);
        self.copy_location = Pointi{
            .x = selection.rect.x,
            .y = selection.rect.y,
        };
    } else {
        var image = self.bitmap.toImage(self.allocator);
        if (self.getBitmapType() == .indexed) {
            image.colormap = col.trimBlackColorsRight(self.colormap);
        }
        try Clipboard.setImage(self.allocator, image);
        self.copy_location = null;
    }
}

/// Pastes the clipboard image as a new floating selection, converting it to
/// the document's bitmap type if necessary. Positioned at the original copy
/// location when known, otherwise centered. Pushes a history frame.
pub fn paste(self: *Self) !void {
    const image = try Clipboard.getImage(self.allocator);
    errdefer self.allocator.free(image.pixels);
    defer {
        if (image.colormap) |colormap| {
            self.allocator.free(colormap);
        }
    }
    if (self.selection) |_| {
        try self.clearSelection();
    }
    var selection_rect = Recti.make(0, 0, @intCast(i32, image.width), @intCast(i32, image.height));
    if (self.copy_location) |copy_location| {
        selection_rect.x = copy_location.x;
        selection_rect.y = copy_location.y;
    } else {
        selection_rect.x = @intCast(i32, self.getWidth() / 2) - @intCast(i32, image.width / 2);
        selection_rect.y = @intCast(i32, self.getHeight() / 2) - @intCast(i32, image.height / 2);
    }
    var bitmap = Bitmap.initFromImage(image);
    if (std.meta.activeTag(bitmap) != self.getBitmapType()) {
        const converted_bitmap: Bitmap = switch (bitmap) {
            .color => |color_bitmap| .{ .indexed = try color_bitmap.convertToIndexed(self.allocator, self.colormap) },
            .indexed => |indexed_bitmap| .{ .color = try indexed_bitmap.convertToTruecolor(self.allocator, image.colormap.?) },
        };
        bitmap.deinit(self.allocator);
        bitmap = converted_bitmap;
    }
    self.selection = Selection{
        .rect = selection_rect,
        .bitmap = bitmap,
    };
    self.need_selection_texture_recreation = true;
    try self.history.pushFrame(self);
}

/// Crops (or grows) the document to `rect` in document coordinates. Areas
/// outside the old bitmap are filled with the background. Updates the
/// document offset and translates the canvas. Pushes a history frame.
pub fn crop(self: *Self, rect: Recti) !void {
    if (rect.w <= 0 or rect.h <= 0) return error.InvalidCropRect;
    const width = @intCast(u32, rect.w);
    const height = @intCast(u32, rect.h);
    const new_bitmap = try Bitmap.init(self.getBitmapType(), self.allocator, width, height);
    //errdefer self.allocator.free(new_bitmap); // TODO: bad because tries for undo stuff at the bottom
    switch (new_bitmap) {
        .color => |color_bitmap| color_bitmap.fill(self.background_color),
        .indexed => |indexed_bitmap| indexed_bitmap.fill(self.background_index),
    }
    const intersection = rect.intersection(.{
        .x = 0,
        .y = 0,
        .w = @intCast(i32, self.getWidth()),
        .h = @intCast(i32, self.getHeight()),
    });
    if (intersection.w > 0 and intersection.h > 0) {
        const ox = if (rect.x < 0) @intCast(u32, -rect.x) else 0;
        const oy = if (rect.y < 0) @intCast(u32, -rect.y) else 0;
        const sx = @intCast(u32, intersection.x);
        const sy = @intCast(u32, intersection.y);
        const w = @intCast(u32, intersection.w);
        const h = @intCast(u32, intersection.h);
        // blit to source
        // NOTE(review): here `si` indexes the NEW bitmap (destination) and
        // `di` the old one (source) — the names look swapped but the copy
        // direction below is new <- old as intended.
        var y: u32 = 0;
        switch (self.bitmap) {
            .color => |color_bitmap| {
                while (y < h) : (y += 1) {
                    const si = 4 * ((y + oy) * @intCast(u32, rect.w) + ox);
                    const di = 4 * ((sy + y) * color_bitmap.width + sx);
                    // copy entire line
                    std.mem.copy(u8, new_bitmap.color.pixels[si .. si + 4 * w], color_bitmap.pixels[di .. di + 4 * w]);
                }
            },
            .indexed => |indexed_bitmap| {
                while (y < h) : (y += 1) {
                    const si = (y + oy) * @intCast(u32, rect.w) + ox;
                    const di = (sy + y) * indexed_bitmap.width + sx;
                    // copy entire line
                    std.mem.copy(u8, new_bitmap.indexed.indices[si .. si + w], indexed_bitmap.indices[di .. di + w]);
                }
            },
        }
    }
    self.bitmap.deinit(self.allocator);
    self.bitmap = new_bitmap;
    self.preview_bitmap.deinit(self.allocator);
    self.preview_bitmap = try self.bitmap.clone(self.allocator);
    self.need_texture_recreation = true;
    self.x += rect.x;
    self.y += rect.y;
    self.canvas.translateByPixel(rect.x, rect.y);
    try self.history.pushFrame(self);
}

/// Anchors the floating selection back into the bitmap (clipped to the
/// document), honoring the current blend mode, then frees the selection
/// and pushes a history frame. No-op without a selection.
pub fn clearSelection(self: *Self) !void {
    if (self.selection) |selection| {
        const rect = selection.rect;
        const bitmap = selection.bitmap;
        const intersection = rect.intersection(.{
            .x = 0,
            .y = 0,
            .w = @intCast(i32, self.getWidth()),
            .h = @intCast(i32, self.getHeight()),
        });
        if (intersection.w > 0 and intersection.h > 0) {
            const ox = if (rect.x < 0) @intCast(u32, -rect.x) else 0;
            const oy = if (rect.y < 0) @intCast(u32, -rect.y) else 0;
            const sx = @intCast(u32, intersection.x);
            const sy = @intCast(u32, intersection.y);
            const w = @intCast(u32, intersection.w);
            const h = @intCast(u32, intersection.h);
            // blit to source
            var y: u32 = 0;
            switch (self.bitmap) {
                .color => |color_bitmap| {
                    while (y < h) : (y += 1) {
                        const si = 4 * ((y + oy) * @intCast(u32, rect.w) + ox);
                        const di = 4 * ((sy + y) * color_bitmap.width + sx);
                        switch (self.blend_mode) {
                            .alpha => {
                                var x: u32 = 0;
                                while (x < w) : (x += 1) {
                                    const src = bitmap.color.pixels[si + 4 * x .. si + 4 * x + 4];
                                    const dst = color_bitmap.pixels[di + 4 * x .. di + 4 * x + 4];
                                    const out = col.blend(src, dst);
                                    std.mem.copy(u8, dst, &out);
                                }
                            },
                            .replace => std.mem.copy(u8, color_bitmap.pixels[di .. di + 4 * w], bitmap.color.pixels[si .. si + 4 * w]),
                        }
                    }
                },
                .indexed => |indexed_bitmap| {
                    while (y < h) : (y += 1) {
                        const si = (y + oy) * @intCast(u32, rect.w) + ox;
                        const di = (sy + y) * indexed_bitmap.width + sx;
                        std.mem.copy(u8, indexed_bitmap.indices[di .. di + w], bitmap.indexed.indices[si .. si + w]);
                    }
                },
            }
            self.last_preview = .full; // TODO: just a rect?
            self.clearPreview();
        }
        self.freeSelection();
        try self.history.pushFrame(self);
    }
}

/// Lifts the pixels under `rect` (clipped to the document) into a new
/// floating selection, filling the vacated area with the background.
/// Pushes a history frame.
pub fn makeSelection(self: *Self, rect: Recti) !void {
    std.debug.assert(rect.w > 0 and rect.h > 0);
    const intersection = rect.intersection(.{
        .x = 0,
        .y = 0,
        .w = @intCast(i32, self.getWidth()),
        .h = @intCast(i32, self.getHeight()),
    });
    if (intersection.w <= 0 or intersection.h <= 0) return;
    const sx = @intCast(u32, intersection.x);
    const sy = @intCast(u32, intersection.y);
    const w = @intCast(u32, intersection.w);
    const h = @intCast(u32, intersection.h);
    const bitmap = try Bitmap.init(self.getBitmapType(), self.allocator, w, h);
    // move pixels
    var y: u32 = 0;
    switch (self.bitmap) {
        .color => |color_bitmap| {
            while (y < h) : (y += 1) {
                const di = 4 * (y * w);
                const si = 4 * ((sy + y) * color_bitmap.width + sx);
                std.mem.copy(u8, bitmap.color.pixels[di .. di + 4 * w], color_bitmap.pixels[si .. si + 4 * w]);
                const dst_line = color_bitmap.pixels[si .. si + 4 * w];
                var i: usize = 0;
                while (i < dst_line.len) : (i += 1) {
                    dst_line[i] = self.background_color[i % 4];
                }
            }
        },
        .indexed => |indexed_bitmap| {
            while (y < h) : (y += 1) {
                const di = y * w;
                const si = (sy + y) * indexed_bitmap.width + sx;
                std.mem.copy(u8, bitmap.indexed.indices[di .. di + w], indexed_bitmap.indices[si .. si + w]);
                const dst_line = indexed_bitmap.indices[si .. si + w];
                std.mem.set(u8, dst_line, self.background_index);
            }
        },
    }
    var selection = Selection{
        .rect = intersection,
        .bitmap = bitmap,
    };
    self.freeSelection(); // clean up previous selection
    self.selection = selection;
    self.need_selection_texture_recreation = true;
    self.last_preview = .full; // TODO: just a rect?
    self.clearPreview();
    try self.history.pushFrame(self);
}

/// Records a history frame after the selection was dragged to a new spot.
pub fn movedSelection(self: *Self) !void {
    try self.history.pushFrame(self);
}

/// Discards the floating selection and pushes a history frame.
pub fn deleteSelection(self: *Self) !void {
    self.freeSelection();
    try self.history.pushFrame(self);
}

/// Frees the selection's bitmap, if any. No history frame.
pub fn freeSelection(self: *Self) void {
    if (self.selection) |selection| {
        selection.bitmap.deinit(self.allocator);
        self.selection = null;
    }
}

/// Draws a one-pixel brush preview into preview_bitmap (not the document).
pub fn previewBrush(self: *Self, x: i32, y: i32) void {
    self.clearPreview();
    var success = false;
    switch (self.preview_bitmap) {
        .color => |preview_color_bitmap| {
            success = switch (self.blend_mode) {
                .alpha => preview_color_bitmap.blendPixel(x, y, self.foreground_color),
                .replace => preview_color_bitmap.setPixel(x, y, self.foreground_color),
            };
        },
        .indexed => |preview_indexed_bitmap| {
            success = preview_indexed_bitmap.setIndex(x, y, self.foreground_index);
        },
    }
    if (success) {
        self.last_preview = PrimitivePreview{ .brush = .{ .x = @intCast(u32, x), .y = @intCast(u32, y) } };
    }
}

/// Draws a line preview into preview_bitmap (not the document).
pub fn previewStroke(self: *Self, x0: i32, y0: i32, x1: i32, y1: i32) void {
    self.clearPreview();
    switch (self.preview_bitmap) {
        .color => |preview_color_bitmap| {
            switch (self.blend_mode) {
                .alpha => preview_color_bitmap.blendLine(x0, y0, x1, y1, self.foreground_color, true),
                .replace => preview_color_bitmap.drawLine(x0, y0, x1, y1, self.foreground_color, true),
            }
        },
        .indexed => |preview_indexed_bitmap| {
            preview_indexed_bitmap.drawLine(x0, y0, x1, y1, self.foreground_index, true);
        },
    }
    self.last_preview = PrimitivePreview{ .line = .{ .x0 = x0, .y0 = y0, .x1 = x1, .y1 = y1 } };
}

/// Reverts the region recorded in last_preview by copying the committed
/// bitmap back over preview_bitmap, then schedules a texture upload.
pub fn clearPreview(self: *Self) void {
    switch (self.last_preview) {
        .none => {},
        .brush => |brush| {
            switch (self.bitmap) {
                .color => |color_bitmap| color_bitmap.copyPixelUnchecked(self.preview_bitmap.color, brush.x, brush.y),
                .indexed => |indexed_bitmap| indexed_bitmap.copyIndexUnchecked(self.preview_bitmap.indexed, brush.x, brush.y),
            }
        },
        .line => |line| {
            switch (self.bitmap) {
                .color => |color_bitmap| color_bitmap.copyLine(self.preview_bitmap.color, line.x0, line.y0, line.x1, line.y1),
                .indexed => |indexed_bitmap| indexed_bitmap.copyLine(self.preview_bitmap.indexed, line.x0, line.y0, line.x1, line.y1),
            }
        },
        .full => switch (self.bitmap) {
            .color => |color_bitmap| std.mem.copy(u8, self.preview_bitmap.color.pixels, color_bitmap.pixels),
            .indexed => |indexed_bitmap| std.mem.copy(u8, self.preview_bitmap.indexed.indices, indexed_bitmap.indices),
        },
    }
    self.last_preview = .none;
    self.need_texture_update = true;
}

/// Fills the selection if one exists (no history frame — selection is still
/// floating), otherwise fills the whole bitmap and pushes a history frame.
pub fn fill(self: *Self, color_layer: ColorLayer) !void {
    const color = if (color_layer == .foreground) self.foreground_color else self.background_color;
    const index = if (color_layer == .foreground) self.foreground_index else self.background_index;
    if (self.selection) |selection| {
        switch (selection.bitmap) {
            .color => |color_bitmap| color_bitmap.fill(color),
            .indexed => |indexed_bitmap| indexed_bitmap.fill(index),
        }
        self.need_selection_texture_update = true;
    } else {
        switch (self.bitmap) {
            .color => |color_bitmap| color_bitmap.fill(color),
            .indexed => |indexed_bitmap| indexed_bitmap.fill(index),
        }
        self.last_preview = .full;
        self.clearPreview();
        try self.history.pushFrame(self);
    }
}

/// Mirrors the selection (if any) or the document horizontally.
pub fn mirrorHorizontally(self: *Self) !void {
    if (self.selection) |*selection| {
        selection.bitmap.mirrorHorizontally();
        self.need_selection_texture_update = true;
    } else {
        self.bitmap.mirrorHorizontally();
        self.last_preview = .full;
        self.clearPreview();
        try self.history.pushFrame(self);
    }
}

/// Mirrors the selection (if any) or the document vertically.
pub fn mirrorVertically(self: *Self) !void {
    if (self.selection) |*selection| {
        selection.bitmap.mirrorVertically();
        self.need_selection_texture_update = true;
    } else {
        self.bitmap.mirrorVertically();
        self.last_preview = .full;
        self.clearPreview();
        try self.history.pushFrame(self);
    }
}

/// Rotates the selection (if any) or the document by 90 degrees, keeping
/// the rotated content centered on the same spot (w/h swap is compensated
/// by shifting the rect or the canvas offset).
pub fn rotate(self: *Self, clockwise: bool) !void {
    if (self.selection) |*selection| {
        try selection.bitmap.rotate(self.allocator, clockwise);
        std.mem.swap(i32, &selection.rect.w, &selection.rect.h);
        const d = @divTrunc(selection.rect.w - selection.rect.h, 2);
        if (d != 0) {
            selection.rect.x -= d;
            selection.rect.y += d;
            self.need_selection_texture_recreation = true;
        } else {
            self.need_selection_texture_update = true;
        }
    } else {
        try self.bitmap.rotate(self.allocator, clockwise);
        const d = @divTrunc(@intCast(i32, self.getHeight()) - @intCast(i32, self.getWidth()), 2);
        if (d != 0) {
            self.x -= d;
            self.y += d;
            self.canvas.translateByPixel(d, -d);
            self.need_texture_recreation = true;
        }
        self.last_preview = .full;
        self.clearPreview();
        try self.history.pushFrame(self);
    }
}

/// Commits the first pixel of a stroke directly into the document bitmap.
pub fn beginStroke(self: *Self, x: i32, y: i32) void {
    var success = false;
    switch (self.bitmap) {
        .color => |color_bitmap| {
            success = switch (self.blend_mode) {
                .alpha => color_bitmap.blendPixel(x, y, self.foreground_color),
                .replace => color_bitmap.setPixel(x, y, self.foreground_color),
            };
        },
        .indexed => |indexed_bitmap| {
            success = indexed_bitmap.setIndex(x, y, self.foreground_index);
        },
    }
    if (success) {
        self.last_preview = PrimitivePreview{ .brush = .{ .x = @intCast(u32, x), .y = @intCast(u32, y) } };
        self.clearPreview();
    }
}

/// Commits a stroke segment directly into the document bitmap.
pub fn stroke(self: *Self, x0: i32, y0: i32, x1: i32, y1: i32) void {
    switch (self.bitmap) {
        .color => |color_bitmap| {
            switch (self.blend_mode) {
                .alpha => color_bitmap.blendLine(x0, y0, x1, y1, self.foreground_color, true),
                .replace => color_bitmap.drawLine(x0, y0, x1, y1, self.foreground_color, true),
            }
        },
        .indexed => |indexed_bitmap| {
            indexed_bitmap.drawLine(x0, y0, x1, y1, self.foreground_index, true);
        },
    }
    self.last_preview = PrimitivePreview{ .line = .{ .x0 = x0, .y0 = y0, .x1 = x1, .y1 = y1 } };
    self.clearPreview();
}

/// Ends a stroke: pushes the accumulated edits as one history frame.
pub fn endStroke(self: *Self) !void {
    try self.history.pushFrame(self);
}

/// Color picker: sets the foreground color/index from the pixel at (x, y).
pub fn pick(self: *Self, x: i32, y: i32) void {
    switch (self.bitmap) {
        .color => |color_bitmap| {
            if (color_bitmap.getPixel(x, y)) |color| {
                self.foreground_color = color;
            }
        },
        .indexed => |indexed_bitmap| {
            if (indexed_bitmap.getIndex(x, y)) |index| {
                self.foreground_index = index;
                const
color = self.colormap[4 * @as(usize, self.foreground_index) ..][0..4];
                std.mem.copy(u8, &self.foreground_color, color);
            }
        },
    }
}

/// Returns the RGBA color at (x, y), resolving indexed pixels through the
/// colormap; null when out of bounds.
pub fn getColorAt(self: *Self, x: i32, y: i32) ?[4]u8 {
    switch (self.bitmap) {
        .color => |color_bitmap| return color_bitmap.getPixel(x, y),
        .indexed => |indexed_bitmap| {
            if (indexed_bitmap.getIndex(x, y)) |index| {
                const c = self.colormap[4 * @as(usize, index) ..];
                return [4]u8{ c[0], c[1], c[2], c[3] };
            }
        },
    }
    return null;
}

/// Flood-fills from (x, y) with the foreground color/index, honoring the
/// blend mode for truecolor. Pushes a history frame.
pub fn floodFill(self: *Self, x: i32, y: i32) !void {
    switch (self.bitmap) {
        .color => |*color_bitmap| {
            switch (self.blend_mode) {
                .alpha => {
                    if (color_bitmap.getPixel(x, y)) |dst| {
                        const blended = col.blend(self.foreground_color[0..], dst[0..]);
                        try color_bitmap.floodFill(self.allocator, x, y, blended);
                    }
                },
                .replace => try color_bitmap.floodFill(self.allocator, x, y, self.foreground_color),
            }
        },
        .indexed => |*indexed_bitmap| try indexed_bitmap.floodFill(self.allocator, x, y, self.foreground_index),
    }
    self.last_preview = .full;
    self.clearPreview();
    try self.history.pushFrame(self);
}

/// Uploads pending texture changes (recreate or update) and draws the
/// document as a textured rect; indexed bitmaps go through the palette.
pub fn draw(self: *Self, vg: nvg) void {
    if (self.need_texture_recreation) {
        vg.deleteImage(self.texture);
        self.texture = self.bitmap.createTexture(vg);
        self.need_texture_recreation = false;
        vg.updateImage(self.texture_palette, self.colormap);
        self.need_texture_update = false;
    } else if (self.need_texture_update) {
        // Note: uploads preview_bitmap, not bitmap — the preview layer is
        // what is displayed; clearPreview keeps it in sync with bitmap.
        switch (self.preview_bitmap) {
            .color => |color_preview_bitmap| {
                vg.updateImage(self.texture, color_preview_bitmap.pixels);
            },
            .indexed => |indexed_preview_bitmap| {
                vg.updateImage(self.texture, indexed_preview_bitmap.indices);
                vg.updateImage(self.texture_palette, self.colormap);
            },
        }
        self.need_texture_update = false;
    }
    const width = @intToFloat(f32, self.getWidth());
    const height = @intToFloat(f32, self.getHeight());
    vg.beginPath();
    vg.rect(0, 0, width, height);
    const pattern = switch (self.bitmap) {
        .color => vg.imagePattern(0, 0, width, height, 0, self.texture, 1),
        .indexed => vg.indexedImagePattern(0, 0, width, height, 0, self.texture, self.texture_palette, 1),
    };
    vg.fillPaint(pattern);
    vg.fill();
}

/// Draws the floating selection (if any) at its rect, refreshing or
/// recreating its texture first when flagged.
pub fn drawSelection(self: *Self, vg: nvg) void {
    if (self.selection) |*selection| {
        if (self.need_selection_texture_recreation) {
            vg.deleteImage(self.selection_texture);
            self.selection_texture = selection.bitmap.createTexture(vg);
            self.need_selection_texture_recreation = false;
            self.need_selection_texture_update = false;
        } else if (self.need_selection_texture_update) {
            switch (selection.bitmap) {
                .color => |color_bitmap| vg.updateImage(self.selection_texture, color_bitmap.pixels),
                .indexed => |indexed_bitmap| vg.updateImage(self.selection_texture, indexed_bitmap.indices),
            }
            self.need_selection_texture_update = false;
        }
        const rect = Rect(f32).make(
            @intToFloat(f32, selection.rect.x),
            @intToFloat(f32, selection.rect.y),
            @intToFloat(f32, selection.rect.w),
            @intToFloat(f32, selection.rect.h),
        );
        vg.beginPath();
        vg.rect(rect.x, rect.y, rect.w, rect.h);
        const pattern = switch (self.bitmap) {
            .color => vg.imagePattern(rect.x, rect.y, rect.w, rect.h, 0, self.selection_texture, 1),
            .indexed => vg.indexedImagePattern(rect.x, rect.y, rect.w, rect.h, 0, self.selection_texture, self.texture_palette, 1),
        };
        vg.fillPaint(pattern);
        vg.fill();
    }
}
src/Document.zig
const std = @import("std"); const alka = @import("alka"); const m = alka.math; usingnamespace alka.log; pub const mlog = std.log.scoped(.app); pub const log_level: std.log.Level = .info; var gpa = std.heap.GeneralPurposeAllocator(.{}){}; const Transform = struct { r: m.Rectangle = m.Rectangle{}, colour: alka.Colour = alka.Colour.rgba(255, 255, 255, 255), }; const PlayerController = struct { left: *const alka.input.State = undefined, right: *const alka.input.State = undefined, up: *const alka.input.State = undefined, down: *const alka.input.State = undefined, speed: f32 = 300, score: f32 = 20, score_multiplier: f32 = 1.0, // NOTE: MAKE USE OF IT score_increase: f32 = 2, score_decrease: f32 = 5, }; const MobController = struct { speed: m.Vec2f = undefined, directionx: f32 = 0, }; const GeneralFabric = struct { maxtime: f32 = 1, ctime: f32 = maxtime, counter: u32 = 0, spawn: fn (self: *GeneralFabric) anyerror!void = undefined, }; const Texture = struct { t: alka.renderer.Texture = undefined, id: u64 = undefined, }; const max_ent = 250; const World = alka.ecs.World(struct { texture: alka.ecs.StoreComponent("TextureDraw", Texture, 2), shape: alka.ecs.StoreComponent("RectangleDraw", bool, max_ent), tr: alka.ecs.StoreComponent("Transform", Transform, max_ent), vel: alka.ecs.StoreComponent("Velocity", m.Vec2f, max_ent), fab: alka.ecs.StoreComponent("Fabric", GeneralFabric, 2), plcontroller: alka.ecs.StoreComponent("PlayerController", PlayerController, 1), // player econtroller: alka.ecs.StoreComponent("MobController", MobController, max_ent), al: alka.ecs.StoreComponent("Alive", bool, max_ent), collmask: alka.ecs.StoreComponent("CollisionMask", []const u8, max_ent), }); const player_id: u64 = 0; const wall_left_id: u64 = 1; const wall_right_id: u64 = 2; const wall_top_id: u64 = 3; const wall_bottom_id: u64 = 4; const enemyfabric_id: u64 = 5; var world: World = undefined; var s_ent: std.AutoHashMap(u64, *World.Register) = undefined; var random: *std.rand.Random = undefined; 
// Set when entities were destroyed mid-iteration: signals loops to bail out
// because their iterators may now be invalid.
var abortfunc = false;
// Set when the round must restart (score dropped below zero).
var resetgame = false;
// Read-only view of the player's score; points into the ECS component.
var score: *const f32 = undefined;

/// One full application lifecycle: init -> run loop -> teardown.
fn firststart() !void {
    try startup();
    try alka.update();
    try shutdown();
}

/// Initializes the engine, input bindings, assets, and the game world.
fn startup() !void {
    const callbacks = alka.Callbacks{
        .update = update,
        .fixed = fupdate,
        .draw = draw,
        .resize = resize,
        .close = close,
    };
    try alka.init(&gpa.allocator, callbacks, 1024, 768, "Shooter", 0, true);

    try alka.getInput().bindKey(.A);
    try alka.getInput().bindKey(.D);
    try alka.getInput().bindKey(.W);
    try alka.getInput().bindKey(.S);

    const t = try alka.renderer.Texture.createFromPNGMemory(@embedFile("../assets/test.png"));
    t.setFilter(.filter_nearest, .filter_nearest);
    try alka.getAssetManager().loadTexturePro(1, t);

    try alka.getAssetManager().loadFontFromMemory(0, @embedFile("../assets/arial.ttf"), 128);
    const font = try alka.getAssetManager().getFont(0);
    font.texture.setFilter(.filter_mipmap_nearest, .filter_linear);

    try open();
    try alka.open();
}

/// Tears the engine down (counterpart to startup()).
fn shutdown() !void {
    try alka.close();
    try alka.deinit();
}

/// Destroys and recreates the game world without restarting the engine.
fn resetGame() !void {
    close();
    try alka.close();

    abortfunc = false;
    resetgame = false;

    try open();
    try alka.open();
}

/// Adds to the player score, scaled by `factor` and the multiplier.
fn scoreIncrease(factor: f32) !void {
    var parent = try world.getRegisterPtr(player_id);
    var c = try parent.getPtr("PlayerController", PlayerController);
    c.score += c.score_increase * c.score_multiplier * factor;
}

/// Subtracts from the player score; triggers a game reset below zero.
fn scoreDecrease(factor: f32) !void {
    var parent = try world.getRegisterPtr(player_id);
    var c = try parent.getPtr("PlayerController", PlayerController);
    c.score -= c.score_decrease * c.score_multiplier * factor;
    if (c.score < 0) {
        // restart game
        mlog.warn("Player score is below zero, restarting the game!", .{});
        abortfunc = true;
        resetgame = true;
    }
}

/// Collides `reg` against every other alive entity, zeroing velocity on
/// contact and applying the wall/bullet/player destruction rules.
/// May destroy entities (setting abortfunc) — callers must not keep
/// iterating with a stale iterator afterwards.
fn moveAndCollide(reg: *World.Register) !void {
    const r = blk: {
        const tmp = try reg.get("Transform", Transform);
        break :blk tmp.r;
    };
    var vel = try reg.getPtr("Velocity", m.Vec2f);
    const mask = try reg.get("CollisionMask", []const u8);

    // Probe distance used to test for imminent contact along each axis.
    const off: f32 = 5;
    // Velocity assigned on contact (zero: entities simply stop).
    const push: f32 = 0;
    {
        comptime const comps = [_][]const u8{
            "Alive", "Transform", "CollisionMask",
        };
        var it = World.iterator(comps.len, comps){ .world = &world };
        while (it.next()) |entry| {
            if (entry.value) |oreg| {
                const a = try oreg.get("Alive", bool);
                if (!a or reg.id == oreg.id) continue;
                const ore = blk: {
                    const tmp = try oreg.get("Transform", Transform);
                    break :blk tmp.r;
                };
                const omask = try oreg.get("CollisionMask", []const u8);

                var collided = r.aabb(ore);
                // Axis-by-axis look-ahead: only test in the direction of travel.
                if (vel.x < -0.1 and r.aabbMeeting(ore, m.Vec2f{ .x = -off })) {
                    vel.x = -push;
                    collided = true;
                } else if (vel.x > 0.1 and r.aabbMeeting(ore, m.Vec2f{ .x = off })) {
                    vel.x = push;
                    collided = true;
                }
                if (vel.y < -0.1 and r.aabbMeeting(ore, m.Vec2f{ .y = -off })) {
                    vel.y = push;
                    collided = true;
                } else if (vel.y > 0.1 and r.aabbMeeting(ore, m.Vec2f{ .y = off })) {
                    vel.y = -push;
                    collided = true;
                }

                if (collided) {
                    var destroy = false;
                    var destroyother = false;
                    var decfactor: ?f32 = null;
                    var incfactor: ?f32 = null;

                    // The player itself is never destroyed by contact.
                    if (reg.id != player_id) {
                        // Mobs bounce horizontally off the side walls.
                        var c = reg.getPtr("MobController", MobController) catch null;
                        if (c) |ec| {
                            if (oreg.id == wall_right_id) {
                                ec.directionx = -ec.directionx;
                            } else if (oreg.id == wall_left_id) {
                                ec.directionx = m.abs(ec.directionx);
                            }
                        }

                        if (oreg.id == wall_top_id) {
                            destroy = true;
                        } else if (oreg.id == wall_bottom_id) {
                            // Enemy escaped past the bottom: penalize.
                            destroy = true;
                            decfactor = 1.1;
                        } else if (oreg.id == player_id) {
                            // Enemy hit the player: bigger penalty.
                            destroy = true;
                            decfactor = 1.2;
                        } else if (std.mem.eql(u8, mask, "PlayerBullet")) {
                            if (std.mem.eql(u8, omask, "EnemyRectangle")) {
                                // Bullet hit enemy: reward, remove both.
                                incfactor = 1.3;
                                destroy = true;
                                destroyother = true;
                            }
                        }
                    }

                    if (destroyother) {
                        oreg.destroy();
                        try world.removeRegister(oreg.id);
                        abortfunc = true;
                    }
                    if (destroy) {
                        if (decfactor) |factor| try scoreDecrease(factor);
                        // scoreDecrease may have requested a reset; stop here.
                        if (resetgame) return;
                        if (incfactor) |factor| try scoreIncrease(factor);
                        reg.destroy();
                        try world.removeRegister(reg.id);
                        abortfunc = true;
                    }
                }
            }
        }
    }
}

/// Fabric callback: spawns a bullet just above the player's rectangle.
fn playerbulletFabricSpawn(self: *GeneralFabric) !void {
    var reg = try world.createRegister(world.findID());
    const col = alka.Colour.rgba(230, 79, 46, 255);
    try reg.create();
    try reg.attach("RectangleDraw", true);
    try reg.attach("Alive", true);
    try reg.attach("CollisionMask", @as([]const u8, "PlayerBullet"));

    const parent = try world.getRegister(player_id);
    const plrect = blk: {
        var result = m.Rectangle{};
        const t = try parent.get("Transform", Transform);
        result = t.r;
        break :blk result;
    };

    // Bullets fly straight up; no horizontal drift.
    try reg.attach("MobController", MobController{
        .speed = m.Vec2f{
            .x = 0,
            .y = -350,
        },
        .directionx = 0,
    });
    try reg.attach("Transform", Transform{
        .r = m.Rectangle{
            .position = m.Vec2f{
                .x = plrect.position.x + plrect.size.x / 2 - 2.5,
                .y = plrect.position.y - plrect.size.y / 2 - 10,
            },
            .size = m.Vec2f{
                .x = 5,
                .y = 20,
            },
        },
        .colour = col,
    });
    try reg.attach("Velocity", m.Vec2f{});
}

/// Fabric callback: spawns an enemy above the screen with random speed,
/// random horizontal direction, and a random x position inside the walls.
fn enemyFabricSpawn(self: *GeneralFabric) !void {
    var reg = try world.createRegister(world.findID());
    const w = alka.getWindow();
    try reg.create();
    try reg.attach("RectangleDraw", true);
    try reg.attach("Alive", true);
    try reg.attach("CollisionMask", @as([]const u8, "EnemyRectangle"));
    try reg.attach("MobController", MobController{
        .speed = m.Vec2f{
            .x = @intToFloat(f32, random.intRangeAtMost(i32, 100, 250)),
            .y = @intToFloat(f32, random.intRangeAtMost(i32, 100, 250)),
        },
        .directionx = @intToFloat(f32, random.intRangeAtMost(i32, -1, 1)),
    });
    try reg.attach("Transform", Transform{
        .r = m.Rectangle{
            .position = m.Vec2f{
                .x = @intToFloat(f32, random.intRangeAtMost(i32, 30, w.size.width - 50)),
                .y = -100,
            },
            .size = m.Vec2f{
                .x = 20,
                .y = 25,
            },
        },
    });
    try reg.attach("Velocity", m.Vec2f{});
}

/// Builds the world: player, four boundary walls, and the enemy spawner.
fn open() !void {
    world = try World.init(alka.getAllocator());
    s_ent = std.AutoHashMap(u64, *World.Register).init(alka.getAllocator());
    const w = alka.getWindow();

    // create player
    {
        var reg = try world.createRegister(player_id);
        try s_ent.put(player_id, reg);
        try reg.create();
        try reg.attach("TextureDraw", Texture{
            .id = 1,
            .t = try alka.getAssetManager().getTexture(1),
        });
        try reg.attach("Alive", true);
        try reg.attach("CollisionMask", @as([]const u8, "Player"));

        const input = alka.getInput();
        try reg.attach("PlayerController", PlayerController{
            .left = try input.keyStatePtr(.A),
            .right = try input.keyStatePtr(.D),
            .up = try input.keyStatePtr(.W),
            .down = try input.keyStatePtr(.S),
        });
        // The player's fabric fires a bullet every 0.3 s.
        try reg.attach("Fabric", GeneralFabric{
            .maxtime = 0.3,
            .ctime = 0.3,
            .counter = 0,
            .spawn = playerbulletFabricSpawn,
        });

        // Expose the score to draw() via the module-level pointer.
        const pc = try reg.getPtr("PlayerController", PlayerController);
        score = &pc.score;

        const texture = try alka.getAssetManager().getTexture(1);
        try reg.attach("Transform", Transform{
            .r = m.Rectangle{
                .position = m.Vec2f{
                    .x = @intToFloat(f32, @divTrunc(w.size.width, 2) - texture.width * 2),
                    .y = @intToFloat(f32, w.size.height - texture.height * 2) - 50,
                },
                .size = m.Vec2f{
                    .x = @intToFloat(f32, texture.width) * 2,
                    .y = @intToFloat(f32, texture.height) * 2,
                },
            },
        });
        try reg.attach("Velocity", m.Vec2f{});
    }

    // create walls
    {
        const wallcol = alka.Colour.rgba(45, 99, 150, 255);
        {
            var reg = try world.createRegister(wall_left_id);
            try s_ent.put(wall_left_id, reg);
            try reg.create();
            try reg.attach("RectangleDraw", true);
            try reg.attach("Alive", true);
            try reg.attach("CollisionMask", @as([]const u8, "Wall"));
            try reg.attach("Transform", Transform{
                .r = m.Rectangle{
                    .position = m.Vec2f{
                        .x = 0,
                        .y = 0,
                    },
                    .size = m.Vec2f{
                        .x = 10,
                        .y = @intToFloat(f32, w.size.height),
                    },
                },
                .colour = wallcol,
            });
        }
        {
            var reg = try world.createRegister(wall_right_id);
            try s_ent.put(wall_right_id, reg);
            try reg.create();
            try reg.attach("RectangleDraw", true);
            try reg.attach("Alive", true);
            try reg.attach("CollisionMask", @as([]const u8, "Wall"));
            try reg.attach("Transform", Transform{
                .r = m.Rectangle{
                    .position = m.Vec2f{
                        .x = @intToFloat(f32, w.size.width) - 10,
                        .y = 0,
                    },
                    .size = m.Vec2f{
                        .x = 10,
                        .y = @intToFloat(f32, w.size.height),
                    },
                },
                .colour = wallcol,
            });
        }
        {
            // Top wall sits well above the screen so enemies spawn below it.
            var reg = try world.createRegister(wall_top_id);
            try s_ent.put(wall_top_id, reg);
            try reg.create();
            try reg.attach("RectangleDraw", true);
            try reg.attach("Alive", true);
            try reg.attach("CollisionMask", @as([]const u8, "Wall"));
            try reg.attach("Transform", Transform{
                .r = m.Rectangle{
                    .position = m.Vec2f{
                        .x = 0,
                        .y = -400,
                    },
                    .size = m.Vec2f{
                        .x = @intToFloat(f32, w.size.width),
                        .y = 10,
                    },
                },
                .colour = wallcol,
            });
        }
        {
            var reg = try world.createRegister(wall_bottom_id);
            try s_ent.put(wall_bottom_id, reg);
            try reg.create();
            try reg.attach("RectangleDraw", true);
            try reg.attach("Alive", true);
            try reg.attach("CollisionMask", @as([]const u8, "Wall"));
            try reg.attach("Transform", Transform{
                .r = m.Rectangle{
                    .position = m.Vec2f{
                        .x = 0,
                        .y = @intToFloat(f32, w.size.height) - 10,
                    },
                    .size = m.Vec2f{
                        .x = @intToFloat(f32, w.size.width),
                        .y = 10,
                    },
                },
                .colour = wallcol,
            });
        }
    }

    // create enemy fabric
    {
        var reg = try world.createRegister(enemyfabric_id);
        try s_ent.put(enemyfabric_id, reg);
        try reg.create();
        try reg.attach("Alive", true);
        try reg.attach("Fabric", GeneralFabric{
            .maxtime = 1,
            .ctime = 1,
            .counter = 0,
            .spawn = enemyFabricSpawn,
        });
    }
}

/// Per-frame update: mob velocities, player input, fabric timers.
/// Any entity destruction sets abortfunc, which breaks every loop here
/// (iterators may be invalidated by removal).
fn update(dt: f32) !void {
    if (resetgame) {
        try resetGame();
        return;
    }
    defer abortfunc = false;

    {
        comptime const comps = [_][]const u8{
            "Alive", "Transform", "Velocity", "MobController",
        };
        var it = World.iterator(comps.len, comps){ .world = &world };
        while (it.next()) |entry| {
            if (abortfunc) break;
            if (entry.value) |reg| {
                const a = try reg.get("Alive", bool);
                if (!a) continue;
                const tr = try reg.get("Transform", Transform);
                const c = try reg.getPtr("MobController", MobController);
                var vel = try reg.getPtr("Velocity", m.Vec2f);
                // Velocity is a per-frame displacement (already scaled by dt).
                vel.* = c.speed.mulValues(dt * c.directionx, dt);
            }
        }
    }

    {
        comptime const comps = [_][]const u8{
            "Alive", "Transform", "Velocity", "PlayerController",
        };
        var it = World.iterator(comps.len, comps){ .world = &world };
        while (it.next()) |entry| {
            if (abortfunc) break;
            if (entry.value) |reg| {
                if (try reg.get("Alive", bool)) {
                    const tr = try reg.get("Transform", Transform);
                    const c = try reg.get("PlayerController", PlayerController);
                    var vel = try reg.getPtr("Velocity", m.Vec2f);
                    if (c.left.* == .down) {
                        vel.x = -c.speed * dt;
                    } else if (c.right.* == .down) {
                        vel.x = c.speed * dt;
                    } else vel.x = 0;
                    if (c.up.* == .down) {
                        vel.y = -c.speed * dt;
                    } else if (c.down.* == .down) {
                        vel.y = c.speed * dt;
                    } else vel.y = 0;
                }
            }
        }
    }

    {
        comptime const comps = [_][]const u8{
            "Alive", "Fabric",
        };
        var it = World.iterator(comps.len, comps){ .world = &world };
        while (it.next()) |entry| {
            if (abortfunc) break;
            if (entry.value) |reg| {
                const a = try reg.get("Alive", bool);
                if (!a) continue;
                var fab = try reg.getPtr("Fabric", GeneralFabric);
                if (fab.ctime <= 0) {
                    // Timer expired: spawn and rearm.
                    try fab.spawn(fab);
                    fab.ctime = fab.maxtime;
                } else fab.ctime -= 1 * dt;
            }
        }
    }
}

/// Fixed-step update: collision resolution, then position integration.
fn fupdate(dt: f32) !void {
    if (resetgame) {
        try resetGame();
        return;
    }
    defer abortfunc = false;

    {
        comptime const comps = [_][]const u8{
            "Alive", "Transform", "Velocity",
        };
        var it = World.iterator(comps.len, comps){ .world = &world };
        while (it.next()) |entry| {
            if (abortfunc) break;
            if (entry.value) |reg| {
                const a = try reg.get("Alive", bool);
                if (!a) continue;
                var tr = try reg.getPtr("Transform", Transform);
                var vel = try reg.getPtr("Velocity", m.Vec2f);
                // Collision may zero the velocity before it is applied.
                try moveAndCollide(reg);
                tr.r.position = tr.r.position.add(vel.*);
                // Consume the displacement so it is not applied twice.
                vel.* = m.Vec2f{};
            }
        }
    }
}

/// Render pass: rectangles, then textured entities, then the score text.
fn draw() !void {
    if (resetgame) {
        try resetGame();
        return;
    }
    defer abortfunc = false;

    {
        comptime const comps = [_][]const u8{
            "Alive", "Transform", "RectangleDraw",
        };
        var it = World.iterator(comps.len, comps){ .world = &world };
        while (it.next()) |entry| {
            if (abortfunc) break;
            if (entry.value) |reg| {
                const a = try reg.get("Alive", bool);
                if (!a) continue;
                const tr = try reg.get("Transform", Transform);
                try alka.drawRectangle(tr.r, tr.colour);
            }
        }
    }

    {
        comptime const comps = [_][]const u8{
            "Alive", "Transform", "TextureDraw",
        };
        var it = World.iterator(comps.len, comps){ .world = &world };
        while (it.next()) |entry| {
            if (abortfunc) break;
            if (entry.value) |reg| {
                if (try reg.get("Alive", bool)) {
                    const tr = try reg.get("Transform", Transform);
                    const texture = try reg.get("TextureDraw", Texture);
                    try alka.drawTexture(texture.id, tr.r, m.Rectangle{
                        .size = m.Vec2f{ .x = @intToFloat(f32, texture.t.width), .y = @intToFloat(f32, texture.t.height) },
                    }, tr.colour);
                }
            }
        }
    }

    const alloc = alka.getAllocator();
    var scoretxt: []u8 = try alloc.alloc(u8, 255);
    // NOTE(review): scoretxt is reassigned to the shorter printed slice below,
    // so the deferred free sees a different length than was allocated —
    // verify the engine allocator tolerates freeing a resliced buffer.
    defer alloc.free(scoretxt);
    scoretxt = try std.fmt.bufPrintZ(scoretxt, "Score: {d:.2}", .{score.*});
    try alka.drawText(0, scoretxt, m.Vec2f{ .x = 20, .y = 20 }, 24, alka.Colour.rgba(255, 255, 255, 255));
}

/// Engine resize callback: keep the GL viewport matched to the window.
fn resize(w: i32, h: i32) void {
    alka.gl.viewport(0, 0, w, h);
}

/// Engine close callback: tear down the world and the entity map.
fn close() void {
    s_ent.deinit();
    world.deinit();
}

pub fn main() !void {
    // Seed the PRNG from the OS and expose it via the module-level pointer.
    var prng = std.rand.DefaultPrng.init(blk: {
        var seed: u64 = undefined;
        try std.os.getrandom(std.mem.asBytes(&seed));
        break :blk seed;
    });
    random = &prng.random;

    try firststart();

    const leaked = gpa.deinit();
    if (leaked) return error.Leak;
}
examples/shooter.zig
const cc_gfx = @import("cc_gfx");
const cc_mem = @import("cc_mem");
const cc_ui = @import("cc_ui");
const cc_ui_gfx = @import("cc_ui_gfx");
const cc_ui_res = @import("cc_ui_res");
const cc_wnd = @import("cc_wnd");
const cc_wnd_gfx = @import("cc_wnd_gfx");

/// Bundles every long-lived resource of the ui example; released in
/// reverse order by deinit().
const Example = struct {
    ba: cc_mem.BumpAllocator,
    window: cc_wnd.Window,
    gctx: cc_gfx.Context,
    uctx: cc_ui.Context,
    ugctx: cc_ui_gfx.Context,
};

/// Upper bound on ui instances; sizes both the bump allocator and the
/// gfx-side instance buffers.
const max_instances = 256;

/// Initializes window, gfx context, and ui contexts.
/// Fix: the original leaked every already-acquired resource when a later
/// init step failed; each acquisition is now paired with an errdefer.
pub fn init() !Example {
    var ba = try cc_mem.BumpAllocator.init(cc_ui.Context.instance_size * max_instances);
    errdefer ba.deinit();

    var window = try cc_wnd.Window.init(.{ .width = 800, .height = 600, .title = "ui" });
    errdefer window.deinit();

    var gctx = try cc_gfx.Context.init(cc_wnd_gfx.getContextDesc(window));
    errdefer gctx.deinit();

    var uctx = try cc_ui.Context.init(.{
        .allocator = ba.allocator(),
        .max_instances = max_instances,
    });
    errdefer uctx.deinit();

    // NOTE(review): ugctx captures &gctx.device, a pointer into this stack
    // frame; the Example is returned by value, so the pointer dangles unless
    // cc_ui_gfx only uses the device during init — confirm against cc_ui_gfx.
    const ugctx = try cc_ui_gfx.Context.init(.{
        .device = &gctx.device,
        .format = gctx.swapchain_format,
        .vert_shader_bytes = try cc_ui_res.loadVertShaderBytes(),
        .frag_shader_bytes = try cc_ui_res.loadFragShaderBytes(),
        .instance_size = cc_ui.Context.instance_size,
        .max_instances = max_instances,
    });

    return Example{ .ba = ba, .window = window, .gctx = gctx, .uctx = uctx, .ugctx = ugctx };
}

/// Renders one frame: ui text -> render pass -> submit -> present.
/// Does nothing while the window is not visible.
pub fn loop(ex: *Example) !void {
    if (!ex.window.isVisible()) {
        return;
    }

    try ex.uctx.debugText("Hello, world!", .{});

    const swapchain_view = try ex.gctx.swapchain.getCurrentTextureView();
    var command_encoder = try ex.gctx.device.initCommandEncoder();

    var render_pass_desc = cc_gfx.RenderPassDesc{};
    render_pass_desc.setColorAttachments(&[_]cc_gfx.ColorAttachment{.{
        .view = &swapchain_view,
        .load_op = .clear,
        .clear_value = ex.gctx.clear_color,
        .store_op = .store,
    }});

    var render_pass = try command_encoder.beginRenderPass(render_pass_desc);
    try ex.ugctx.render(&render_pass, ex.uctx.getInstanceBytes());
    try render_pass.end();

    try ex.gctx.device.getQueue().submit(&.{try command_encoder.finish()});
    try ex.gctx.swapchain.present();
}

/// Releases all resources in reverse order of acquisition.
pub fn deinit(ex: *Example) !void {
    ex.ugctx.deinit();
    ex.uctx.deinit();
    ex.gctx.deinit();
    ex.window.deinit();
    ex.ba.deinit();
}
ex/ui/ui.zig
const std = @import("std");
const json = std.json;
const clibs = @import("clibs.zig");
const curl = clibs.curl;

// Session id returned by Tractor on login; null until tractorLogin() succeeds.
var tractor_tsid: ?[36:0]u8 = null;

/// Aggregated job-state counters parsed from a Tractor subscribe response.
pub const Messages = struct {
    active: u32 = 0,
    blocked: u32 = 0,
    done: u32 = 0,
    err: u32 = 0,
};

/// POSTs `post` to the Tractor monitor URL (TRACTOR_URL env var, with a
/// default) and returns the raw response body.
/// Caller owns the returned list and must deinit it.
/// NOTE(review): curl_global_init/cleanup run on every call — libcurl
/// documents these as once-per-program; consider hoisting if this is hot.
pub fn postTractor(allocator: *std.mem.Allocator, post: []const u8) !std.ArrayList(u8) {
    var tractor_url = std.os.getenv("TRACTOR_URL") orelse "http://tractor/Tractor/monitor";

    // global curl init, or fail
    if (curl.curl_global_init(curl.CURL_GLOBAL_ALL) != curl.CURLE_OK)
        return error.CURLGlobalInitFailed;
    defer curl.curl_global_cleanup();

    // curl easy handle init, or fail
    const handle = curl.curl_easy_init() orelse return error.CURLHandleInitFailed;
    defer curl.curl_easy_cleanup(handle);

    var response_buffer = std.ArrayList(u8).init(allocator);

    // superfluous when using an arena allocator, but
    // important if the allocator implementation changes
    errdefer response_buffer.deinit();

    // setup curl options
    if (curl.curl_easy_setopt(handle, curl.CURLOPT_URL, tractor_url.ptr) != curl.CURLE_OK)
        return error.CouldNotSetURL;
    if (curl.curl_easy_setopt(handle, curl.CURLOPT_POSTFIELDS, post.ptr) != curl.CURLE_OK)
        return error.CouldNotSetPost;

    // set write function callbacks
    if (curl.curl_easy_setopt(handle, curl.CURLOPT_WRITEFUNCTION, writeToArrayListCallback) != curl.CURLE_OK)
        return error.CouldNotSetWriteCallback;
    if (curl.curl_easy_setopt(handle, curl.CURLOPT_WRITEDATA, &response_buffer) != curl.CURLE_OK)
        return error.CouldNotSetWriteCallback;

    // perform
    if (curl.curl_easy_perform(handle) != curl.CURLE_OK)
        return error.FailedToPerformRequest;

    return response_buffer;
}

/// Logs into Tractor as $USER, caches the returned 36-char session id in
/// the module-level tractor_tsid, and returns it.
/// NOTE(review): std.os.getenv("USER") is an optional and may be null —
/// verify the format call handles that (or that USER is always set here).
pub fn tractorLogin(allocator: *std.mem.Allocator) !?[36:0]u8 {
    //std.debug.print("Logging into Tractor\n", .{});
    var buf: [64:0]u8 = undefined;
    var post = try std.fmt.bufPrintZ(buf[0..], "q=login&user={s}", .{std.os.getenv("USER")});

    var response = try postTractor(allocator, post);
    defer response.deinit();

    var p = json.Parser.init(allocator, false);
    defer p.deinit();

    var tree = try p.parse(response.items);
    defer tree.deinit();

    //std.debug.print("login\n{s}\n", .{response.items});
    // Copy the tsid string into the fixed, zero-terminated cache.
    tractor_tsid = [_:0]u8{0} ** 36;
    for (tree.root.Object.get("tsid").?.String) |v, i| {
        tractor_tsid.?[i] = v;
    }
    //std.debug.print("{s}\n", .{tractor_tsid});
    return tractor_tsid;
}

/// Parses a subscribe response, counting job-state events by state letter.
/// Returns null when the response carries no "mbox" field.
fn parseResponse(allocator: *std.mem.Allocator, response: std.ArrayList(u8)) !?Messages {
    var p = json.Parser.init(allocator, false);
    defer p.deinit();

    var tree = try p.parse(response.items);
    defer tree.deinit();

    var mbox = tree.root.Object.get("mbox") orelse return null;

    var msgs = Messages{};
    for (mbox.Array.items) |item| {
        // Only "c" (command?) events carry a state letter at index 4 —
        // NOTE(review): field meanings inferred from indices; confirm
        // against the Tractor monitor protocol.
        if (std.mem.eql(u8, item.Array.items[0].String, "c")) {
            if (std.mem.eql(u8, item.Array.items[4].String, "A"))
                msgs.active += 1;
            if (std.mem.eql(u8, item.Array.items[4].String, "B"))
                msgs.blocked += 1;
            if (std.mem.eql(u8, item.Array.items[4].String, "D"))
                msgs.done += 1;
            if (std.mem.eql(u8, item.Array.items[4].String, "E"))
                msgs.err += 1;
        }
    }
    //try std.testing.expectEqualStrings("c", tree.root.Object.get("mbox").?.Array.items[0].Array.items[0].String);
    return msgs;
}

/// Polls Tractor for job-state updates using the cached session id.
/// Asserts (via .?) that tractorLogin() was called first.
pub fn queryTractor(allocator: *std.mem.Allocator) !?Messages {
    var buf: [128:0]u8 = undefined;
    var post = try std.fmt.bufPrintZ(buf[0..], "q=subscribe&jids=0&tsid={s}", .{tractor_tsid.?});
    //var post = try std.fmt.bufPrintZ(buf[0..], "q=subscribe&tcs=1&stats=1&jids=0&tsid={s}", .{tractor_tsid.?});

    var response = try postTractor(allocator, post);
    defer response.deinit();

    //std.debug.print("\n\n{s}\n\n", .{response.items});
    return parseResponse(allocator, response);
}

/// libcurl write callback: appends the received chunk to the ArrayList
/// passed via CURLOPT_WRITEDATA. Returning 0 signals failure to curl.
fn writeToArrayListCallback(data: *c_void, size: c_uint, nmemb: c_uint, user_data: *c_void) callconv(.C) c_uint {
    var buffer = @intToPtr(*std.ArrayList(u8), @ptrToInt(user_data));
    var typed_data = @intToPtr([*]u8, @ptrToInt(data));
    buffer.appendSlice(typed_data[0 .. nmemb * size]) catch return 0;
    return nmemb * size;
}

/// Shared state between the listener thread and its consumer.
/// msgs/is_ready are accessed only through SeqCst atomics.
pub const ThreadContext = struct {
    allocator: *std.mem.Allocator,
    msgs: Messages,
    is_ready: bool,
};

/// Listener thread body: polls Tractor once per second and publishes the
/// counters into ctx, handshaking via the is_ready flag.
fn appendNum(ctx: *ThreadContext) !void {
    //var prng = std.rand.DefaultPrng.init(blk: {
    //    var seed: u64 = undefined;
    //    try std.os.getrandom(std.mem.asBytes(&seed));
    //    break :blk 42;
    //});
    //const rand = &prng.random;
    while (true) {
        //var val = rand.intRangeAtMost(u32, 1, 100);
        defer std.time.sleep(1 * std.time.ns_per_s);
        var val = (try queryTractor(ctx.allocator)) orelse continue;

        // Busy-wait until the consumer has taken the previous result, i.e.
        // until it clears is_ready back to false.
        // NOTE(review): the original comment said "until is_ready is true",
        // but the condition spins WHILE it is true — confirm the intended
        // handshake direction with the consumer side.
        while (@atomicLoad(bool, &ctx.is_ready, .SeqCst)) {
            // spinLoopHint() ?
        }

        @atomicStore(u32, &ctx.msgs.active, val.active, .SeqCst);
        @atomicStore(u32, &ctx.msgs.blocked, val.blocked, .SeqCst);
        @atomicStore(u32, &ctx.msgs.err, val.err, .SeqCst);
        @atomicStore(u32, &ctx.msgs.done, val.done, .SeqCst);
        // Publish last so the consumer never sees a half-written Messages.
        @atomicStore(bool, &ctx.is_ready, true, .SeqCst);
    }
}

/// Spawns the background polling thread; caller joins/detaches it.
pub fn startListener(ctx: *ThreadContext) !*std.Thread {
    var thread = try std.Thread.spawn(appendNum, ctx);
    return thread;
}

test "bit compare" {
    var a: u32 = 1;
    var b: u32 = 2;
    try std.testing.expect((a | b) == 3);
}

// NOTE(review): this is a manual integration test — it needs a live Tractor
// server and loops forever; it never reaches the final expect.
test "query test" {
    _ = tractorLogin(std.testing.allocator) catch unreachable;
    while (true) {
        _ = queryTractor(std.testing.allocator) catch unreachable;
        std.time.sleep(1 * std.time.ns_per_s);
    }
    std.testing.expect(true) catch unreachable;
}
src/tractor.zig
const std = @import("std");
const base32 = @import("base32");
const t = std.testing;
const stdout = std.io.getStdOut().writer();

/// ULID: 128 bits = 48-bit big-endian millisecond timestamp + 80 random bits,
/// rendered as 26 Crockford-base32 characters.
pub const ULID = struct {
    const Self = @This();

    // Bytes 0..5: timestamp (big-endian u48); bytes 6..15: random payload.
    data: [16]u8 = [_]u8{0} ** 16,

    /// Creates a ULID stamped with the current wall-clock time.
    pub fn random(rand: std.rand.Random) ULID {
        // Get a calendar timestamp, in milliseconds, relative to UTC 1970-01-01.
        return randomWithTimestamp(rand, std.time.milliTimestamp());
    }

    /// Creates a ULID with the given unix-millisecond timestamp and a
    /// random 10-byte payload.
    pub fn randomWithTimestamp(rand: std.rand.Random, unixTimestamp: i64) ULID {
        var n = ULID{};
        rand.bytes(n.data[6..]);
        const ts = @intCast(u48, unixTimestamp);
        std.mem.writeIntBig(u48, n.data[0..6], ts);
        return n;
    }

    /// Returns the embedded timestamp in unix milliseconds.
    pub fn timestamp(self: *const Self) i64 {
        return @intCast(i64, std.mem.readIntBig(u48, self.data[0..6]));
    }

    /// Returns the 10 random payload bytes (view into self.data).
    pub fn payload(self: *const Self) *const [10]u8 {
        return self.data[6..];
    }

    /// Parses the first 26 characters of `data` as a Crockford-base32 ULID.
    /// Errors: InvalidLength for short input; decode errors (e.g. the base32
    /// library's CorruptImput) for invalid characters.
    /// NOTE(review): input longer than 26 chars is silently truncated —
    /// confirm that is intended rather than an error.
    pub fn parse(data: []const u8) !ULID {
        if (data.len < 26) {
            return error.InvalidLength;
        }
        var n = ULID{};
        _ = try base32.crockford_encoding.decode(&n.data, data[0..26]);
        return n;
    }

    /// Encodes this ULID into `dest` and returns the written 26-char slice.
    pub fn format(self: *const Self, dest: *[26]u8) []const u8 {
        return base32.crockford_encoding.encode(dest, &self.data);
    }

    /// Adapter so a ULID can be printed with std.fmt ("{s}").
    pub fn fmt(self: *const Self) std.fmt.Formatter(formatULID) {
        return .{ .data = self };
    }
};

/// std.fmt callback backing ULID.fmt(); ignores format spec and options.
pub fn formatULID(
    ulid: *const ULID,
    comptime fmt: []const u8,
    options: std.fmt.FormatOptions,
    writer: anytype,
) !void {
    _ = options;
    _ = fmt;
    var buf: [26]u8 = undefined;
    _ = ulid.format(&buf);
    try writer.writeAll(&buf);
}

test "parse" {
    const a = try ULID.parse("01BX5ZZKBKACTAV9WEVGEMMVRY");
    //std.debug.print("payload[{s}]\n", .{std.fmt.fmtSliceHexUpper(a.payload())});
    try t.expectEqual(@as(i64, 377202144092), a.timestamp());
    var buf: [16]u8 = undefined;
    const expected = try std.fmt.hexToBytes(&buf, "D4CD2B69E3B707529B00");
    try t.expectEqualSlices(u8, expected, a.payload());

    // Invalid characters must be rejected by the base32 decoder.
    if (ULID.parse("***************************")) |_| {
        return error.ExpectedError;
    } else |err| if (err != error.CorruptImput) {
        return err;
    }

    // Too-short input is rejected before decoding.
    if (ULID.parse("123")) |_| {
        return error.ExpectedError;
    } else |err| if (err != error.InvalidLength) {
        return err;
    }

    if (ULID.parse("fffffffffffffffffffffffffff")) |_| {
        return error.ExpectedError;
    } else |err| if (err != error.CorruptImput) {
        return err;
    }

    // All-'Z' input: the maximum representable 48-bit timestamp.
    const max = try ULID.parse("ZZZZZZZZZZZZZZZZZZZZZZZZZZ");
    try t.expectEqual(@as(i64, 281474976710655), max.timestamp());
    //std.debug.print("payload[{s}]\n", .{std.fmt.fmtSliceHexUpper(max.payload())});
    try t.expectEqualSlices(u8, &[_]u8{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }, max.payload());
}

test "formatter" {
    // The zero ULID formats as 26 '0' characters.
    const a = ULID{};
    var fmtbuf: [26]u8 = undefined;
    _ = try std.fmt.bufPrint(&fmtbuf, "{s}", .{a.fmt()});
    try t.expectEqualSlices(u8, "00000000000000000000000000", &fmtbuf);
}

test "random" {
    // Deterministic PRNG seed -> known payload bytes.
    const seed_time = 1469918176385;
    var prng = std.rand.DefaultPrng.init(0);
    const a = ULID.randomWithTimestamp(prng.random(), seed_time);
    try t.expectEqual(@as(i64, seed_time), a.timestamp());
    var buf: [16]u8 = undefined;
    const expected = try std.fmt.hexToBytes(&buf, "DF230B49615D175307D5");
    //std.debug.print("payload[{s}]\n", .{std.fmt.fmtSliceHexUpper(a.payload())});
    try t.expectEqualSlices(u8, expected, a.payload());
}
src/ulid.zig
const std = @import("std"); const Allocator = std.mem.Allocator; const List = std.ArrayList; const Map = std.AutoHashMap; const StrMap = std.StringHashMap; const BitSet = std.DynamicBitSet; const Str = []const u8; const util = @import("util.zig"); const gpa = util.gpa; const data = @embedFile("../data/day01.txt"); pub fn main() !void { const values = [_]i32{ 191, 185, 188, 189, 204, 213, 215, 227, 222, 221, 236, 235, 236, 232, 224, 228, 234, 226, 227, 228, 230, 232, 234, 213, 197, 198, 210, 238, 248, 261, 263, 265, 262, 295, 296, 311, 330, 333, 337, 344, 345, 356, 359, 360, 364, 367, 383, 386, 387, 391, 424, 426, 428, 440, 442, 446, 450, 423, 428, 423, 425, 445, 429, 436, 465, 456, 463, 461, 463, 483, 484, 500, 517, 525, 529, 527, 530, 535, 566, 578, 582, 587, 604, 613, 614, 609, 611, 619, 623, 627, 621, 622, 626, 644, 646, 655, 657, 659, 661, 671, 673, 668, 673, 676, 682, 683, 678, 679, 682, 681, 684, 686, 690, 693, 698, 700, 702, 707, 708, 732, 733, 734, 743, 745, 776, 794, 804, 784, 790, 813, 856, 855, 859, 850, 851, 886, 885, 888, 889, 883, 885, 920, 919, 917, 915, 918, 909, 910, 924, 925, 932, 933, 935, 966, 970, 971, 974, 958, 959, 961, 959, 964, 969, 970, 973, 972, 976, 998, 1009, 1010, 1011, 1046, 1047, 1040, 1041, 1056, 1062, 1061, 1062, 1077, 1078, 1080, 1082, 1094, 1096, 1097, 1103, 1128, 1125, 1167, 1176, 1175, 1173, 1183, 1182, 1183, 1188, 1202, 1204, 1205, 1206, 1207, 1212, 1235, 1236, 1256, 1268, 1270, 1276, 1281, 1283, 1276, 1287, 1292, 1295, 1291, 1297, 1298, 1301, 1312, 1316, 1333, 1327, 1340, 1338, 1348, 1364, 1381, 1382, 1383, 1394, 1385, 1388, 1387, 1388, 1394, 1398, 1400, 1397, 1399, 1401, 1408, 1410, 1420, 1421, 1434, 1433, 1436, 1456, 1455, 1479, 1482, 1483, 1522, 1526, 1527, 1542, 1543, 1544, 1548, 1557, 1571, 1574, 1580, 1579, 1602, 1618, 1605, 1608, 1612, 1622, 1625, 1626, 1633, 1648, 1641, 1643, 1650, 1652, 1659, 1678, 1681, 1688, 1692, 1702, 1686, 1688, 1685, 1684, 1674, 1694, 1696, 1695, 1720, 1729, 1736, 1762, 1763, 1767, 1780, 1789, 
1796, 1798, 1799, 1800, 1808, 1800, 1805, 1848, 1849, 1867, 1888, 1902, 1903, 1920, 1923, 1926, 1930, 1933, 1925, 1906, 1878, 1898, 1899, 1930, 1936, 1948, 1974, 1983, 1984, 1986, 1983, 1984, 1987, 1985, 1988, 1992, 2005, 2009, 2013, 2025, 2033, 2043, 2046, 2030, 2014, 2009, 2011, 2013, 2043, 2049, 2053, 2065, 2070, 2079, 2080, 2090, 2092, 2102, 2116, 2138, 2140, 2141, 2143, 2149, 2152, 2153, 2154, 2170, 2194, 2192, 2216, 2231, 2246, 2261, 2268, 2269, 2252, 2260, 2264, 2274, 2289, 2293, 2296, 2297, 2300, 2305, 2334, 2329, 2353, 2357, 2359, 2364, 2374, 2377, 2364, 2365, 2366, 2360, 2362, 2360, 2361, 2367, 2357, 2362, 2376, 2379, 2380, 2381, 2401, 2436, 2435, 2439, 2446, 2454, 2456, 2459, 2492, 2493, 2514, 2519, 2524, 2529, 2530, 2535, 2548, 2549, 2551, 2548, 2553, 2552, 2556, 2563, 2570, 2603, 2616, 2617, 2623, 2630, 2631, 2641, 2645, 2660, 2662, 2663, 2664, 2662, 2668, 2673, 2677, 2687, 2688, 2699, 2698, 2691, 2695, 2696, 2710, 2711, 2717, 2722, 2723, 2747, 2746, 2754, 2753, 2752, 2771, 2773, 2774, 2777, 2789, 2790, 2805, 2807, 2818, 2817, 2825, 2818, 2828, 2833, 2834, 2837, 2839, 2854, 2856, 2858, 2859, 2863, 2880, 2882, 2884, 2887, 2888, 2889, 2902, 2911, 2917, 2936, 2938, 2940, 2943, 2944, 2943, 2947, 2954, 2956, 2957, 2959, 2957, 2958, 2959, 2956, 2959, 2960, 2969, 2974, 2979, 2983, 2985, 2986, 2992, 2993, 2995, 3025, 3037, 3036, 3037, 3038, 3040, 3043, 3049, 3058, 3071, 3072, 3080, 3083, 3081, 3090, 3091, 3070, 3076, 3079, 3083, 3084, 3086, 3101, 3114, 3106, 3107, 3137, 3138, 3139, 3137, 3133, 3137, 3102, 3108, 3109, 3124, 3130, 3138, 3149, 3150, 3160, 3171, 3191, 3190, 3200, 3205, 3229, 3235, 3237, 3238, 3239, 3243, 3244, 3245, 3246, 3250, 3265, 3268, 3269, 3271, 3279, 3278, 3279, 3280, 3281, 3290, 3295, 3296, 3300, 3303, 3304, 3305, 3308, 3325, 3313, 3326, 3327, 3335, 3336, 3337, 3335, 3361, 3378, 3374, 3378, 3387, 3406, 3416, 3418, 3438, 3454, 3460, 3475, 3476, 3477, 3471, 3476, 3477, 3486, 3487, 3500, 3502, 3503, 3530, 3531, 3530, 3556, 3563, 3564, 3565, 
3566, 3580, 3587, 3591, 3598, 3599, 3612, 3613, 3624, 3623, 3631, 3660, 3661, 3662, 3668, 3671, 3672, 3674, 3683, 3684, 3693, 3694, 3700, 3696, 3699, 3705, 3706, 3710, 3717, 3731, 3734, 3729, 3735, 3736, 3739, 3749, 3734, 3739, 3742, 3753, 3756, 3757, 3775, 3777, 3774, 3780, 3785, 3786, 3790, 3792, 3796, 3798, 3802, 3817, 3818, 3815, 3822, 3824, 3849, 3856, 3871, 3845, 3847, 3849, 3851, 3853, 3884, 3885, 3887, 3889, 3891, 3894, 3895, 3900, 3918, 3935, 3936, 3941, 3945, 3936, 3942, 3945, 3942, 3943, 3965, 3974, 3976, 3979, 3975, 3976, 3980, 3993, 3992, 3991, 3999, 4000, 3996, 4019, 4028, 4029, 4031, 4053, 4054, 4080, 4087, 4089, 4088, 4098, 4100, 4103, 4104, 4112, 4125, 4145, 4148, 4152, 4153, 4158, 4162, 4165, 4177, 4184, 4203, 4205, 4206, 4207, 4211, 4215, 4221, 4240, 4247, 4253, 4261, 4262, 4265, 4269, 4273, 4274, 4258, 4260, 4264, 4282, 4299, 4313, 4284, 4287, 4289, 4290, 4293, 4295, 4297, 4296, 4310, 4314, 4313, 4327, 4329, 4330, 4328, 4338, 4347, 4368, 4371, 4373, 4379, 4388, 4392, 4396, 4393, 4394, 4393, 4397, 4406, 4409, 4410, 4414, 4437, 4436, 4428, 4429, 4434, 4426, 4427, 4428, 4445, 4447, 4467, 4471, 4470, 4472, 4485, 4487, 4493, 4495, 4491, 4497, 4500, 4506, 4507, 4508, 4511, 4518, 4519, 4523, 4520, 4523, 4540, 4541, 4548, 4549, 4550, 4551, 4560, 4570, 4586, 4575, 4581, 4585, 4586, 4591, 4601, 4595, 4597, 4601, 4613, 4615, 4617, 4619, 4604, 4609, 4610, 4616, 4640, 4666, 4676, 4677, 4678, 4709, 4704, 4719, 4727, 4729, 4730, 4747, 4753, 4755, 4768, 4770, 4772, 4777, 4781, 4811, 4814, 4843, 4844, 4846, 4850, 4862, 4863, 4868, 4869, 4870, 4871, 4874, 4881, 4882, 4878, 4887, 4895, 4898, 4910, 4920, 4908, 4910, 4909, 4914, 4916, 4922, 4914, 4918, 4926, 4949, 4952, 4953, 4955, 4956, 4959, 4985, 4986, 4990, 4992, 5028, 5029, 5032, 5030, 5037, 5038, 5047, 5053, 5054, 5059, 5057, 5059, 5062, 5067, 5085, 5086, 5090, 5084, 5077, 5078, 5081, 5091, 5094, 5096, 5095, 5097, 5103, 5118, 5144, 5146, 5153, 5160, 5169, 5173, 5174, 5175, 5184, 5187, 5195, 5196, 5216, 5225, 
5238, 5239, 5232, 5235, 5237, 5242, 5244, 5247, 5254, 5262, 5271, 5281, 5287, 5288, 5260, 5264, 5282, 5295, 5301, 5304, 5310, 5295, 5296, 5297, 5298, 5303, 5304, 5305, 5277, 5278, 5289, 5295, 5290, 5297, 5299, 5320, 5328, 5341, 5347, 5356, 5361, 5364, 5365, 5378, 5379, 5377, 5378, 5367, 5380, 5381, 5387, 5385, 5390, 5391, 5392, 5415, 5422, 5423, 5425, 5439, 5441, 5448, 5455, 5470, 5477, 5482, 5488, 5489, 5492, 5501, 5499, 5496, 5499, 5533, 5552, 5562, 5563, 5565, 5566, 5572, 5571, 5581, 5583, 5589, 5594, 5593, 5596, 5602, 5606, 5629, 5643, 5644, 5652, 5673, 5676, 5680, 5676, 5681, 5696, 5703, 5698, 5718, 5720, 5733, 5736, 5741, 5766, 5771, 5786, 5787, 5793, 5790, 5786, 5804, 5805, 5808, 5809, 5816, 5810, 5811, 5818, 5829, 5836, 5839, 5824, 5839, 5843, 5845, 5849, 5853, 5856, 5861, 5873, 5894, 5895, 5896, 5897, 5898, 5899, 5900, 5913, 5917, 5918, 5923, 5924, 5934, 5935, 5952, 5965, 5963, 5958, 5961, 5975, 5988, 5989, 5996, 5997, 6008, 6011, 6023, 6024, 6033, 6048, 6050, 6051, 6049, 6048, 6043, 6048, 6051, 6055, 6081, 6084, 6087, 6093, 6092, 6090, 6078, 6079, 6103, 6092, 6094, 6099, 6100, 6116, 6123, 6129, 6127, 6138, 6139, 6141, 6143, 6146, 6147, 6159, 6162, 6163, 6177, 6179, 6183, 6187, 6188, 6194, 6198, 6213, 6237, 6267, 6282, 6288, 6292, 6297, 6310, 6323, 6325, 6324, 6347, 6349, 6350, 6353, 6363, 6376, 6384, 6394, 6397, 6401, 6402, 6399, 6415, 6416, 6421, 6422, 6423, 6420, 6445, 6446, 6449, 6451, 6457, 6460, 6461, 6467, 6468, 6473, 6474, 6517, 6522, 6519, 6515, 6532, 6533, 6535, 6530, 6532, 6522, 6548, 6549, 6565, 6568, 6579, 6556, 6563, 6565, 6572, 6576, 6578, 6601, 6608, 6611, 6612, 6626, 6619, 6651, 6657, 6658, 6666, 6665, 6660, 6682, 6704, 6705, 6711, 6708, 6716, 6720, 6725, 6727, 6729, 6733, 6734, 6737, 6736, 6741, 6742, 6743, 6750, 6756, 6757, 6758, 6755, 6774, 6773, 6774, 6780, 6781, 6790, 6791, 6800, 6812, 6815, 6817, 6827, 6826, 6832, 6833, 6834, 6833, 6822, 6825, 6829, 6836, 6840, 6846, 6850, 6861, 6868, 6869, 6872, 6879, 6882, 6884, 6907, 6917, 6930, 
6924, 6934, 6931, 6918, 6924, 6928, 6934, 6935, 6936, 6953, 6955, 6964, 6987, 6999, 7009, 7042, 7048, 7027, 7054, 7053, 7072, 7073, 7055, 7056, 7057, 7079, 7080, 7084, 7095, 7108, 7107, 7109, 7111, 7134, 7135, 7137, 7138, 7145, 7151, 7161, 7172, 7179, 7183, 7189, 7188, 7189, 7195, 7202, 7236, 7241, 7250, 7277, 7295, 7297, 7294, 7298, 7297, 7301, 7289, 7291, 7276, 7293, 7295, 7311, 7318, 7341, 7378, 7398, 7408, 7409, 7412, 7409, 7413, 7397, 7399, 7423, 7424, 7423, 7426, 7418, 7414, 7409, 7412, 7425, 7426, 7433, 7439, 7448, 7462, 7466, 7467, 7468, 7477, 7480, 7481, 7498, 7499, 7501, 7507, 7527, 7535, 7548, 7551, 7552, 7555, 7561, 7554, 7559, 7560, 7587, 7590, 7596, 7620, 7628, 7645, 7646, 7652, 7653, 7654, 7663, 7670, 7678, 7683, 7686, 7687, 7683, 7692, 7691, 7687, 7688, 7691, 7699, 7698, 7725, 7733, 7734, 7736, 7740, 7751, 7744, 7724, 7719, 7729, 7731, 7746, 7747, 7749, 7722, 7725, 7730, 7735, 7737, 7738, 7739, 7724, 7721, 7724, 7736, 7727, 7730, 7731, 7732, 7736, 7737, 7768, 7787, 7797, 7807, 7815, 7819, 7825, 7852, 7856, 7862, 7869, 7881, 7882, 7885, 7888, 7892, 7889, 7893, 7895, 7896, 7901, 7919, 7921, 7932, 7933, 7964, 7979, 7990, 7991, 7994, 7997, 8011, 8005, 8006, 8014, 8047, 8049, 8064, 8065, 8046, 8047, 8079, 8081, 8082, 8083, 8055, 8052, 8056, 8063, 8064, 8066, 8053, 8067, 8072, 8073, 8077, 8079, 8081, 8076, 8083, 8086, 8098, 8100, 8109, 8107, 8124, 8125, 8126, 8127, 8145, 8146, 8152, 8175, 8176, 8177, 8182, 8193, 8195, 8197, 8199, 8203, 8204, 8213, 8217, 8237, 8240, 8239, 8230, 8237, 8245, 8259, 8264, 8272, 8288, 8285, 8286, 8300, 8301, 8304, 8305, 8327, 8326, 8327, 8341, 8343, 8356, 8357, 8358, 8367, 8374, 8375, 8391, 8392, 8393, 8394, 8399, 8405, 8402, 8404, 8435, 8444, 8484, 8481, 8490, 8492, 8493, 8494, 8503, 8506, 8516, 8515, 8516, 8504, 8508, 8513, 8529, 8542, 8545, 8541, 8546, 8549, 8570, 8552, 8553, 8581, 8578, 8585, 8593, 8596, 8605, 8606, 8608, 8616, 8627, 8628, 8627, 8639, 8644, 8645, 8626, 8633, 8635, 8636, 8640, 8645, 8655, 8673, 8693, 8694, 
8695, 8696, 8695, 8714, 8716, 8742, 8739, 8735, 8740, 8763, 8762, 8771, 8772, 8777, 8776, 8787, 8788, 8795, 8796, 8777, 8782, 8783, 8794, 8800, 8810, 8827, 8828, 8833, 8829, 8841, 8862, 8865, 8867, 8869, 8895, 8899, 8901, 8904, 8908, 8909, 8927, 8928, 8930, 8931, 8935, 8945, 8946, 8950, 8954, 8957, 8959, 8949, 8961, 8976, 8980, 8986, 8988, 8992, 9003, 9008, 9009, 9029, 9050, 9051, 9054, 9055, 9068, 9054, 9071, 9082, 9087, 9089, 9112, 9113, 9127, 9126, 9125, 9129, 9138, 9140, 9145, 9149, 9151, 9179, 9185, 9187, 9188, 9189, 9186, 9190, 9207, 9208, 9209, 9221, 9228, 9252, 9254, 9264, 9267, 9276, 9287, 9293, 9292, 9260, 9261, 9260, 9265, 9266, 9285, 9287, 9289, 9290, 9291, 9292, 9293, 9295, 9313, 9314, 9291, 9282, 9304, 9315, 9316, 9317, 9327, 9345, 9350, 9352, 9356, 9366, 9367, 9400, 9407, 9408, 9415, 9418, 9419, 9432, 9441, 9442, 9439, 9440, 9451, 9455, 9464, 9472, 9479, 9481, 9490, 9514, 9527, 9546, 9552, 9556, 9570, 9582, 9584, 9592, 9597, 9602, 9618, 9619, 9622, 9639, 9645, 9648, 9643, 9647, 9648, 9650, 9641, 9665, 9678, 9681, 9682, 9683, 9692, 9703, 9704, 9702, 9704, 9712, 9726, 9731, 9732, 9733, 9744, 9745, 9757, 9755, 9763, 9764, 9766, 9768, 9782, 9780, 9781, 9783, 9786, 9803, 9804, 9821, 9822, 9840, 9841, 9843, 9839, 9840, 9846, 9856, 9859, 9863, 9862, 9864, 9865, 9866, 9870, 9878, 9877, 9894, 9896, 9937, 9938, 9943, 9944, 9943, 9952, 9962, 9983, 9985, 9984, 9992, 10002, 10003, 10041, 10040, 10041, 10040, 10042, 10029, 10034, 10033, 10034, 10052, 10053, 10062, 10055, 10056, 10057, 10069, 10073, 10067, 10070, 10065, 10064, 10069, 10071, 10092, 10069, 10070, 10080, 10095, 10099, 10100, 10101, 10099, 10115, 10116, 10117, 10122, 10124, 10129, 10096, 10101, 10106, 10099, 10102, 10143, 10144, 10145, 10146, 10148, 10149, 10150, 10160, 10132, 10137, 10177, 10181, 10178, 10183, 10189, 10191, 10192, 10194, 10203, 10199, 10207, 10218, 10223, 10224, 10225, 10245, 10239, 10247, 10252, 10250, 10254, 10255, 10256, 10279, 10280, 10292, 10293, 10300, 10310, 10312, 10321, 
10324, 10326, 10330, 10329, 10328, 10329, 10333, 10355, 10354, 10349, 10352, 10357, 10369, 10386, 10387, 10402, 10406, 10407, 10404, 10405, 10407, 10408, 10412, 10413, 10417, 10420, 10412, 10426, 10427, 10424, 10433, 10434, 10435, 10456, 10472, 10492, 10494, 10508, 10510, 10511, 10512, 10513, 10514, 10507, 10510, 10511, 10515, 10525, 10526, }; var comparison:i32 = 0; var increased: i32 = -1; const stop = values.len - 3; var index: usize = 0; print("Comparison: {}\n", .{comparison}); while (index <= stop) { const sum = values[index] + values[index + 1] + values[index + 2]; if (sum > comparison) { increased += 1; } comparison = sum; index += 1; } print("Result: {}", .{increased}); } // Useful stdlib functions const tokenize = std.mem.tokenize; const split = std.mem.split; const indexOf = std.mem.indexOfScalar; const indexOfAny = std.mem.indexOfAny; const indexOfStr = std.mem.indexOfPosLinear; const lastIndexOf = std.mem.lastIndexOfScalar; const lastIndexOfAny = std.mem.lastIndexOfAny; const lastIndexOfStr = std.mem.lastIndexOfLinear; const trim = std.mem.trim; const sliceMin = std.mem.min; const sliceMax = std.mem.max; const parseInt = std.fmt.parseInt; const parseFloat = std.fmt.parseFloat; const min = std.math.min; const min3 = std.math.min3; const max = std.math.max; const max3 = std.math.max3; const print = std.debug.print; const assert = std.debug.assert; const sort = std.sort.sort; const asc = std.sort.asc; const desc = std.sort.desc;
src/day01.zig
const std = @import("std");
const zlm = @import("zlm");
const gl = @import("gl.zig");

/// A GPU-backed vertex list: owns a VAO/VBO pair plus a CPU-side staging
/// list (`data`). Mutate `data`, then call `update` to re-upload.
pub const VertexList = struct {
    vao: gl.VertexArray,
    vbo: gl.Buffer,
    data: std.ArrayListUnmanaged(Vertex),

    /// Creates the GL objects and registers one vertex attribute per field
    /// of `Vertex`, in declaration order. Does not allocate; `data` starts
    /// empty.
    pub fn init() VertexList {
        const vao = gl.VertexArray.init();
        const vbo = gl.Buffer.init();
        const result = VertexList{
            .vao = vao,
            .vbo = vbo,
            .data = std.ArrayListUnmanaged(Vertex){},
        };
        result.makeActive();
        // One attribute slot per Vertex field, indexed by field position.
        inline for (@typeInfo(Vertex).Struct.fields) |field, i| {
            gl.VertexArray.attribute(i, Vertex, field.name);
        }
        return result;
    }

    /// Frees the CPU-side list and both GL objects.
    pub fn deinit(self: *VertexList, allocator: *std.mem.Allocator) void {
        self.data.deinit(allocator);
        self.vao.deinit();
        self.vbo.deinit();
    }

    /// Binds this list's VAO and VBO as the active GL state.
    pub fn makeActive(self: VertexList) void {
        self.vao.bind();
        self.vbo.bind(.array);
    }

    /// Draws every vertex currently in `data` using the given primitive mode.
    pub fn renderAll(self: VertexList, mode: gl.PrimitiveMode) void {
        self.makeActive();
        gl.VertexArray.draw(mode, 0, @intCast(c_int, self.data.items.len));
    }

    /// Re-uploads the current contents of `data` via `gl.Buffer.data`
    /// with `.static_draw` usage.
    pub fn update(self: VertexList) void {
        self.makeActive();
        gl.Buffer.data(.array, self.data.items, .static_draw);
    }
};

/// One vertex as laid out in the GL buffer (`extern` for a defined layout).
pub const Vertex = extern struct {
    pos: zlm.Vec3,
    normal: zlm.Vec3,
    // Ten color bytes packed into three u32 attributes — i think this is
    // the only way around some opengl limitations.
    color_sets: [3]u32,
    uv: zlm.Vec2,
    is_textured: bool,

    /// Builds an untextured vertex, packing `colors` little-endian into
    /// `color_sets`: bytes 0-3 into word 0, bytes 4-7 into word 1, bytes
    /// 8-9 into the low half of word 2 (its top half is zero).
    pub fn init(pos: zlm.Vec3, normal: zlm.Vec3, colors: [10]u8) Vertex {
        return .{
            .pos = pos,
            .normal = normal,
            .color_sets = .{
                @as(u32, colors[0]) | (@as(u32, colors[1]) << 8) | (@as(u32, colors[2]) << 16) | (@as(u32, colors[3]) << 24),
                @as(u32, colors[4]) | (@as(u32, colors[5]) << 8) | (@as(u32, colors[6]) << 16) | (@as(u32, colors[7]) << 24),
                // Widen to u32 (the array element type) directly, matching
                // the two elements above, instead of building a u16 and
                // relying on implicit coercion.
                @as(u32, colors[8]) | (@as(u32, colors[9]) << 8),
            },
            .uv = zlm.Vec2.zero,
            .is_textured = false,
        };
    }

    /// Builds a textured vertex with a single color byte in set 0.
    pub fn initUV(pos: zlm.Vec3, normal: zlm.Vec3, color: u8, uv: zlm.Vec2) Vertex {
        return .{
            .pos = pos,
            .normal = normal,
            .color_sets = .{ color, 0, 0 },
            .uv = uv,
            .is_textured = true,
        };
    }
};
src/render/vertex.zig
// Minimal futex-based mutex experiment plus a two-thread counter test.
// NOTE(review): uses older Zig syntax (`pub use`, `builtin.Os`, varargs
// `warn`); Linux/x86_64 only by construction.
const std = @import("std");
const assert = std.debug.assert;
const warn = std.debug.warn;
const mem = std.mem;
const math = std.math;
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;

// Compile-time gate: only Linux provides the futex syscall used below.
const linux = switch (builtin.os) {
    builtin.Os.linux => std.os.linux,
    else => @compileError("Only builtin.os.linux is supported"),
};

// Pull the arch-specific syscall interface (syscall4, SYS_futex) into this
// namespace.
pub use switch (builtin.arch) {
    builtin.Arch.x86_64 => @import("../zig/std/os/linux/x86_64.zig"),
    else => @compileError("unsupported arch"),
};

/// Blocks the caller via FUTEX_WAIT while `pVal.*` equals `expected_value`.
pub fn futex_wait(pVal: *usize, expected_value: usize) void {
    //warn("futex_wait: {*}\n", pVal);
    _ = syscall4(SYS_futex, @ptrToInt(pVal), linux.FUTEX_WAIT, expected_value, 0);
}

/// Wakes up to `num_threads_to_wake` waiters blocked on `pVal` (FUTEX_WAKE).
pub fn futex_wake(pVal: *usize, num_threads_to_wake: u32) void {
    //warn("futex_wake: {*}\n", pVal);
    _ = syscall4(SYS_futex, @ptrToInt(pVal), linux.FUTEX_WAKE, num_threads_to_wake, 0);
}

/// Mutex flavor selector; only `Simple` is implemented below.
pub const Style = enum {
    Simple,
    Robust,
};

/// Returns a futex-backed mutex type: `value` is 0 when unlocked, 1 when
/// locked. Requesting any style other than Style.Simple fails at compile
/// time.
pub fn Mutex(comptime mutexType: Style) type {
    return struct {
        const Self = @This();

        const simple = switch (mutexType) {
            Style.Simple => Style.Simple,
            else => @compileError("Only Style.Simple supported"),
        };

        value: usize,
        simpleStyle: Style, // TODO: Don't require a field to get comptime failure

        pub fn init() Self {
            return Self{
                .value = 0,
                .simpleStyle = simple,
            };
        }

        /// Acquire: atomically swap in 1; if the previous value was nonzero
        /// (already locked), sleep on the futex and retry after each wakeup.
        pub fn lock(pSelf: *Self) void {
            while (@atomicRmw(usize, &pSelf.value, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {
                futex_wait(&pSelf.value, 1);
            }
        }

        /// Release: atomically swap in 0 (asserting it really was locked),
        /// then wake one waiter.
        pub fn unlock(pSelf: *Self) void {
            assert(@atomicRmw(usize, &pSelf.value, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
            futex_wake(&pSelf.value, 1);
        }
    };
}

/// Per-thread state handed to `threadDispatcher`: a (truncated) name and
/// the number of iterations this thread has completed.
const ThreadContext = struct {
    const Self = @This();

    name_len: usize,
    name: [32]u8,
    counter: u128,

    pub fn init(pSelf: *Self, name: []const u8) void {
        // Set name_len and then copy with truncation
        pSelf.name_len = math.min(name.len, pSelf.name.len);
        mem.copy(u8, pSelf.name[0..pSelf.name_len], name[0..pSelf.name_len]);
        pSelf.counter = 0;
    }
};

var gThread0_context: ThreadContext = undefined;
var gThread1_context: ThreadContext = undefined;
var gCounter_mutex = Mutex(Style.Simple).init();
//var gCounter_mutex = Mutex(Style.Robust).init(); Generates a compile time error
var gCounter: u128 = undefined; // shared counter protected by gCounter_mutex

/// Thread body: perform 1,000,000 increments of the shared counter, taking
/// the global mutex around each increment.
fn threadDispatcher(pContext: *ThreadContext) void {
    while (pContext.counter < 1000000) {
        {
            gCounter_mutex.lock();
            // Unlock on exit from this inner block, i.e. after each increment.
            defer gCounter_mutex.unlock();
            gCounter += 1;
        }
        pContext.counter += 1;
    }
}

test "Mutex" {
    warn("\ntest Mutex:+ gCounter={}\n", gCounter);
    defer warn("test Mutex:- gCounter={}\n", gCounter);

    // Single-threaded sanity check of the lock/unlock state transitions.
    var mutex = Mutex(Style.Simple).init();
    assert(mutex.value == 0);
    mutex.lock();
    assert(mutex.value == 1);
    mutex.unlock();
    assert(mutex.value == 0);

    // Initialize gCounter and its mutex
    gCounter = 0;
    //Mtx.init(gCounter_mutex);
    gThread0_context.init("thread0");
    gThread1_context.init("thread1");

    // Race two threads; the mutex must make the 2,000,000 increments exact.
    var thread0 = try std.os.spawnThread(&gThread0_context, threadDispatcher);
    var thread1 = try std.os.spawnThread(&gThread1_context, threadDispatcher);
    warn("call thread0/1.wait\n");
    thread0.wait();
    thread1.wait();
    warn("call after thread0/1.wait\n");
    assert(gCounter == 2000000);
}
mutex.zig
// xcb GLX extension bindings: type aliases, error/event opcodes, and wire
// structs for the GLX protocol.
// NOTE(review): this file appears machine-generated (doxygen-style
// `@brief` comments, exhaustive opcode lists) — keep edits mechanical.
const xcb = @import("../xcb.zig");

pub const id = xcb.Extension{ .name = "GLX", .global_id = 0 };

// GLX XID/type aliases as carried on the wire.
pub const PIXMAP = u32;
pub const CONTEXT = u32;
pub const PBUFFER = u32;
pub const WINDOW = u32;
pub const FBCONFIG = u32;
pub const DRAWABLE = u32;
pub const FLOAT32 = f32;
pub const FLOAT64 = f64;
pub const BOOL32 = u32;
pub const CONTEXT_TAG = u32;

/// Opcode for Generic.
pub const GenericOpcode = -1;
/// Opcode for BadContext.
pub const BadContextOpcode = 0;
/// Opcode for BadContextState.
pub const BadContextStateOpcode = 1;
/// Opcode for BadDrawable.
pub const BadDrawableOpcode = 2;
/// Opcode for BadPixmap.
pub const BadPixmapOpcode = 3;
/// Opcode for BadContextTag.
pub const BadContextTagOpcode = 4;
/// Opcode for BadCurrentWindow.
pub const BadCurrentWindowOpcode = 5;
/// Opcode for BadRenderRequest.
pub const BadRenderRequestOpcode = 6;
/// Opcode for BadLargeRequest.
pub const BadLargeRequestOpcode = 7;
/// Opcode for UnsupportedPrivateRequest.
pub const UnsupportedPrivateRequestOpcode = 8;
/// Opcode for BadFBConfig.
pub const BadFBConfigOpcode = 9;
/// Opcode for BadPbuffer.
pub const BadPbufferOpcode = 10;
/// Opcode for BadCurrentDrawable.
pub const BadCurrentDrawableOpcode = 11;
/// Opcode for BadWindow.
pub const BadWindowOpcode = 12;
/// Opcode for GLXBadProfileARB.
pub const GLXBadProfileARBOpcode = 13;

/// @brief GenericError
pub const GenericError = struct {
    @"response_type": u8,
    @"error_code": u8,
    @"sequence": u16,
    @"bad_value": u32,
    @"minor_opcode": u16,
    @"major_opcode": u8,
    @"pad0": [21]u8,
};

/// Opcode for PbufferClobber.
pub const PbufferClobberOpcode = 0;

/// @brief PbufferClobberEvent
pub const PbufferClobberEvent = struct {
    @"response_type": u8,
    @"pad0": u8,
    @"sequence": u16,
    @"event_type": u16,
    @"draw_type": u16,
    @"drawable": xcb.glx.DRAWABLE,
    @"b_mask": u32,
    @"aux_buffer": u16,
    @"x": u16,
    @"y": u16,
    @"width": u16,
    @"height": u16,
    @"count": u16,
    @"pad1": [4]u8,
};

/// Opcode for BufferSwapComplete.
pub const BufferSwapCompleteOpcode = 1; /// @brief BufferSwapCompleteEvent pub const BufferSwapCompleteEvent = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"event_type": u16, @"pad1": [2]u8, @"drawable": xcb.glx.DRAWABLE, @"ust_hi": u32, @"ust_lo": u32, @"msc_hi": u32, @"msc_lo": u32, @"sbc": u32, }; pub const PBCET = extern enum(c_uint) { @"Damaged" = 32791, @"Saved" = 32792, }; pub const PBCDT = extern enum(c_uint) { @"Window" = 32793, @"Pbuffer" = 32794, }; /// @brief RenderRequest pub const RenderRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 1, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"data": []const u8, }; /// @brief RenderLargeRequest pub const RenderLargeRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 2, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"request_num": u16, @"request_total": u16, @"data_len": u32, @"data": []const u8, }; /// @brief CreateContextRequest pub const CreateContextRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 3, @"length": u16, @"context": xcb.glx.CONTEXT, @"visual": xcb.VISUALID, @"screen": u32, @"share_list": xcb.glx.CONTEXT, @"is_direct": u8, @"pad0": [3]u8, }; /// @brief DestroyContextRequest pub const DestroyContextRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 4, @"length": u16, @"context": xcb.glx.CONTEXT, }; /// @brief MakeCurrentcookie pub const MakeCurrentcookie = struct { sequence: c_uint, }; /// @brief MakeCurrentRequest pub const MakeCurrentRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 5, @"length": u16, @"drawable": xcb.glx.DRAWABLE, @"context": xcb.glx.CONTEXT, @"old_context_tag": xcb.glx.CONTEXT_TAG, }; /// @brief MakeCurrentReply pub const MakeCurrentReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"context_tag": xcb.glx.CONTEXT_TAG, @"pad1": [20]u8, }; /// @brief IsDirectcookie pub const IsDirectcookie = struct { sequence: c_uint, }; /// @brief IsDirectRequest pub 
const IsDirectRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 6, @"length": u16, @"context": xcb.glx.CONTEXT, }; /// @brief IsDirectReply pub const IsDirectReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"is_direct": u8, @"pad1": [23]u8, }; /// @brief QueryVersioncookie pub const QueryVersioncookie = struct { sequence: c_uint, }; /// @brief QueryVersionRequest pub const QueryVersionRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 7, @"length": u16, @"major_version": u32, @"minor_version": u32, }; /// @brief QueryVersionReply pub const QueryVersionReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"major_version": u32, @"minor_version": u32, @"pad1": [16]u8, }; /// @brief WaitGLRequest pub const WaitGLRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 8, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, }; /// @brief WaitXRequest pub const WaitXRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 9, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, }; /// @brief CopyContextRequest pub const CopyContextRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 10, @"length": u16, @"src": xcb.glx.CONTEXT, @"dest": xcb.glx.CONTEXT, @"mask": u32, @"src_context_tag": xcb.glx.CONTEXT_TAG, }; pub const GC = extern enum(c_uint) { @"GL_CURRENT_BIT" = 1, @"GL_POINT_BIT" = 2, @"GL_LINE_BIT" = 4, @"GL_POLYGON_BIT" = 8, @"GL_POLYGON_STIPPLE_BIT" = 16, @"GL_PIXEL_MODE_BIT" = 32, @"GL_LIGHTING_BIT" = 64, @"GL_FOG_BIT" = 128, @"GL_DEPTH_BUFFER_BIT" = 256, @"GL_ACCUM_BUFFER_BIT" = 512, @"GL_STENCIL_BUFFER_BIT" = 1024, @"GL_VIEWPORT_BIT" = 2048, @"GL_TRANSFORM_BIT" = 4096, @"GL_ENABLE_BIT" = 8192, @"GL_COLOR_BUFFER_BIT" = 16384, @"GL_HINT_BIT" = 32768, @"GL_EVAL_BIT" = 65536, @"GL_LIST_BIT" = 131072, @"GL_TEXTURE_BIT" = 262144, @"GL_SCISSOR_BIT" = 524288, @"GL_ALL_ATTRIB_BITS" = 16777215, }; /// @brief SwapBuffersRequest pub const SwapBuffersRequest = 
struct { @"major_opcode": u8, @"minor_opcode": u8 = 11, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"drawable": xcb.glx.DRAWABLE, }; /// @brief UseXFontRequest pub const UseXFontRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 12, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"font": xcb.FONT, @"first": u32, @"count": u32, @"list_base": u32, }; /// @brief CreateGLXPixmapRequest pub const CreateGLXPixmapRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 13, @"length": u16, @"screen": u32, @"visual": xcb.VISUALID, @"pixmap": xcb.PIXMAP, @"glx_pixmap": xcb.glx.PIXMAP, }; /// @brief GetVisualConfigscookie pub const GetVisualConfigscookie = struct { sequence: c_uint, }; /// @brief GetVisualConfigsRequest pub const GetVisualConfigsRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 14, @"length": u16, @"screen": u32, }; /// @brief GetVisualConfigsReply pub const GetVisualConfigsReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"num_visuals": u32, @"num_properties": u32, @"pad1": [16]u8, @"property_list": []u32, }; /// @brief DestroyGLXPixmapRequest pub const DestroyGLXPixmapRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 15, @"length": u16, @"glx_pixmap": xcb.glx.PIXMAP, }; /// @brief VendorPrivateRequest pub const VendorPrivateRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 16, @"length": u16, @"vendor_code": u32, @"context_tag": xcb.glx.CONTEXT_TAG, @"data": []const u8, }; /// @brief VendorPrivateWithReplycookie pub const VendorPrivateWithReplycookie = struct { sequence: c_uint, }; /// @brief VendorPrivateWithReplyRequest pub const VendorPrivateWithReplyRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 17, @"length": u16, @"vendor_code": u32, @"context_tag": xcb.glx.CONTEXT_TAG, @"data": []const u8, }; /// @brief VendorPrivateWithReplyReply pub const VendorPrivateWithReplyReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, 
@"length": u32, @"retval": u32, @"data1": [24]u8, @"data2": []u8, }; /// @brief QueryExtensionsStringcookie pub const QueryExtensionsStringcookie = struct { sequence: c_uint, }; /// @brief QueryExtensionsStringRequest pub const QueryExtensionsStringRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 18, @"length": u16, @"screen": u32, }; /// @brief QueryExtensionsStringReply pub const QueryExtensionsStringReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"pad2": [16]u8, }; /// @brief QueryServerStringcookie pub const QueryServerStringcookie = struct { sequence: c_uint, }; /// @brief QueryServerStringRequest pub const QueryServerStringRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 19, @"length": u16, @"screen": u32, @"name": u32, }; /// @brief QueryServerStringReply pub const QueryServerStringReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"str_len": u32, @"pad2": [16]u8, @"string": []u8, }; /// @brief ClientInfoRequest pub const ClientInfoRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 20, @"length": u16, @"major_version": u32, @"minor_version": u32, @"str_len": u32, @"string": []const u8, }; /// @brief GetFBConfigscookie pub const GetFBConfigscookie = struct { sequence: c_uint, }; /// @brief GetFBConfigsRequest pub const GetFBConfigsRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 21, @"length": u16, @"screen": u32, }; /// @brief GetFBConfigsReply pub const GetFBConfigsReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"num_FB_configs": u32, @"num_properties": u32, @"pad1": [16]u8, @"property_list": []u32, }; /// @brief CreatePixmapRequest pub const CreatePixmapRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 22, @"length": u16, @"screen": u32, @"fbconfig": xcb.glx.FBCONFIG, @"pixmap": xcb.PIXMAP, @"glx_pixmap": xcb.glx.PIXMAP, @"num_attribs": u32, 
@"attribs": []const u32, }; /// @brief DestroyPixmapRequest pub const DestroyPixmapRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 23, @"length": u16, @"glx_pixmap": xcb.glx.PIXMAP, }; /// @brief CreateNewContextRequest pub const CreateNewContextRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 24, @"length": u16, @"context": xcb.glx.CONTEXT, @"fbconfig": xcb.glx.FBCONFIG, @"screen": u32, @"render_type": u32, @"share_list": xcb.glx.CONTEXT, @"is_direct": u8, @"pad0": [3]u8, }; /// @brief QueryContextcookie pub const QueryContextcookie = struct { sequence: c_uint, }; /// @brief QueryContextRequest pub const QueryContextRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 25, @"length": u16, @"context": xcb.glx.CONTEXT, }; /// @brief QueryContextReply pub const QueryContextReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"num_attribs": u32, @"pad1": [20]u8, @"attribs": []u32, }; /// @brief MakeContextCurrentcookie pub const MakeContextCurrentcookie = struct { sequence: c_uint, }; /// @brief MakeContextCurrentRequest pub const MakeContextCurrentRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 26, @"length": u16, @"old_context_tag": xcb.glx.CONTEXT_TAG, @"drawable": xcb.glx.DRAWABLE, @"read_drawable": xcb.glx.DRAWABLE, @"context": xcb.glx.CONTEXT, }; /// @brief MakeContextCurrentReply pub const MakeContextCurrentReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"context_tag": xcb.glx.CONTEXT_TAG, @"pad1": [20]u8, }; /// @brief CreatePbufferRequest pub const CreatePbufferRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 27, @"length": u16, @"screen": u32, @"fbconfig": xcb.glx.FBCONFIG, @"pbuffer": xcb.glx.PBUFFER, @"num_attribs": u32, @"attribs": []const u32, }; /// @brief DestroyPbufferRequest pub const DestroyPbufferRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 28, @"length": u16, @"pbuffer": xcb.glx.PBUFFER, }; /// 
@brief GetDrawableAttributescookie pub const GetDrawableAttributescookie = struct { sequence: c_uint, }; /// @brief GetDrawableAttributesRequest pub const GetDrawableAttributesRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 29, @"length": u16, @"drawable": xcb.glx.DRAWABLE, }; /// @brief GetDrawableAttributesReply pub const GetDrawableAttributesReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"num_attribs": u32, @"pad1": [20]u8, @"attribs": []u32, }; /// @brief ChangeDrawableAttributesRequest pub const ChangeDrawableAttributesRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 30, @"length": u16, @"drawable": xcb.glx.DRAWABLE, @"num_attribs": u32, @"attribs": []const u32, }; /// @brief CreateWindowRequest pub const CreateWindowRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 31, @"length": u16, @"screen": u32, @"fbconfig": xcb.glx.FBCONFIG, @"window": xcb.WINDOW, @"glx_window": xcb.glx.WINDOW, @"num_attribs": u32, @"attribs": []const u32, }; /// @brief DeleteWindowRequest pub const DeleteWindowRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 32, @"length": u16, @"glxwindow": xcb.glx.WINDOW, }; /// @brief SetClientInfoARBRequest pub const SetClientInfoARBRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 33, @"length": u16, @"major_version": u32, @"minor_version": u32, @"num_versions": u32, @"gl_str_len": u32, @"glx_str_len": u32, @"gl_versions": []const u32, @"gl_extension_string": []const u8, @"glx_extension_string": []const u8, }; /// @brief CreateContextAttribsARBRequest pub const CreateContextAttribsARBRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 34, @"length": u16, @"context": xcb.glx.CONTEXT, @"fbconfig": xcb.glx.FBCONFIG, @"screen": u32, @"share_list": xcb.glx.CONTEXT, @"is_direct": u8, @"pad0": [3]u8, @"num_attribs": u32, @"attribs": []const u32, }; /// @brief SetClientInfo2ARBRequest pub const SetClientInfo2ARBRequest = struct { 
@"major_opcode": u8, @"minor_opcode": u8 = 35, @"length": u16, @"major_version": u32, @"minor_version": u32, @"num_versions": u32, @"gl_str_len": u32, @"glx_str_len": u32, @"gl_versions": []const u32, @"gl_extension_string": []const u8, @"glx_extension_string": []const u8, }; /// @brief NewListRequest pub const NewListRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 101, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"list": u32, @"mode": u32, }; /// @brief EndListRequest pub const EndListRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 102, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, }; /// @brief DeleteListsRequest pub const DeleteListsRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 103, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"list": u32, @"range": i32, }; /// @brief GenListscookie pub const GenListscookie = struct { sequence: c_uint, }; /// @brief GenListsRequest pub const GenListsRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 104, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"range": i32, }; /// @brief GenListsReply pub const GenListsReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"ret_val": u32, }; /// @brief FeedbackBufferRequest pub const FeedbackBufferRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 105, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"size": i32, @"type": i32, }; /// @brief SelectBufferRequest pub const SelectBufferRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 106, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"size": i32, }; /// @brief RenderModecookie pub const RenderModecookie = struct { sequence: c_uint, }; /// @brief RenderModeRequest pub const RenderModeRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 107, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"mode": u32, }; /// @brief RenderModeReply pub const RenderModeReply = struct { 
@"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"ret_val": u32, @"n": u32, @"new_mode": u32, @"pad1": [12]u8, @"data": []u32, }; pub const RM = extern enum(c_uint) { @"GL_RENDER" = 7168, @"GL_FEEDBACK" = 7169, @"GL_SELECT" = 7170, }; /// @brief Finishcookie pub const Finishcookie = struct { sequence: c_uint, }; /// @brief FinishRequest pub const FinishRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 108, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, }; /// @brief FinishReply pub const FinishReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, }; /// @brief PixelStorefRequest pub const PixelStorefRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 109, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"pname": u32, @"datum": xcb.glx.FLOAT32, }; /// @brief PixelStoreiRequest pub const PixelStoreiRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 110, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"pname": u32, @"datum": i32, }; /// @brief ReadPixelscookie pub const ReadPixelscookie = struct { sequence: c_uint, }; /// @brief ReadPixelsRequest pub const ReadPixelsRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 111, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"x": i32, @"y": i32, @"width": i32, @"height": i32, @"format": u32, @"type": u32, @"swap_bytes": u8, @"lsb_first": u8, }; /// @brief ReadPixelsReply pub const ReadPixelsReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [24]u8, @"data": []u8, }; /// @brief GetBooleanvcookie pub const GetBooleanvcookie = struct { sequence: c_uint, }; /// @brief GetBooleanvRequest pub const GetBooleanvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 112, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"pname": i32, }; /// @brief GetBooleanvReply pub const GetBooleanvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, 
@"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": u8, @"pad2": [15]u8, @"data": []u8, }; /// @brief GetClipPlanecookie pub const GetClipPlanecookie = struct { sequence: c_uint, }; /// @brief GetClipPlaneRequest pub const GetClipPlaneRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 113, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"plane": i32, }; /// @brief GetClipPlaneReply pub const GetClipPlaneReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [24]u8, @"data": []xcb.glx.FLOAT64, }; /// @brief GetDoublevcookie pub const GetDoublevcookie = struct { sequence: c_uint, }; /// @brief GetDoublevRequest pub const GetDoublevRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 114, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"pname": u32, }; /// @brief GetDoublevReply pub const GetDoublevReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT64, @"pad2": [8]u8, @"data": []xcb.glx.FLOAT64, }; /// @brief GetErrorcookie pub const GetErrorcookie = struct { sequence: c_uint, }; /// @brief GetErrorRequest pub const GetErrorRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 115, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, }; /// @brief GetErrorReply pub const GetErrorReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"error": i32, }; /// @brief GetFloatvcookie pub const GetFloatvcookie = struct { sequence: c_uint, }; /// @brief GetFloatvRequest pub const GetFloatvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 116, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"pname": u32, }; /// @brief GetFloatvReply pub const GetFloatvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT32, @"pad2": [12]u8, @"data": []xcb.glx.FLOAT32, }; /// @brief GetIntegervcookie 
pub const GetIntegervcookie = struct { sequence: c_uint, }; /// @brief GetIntegervRequest pub const GetIntegervRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 117, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"pname": u32, }; /// @brief GetIntegervReply pub const GetIntegervReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief GetLightfvcookie pub const GetLightfvcookie = struct { sequence: c_uint, }; /// @brief GetLightfvRequest pub const GetLightfvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 118, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"light": u32, @"pname": u32, }; /// @brief GetLightfvReply pub const GetLightfvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT32, @"pad2": [12]u8, @"data": []xcb.glx.FLOAT32, }; /// @brief GetLightivcookie pub const GetLightivcookie = struct { sequence: c_uint, }; /// @brief GetLightivRequest pub const GetLightivRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 119, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"light": u32, @"pname": u32, }; /// @brief GetLightivReply pub const GetLightivReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief GetMapdvcookie pub const GetMapdvcookie = struct { sequence: c_uint, }; /// @brief GetMapdvRequest pub const GetMapdvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 120, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"query": u32, }; /// @brief GetMapdvReply pub const GetMapdvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT64, @"pad2": [8]u8, @"data": []xcb.glx.FLOAT64, }; 
/// @brief GetMapfvcookie pub const GetMapfvcookie = struct { sequence: c_uint, }; /// @brief GetMapfvRequest pub const GetMapfvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 121, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"query": u32, }; /// @brief GetMapfvReply pub const GetMapfvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT32, @"pad2": [12]u8, @"data": []xcb.glx.FLOAT32, }; /// @brief GetMapivcookie pub const GetMapivcookie = struct { sequence: c_uint, }; /// @brief GetMapivRequest pub const GetMapivRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 122, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"query": u32, }; /// @brief GetMapivReply pub const GetMapivReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief GetMaterialfvcookie pub const GetMaterialfvcookie = struct { sequence: c_uint, }; /// @brief GetMaterialfvRequest pub const GetMaterialfvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 123, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"face": u32, @"pname": u32, }; /// @brief GetMaterialfvReply pub const GetMaterialfvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT32, @"pad2": [12]u8, @"data": []xcb.glx.FLOAT32, }; /// @brief GetMaterialivcookie pub const GetMaterialivcookie = struct { sequence: c_uint, }; /// @brief GetMaterialivRequest pub const GetMaterialivRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 124, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"face": u32, @"pname": u32, }; /// @brief GetMaterialivReply pub const GetMaterialivReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, 
@"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief GetPixelMapfvcookie pub const GetPixelMapfvcookie = struct { sequence: c_uint, }; /// @brief GetPixelMapfvRequest pub const GetPixelMapfvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 125, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"map": u32, }; /// @brief GetPixelMapfvReply pub const GetPixelMapfvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT32, @"pad2": [12]u8, @"data": []xcb.glx.FLOAT32, }; /// @brief GetPixelMapuivcookie pub const GetPixelMapuivcookie = struct { sequence: c_uint, }; /// @brief GetPixelMapuivRequest pub const GetPixelMapuivRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 126, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"map": u32, }; /// @brief GetPixelMapuivReply pub const GetPixelMapuivReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": u32, @"pad2": [12]u8, @"data": []u32, }; /// @brief GetPixelMapusvcookie pub const GetPixelMapusvcookie = struct { sequence: c_uint, }; /// @brief GetPixelMapusvRequest pub const GetPixelMapusvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 127, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"map": u32, }; /// @brief GetPixelMapusvReply pub const GetPixelMapusvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": u16, @"pad2": [16]u8, @"data": []u16, }; /// @brief GetPolygonStipplecookie pub const GetPolygonStipplecookie = struct { sequence: c_uint, }; /// @brief GetPolygonStippleRequest pub const GetPolygonStippleRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 128, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"lsb_first": u8, }; /// @brief GetPolygonStippleReply pub const GetPolygonStippleReply = struct { 
@"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [24]u8, @"data": []u8, }; /// @brief GetStringcookie pub const GetStringcookie = struct { sequence: c_uint, }; /// @brief GetStringRequest pub const GetStringRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 129, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"name": u32, }; /// @brief GetStringReply pub const GetStringReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"pad2": [16]u8, @"string": []u8, }; /// @brief GetTexEnvfvcookie pub const GetTexEnvfvcookie = struct { sequence: c_uint, }; /// @brief GetTexEnvfvRequest pub const GetTexEnvfvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 130, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"pname": u32, }; /// @brief GetTexEnvfvReply pub const GetTexEnvfvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT32, @"pad2": [12]u8, @"data": []xcb.glx.FLOAT32, }; /// @brief GetTexEnvivcookie pub const GetTexEnvivcookie = struct { sequence: c_uint, }; /// @brief GetTexEnvivRequest pub const GetTexEnvivRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 131, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"pname": u32, }; /// @brief GetTexEnvivReply pub const GetTexEnvivReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief GetTexGendvcookie pub const GetTexGendvcookie = struct { sequence: c_uint, }; /// @brief GetTexGendvRequest pub const GetTexGendvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 132, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"coord": u32, @"pname": u32, }; /// @brief GetTexGendvReply pub const GetTexGendvReply = struct { @"response_type": u8, @"pad0": u8, 
@"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT64, @"pad2": [8]u8, @"data": []xcb.glx.FLOAT64, }; /// @brief GetTexGenfvcookie pub const GetTexGenfvcookie = struct { sequence: c_uint, }; /// @brief GetTexGenfvRequest pub const GetTexGenfvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 133, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"coord": u32, @"pname": u32, }; /// @brief GetTexGenfvReply pub const GetTexGenfvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT32, @"pad2": [12]u8, @"data": []xcb.glx.FLOAT32, }; /// @brief GetTexGenivcookie pub const GetTexGenivcookie = struct { sequence: c_uint, }; /// @brief GetTexGenivRequest pub const GetTexGenivRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 134, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"coord": u32, @"pname": u32, }; /// @brief GetTexGenivReply pub const GetTexGenivReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief GetTexImagecookie pub const GetTexImagecookie = struct { sequence: c_uint, }; /// @brief GetTexImageRequest pub const GetTexImageRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 135, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"level": i32, @"format": u32, @"type": u32, @"swap_bytes": u8, }; /// @brief GetTexImageReply pub const GetTexImageReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [8]u8, @"width": i32, @"height": i32, @"depth": i32, @"pad2": [4]u8, @"data": []u8, }; /// @brief GetTexParameterfvcookie pub const GetTexParameterfvcookie = struct { sequence: c_uint, }; /// @brief GetTexParameterfvRequest pub const GetTexParameterfvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 136, @"length": u16, 
@"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"pname": u32, }; /// @brief GetTexParameterfvReply pub const GetTexParameterfvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT32, @"pad2": [12]u8, @"data": []xcb.glx.FLOAT32, }; /// @brief GetTexParameterivcookie pub const GetTexParameterivcookie = struct { sequence: c_uint, }; /// @brief GetTexParameterivRequest pub const GetTexParameterivRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 137, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"pname": u32, }; /// @brief GetTexParameterivReply pub const GetTexParameterivReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief GetTexLevelParameterfvcookie pub const GetTexLevelParameterfvcookie = struct { sequence: c_uint, }; /// @brief GetTexLevelParameterfvRequest pub const GetTexLevelParameterfvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 138, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"level": i32, @"pname": u32, }; /// @brief GetTexLevelParameterfvReply pub const GetTexLevelParameterfvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT32, @"pad2": [12]u8, @"data": []xcb.glx.FLOAT32, }; /// @brief GetTexLevelParameterivcookie pub const GetTexLevelParameterivcookie = struct { sequence: c_uint, }; /// @brief GetTexLevelParameterivRequest pub const GetTexLevelParameterivRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 139, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"level": i32, @"pname": u32, }; /// @brief GetTexLevelParameterivReply pub const GetTexLevelParameterivReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": 
[4]u8, @"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief IsEnabledcookie pub const IsEnabledcookie = struct { sequence: c_uint, }; /// @brief IsEnabledRequest pub const IsEnabledRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 140, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"capability": u32, }; /// @brief IsEnabledReply pub const IsEnabledReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"ret_val": xcb.glx.BOOL32, }; /// @brief IsListcookie pub const IsListcookie = struct { sequence: c_uint, }; /// @brief IsListRequest pub const IsListRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 141, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"list": u32, }; /// @brief IsListReply pub const IsListReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"ret_val": xcb.glx.BOOL32, }; /// @brief FlushRequest pub const FlushRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 142, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, }; /// @brief AreTexturesResidentcookie pub const AreTexturesResidentcookie = struct { sequence: c_uint, }; /// @brief AreTexturesResidentRequest pub const AreTexturesResidentRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 143, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"n": i32, @"textures": []const u32, }; /// @brief AreTexturesResidentReply pub const AreTexturesResidentReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"ret_val": xcb.glx.BOOL32, @"pad1": [20]u8, @"data": []u8, }; /// @brief DeleteTexturesRequest pub const DeleteTexturesRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 144, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"n": i32, @"textures": []const u32, }; /// @brief GenTexturescookie pub const GenTexturescookie = struct { sequence: c_uint, }; /// @brief GenTexturesRequest pub const GenTexturesRequest = 
struct { @"major_opcode": u8, @"minor_opcode": u8 = 145, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"n": i32, }; /// @brief GenTexturesReply pub const GenTexturesReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [24]u8, @"data": []u32, }; /// @brief IsTexturecookie pub const IsTexturecookie = struct { sequence: c_uint, }; /// @brief IsTextureRequest pub const IsTextureRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 146, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"texture": u32, }; /// @brief IsTextureReply pub const IsTextureReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"ret_val": xcb.glx.BOOL32, }; /// @brief GetColorTablecookie pub const GetColorTablecookie = struct { sequence: c_uint, }; /// @brief GetColorTableRequest pub const GetColorTableRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 147, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"format": u32, @"type": u32, @"swap_bytes": u8, }; /// @brief GetColorTableReply pub const GetColorTableReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [8]u8, @"width": i32, @"pad2": [12]u8, @"data": []u8, }; /// @brief GetColorTableParameterfvcookie pub const GetColorTableParameterfvcookie = struct { sequence: c_uint, }; /// @brief GetColorTableParameterfvRequest pub const GetColorTableParameterfvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 148, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"pname": u32, }; /// @brief GetColorTableParameterfvReply pub const GetColorTableParameterfvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT32, @"pad2": [12]u8, @"data": []xcb.glx.FLOAT32, }; /// @brief GetColorTableParameterivcookie pub const GetColorTableParameterivcookie = struct { sequence: c_uint, }; /// 
@brief GetColorTableParameterivRequest pub const GetColorTableParameterivRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 149, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"pname": u32, }; /// @brief GetColorTableParameterivReply pub const GetColorTableParameterivReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief GetConvolutionFiltercookie pub const GetConvolutionFiltercookie = struct { sequence: c_uint, }; /// @brief GetConvolutionFilterRequest pub const GetConvolutionFilterRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 150, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"format": u32, @"type": u32, @"swap_bytes": u8, }; /// @brief GetConvolutionFilterReply pub const GetConvolutionFilterReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [8]u8, @"width": i32, @"height": i32, @"pad2": [8]u8, @"data": []u8, }; /// @brief GetConvolutionParameterfvcookie pub const GetConvolutionParameterfvcookie = struct { sequence: c_uint, }; /// @brief GetConvolutionParameterfvRequest pub const GetConvolutionParameterfvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 151, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"pname": u32, }; /// @brief GetConvolutionParameterfvReply pub const GetConvolutionParameterfvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT32, @"pad2": [12]u8, @"data": []xcb.glx.FLOAT32, }; /// @brief GetConvolutionParameterivcookie pub const GetConvolutionParameterivcookie = struct { sequence: c_uint, }; /// @brief GetConvolutionParameterivRequest pub const GetConvolutionParameterivRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 152, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, 
@"target": u32, @"pname": u32, }; /// @brief GetConvolutionParameterivReply pub const GetConvolutionParameterivReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief GetSeparableFiltercookie pub const GetSeparableFiltercookie = struct { sequence: c_uint, }; /// @brief GetSeparableFilterRequest pub const GetSeparableFilterRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 153, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"format": u32, @"type": u32, @"swap_bytes": u8, }; /// @brief GetSeparableFilterReply pub const GetSeparableFilterReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [8]u8, @"row_w": i32, @"col_h": i32, @"pad2": [8]u8, @"rows_and_cols": []u8, }; /// @brief GetHistogramcookie pub const GetHistogramcookie = struct { sequence: c_uint, }; /// @brief GetHistogramRequest pub const GetHistogramRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 154, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"format": u32, @"type": u32, @"swap_bytes": u8, @"reset": u8, }; /// @brief GetHistogramReply pub const GetHistogramReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [8]u8, @"width": i32, @"pad2": [12]u8, @"data": []u8, }; /// @brief GetHistogramParameterfvcookie pub const GetHistogramParameterfvcookie = struct { sequence: c_uint, }; /// @brief GetHistogramParameterfvRequest pub const GetHistogramParameterfvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 155, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"pname": u32, }; /// @brief GetHistogramParameterfvReply pub const GetHistogramParameterfvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT32, @"pad2": [12]u8, @"data": 
[]xcb.glx.FLOAT32, }; /// @brief GetHistogramParameterivcookie pub const GetHistogramParameterivcookie = struct { sequence: c_uint, }; /// @brief GetHistogramParameterivRequest pub const GetHistogramParameterivRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 156, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"pname": u32, }; /// @brief GetHistogramParameterivReply pub const GetHistogramParameterivReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief GetMinmaxcookie pub const GetMinmaxcookie = struct { sequence: c_uint, }; /// @brief GetMinmaxRequest pub const GetMinmaxRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 157, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"format": u32, @"type": u32, @"swap_bytes": u8, @"reset": u8, }; /// @brief GetMinmaxReply pub const GetMinmaxReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [24]u8, @"data": []u8, }; /// @brief GetMinmaxParameterfvcookie pub const GetMinmaxParameterfvcookie = struct { sequence: c_uint, }; /// @brief GetMinmaxParameterfvRequest pub const GetMinmaxParameterfvRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 158, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"pname": u32, }; /// @brief GetMinmaxParameterfvReply pub const GetMinmaxParameterfvReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": xcb.glx.FLOAT32, @"pad2": [12]u8, @"data": []xcb.glx.FLOAT32, }; /// @brief GetMinmaxParameterivcookie pub const GetMinmaxParameterivcookie = struct { sequence: c_uint, }; /// @brief GetMinmaxParameterivRequest pub const GetMinmaxParameterivRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 159, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, 
@"pname": u32, }; /// @brief GetMinmaxParameterivReply pub const GetMinmaxParameterivReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief GetCompressedTexImageARBcookie pub const GetCompressedTexImageARBcookie = struct { sequence: c_uint, }; /// @brief GetCompressedTexImageARBRequest pub const GetCompressedTexImageARBRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 160, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"level": i32, }; /// @brief GetCompressedTexImageARBReply pub const GetCompressedTexImageARBReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [8]u8, @"size": i32, @"pad2": [12]u8, @"data": []u8, }; /// @brief DeleteQueriesARBRequest pub const DeleteQueriesARBRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 161, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"n": i32, @"ids": []const u32, }; /// @brief GenQueriesARBcookie pub const GenQueriesARBcookie = struct { sequence: c_uint, }; /// @brief GenQueriesARBRequest pub const GenQueriesARBRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 162, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"n": i32, }; /// @brief GenQueriesARBReply pub const GenQueriesARBReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [24]u8, @"data": []u32, }; /// @brief IsQueryARBcookie pub const IsQueryARBcookie = struct { sequence: c_uint, }; /// @brief IsQueryARBRequest pub const IsQueryARBRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 163, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"id": u32, }; /// @brief IsQueryARBReply pub const IsQueryARBReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"ret_val": xcb.glx.BOOL32, }; /// @brief GetQueryivARBcookie pub const GetQueryivARBcookie = struct 
{ sequence: c_uint, }; /// @brief GetQueryivARBRequest pub const GetQueryivARBRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 164, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"target": u32, @"pname": u32, }; /// @brief GetQueryivARBReply pub const GetQueryivARBReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief GetQueryObjectivARBcookie pub const GetQueryObjectivARBcookie = struct { sequence: c_uint, }; /// @brief GetQueryObjectivARBRequest pub const GetQueryObjectivARBRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 165, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"id": u32, @"pname": u32, }; /// @brief GetQueryObjectivARBReply pub const GetQueryObjectivARBReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": i32, @"pad2": [12]u8, @"data": []i32, }; /// @brief GetQueryObjectuivARBcookie pub const GetQueryObjectuivARBcookie = struct { sequence: c_uint, }; /// @brief GetQueryObjectuivARBRequest pub const GetQueryObjectuivARBRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 166, @"length": u16, @"context_tag": xcb.glx.CONTEXT_TAG, @"id": u32, @"pname": u32, }; /// @brief GetQueryObjectuivARBReply pub const GetQueryObjectuivARBReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pad1": [4]u8, @"n": u32, @"datum": u32, @"pad2": [12]u8, @"data": []u32, }; test "" { @import("std").testing.refAllDecls(@This()); }
// src/auto/glx.zig (generated file boundary)
const xcb = @import("../xcb.zig"); pub const id = xcb.Extension{ .name = "RANDR", .global_id = 0 }; pub const MODE = u32; pub const CRTC = u32; pub const OUTPUT = u32; pub const PROVIDER = u32; /// Opcode for BadOutput. pub const BadOutputOpcode = 0; /// @brief BadOutputError pub const BadOutputError = struct { @"response_type": u8, @"error_code": u8, @"sequence": u16, }; /// Opcode for BadCrtc. pub const BadCrtcOpcode = 1; /// @brief BadCrtcError pub const BadCrtcError = struct { @"response_type": u8, @"error_code": u8, @"sequence": u16, }; /// Opcode for BadMode. pub const BadModeOpcode = 2; /// @brief BadModeError pub const BadModeError = struct { @"response_type": u8, @"error_code": u8, @"sequence": u16, }; /// Opcode for BadProvider. pub const BadProviderOpcode = 3; /// @brief BadProviderError pub const BadProviderError = struct { @"response_type": u8, @"error_code": u8, @"sequence": u16, }; pub const Rotation = extern enum(c_uint) { @"Rotate_0" = 1, @"Rotate_90" = 2, @"Rotate_180" = 4, @"Rotate_270" = 8, @"Reflect_X" = 16, @"Reflect_Y" = 32, }; /// @brief ScreenSize pub const ScreenSize = struct { @"width": u16, @"height": u16, @"mwidth": u16, @"mheight": u16, }; /// @brief RefreshRates pub const RefreshRates = struct { @"nRates": u16, @"rates": []u16, }; /// @brief QueryVersioncookie pub const QueryVersioncookie = struct { sequence: c_uint, }; /// @brief QueryVersionRequest pub const QueryVersionRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 0, @"length": u16, @"major_version": u32, @"minor_version": u32, }; /// @brief QueryVersionReply pub const QueryVersionReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"major_version": u32, @"minor_version": u32, @"pad1": [16]u8, }; pub const SetConfig = extern enum(c_uint) { @"Success" = 0, @"InvalidConfigTime" = 1, @"InvalidTime" = 2, @"Failed" = 3, }; /// @brief SetScreenConfigcookie pub const SetScreenConfigcookie = struct { sequence: c_uint, }; /// @brief 
SetScreenConfigRequest pub const SetScreenConfigRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 2, @"length": u16, @"window": xcb.WINDOW, @"timestamp": xcb.TIMESTAMP, @"config_timestamp": xcb.TIMESTAMP, @"sizeID": u16, @"rotation": u16, @"rate": u16, @"pad0": [2]u8, }; /// @brief SetScreenConfigReply pub const SetScreenConfigReply = struct { @"response_type": u8, @"status": u8, @"sequence": u16, @"length": u32, @"new_timestamp": xcb.TIMESTAMP, @"config_timestamp": xcb.TIMESTAMP, @"root": xcb.WINDOW, @"subpixel_order": u16, @"pad0": [10]u8, }; pub const NotifyMask = extern enum(c_uint) { @"ScreenChange" = 1, @"CrtcChange" = 2, @"OutputChange" = 4, @"OutputProperty" = 8, @"ProviderChange" = 16, @"ProviderProperty" = 32, @"ResourceChange" = 64, }; /// @brief SelectInputRequest pub const SelectInputRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 4, @"length": u16, @"window": xcb.WINDOW, @"enable": u16, @"pad0": [2]u8, }; /// @brief GetScreenInfocookie pub const GetScreenInfocookie = struct { sequence: c_uint, }; /// @brief GetScreenInfoRequest pub const GetScreenInfoRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 5, @"length": u16, @"window": xcb.WINDOW, }; /// @brief GetScreenInfoReply pub const GetScreenInfoReply = struct { @"response_type": u8, @"rotations": u8, @"sequence": u16, @"length": u32, @"root": xcb.WINDOW, @"timestamp": xcb.TIMESTAMP, @"config_timestamp": xcb.TIMESTAMP, @"nSizes": u16, @"sizeID": u16, @"rotation": u16, @"rate": u16, @"nInfo": u16, @"pad0": [2]u8, @"sizes": []xcb.randr.ScreenSize, @"rates": []xcb.randr.RefreshRates, }; /// @brief GetScreenSizeRangecookie pub const GetScreenSizeRangecookie = struct { sequence: c_uint, }; /// @brief GetScreenSizeRangeRequest pub const GetScreenSizeRangeRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 6, @"length": u16, @"window": xcb.WINDOW, }; /// @brief GetScreenSizeRangeReply pub const GetScreenSizeRangeReply = struct { @"response_type": u8, @"pad0": 
u8, @"sequence": u16, @"length": u32, @"min_width": u16, @"min_height": u16, @"max_width": u16, @"max_height": u16, @"pad1": [16]u8, }; /// @brief SetScreenSizeRequest pub const SetScreenSizeRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 7, @"length": u16, @"window": xcb.WINDOW, @"width": u16, @"height": u16, @"mm_width": u32, @"mm_height": u32, }; pub const ModeFlag = extern enum(c_uint) { @"HsyncPositive" = 1, @"HsyncNegative" = 2, @"VsyncPositive" = 4, @"VsyncNegative" = 8, @"Interlace" = 16, @"DoubleScan" = 32, @"Csync" = 64, @"CsyncPositive" = 128, @"CsyncNegative" = 256, @"HskewPresent" = 512, @"Bcast" = 1024, @"PixelMultiplex" = 2048, @"DoubleClock" = 4096, @"HalveClock" = 8192, }; /// @brief ModeInfo pub const ModeInfo = struct { @"id": u32, @"width": u16, @"height": u16, @"dot_clock": u32, @"hsync_start": u16, @"hsync_end": u16, @"htotal": u16, @"hskew": u16, @"vsync_start": u16, @"vsync_end": u16, @"vtotal": u16, @"name_len": u16, @"mode_flags": u32, }; /// @brief GetScreenResourcescookie pub const GetScreenResourcescookie = struct { sequence: c_uint, }; /// @brief GetScreenResourcesRequest pub const GetScreenResourcesRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 8, @"length": u16, @"window": xcb.WINDOW, }; /// @brief GetScreenResourcesReply pub const GetScreenResourcesReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"timestamp": xcb.TIMESTAMP, @"config_timestamp": xcb.TIMESTAMP, @"num_crtcs": u16, @"num_outputs": u16, @"num_modes": u16, @"names_len": u16, @"pad1": [8]u8, @"crtcs": []xcb.randr.CRTC, @"outputs": []xcb.randr.OUTPUT, @"modes": []xcb.randr.ModeInfo, @"names": []u8, }; pub const Connection = extern enum(c_uint) { @"Connected" = 0, @"Disconnected" = 1, @"Unknown" = 2, }; /// @brief GetOutputInfocookie pub const GetOutputInfocookie = struct { sequence: c_uint, }; /// @brief GetOutputInfoRequest pub const GetOutputInfoRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 
9, @"length": u16, @"output": xcb.randr.OUTPUT, @"config_timestamp": xcb.TIMESTAMP, }; /// @brief GetOutputInfoReply pub const GetOutputInfoReply = struct { @"response_type": u8, @"status": u8, @"sequence": u16, @"length": u32, @"timestamp": xcb.TIMESTAMP, @"crtc": xcb.randr.CRTC, @"mm_width": u32, @"mm_height": u32, @"connection": u8, @"subpixel_order": u8, @"num_crtcs": u16, @"num_modes": u16, @"num_preferred": u16, @"num_clones": u16, @"name_len": u16, @"crtcs": []xcb.randr.CRTC, @"modes": []xcb.randr.MODE, @"clones": []xcb.randr.OUTPUT, @"name": []u8, }; /// @brief ListOutputPropertiescookie pub const ListOutputPropertiescookie = struct { sequence: c_uint, }; /// @brief ListOutputPropertiesRequest pub const ListOutputPropertiesRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 10, @"length": u16, @"output": xcb.randr.OUTPUT, }; /// @brief ListOutputPropertiesReply pub const ListOutputPropertiesReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"num_atoms": u16, @"pad1": [22]u8, @"atoms": []xcb.ATOM, }; /// @brief QueryOutputPropertycookie pub const QueryOutputPropertycookie = struct { sequence: c_uint, }; /// @brief QueryOutputPropertyRequest pub const QueryOutputPropertyRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 11, @"length": u16, @"output": xcb.randr.OUTPUT, @"property": xcb.ATOM, }; /// @brief QueryOutputPropertyReply pub const QueryOutputPropertyReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pending": u8, @"range": u8, @"immutable": u8, @"pad1": [21]u8, @"validValues": []i32, }; /// @brief ConfigureOutputPropertyRequest pub const ConfigureOutputPropertyRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 12, @"length": u16, @"output": xcb.randr.OUTPUT, @"property": xcb.ATOM, @"pending": u8, @"range": u8, @"pad0": [2]u8, @"values": []const i32, }; /// @brief ChangeOutputPropertyRequest pub const ChangeOutputPropertyRequest = struct { 
@"major_opcode": u8, @"minor_opcode": u8 = 13, @"length": u16, @"output": xcb.randr.OUTPUT, @"property": xcb.ATOM, @"type": xcb.ATOM, @"format": u8, @"mode": u8, @"pad0": [2]u8, @"num_units": u32, @"data": []const u8, }; /// @brief DeleteOutputPropertyRequest pub const DeleteOutputPropertyRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 14, @"length": u16, @"output": xcb.randr.OUTPUT, @"property": xcb.ATOM, }; /// @brief GetOutputPropertycookie pub const GetOutputPropertycookie = struct { sequence: c_uint, }; /// @brief GetOutputPropertyRequest pub const GetOutputPropertyRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 15, @"length": u16, @"output": xcb.randr.OUTPUT, @"property": xcb.ATOM, @"type": xcb.ATOM, @"long_offset": u32, @"long_length": u32, @"delete": u8, @"pending": u8, @"pad0": [2]u8, }; /// @brief GetOutputPropertyReply pub const GetOutputPropertyReply = struct { @"response_type": u8, @"format": u8, @"sequence": u16, @"length": u32, @"type": xcb.ATOM, @"bytes_after": u32, @"num_items": u32, @"pad0": [12]u8, @"data": []u8, }; /// @brief CreateModecookie pub const CreateModecookie = struct { sequence: c_uint, }; /// @brief CreateModeRequest pub const CreateModeRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 16, @"length": u16, @"window": xcb.WINDOW, @"mode_info": xcb.randr.ModeInfo, @"name": []const u8, }; /// @brief CreateModeReply pub const CreateModeReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"mode": xcb.randr.MODE, @"pad1": [20]u8, }; /// @brief DestroyModeRequest pub const DestroyModeRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 17, @"length": u16, @"mode": xcb.randr.MODE, }; /// @brief AddOutputModeRequest pub const AddOutputModeRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 18, @"length": u16, @"output": xcb.randr.OUTPUT, @"mode": xcb.randr.MODE, }; /// @brief DeleteOutputModeRequest pub const DeleteOutputModeRequest = struct { 
@"major_opcode": u8, @"minor_opcode": u8 = 19, @"length": u16, @"output": xcb.randr.OUTPUT, @"mode": xcb.randr.MODE, }; /// @brief GetCrtcInfocookie pub const GetCrtcInfocookie = struct { sequence: c_uint, }; /// @brief GetCrtcInfoRequest pub const GetCrtcInfoRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 20, @"length": u16, @"crtc": xcb.randr.CRTC, @"config_timestamp": xcb.TIMESTAMP, }; /// @brief GetCrtcInfoReply pub const GetCrtcInfoReply = struct { @"response_type": u8, @"status": u8, @"sequence": u16, @"length": u32, @"timestamp": xcb.TIMESTAMP, @"x": i16, @"y": i16, @"width": u16, @"height": u16, @"mode": xcb.randr.MODE, @"rotation": u16, @"rotations": u16, @"num_outputs": u16, @"num_possible_outputs": u16, @"outputs": []xcb.randr.OUTPUT, @"possible": []xcb.randr.OUTPUT, }; /// @brief SetCrtcConfigcookie pub const SetCrtcConfigcookie = struct { sequence: c_uint, }; /// @brief SetCrtcConfigRequest pub const SetCrtcConfigRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 21, @"length": u16, @"crtc": xcb.randr.CRTC, @"timestamp": xcb.TIMESTAMP, @"config_timestamp": xcb.TIMESTAMP, @"x": i16, @"y": i16, @"mode": xcb.randr.MODE, @"rotation": u16, @"pad0": [2]u8, @"outputs": []const xcb.randr.OUTPUT, }; /// @brief SetCrtcConfigReply pub const SetCrtcConfigReply = struct { @"response_type": u8, @"status": u8, @"sequence": u16, @"length": u32, @"timestamp": xcb.TIMESTAMP, @"pad0": [20]u8, }; /// @brief GetCrtcGammaSizecookie pub const GetCrtcGammaSizecookie = struct { sequence: c_uint, }; /// @brief GetCrtcGammaSizeRequest pub const GetCrtcGammaSizeRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 22, @"length": u16, @"crtc": xcb.randr.CRTC, }; /// @brief GetCrtcGammaSizeReply pub const GetCrtcGammaSizeReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"size": u16, @"pad1": [22]u8, }; /// @brief GetCrtcGammacookie pub const GetCrtcGammacookie = struct { sequence: c_uint, }; /// @brief 
GetCrtcGammaRequest pub const GetCrtcGammaRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 23, @"length": u16, @"crtc": xcb.randr.CRTC, }; /// @brief GetCrtcGammaReply pub const GetCrtcGammaReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"size": u16, @"pad1": [22]u8, @"red": []u16, @"green": []u16, @"blue": []u16, }; /// @brief SetCrtcGammaRequest pub const SetCrtcGammaRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 24, @"length": u16, @"crtc": xcb.randr.CRTC, @"size": u16, @"pad0": [2]u8, @"red": []const u16, @"green": []const u16, @"blue": []const u16, }; /// @brief GetScreenResourcesCurrentcookie pub const GetScreenResourcesCurrentcookie = struct { sequence: c_uint, }; /// @brief GetScreenResourcesCurrentRequest pub const GetScreenResourcesCurrentRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 25, @"length": u16, @"window": xcb.WINDOW, }; /// @brief GetScreenResourcesCurrentReply pub const GetScreenResourcesCurrentReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"timestamp": xcb.TIMESTAMP, @"config_timestamp": xcb.TIMESTAMP, @"num_crtcs": u16, @"num_outputs": u16, @"num_modes": u16, @"names_len": u16, @"pad1": [8]u8, @"crtcs": []xcb.randr.CRTC, @"outputs": []xcb.randr.OUTPUT, @"modes": []xcb.randr.ModeInfo, @"names": []u8, }; pub const Transform = extern enum(c_uint) { @"Unit" = 1, @"ScaleUp" = 2, @"ScaleDown" = 4, @"Projective" = 8, }; /// @brief SetCrtcTransformRequest pub const SetCrtcTransformRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 26, @"length": u16, @"crtc": xcb.randr.CRTC, @"transform": xcb.render.TRANSFORM, @"filter_len": u16, @"pad0": [2]u8, @"filter_name": []const u8, @"filter_params": []const xcb.render.FIXED, }; /// @brief GetCrtcTransformcookie pub const GetCrtcTransformcookie = struct { sequence: c_uint, }; /// @brief GetCrtcTransformRequest pub const GetCrtcTransformRequest = struct { @"major_opcode": u8, 
@"minor_opcode": u8 = 27, @"length": u16, @"crtc": xcb.randr.CRTC, }; /// @brief GetCrtcTransformReply pub const GetCrtcTransformReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pending_transform": xcb.render.TRANSFORM, @"has_transforms": u8, @"pad1": [3]u8, @"current_transform": xcb.render.TRANSFORM, @"pad2": [4]u8, @"pending_len": u16, @"pending_nparams": u16, @"current_len": u16, @"current_nparams": u16, @"pending_filter_name": []u8, @"pending_params": []xcb.render.FIXED, @"current_filter_name": []u8, @"current_params": []xcb.render.FIXED, }; /// @brief GetPanningcookie pub const GetPanningcookie = struct { sequence: c_uint, }; /// @brief GetPanningRequest pub const GetPanningRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 28, @"length": u16, @"crtc": xcb.randr.CRTC, }; /// @brief GetPanningReply pub const GetPanningReply = struct { @"response_type": u8, @"status": u8, @"sequence": u16, @"length": u32, @"timestamp": xcb.TIMESTAMP, @"left": u16, @"top": u16, @"width": u16, @"height": u16, @"track_left": u16, @"track_top": u16, @"track_width": u16, @"track_height": u16, @"border_left": i16, @"border_top": i16, @"border_right": i16, @"border_bottom": i16, }; /// @brief SetPanningcookie pub const SetPanningcookie = struct { sequence: c_uint, }; /// @brief SetPanningRequest pub const SetPanningRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 29, @"length": u16, @"crtc": xcb.randr.CRTC, @"timestamp": xcb.TIMESTAMP, @"left": u16, @"top": u16, @"width": u16, @"height": u16, @"track_left": u16, @"track_top": u16, @"track_width": u16, @"track_height": u16, @"border_left": i16, @"border_top": i16, @"border_right": i16, @"border_bottom": i16, }; /// @brief SetPanningReply pub const SetPanningReply = struct { @"response_type": u8, @"status": u8, @"sequence": u16, @"length": u32, @"timestamp": xcb.TIMESTAMP, }; /// @brief SetOutputPrimaryRequest pub const SetOutputPrimaryRequest = struct { @"major_opcode": u8, 
@"minor_opcode": u8 = 30, @"length": u16, @"window": xcb.WINDOW, @"output": xcb.randr.OUTPUT, }; /// @brief GetOutputPrimarycookie pub const GetOutputPrimarycookie = struct { sequence: c_uint, }; /// @brief GetOutputPrimaryRequest pub const GetOutputPrimaryRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 31, @"length": u16, @"window": xcb.WINDOW, }; /// @brief GetOutputPrimaryReply pub const GetOutputPrimaryReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"output": xcb.randr.OUTPUT, }; /// @brief GetProviderscookie pub const GetProviderscookie = struct { sequence: c_uint, }; /// @brief GetProvidersRequest pub const GetProvidersRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 32, @"length": u16, @"window": xcb.WINDOW, }; /// @brief GetProvidersReply pub const GetProvidersReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"timestamp": xcb.TIMESTAMP, @"num_providers": u16, @"pad1": [18]u8, @"providers": []xcb.randr.PROVIDER, }; pub const ProviderCapability = extern enum(c_uint) { @"SourceOutput" = 1, @"SinkOutput" = 2, @"SourceOffload" = 4, @"SinkOffload" = 8, }; /// @brief GetProviderInfocookie pub const GetProviderInfocookie = struct { sequence: c_uint, }; /// @brief GetProviderInfoRequest pub const GetProviderInfoRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 33, @"length": u16, @"provider": xcb.randr.PROVIDER, @"config_timestamp": xcb.TIMESTAMP, }; /// @brief GetProviderInfoReply pub const GetProviderInfoReply = struct { @"response_type": u8, @"status": u8, @"sequence": u16, @"length": u32, @"timestamp": xcb.TIMESTAMP, @"capabilities": u32, @"num_crtcs": u16, @"num_outputs": u16, @"num_associated_providers": u16, @"name_len": u16, @"pad0": [8]u8, @"crtcs": []xcb.randr.CRTC, @"outputs": []xcb.randr.OUTPUT, @"associated_providers": []xcb.randr.PROVIDER, @"associated_capability": []u32, @"name": []u8, }; /// @brief SetProviderOffloadSinkRequest pub const 
SetProviderOffloadSinkRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 34, @"length": u16, @"provider": xcb.randr.PROVIDER, @"sink_provider": xcb.randr.PROVIDER, @"config_timestamp": xcb.TIMESTAMP, }; /// @brief SetProviderOutputSourceRequest pub const SetProviderOutputSourceRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 35, @"length": u16, @"provider": xcb.randr.PROVIDER, @"source_provider": xcb.randr.PROVIDER, @"config_timestamp": xcb.TIMESTAMP, }; /// @brief ListProviderPropertiescookie pub const ListProviderPropertiescookie = struct { sequence: c_uint, }; /// @brief ListProviderPropertiesRequest pub const ListProviderPropertiesRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 36, @"length": u16, @"provider": xcb.randr.PROVIDER, }; /// @brief ListProviderPropertiesReply pub const ListProviderPropertiesReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"num_atoms": u16, @"pad1": [22]u8, @"atoms": []xcb.ATOM, }; /// @brief QueryProviderPropertycookie pub const QueryProviderPropertycookie = struct { sequence: c_uint, }; /// @brief QueryProviderPropertyRequest pub const QueryProviderPropertyRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 37, @"length": u16, @"provider": xcb.randr.PROVIDER, @"property": xcb.ATOM, }; /// @brief QueryProviderPropertyReply pub const QueryProviderPropertyReply = struct { @"response_type": u8, @"pad0": u8, @"sequence": u16, @"length": u32, @"pending": u8, @"range": u8, @"immutable": u8, @"pad1": [21]u8, @"valid_values": []i32, }; /// @brief ConfigureProviderPropertyRequest pub const ConfigureProviderPropertyRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 38, @"length": u16, @"provider": xcb.randr.PROVIDER, @"property": xcb.ATOM, @"pending": u8, @"range": u8, @"pad0": [2]u8, @"values": []const i32, }; /// @brief ChangeProviderPropertyRequest pub const ChangeProviderPropertyRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 39, 
@"length": u16, @"provider": xcb.randr.PROVIDER, @"property": xcb.ATOM, @"type": xcb.ATOM, @"format": u8, @"mode": u8, @"pad0": [2]u8, @"num_items": u32, @"data": []const u8, }; /// @brief DeleteProviderPropertyRequest pub const DeleteProviderPropertyRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 40, @"length": u16, @"provider": xcb.randr.PROVIDER, @"property": xcb.ATOM, }; /// @brief GetProviderPropertycookie pub const GetProviderPropertycookie = struct { sequence: c_uint, }; /// @brief GetProviderPropertyRequest pub const GetProviderPropertyRequest = struct { @"major_opcode": u8, @"minor_opcode": u8 = 41, @"length": u16, @"provider": xcb.randr.PROVIDER, @"property": xcb.ATOM, @"type": xcb.ATOM, @"long_offset": u32, @"long_length": u32, @"delete": u8, @"pending": u8, @"pad0": [2]u8, }; /// @brief GetProviderPropertyReply pub const GetProviderPropertyReply = struct { @"response_type": u8, @"format": u8, @"sequence": u16, @"length": u32, @"type": xcb.ATOM, @"bytes_after": u32, @"num_items": u32, @"pad0": [12]u8, @"data": []u8, }; /// Opcode for ScreenChangeNotify. 
pub const ScreenChangeNotifyOpcode = 0;

// NOTE(review): this file sits under src/auto/ and every declaration carries a
// generator-style "/// @brief" comment — presumably machine-generated from the
// X RandR protocol description; prefer editing the generator, not this file.
// Field order mirrors the RandR wire layout, so do not reorder fields.

/// @brief ScreenChangeNotifyEvent
pub const ScreenChangeNotifyEvent = struct {
    @"response_type": u8,
    @"rotation": u8,
    @"sequence": u16,
    @"timestamp": xcb.TIMESTAMP,
    @"config_timestamp": xcb.TIMESTAMP,
    @"root": xcb.WINDOW,
    @"request_window": xcb.WINDOW,
    @"sizeID": u16,
    @"subpixel_order": u16,
    @"width": u16,
    @"height": u16,
    @"mwidth": u16,
    @"mheight": u16,
};

// Sub-code discriminator for NotifyEvent (see NotifyEvent.subCode / NotifyData).
pub const Notify = extern enum(c_uint) {
    @"CrtcChange" = 0,
    @"OutputChange" = 1,
    @"OutputProperty" = 2,
    @"ProviderChange" = 3,
    @"ProviderProperty" = 4,
    @"ResourceChange" = 5,
};

/// @brief CrtcChange
pub const CrtcChange = struct {
    @"timestamp": xcb.TIMESTAMP,
    @"window": xcb.WINDOW,
    @"crtc": xcb.randr.CRTC,
    @"mode": xcb.randr.MODE,
    @"rotation": u16,
    @"pad0": [2]u8,
    @"x": i16,
    @"y": i16,
    @"width": u16,
    @"height": u16,
};

/// @brief OutputChange
pub const OutputChange = struct {
    @"timestamp": xcb.TIMESTAMP,
    @"config_timestamp": xcb.TIMESTAMP,
    @"window": xcb.WINDOW,
    @"output": xcb.randr.OUTPUT,
    @"crtc": xcb.randr.CRTC,
    @"mode": xcb.randr.MODE,
    @"rotation": u16,
    @"connection": u8,
    @"subpixel_order": u8,
};

/// @brief OutputProperty
pub const OutputProperty = struct {
    @"window": xcb.WINDOW,
    @"output": xcb.randr.OUTPUT,
    @"atom": xcb.ATOM,
    @"timestamp": xcb.TIMESTAMP,
    @"status": u8,
    @"pad0": [11]u8,
};

/// @brief ProviderChange
pub const ProviderChange = struct {
    @"timestamp": xcb.TIMESTAMP,
    @"window": xcb.WINDOW,
    @"provider": xcb.randr.PROVIDER,
    @"pad0": [16]u8,
};

/// @brief ProviderProperty
pub const ProviderProperty = struct {
    @"window": xcb.WINDOW,
    @"provider": xcb.randr.PROVIDER,
    @"atom": xcb.ATOM,
    @"timestamp": xcb.TIMESTAMP,
    @"state": u8,
    @"pad0": [11]u8,
};

/// @brief ResourceChange
pub const ResourceChange = struct {
    @"timestamp": xcb.TIMESTAMP,
    @"window": xcb.WINDOW,
    @"pad0": [20]u8,
};

/// @brief NotifyData
/// Untagged union; the active member is selected by NotifyEvent.subCode
/// (values of the Notify enum above).
pub const NotifyData = union {
    @"cc": xcb.randr.CrtcChange,
    @"oc": xcb.randr.OutputChange,
    @"op": xcb.randr.OutputProperty,
    @"pc": xcb.randr.ProviderChange,
    @"pp": xcb.randr.ProviderProperty,
    @"rc": xcb.randr.ResourceChange,
};

/// Opcode for Notify.
pub const NotifyOpcode = 1;

/// @brief NotifyEvent
pub const NotifyEvent = struct {
    @"response_type": u8,
    @"subCode": u8,
    @"sequence": u16,
    @"u": xcb.randr.NotifyData,
};

/// @brief MonitorInfo
pub const MonitorInfo = struct {
    @"name": xcb.ATOM,
    @"primary": u8,
    @"automatic": u8,
    @"nOutput": u16,
    @"x": i16,
    @"y": i16,
    @"width": u16,
    @"height": u16,
    @"width_in_millimeters": u32,
    @"height_in_millimeters": u32,
    @"outputs": []xcb.randr.OUTPUT,
};

/// @brief GetMonitorscookie
pub const GetMonitorscookie = struct {
    sequence: c_uint,
};

/// @brief GetMonitorsRequest
pub const GetMonitorsRequest = struct {
    @"major_opcode": u8,
    @"minor_opcode": u8 = 42,
    @"length": u16,
    @"window": xcb.WINDOW,
    @"get_active": u8,
};

/// @brief GetMonitorsReply
pub const GetMonitorsReply = struct {
    @"response_type": u8,
    @"pad0": u8,
    @"sequence": u16,
    @"length": u32,
    @"timestamp": xcb.TIMESTAMP,
    @"nMonitors": u32,
    @"nOutputs": u32,
    @"pad1": [12]u8,
    @"monitors": []xcb.randr.MonitorInfo,
};

/// @brief SetMonitorRequest
pub const SetMonitorRequest = struct {
    @"major_opcode": u8,
    @"minor_opcode": u8 = 43,
    @"length": u16,
    @"window": xcb.WINDOW,
};

/// @brief DeleteMonitorRequest
pub const DeleteMonitorRequest = struct {
    @"major_opcode": u8,
    @"minor_opcode": u8 = 44,
    @"length": u16,
    @"window": xcb.WINDOW,
    @"name": xcb.ATOM,
};

// Forces semantic analysis of every declaration above when running `zig test`.
test "" {
    @import("std").testing.refAllDecls(@This());
}
src/auto/randr.zig
//! Wrapper layer over raw cgltf data: mirrors each cgltf array with a typed
//! wrapper array whose cross-references point at sibling wrappers instead of
//! raw cgltf structs. All wrapper memory lives in one arena owned by `Data`;
//! release everything with `free()`.

const std = @import("std");
const cgltf = @import("cgltf");
const vk = @import("vk");
const engine = @import("engine.zig");
const render = engine.render;
const Child = std.meta.Child;

pub const Buffer = struct {
    raw: *cgltf.Buffer,
    usageFlags: vk.BufferUsageFlags = vk.BufferUsageFlags{},
    updateRate: engine.render.UpdateRate = .STATIC,
    gpuBuffer: ?render.Buffer = null,
};
pub const BufferView = struct {
    raw: *cgltf.BufferView,
    buffer: *Buffer,
};
pub const Accessor = struct {
    raw: *cgltf.Accessor,
    buffer_view: ?*BufferView,
    sparse_indices_buffer_view: ?*BufferView,
    sparse_values_buffer_view: ?*BufferView,
};
pub const Attribute = struct {
    raw: *cgltf.Attribute,
    name: [:0]const u8,
    data: *Accessor,
};
pub const Image = struct {
    raw: *cgltf.Image,
    name: [:0]const u8,
    buffer_view: ?*BufferView,
};
pub const Sampler = struct {
    raw: *cgltf.Sampler,
};
pub const Texture = struct {
    raw: *cgltf.Texture,
    name: [:0]const u8,
    image: ?*Image,
    sampler: ?*Sampler,
};
pub const Material = struct {
    raw: *cgltf.Material,
    name: [:0]const u8,
    pbr_metallic_color_texture: ?*Texture,
    pbr_metallic_roughness_texture: ?*Texture,
    pbr_specular_diffuse_texture: ?*Texture,
    pbr_specular_gloss_texture: ?*Texture,
    normal_texture: ?*Texture,
    occlusion_texture: ?*Texture,
    emissive_texture: ?*Texture,
};
pub const MorphTarget = struct {
    raw: *cgltf.MorphTarget,
    attributes: []Attribute,
};
pub const Primitive = struct {
    raw: *cgltf.Primitive,
    indices: ?*Accessor,
    material: ?*Material,
    attributes: []Attribute,
    targets: []MorphTarget,
};
pub const Mesh = struct {
    raw: *cgltf.Mesh,
    name: [:0]const u8,
    primitives: []Primitive,
    weights: []f32,
    target_names: [][:0]const u8,
};
pub const Skin = struct {
    raw: *cgltf.Skin,
    name: [:0]const u8,
    joints: []*Node,
    skeleton: ?*Node,
    inverse_bind_matrices: ?*Accessor,
};
pub const Camera = struct {
    raw: *cgltf.Camera,
    name: [:0]const u8,
};
pub const Light = struct {
    raw: *cgltf.Light,
    name: [:0]const u8,
};
pub const Node = struct {
    raw: *cgltf.Node,
    name: [:0]const u8,
    parent: ?*Node,
    children: []*Node,
    skin: ?*Skin,
    mesh: ?*Mesh,
    camera: ?*Camera,
    light: ?*Light,
};
pub const Scene = struct {
    raw: *cgltf.Scene,
    name: [:0]const u8,
    nodes: []*Node,
};
pub const AnimationSampler = struct {
    raw: *cgltf.AnimationSampler,
    input: *Accessor,
    output: *Accessor,
};
pub const AnimationChannel = struct {
    raw: *cgltf.AnimationChannel,
    sampler: *AnimationSampler,
    target_node: ?*Node,
};
pub const Animation = struct {
    raw: *cgltf.Animation,
    name: [:0]const u8,
    samplers: []AnimationSampler,
    channels: []AnimationChannel,
};

/// Root of the wrapped scene graph. Owns an arena that holds every wrapper
/// slice allocated by `wrap`; the struct itself is allocated from the parent
/// allocator. Dispose with `free()`.
pub const Data = struct {
    raw: *cgltf.Data,
    allocator: std.heap.ArenaAllocator,
    meshes: []Mesh,
    materials: []Material,
    accessors: []Accessor,
    buffer_views: []BufferView,
    buffers: []Buffer,
    images: []Image,
    textures: []Texture,
    samplers: []Sampler,
    skins: []Skin,
    cameras: []Camera,
    lights: []Light,
    nodes: []Node,
    scenes: []Scene,
    scene: ?*Scene,
    animations: []Animation,
    renderingDataInitialized: bool = false,
};

/// Builds the wrapper graph for `rawData`.
/// Caller owns the returned `*Data` and must release it with `free()`.
/// On error, everything allocated so far (arena + the Data struct) is freed.
pub fn wrap(rawData: *cgltf.Data, parentAllocator: *std.mem.Allocator) !*Data {
    var data = try parentAllocator.create(Data);
    errdefer parentAllocator.destroy(data);
    data.raw = rawData;
    // BUG FIX: `create` returns uninitialized memory and struct field defaults
    // are only applied by struct literals, so this flag was previously left
    // undefined. Initialize it explicitly to its declared default.
    data.renderingDataInitialized = false;
    data.allocator = std.heap.ArenaAllocator.init(parentAllocator);
    const allocator = &data.allocator.allocator;
    errdefer data.allocator.deinit();

    // Allocate every wrapper array up front so `fixOptional`/`fixNonnull`
    // can translate raw-pointer offsets into wrapper-array elements, even for
    // arrays whose contents are filled in later in this function.
    data.meshes = try allocator.alloc(Mesh, rawData.meshes_count);
    data.materials = try allocator.alloc(Material, rawData.materials_count);
    data.accessors = try allocator.alloc(Accessor, rawData.accessors_count);
    data.buffer_views = try allocator.alloc(BufferView, rawData.buffer_views_count);
    data.buffers = try allocator.alloc(Buffer, rawData.buffers_count);
    data.images = try allocator.alloc(Image, rawData.images_count);
    data.textures = try allocator.alloc(Texture, rawData.textures_count);
    data.samplers = try allocator.alloc(Sampler, rawData.samplers_count);
    data.skins = try allocator.alloc(Skin, rawData.skins_count);
    data.cameras = try allocator.alloc(Camera, rawData.cameras_count);
    data.lights = try allocator.alloc(Light, rawData.lights_count);
    data.nodes = try allocator.alloc(Node, rawData.nodes_count);
    data.scenes = try allocator.alloc(Scene, rawData.scenes_count);
    data.animations = try allocator.alloc(Animation, rawData.animations_count);

    for (data.meshes) |*mesh, i| {
        const rawMesh = &rawData.meshes[i];
        const primitives = try allocator.alloc(Primitive, rawMesh.primitives_count);
        for (primitives) |*prim, j| {
            const rawPrim = &rawMesh.primitives[j];
            const attributes = try copyAttributes(data, rawData, rawPrim.attributes, rawPrim.attributes_count);
            const targets = try allocator.alloc(MorphTarget, rawPrim.targets_count);
            for (targets) |*target, k| {
                const rawTarget = &rawPrim.targets[k];
                const targetAttributes = try copyAttributes(data, rawData, rawTarget.attributes, rawTarget.attributes_count);
                target.* = MorphTarget{
                    .raw = rawTarget,
                    .attributes = targetAttributes,
                };
            }
            prim.* = Primitive{
                .raw = rawPrim,
                .indices = fixOptional(rawPrim.indices, rawData.accessors, data.accessors),
                .material = fixOptional(rawPrim.material, rawData.materials, data.materials),
                .attributes = attributes,
                .targets = targets,
            };
        }
        const names = try allocator.alloc([:0]const u8, rawMesh.target_names_count);
        for (names) |*name, j| name.* = cstr(rawMesh.target_names[j]);
        mesh.* = Mesh{
            .raw = rawMesh,
            .name = cstr(rawMesh.name),
            .primitives = primitives,
            .weights = rawMesh.weights[0..rawMesh.weights_count],
            .target_names = names,
        };
    }
    for (data.materials) |*material, i| {
        const rawMat = &rawData.materials[i];
        // PBR texture slots start out null and are only filled in when the
        // corresponding has_* flag is set on the raw material below.
        material.* = Material{
            .raw = rawMat,
            .name = cstr(rawMat.name),
            .pbr_metallic_color_texture = null,
            .pbr_metallic_roughness_texture = null,
            .pbr_specular_diffuse_texture = null,
            .pbr_specular_gloss_texture = null,
            .normal_texture = fixOptional(rawMat.normal_texture.texture, rawData.textures, data.textures),
            .occlusion_texture = fixOptional(rawMat.occlusion_texture.texture, rawData.textures, data.textures),
            .emissive_texture = fixOptional(rawMat.emissive_texture.texture, rawData.textures, data.textures),
        };
        if (rawMat.has_pbr_metallic_roughness != 0) {
            material.pbr_metallic_color_texture = fixOptional(rawMat.pbr_metallic_roughness.base_color_texture.texture, rawData.textures, data.textures);
            material.pbr_metallic_roughness_texture = fixOptional(rawMat.pbr_metallic_roughness.metallic_roughness_texture.texture, rawData.textures, data.textures);
        }
        if (rawMat.has_pbr_specular_glossiness != 0) {
            material.pbr_specular_diffuse_texture = fixOptional(rawMat.pbr_specular_glossiness.diffuse_texture.texture, rawData.textures, data.textures);
            material.pbr_specular_gloss_texture = fixOptional(rawMat.pbr_specular_glossiness.specular_glossiness_texture.texture, rawData.textures, data.textures);
        }
    }
    for (data.accessors) |*accessor, i| {
        const rawAcc = &rawData.accessors[i];
        accessor.* = Accessor{
            .raw = rawAcc,
            .buffer_view = fixOptional(rawAcc.buffer_view, rawData.buffer_views, data.buffer_views),
            .sparse_indices_buffer_view = null,
            .sparse_values_buffer_view = null,
        };
        if (rawAcc.is_sparse != 0) {
            accessor.sparse_indices_buffer_view = fixOptional(rawAcc.sparse.indices_buffer_view, rawData.buffer_views, data.buffer_views);
            accessor.sparse_values_buffer_view = fixOptional(rawAcc.sparse.values_buffer_view, rawData.buffer_views, data.buffer_views);
        }
    }
    for (data.buffer_views) |*view, i| {
        const rawView = &rawData.buffer_views[i];
        view.* = BufferView{
            .raw = rawView,
            .buffer = fixNonnull(rawView.buffer, rawData.buffers, data.buffers),
        };
    }
    for (data.buffers) |*buffer, i| {
        const rawBuf = &rawData.buffers[i];
        // Remaining Buffer fields (usageFlags/updateRate/gpuBuffer) come from
        // the struct-literal defaults.
        buffer.* = Buffer{
            .raw = rawBuf,
        };
    }
    for (data.images) |*image, i| {
        const rawImage = &rawData.images[i];
        // NOTE(review): Image.buffer_view is optional but this uses fixNonnull
        // — presumably cgltf guarantees a buffer view for these images; verify
        // against URI-based (non-embedded) images.
        image.* = Image{
            .raw = rawImage,
            .name = cstr(rawImage.name),
            .buffer_view = fixNonnull(rawImage.buffer_view, rawData.buffer_views, data.buffer_views),
        };
    }
    for (data.textures) |*tex, i| {
        const rawTex = &rawData.textures[i];
        tex.* = Texture{
            .raw = rawTex,
            .name = cstr(rawTex.name),
            .image = fixOptional(rawTex.image, rawData.images, data.images),
            .sampler = fixOptional(rawTex.sampler, rawData.samplers, data.samplers),
        };
    }
    for (data.samplers) |*sampler, i| {
        sampler.* = Sampler{ .raw = &rawData.samplers[i] };
    }
    for (data.skins) |*skin, i| {
        const rawSkin = &rawData.skins[i];
        const joints = try allocator.alloc(*Node, rawSkin.joints_count);
        for (joints) |*joint, j| joint.* = fixNonnull(rawSkin.joints[j], rawData.nodes, data.nodes);
        skin.* = Skin{
            .raw = rawSkin,
            .name = cstr(rawSkin.name),
            .joints = joints,
            .skeleton = fixOptional(rawSkin.skeleton, rawData.nodes, data.nodes),
            .inverse_bind_matrices = fixOptional(rawSkin.inverse_bind_matrices, rawData.accessors, data.accessors),
        };
    }
    for (data.cameras) |*cam, i| {
        const rawCam = &rawData.cameras[i];
        cam.* = Camera{
            .raw = rawCam,
            .name = cstr(rawCam.name),
        };
    }
    for (data.lights) |*light, i| {
        const rawLight = &rawData.lights[i];
        light.* = Light{
            .raw = rawLight,
            .name = cstr(rawLight.name),
        };
    }
    for (data.nodes) |*node, i| {
        const rawNode = &rawData.nodes[i];
        const children = try allocator.alloc(*Node, rawNode.children_count);
        for (children) |*child, j| child.* = fixNonnull(rawNode.children[j], rawData.nodes, data.nodes);
        node.* = Node{
            .raw = rawNode,
            .name = cstr(rawNode.name),
            .parent = fixOptional(rawNode.parent, rawData.nodes, data.nodes),
            .children = children,
            .skin = fixOptional(rawNode.skin, rawData.skins, data.skins),
            .mesh = fixOptional(rawNode.mesh, rawData.meshes, data.meshes),
            .camera = fixOptional(rawNode.camera, rawData.cameras, data.cameras),
            .light = fixOptional(rawNode.light, rawData.lights, data.lights),
        };
    }
    for (data.scenes) |*scene, i| {
        const rawScene = &rawData.scenes[i];
        const nodes = try allocator.alloc(*Node, rawScene.nodes_count);
        for (nodes) |*node, j| node.* = fixNonnull(rawScene.nodes[j], rawData.nodes, data.nodes);
        scene.* = Scene{
            .raw = rawScene,
            .name = cstr(rawScene.name),
            .nodes = nodes,
        };
    }
    data.scene = fixOptional(rawData.scene, rawData.scenes, data.scenes);
    for (data.animations) |*anim, i| {
        const rawAnim = &rawData.animations[i];
        const samplers = try allocator.alloc(AnimationSampler, rawAnim.samplers_count);
        const channels = try allocator.alloc(AnimationChannel, rawAnim.channels_count);
        for (samplers) |*sampler, j| {
            const rawSampler = &rawAnim.samplers[j];
            sampler.* = AnimationSampler{
                .raw = rawSampler,
                .input = fixNonnull(rawSampler.input, rawData.accessors, data.accessors),
                .output = fixNonnull(rawSampler.output, rawData.accessors, data.accessors),
            };
        }
        for (channels) |*channel, j| {
            const rawChannel = &rawAnim.channels[j];
            // Channels point into this animation's own sampler array, not a
            // top-level cgltf array, hence rawAnim.samplers here.
            channel.* = AnimationChannel{
                .raw = rawChannel,
                .sampler = fixNonnull(rawChannel.sampler, rawAnim.samplers, samplers),
                .target_node = fixOptional(rawChannel.target_node, rawData.nodes, data.nodes),
            };
        }
        anim.* = Animation{
            .raw = rawAnim,
            .name = cstr(rawAnim.name),
            .samplers = samplers,
            .channels = channels,
        };
    }
    return data;
}

// Placeholder returned by cstr() when a cgltf name pointer is null.
const unnamed: [:0]const u8 = "<null>";

/// Converts an optional C string into a slice; null becomes "<null>".
/// The returned slice aliases the cgltf-owned bytes (no copy).
fn cstr(dataOpt: ?[*:0]const u8) [:0]const u8 {
    return if (dataOpt) |data| std.mem.spanZ(data) else unnamed;
}

/// Wraps a raw attribute array into arena-allocated Attribute wrappers.
fn copyAttributes(data: *Data, rawData: *cgltf.Data, rawAttributes: [*]cgltf.Attribute, rawCount: usize) ![]Attribute {
    const attributes = try data.allocator.allocator.alloc(Attribute, rawCount);
    for (attributes) |*attr, i| {
        const rawAttr = &rawAttributes[i];
        attr.* = Attribute{
            .raw = rawAttr,
            .name = cstr(rawAttr.name),
            .data = fixNonnull(rawAttr.data, rawData.accessors, data.accessors),
        };
    }
    return attributes;
}

/// Releases everything allocated by `wrap` (arena contents + the Data struct).
/// Does NOT free the underlying cgltf data (`data.raw`).
pub fn free(data: *Data) void {
    // Grab the parent allocator before the arena (which holds it) is torn down.
    const parentAllocator = data.allocator.child_allocator;
    data.allocator.deinit();
    parentAllocator.destroy(data);
}

/// Like fixNonnull, but maps a null raw pointer to null.
fn fixOptional(pointer: anytype, rawArray: anytype, wrapArray: anytype) ?*Child(@TypeOf(wrapArray.ptr)) {
    if (pointer) |nonNull| {
        return fixNonnull(nonNull, rawArray, wrapArray);
    } else {
        return null;
    }
}

/// Translates a pointer into a raw cgltf array into a pointer to the wrapper
/// at the same index: index = (pointer - rawArray) / sizeof(element).
/// @divExact asserts the pointer really is element-aligned within rawArray.
fn fixNonnull(pointer: anytype, rawArray: anytype, wrapArray: anytype) *Child(@TypeOf(wrapArray.ptr)) {
    const diff = @divExact(@ptrToInt(pointer) - @ptrToInt(rawArray), @sizeOf(Child(@TypeOf(rawArray))));
    return &wrapArray[diff];
}
src/gltf_wrap.zig
//! CLI tool: extracts the raw tag bytes (ID3v2, APE, ID3v1, Vorbis, FLAC, MP4)
//! from a tagged audio file into a standalone output file, re-synthesizing the
//! minimal container headers (FLAC stream marker / MP4 ftyp+moov+udta atoms)
//! that the stored offsets do not cover.

const std = @import("std");
const audiometa = @import("audiometa");
const assert = std.debug.assert;

pub fn main() anyerror!void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer assert(gpa.deinit() == false); // false = no leaks detected
    const allocator = gpa.allocator();

    // IDIOM FIX: locals below that are never mutated are now `const`
    // (const-by-default; unmutated `var` is a compile error on newer Zig).
    const args = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, args);
    if (args.len < 3) {
        std.debug.print("usage: {s} <tagged_file> <output_file> [tag_number]\n", .{args[0]});
        return;
    }
    const in_path = args[1];
    const out_path = args[2];
    // Optional 1-based tag index; an unparsable argument silently selects all
    // tags (parse failure maps to null by design).
    const index_selection: ?usize = blk: {
        if (args.len < 4) break :blk null;
        const index_str = args[3];
        break :blk (std.fmt.parseInt(usize, index_str, 10) catch null);
    };

    var file = try std.fs.cwd().openFile(in_path, .{});
    defer file.close();
    var stream_source = std.io.StreamSource{ .file = file };
    var all_metadata = try audiometa.metadata.readAll(allocator, &stream_source);
    defer all_metadata.deinit();

    // Output accumulator; written to disk in one shot at the end.
    var buf: std.ArrayList(u8) = std.ArrayList(u8).init(allocator);
    defer buf.deinit();

    for (all_metadata.tags) |tag, i| {
        // index_selection is 1-based, i is 0-based.
        if (index_selection != null and index_selection.? != i + 1) {
            continue;
        }
        const metadata: *const audiometa.metadata.Metadata = switch (tag) {
            .id3v2 => &tag.id3v2.metadata,
            .ape => &tag.ape.metadata,
            .id3v1, .vorbis, .flac, .mp4 => |*val| val,
        };
        if (tag == .flac) {
            // FLAC start/end offsets only include the block itself, so we need to provide the headers
            const buf_writer = buf.writer();
            try buf_writer.writeAll(audiometa.flac.flac_stream_marker);
            const is_last_metadata_block = @as(u8, 1 << 7);
            try buf_writer.writeByte(audiometa.flac.block_type_vorbis_comment | is_last_metadata_block);
            // 24-bit big-endian block length, per the FLAC metadata block header.
            try buf_writer.writeIntBig(u24, @intCast(u24, metadata.end_offset - metadata.start_offset));
            try sliceFileIntoBuf(&buf, file, metadata.start_offset, metadata.end_offset);
        } else if (tag == .mp4) {
            // MP4 start/end offsets only include the 'meta' atom, so we need to provide the ftyp and moov atoms
            const meta_data_len = metadata.end_offset - metadata.start_offset;
            try writeMp4Atoms(buf.writer(), @intCast(u32, meta_data_len));
            try sliceFileIntoBuf(&buf, file, metadata.start_offset, metadata.end_offset);
        } else {
            try sliceFileIntoBuf(&buf, file, metadata.start_offset, metadata.end_offset);
        }
    }
    std.debug.print("buflen: {d}\n", .{buf.items.len});

    var out_file = try std.fs.cwd().createFile(out_path, .{});
    defer out_file.close();
    try out_file.writeAll(buf.items);
}

/// Appends file bytes [start_offset, end_offset) to `buf` via a positional
/// read straight into the list's unused capacity (no temporary buffer).
/// Asserts the file actually contained that many bytes.
fn sliceFileIntoBuf(buf: *std.ArrayList(u8), file: std.fs.File, start_offset: usize, end_offset: usize) !void {
    const contents_len = end_offset - start_offset;
    try buf.ensureUnusedCapacity(contents_len);
    const buf_slice = buf.unusedCapacitySlice()[0..contents_len];
    const bytes_read = try file.pread(buf_slice, start_offset);
    assert(bytes_read == contents_len);
    buf.items.len += bytes_read;
}

/// Writes a minimal MP4 shell around a 'meta' atom of `meta_atom_len` bytes:
/// an 8-byte ftyp marker, then 'moov' > 'udta' headers whose sizes are
/// computed so the meta atom nests correctly inside them.
fn writeMp4Atoms(writer: anytype, meta_atom_len: u32) !void {
    const moov_len = audiometa.mp4.AtomHeader.len;
    const udta_len = audiometa.mp4.AtomHeader.len;
    const minimal_ftyp_data = "\x00\x00\x00\x08ftyp";
    try writer.writeAll(minimal_ftyp_data);
    // Atom sizes include their own 8-byte header and all children.
    var atom_len: u32 = moov_len + udta_len + meta_atom_len;
    try writer.writeIntBig(u32, atom_len);
    try writer.writeAll("moov");
    atom_len -= moov_len;
    try writer.writeIntBig(u32, atom_len);
    try writer.writeAll("udta");
}
tools/extract_tag.zig
//! Minimal type-erased ECS: one Pool(T) per component type, stored behind
//! *c_void in a hash map keyed by a per-type unique id. Entities are plain
//! ascending integers.

const std = @import("std");

// Module-level allocator shared by all worlds/pools.
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
const alloc = gpa.allocator();

pub const World = struct {
    // Next entity id to hand out == number of entities ever spawned.
    entity_count: usize,
    // typeId(T) -> type-erased *Pool(T).
    pools: std.AutoHashMap(usize, *c_void),

    pub fn init() World {
        return World{
            .entity_count = 0,
            .pools = std.AutoHashMap(usize, *c_void).init(alloc),
        };
    }

    /// Known limitation: pools cannot be freed here because their element type
    /// is erased; a destructor registry would be needed.
    pub fn deinit(self: *World) void {
        _ = self;
        // TODO: deinit pools
    }

    /// Allocates a fresh entity id. Ids are never reused.
    pub fn spawn(self: *World) usize {
        var entity = self.entity_count;
        self.entity_count += 1;
        return entity;
    }

    /// Attaches (or overwrites) `component` on `entity`, lazily creating the
    /// pool for @TypeOf(component) on first use.
    pub fn add(self: *World, entity: usize, component: anytype) !void {
        const T = @TypeOf(component);
        const type_id = typeId(T);
        var pool: *Pool(T) = undefined;
        if (self.pools.contains(type_id)) {
            var pool_ptr = self.*.pools.get(type_id).?;
            pool = ptrToStruct(Pool(T), pool_ptr);
        } else {
            pool = try alloc.create(Pool(T));
            pool.* = Pool(T).init(alloc);
            var pool_ptr = structToPtr(pool);
            try self.pools.put(type_id, pool_ptr);
        }
        try pool.add(entity, component);
    }

    /// Returns a mutable pointer to entity's T component.
    /// Panics if no pool for T exists or the entity lacks the component.
    pub fn get(self: *World, comptime T: type, entity: usize) !*T {
        const type_id = typeId(T);
        var pool_ptr = self.pools.get(type_id).?;
        var pool = ptrToStruct(Pool(T), pool_ptr);
        return pool.get(entity);
    }

    /// Detaches T from entity. Panics if no pool for T exists.
    pub fn remove(self: *World, comptime T: type, entity: usize) void {
        const type_id = typeId(T);
        var pool_ptr = self.pools.get(type_id).?;
        var pool = ptrToStruct(Pool(T), pool_ptr);
        pool.remove(entity);
    }

    /// True if entity currently has a T component. Panics if T was never added
    /// to any entity (no pool exists yet).
    pub fn has(self: *World, comptime T: type, entity: usize) bool {
        const type_id = typeId(T);
        var pool_ptr = self.pools.get(type_id).?;
        var pool = ptrToStruct(Pool(T), pool_ptr);
        return pool.has(entity);
    }

    /// Returns the ids of all entities having every component type in the
    /// `arche_type` tuple. Caller owns the returned slice and frees it with
    /// the module allocator.
    pub fn query(self: *World, arche_type: anytype) ![]usize {
        const ArcheType = @TypeOf(arche_type);
        if (@typeInfo(ArcheType) != .Struct) {
            @compileError("Expected tuple or struct argument, found " ++ @typeName(ArcheType));
        }
        var list = std.ArrayList(usize).init(alloc);
        // BUG FIX: clean up on append failure instead of leaking the list.
        errdefer list.deinit();
        var index: usize = 0;
        while (index < self.entity_count) : (index += 1) {
            var has_components = true;
            inline for (arche_type) |T| {
                if (!self.has(T, index)) {
                    has_components = false;
                }
            }
            if (has_components) {
                try list.append(index);
            }
        }
        // BUG FIX: previously returned `list.items`, leaking the list's spare
        // capacity and leaving the caller with a slice it could not safely
        // free. toOwnedSlice transfers ownership of an exact-size allocation.
        return list.toOwnedSlice();
    }
};

/// Dense component storage for one component type T.
/// indices maps entity id -> position in `components`.
pub fn Pool(comptime T: type) type {
    return struct {
        const Self = @This();
        indices: std.AutoHashMap(usize, usize),
        components: std.ArrayList(T),

        pub fn init(allocator: std.mem.Allocator) Self {
            return Self{
                .indices = std.AutoHashMap(usize, usize).init(allocator),
                .components = std.ArrayList(T).init(allocator),
            };
        }

        /// Inserts or overwrites the component for `entity`.
        pub fn add(self: *Pool(T), entity: usize, component: T) !void {
            if (self.indices.contains(entity)) {
                self.components.items[self.indices.get(entity).?] = component;
            } else {
                try self.components.append(component);
                try self.indices.put(entity, self.components.items.len - 1);
            }
        }

        /// Panics (.? on null) if the entity has no component in this pool.
        pub fn get(self: *Pool(T), entity: usize) !*T {
            return &self.components.items[self.indices.get(entity).?];
        }

        /// Known limitation: only unmaps the entity; the component slot stays
        /// in `components` forever (no swap-remove), so storage never shrinks.
        pub fn remove(self: *Pool(T), entity: usize) void {
            _ = self.indices.remove(entity);
        }

        pub fn has(self: Pool(T), entity: usize) bool {
            return self.indices.contains(entity);
        }
    };
}

// Type-erase a pool pointer for storage in World.pools.
fn structToPtr(s: anytype) *c_void {
    return @ptrCast(*c_void, s);
}

// Recover a typed pool pointer; caller must pass the same T it was stored as.
fn ptrToStruct(comptime T: type, ptr: *c_void) *T {
    return @ptrCast(*T, @alignCast(@alignOf(T), ptr));
}

// One unique global per instantiated T; its address serves as the type id.
fn TypeId(comptime T: type) type {
    _ = T;
    return struct {
        pub var uniq: u8 = 0;
    };
}

fn typeId(comptime T: type) usize {
    return @ptrToInt(&TypeId(T).uniq);
}
src/ecs.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
const List = std.ArrayList;
const Map = std.AutoHashMap;
const StrMap = std.StringHashMap;
const BitSet = std.DynamicBitSet;
const Str = []const u8;
const int = i64;

const util = @import("util.zig");
const gpa = util.gpa;

// Puzzle input, embedded at compile time relative to this source file.
const data = @embedFile("../data/day10.txt");

/// Advent of Code 2021, day 10: "Syntax Scoring".
/// Each input line is a sequence of brackets. A line is *corrupted* when a
/// closer doesn't match the most recent opener (scored into `part1`), and
/// *incomplete* otherwise (its completion string is scored for part 2).
/// Part 2's answer is the median of the incomplete-line scores.
pub fn main() !void {
    var part1: usize = 0;
    var completion_scores = blk: {
        var completion_scores = List(usize).init(gpa);
        errdefer completion_scores.deinit();
        var lines = tokenize(u8, data, "\r\n");
        next_line: while (lines.next()) |line| {
            // `state` is a stack of the closers we still expect, most
            // recent on top.
            var state = std.ArrayList(u8).init(gpa);
            defer state.deinit();
            for (line) |c| {
                switch (c) {
                    // Opening bracket: push the closer it will need.
                    '(' => try state.append(')'),
                    '{' => try state.append('}'),
                    '<' => try state.append('>'),
                    '[' => try state.append(']'),
                    // Closing bracket: must match the top of the stack,
                    // otherwise the line is corrupted. (The `or` only pops
                    // when the stack is non-empty, so pop() is safe here.)
                    else => if (state.items.len == 0 or state.pop() != c) {
                        // Corrupted line: add the illegal character's score
                        // and skip straight to the next line.
                        switch (c) {
                            ')' => part1 += 3,
                            ']' => part1 += 57,
                            '}' => part1 += 1197,
                            '>' => part1 += 25137,
                            else => unreachable,
                        }
                        continue :next_line;
                    },
                }
            }
            // Incomplete line: whatever remains on the stack is the
            // completion string. Score it base-5, top of stack first.
            var score: usize = 0;
            while (state.popOrNull()) |comp| {
                score = score * 5;
                switch (comp) {
                    ')' => score += 1,
                    ']' => score += 2,
                    '}' => score += 3,
                    '>' => score += 4,
                    else => unreachable,
                }
            }
            try completion_scores.append(score);
        }
        // Hand ownership of the scores to the outer scope.
        break :blk completion_scores.toOwnedSlice();
    };
    defer gpa.free(completion_scores);

    // The puzzle guarantees an odd number of incomplete lines, so the
    // median is the middle element after sorting.
    sort(usize, completion_scores, {}, comptime asc(usize));
    assert(completion_scores.len % 2 == 1);
    const part2 = completion_scores[completion_scores.len / 2];

    print("part1={}, part2={}\n", .{part1, part2});
}

// Useful stdlib functions (short aliases shared across AoC solutions)
const tokenize = std.mem.tokenize;
const split = std.mem.split;
const indexOf = std.mem.indexOfScalar;
const indexOfAny = std.mem.indexOfAny;
const indexOfStr = std.mem.indexOfPosLinear;
const lastIndexOf = std.mem.lastIndexOfScalar;
const lastIndexOfAny = std.mem.lastIndexOfAny;
const lastIndexOfStr = std.mem.lastIndexOfLinear;
const trim = std.mem.trim;
const sliceMin = std.mem.min;
const sliceMax = std.mem.max;
const eql = std.mem.eql;
const parseEnum = std.meta.stringToEnum;
const parseInt = std.fmt.parseInt;
const parseFloat = std.fmt.parseFloat;
const min = std.math.min;
const min3 = std.math.min3;
const max = std.math.max;
const max3 = std.math.max3;
const print = std.debug.print;
const assert = std.debug.assert;
const sort = std.sort.sort;
const asc = std.sort.asc;
const desc = std.sort.desc;
src/day10.zig
const std = @import("std");
const os = std.os;
const index = @import("index.zig");
const ptrace = index.ptrace;
const c = index.c;
const events = @import("events.zig");
const waitpid = @import("waitpid.zig").waitpid;

pub const Context = events.Context;

pub const Options = struct {
    multithread: bool = true,
    inverse: bool = false,
};

/// Intercepts syscalls, filtering them as the caller sees fit.
/// Expected use:
///
///  var inspector = syspect.Inspector.init(allocator, &[_]os.SYS{ .connect, .read, .write }, options);
///  defer inspector.deinit();
///
///  const target_argv = &[_][]const u8{ "program to run", "arg for program" };
///  _ = try inspector.spawn_process(allocator, target_argv);
///
///  while (try inspector.next_syscall()) |*syscall| {
///      switch (syscall.*) {
///          .pre_call => |context| {
///              warn("{} attempting syscall with registers {}\n", .{ context.pid, context.registers });
///
///              can_modify_registers_here(context);
///
///              if (block_until_syscall_finishes) {
///                  const maybe_registers = try inspector.start_and_finish_syscall_blocking(context);
///                  if (maybe_registers) |regs| {
///                      warn("Syscall result: {}\n", .{regs});
///                  } else {
///                      continue;
///                  }
///              }
///              try inspector.resume_tracee(context.pid);
///          },
///          .post_call => |context| {
///              warn("Syscall result: {}\n", .{context.registers});
///              // Tracee is paused after finishing the syscall. Resume it here.
///              try inspector.resume_tracee(context.pid);
///          },
///      }
///  }
pub const Inspector = struct {
    /// The syscalls we filter in or out.
    syscalls: []const os.SYS,
    /// If true, our syscalls field is what we filter out.
    /// Otherwise, we ignore syscalls that are not in our syscalls field.
    inverse: bool,
    /// If true, automatically follow child threads and processes.
    multithread: bool,
    /// Stores process info about our tracees.
    /// Maps a pid to an events.Tracee struct.
    tracee_map: events.TraceeMap,
    /// Written to internally. Read internally and externally.
    /// Indicates if the Inspector has at least one active tracee.
    has_tracees: bool,

    /// A tracee stopped either just before executing a syscall (pre_call)
    /// or just after it returned (post_call).
    pub const SyscallContext = union(enum) {
        pre_call: Context,
        post_call: Context,
    };

    pub fn init(allocator: *std.mem.Allocator, syscalls: []const os.SYS, options: Options) Inspector {
        return Inspector{
            .syscalls = syscalls,
            .multithread = options.multithread,
            .inverse = options.inverse,
            .tracee_map = events.TraceeMap.init(allocator),
            .has_tracees = false,
        };
    }

    pub fn deinit(self: *Inspector) void {
        self.tracee_map.deinit();
    }

    /// Forks and executes `argv` under ptrace, registers the child as a
    /// tracee, and sets it running until its first filtered event.
    /// Returns the child's pid.
    pub fn spawn_process(self: *Inspector, allocator: *std.mem.Allocator, argv: []const []const u8) !os.pid_t {
        const tracee_pid = try fork_spawn_process(allocator, argv);
        try self.set_ptrace_options(tracee_pid);
        // Resume/Set off tracee
        _ = try ptrace.syscall(tracee_pid);
        _ = try events.get_or_make_tracee(&self.tracee_map, tracee_pid);
        self.has_tracees = true;
        return tracee_pid;
    }

    /// Configures ptrace: kill tracee if we exit, mark syscall stops
    /// distinctly (TRACESYSGOOD), follow exec, and optionally follow
    /// fork/clone when multithread tracing is on.
    fn set_ptrace_options(self: *Inspector, tracee_pid: os.pid_t) !void {
        var opts = c.PTRACE_O_EXITKILL | c.PTRACE_O_TRACESYSGOOD;
        opts |= c.PTRACE_O_TRACEEXEC;
        if (self.multithread) opts |= c.PTRACE_O_TRACEFORK | c.PTRACE_O_TRACECLONE;
        _ = try ptrace.ptrace(c.PTRACE_SETOPTIONS, tracee_pid, 0, opts);
    }

    /// Attach to a running process, setting it as our tracee
    pub fn attach_to_process(self: *Inspector, pid: os.pid_t) !void {
        // Try to attach
        _ = try ptrace.ptrace(c.PTRACE_ATTACH, pid, 0, 0);
        // Wait for tracee to receive STOPSIG
        const wait_result = try waitpid(pid, 0);
        try self.set_ptrace_options(pid);
        // Ensure we are at the spot we're expecting: a SIGSTOP caused by
        // the attach, not some unrelated stop.
        switch (wait_result.status) {
            .stop => |signal| {
                switch (signal) {
                    .stop => {},
                    else => return error.PtraceAttachError,
                }
            },
            else => return error.PtraceAttachError,
        }
        // Resume/Set off tracee
        _ = try ptrace.syscall(pid);
        self.has_tracees = true;
    }

    /// Relinquishes ptrace control of the pid.
    /// Tracee must be in ptrace-stop state when calling this function.
    /// Tracee will be in a ptrace-stop state when next_syscall returns.
    pub fn detach_from_process(self: *Inspector, pid: os.pid_t) !void {
        _ = try ptrace.ptrace(c.PTRACE_DETACH, pid, 0, 0);
        // TODO:
        // Detect tracee state, if it is not in a prace-stop state,
        // send a signal in order to move it to the required state.
    }

    /// Blocks until a filtered syscall starts or ends in any tracee.
    /// Returns null once every tracee has exited.
    pub fn next_syscall(self: *Inspector) !?SyscallContext {
        if (!self.has_tracees) return null;
        // Filled in by events.next_event before we hand it to the caller.
        var context = events.Context{
            .pid = undefined,
            .registers = undefined,
        };
        while (true) {
            const action = try events.next_event(null, &self.tracee_map, &context, .{ .inverse = self.inverse, .calls = self.syscalls });
            switch (action) {
                // Uninteresting events: keep waiting.
                .CONT, .NORMAL, .INSPECT_RESULT_UNKNOWN_SYSCALL => continue,
                .EXIT => {
                    if (self.tracee_map.count() == 0) self.has_tracees = false;
                    return null;
                },
                .INSPECT => {
                    return SyscallContext{ .pre_call = context };
                },
                .INSPECT_RESULT => {
                    return SyscallContext{ .post_call = context };
                },
            }
        }
    }

    /// Resumes Tracee after a syscall or syscall result has been inspected.
    pub fn resume_tracee(self: *Inspector, pid: os.pid_t) !void {
        try events.resume_from_inspection(&self.tracee_map, pid);
    }

    /// This will block while trying to finish the syscall.
    /// Make sure you are only using this method on non-blocking syscalls.
    /// Executes a syscall that has been inspected and waits for syscall to finish.
    /// Returns resulting registers on success.
    /// If result is null, program has concluded.
    pub fn start_and_finish_syscall_blocking(self: *Inspector, context: events.Context) !?c.registers {
        try self.resume_tracee(context.pid);
        var new_ctx = context;
        while (true) {
            // Unlike next_syscall, wait on this specific pid only.
            const action = try events.next_event(context.pid, &self.tracee_map, &new_ctx, .{ .inverse = self.inverse, .calls = self.syscalls });
            switch (action) {
                .CONT => {
                    // If tracee exists, it must have had an unexpected stop that did not kill the process.
                    // Therefore, we want to wait for the next event again until it is the event we expected.
                    if (self.tracee_map.get(context.pid)) |_| {
                        continue;
                    }
                    // Otherwise, the traced process is dead.
                    return null;
                },
                .EXIT => {
                    if (self.tracee_map.count() == 0) self.has_tracees = false;
                    return null;
                },
                // NORMAL action means a non-inspected syscall has started or ended.
                // Is this even possible while we're in the middle of a different syscall?
                .NORMAL => continue,
                .INSPECT => @panic("This should not occur. Inspecting a call that should be finished"),
                .INSPECT_RESULT => return new_ctx.registers,
                .INSPECT_RESULT_UNKNOWN_SYSCALL => return error.NonExistentSyscall,
            }
        }
    }

    /// Nullifies the syscall, returning an error provided by the caller.
    /// Only works on calls that are in a 'pre_call' state.
    /// When this call finishes successfully, the tracee will have just exited its 'post_call' state.
    pub fn nullify_syscall(self: *Inspector, context: events.Context, errno: c.sregT) !void {
        var newregs = context.registers;
        newregs.syscall = @bitCast(c.regT, @as(c.sregT, -1)); // set syscall identifier to one that doesn't exist
        try ptrace.setregs(context.pid, newregs);
        // We *expect* NonExistentSyscall here: that confirms the kernel saw
        // our bogus syscall number. On that path we restore the original
        // syscall id and overwrite the result with -errno.
        _ = self.start_and_finish_syscall_blocking(context) catch |err| {
            switch (err) {
                error.NonExistentSyscall => {
                    newregs.syscall = context.registers.syscall;
                    newregs.result = @bitCast(c.regT, -errno);
                    try ptrace.setregs(context.pid, newregs);
                    return;
                },
                else => return err,
            }
        };
        // Reaching this point means the nullified syscall "succeeded",
        // which should be impossible -- report it.
        return error.ErrorNullifyingSyscall;
    }
};

/// Forks and initiates ptrace from the child program.
/// Child then executes the target process.
/// Parent syncs with child, and then returns the child's PID
fn fork_spawn_process(allocator: *std.mem.Allocator, argv: []const []const u8) !os.pid_t {
    const child_pid = try os.fork();
    var envmap = try std.process.getEnvMap(allocator);
    defer envmap.deinit();
    switch (child_pid) {
        -1 => return error.UnknownForkingError,
        // child process: request tracing, then replace ourselves with the
        // target. execvpe only returns on failure.
        0 => {
            _ = try ptrace.ptrace(c.PTRACE_TRACEME, 0, 0, 0);
            const err = os.execvpe(allocator, argv, &envmap);
            return err;
        },
        // parent: wait for the child's initial ptrace stop before returning.
        else => {
            _ = os.waitpid(child_pid, 0);
            return child_pid;
        },
    }
}
src/syspect.zig
const std = @import("std");
const kernel = @import("root");
const mm = kernel.mm;
const x86 = @import("../x86.zig");
const apic = x86.apic;
const elf = std.elf;
const MMIORegion = kernel.mmio.DynamicMMIORegion;
const lib = kernel.lib;

var logger = @TypeOf(x86.logger).childOf(@typeName(@This())){};

/// Physical address the AP startup trampoline is copied to. Must be a
/// page-aligned address below 1MiB, expressible as a SIPI start page.
const TRAMPOLINE_BASE = 0x8000;

fn allocateTrampoline() !mm.PhysicalAddress {
    return mm.PhysicalAddress.new(TRAMPOLINE_BASE);
}

/// AMD64 ELF relocation types we know how to apply to the trampoline.
const RelocType = enum(u32) {
    R_AMD64_64 = 1,
    R_AMD64_32 = 10,
    R_AMD64_16 = 12,
    _,
};

/// Patches the KERNEL_CR3 slot in the trampoline with the PML4 root.
fn patchCr3Value(buffer: []u8, offset: u64) void {
    const cr3_value = x86.mm.kernel_vm_impl.pml4.root.value;
    // The trampoline stores CR3 as 32 bits; a PML4 above 4GiB can't be used.
    if (cr3_value > std.math.maxInt(u32)) @panic("PML4 too far");
    std.mem.writeIntSliceLittle(
        u32,
        buffer[offset .. offset + @sizeOf(u32)],
        @truncate(u32, cr3_value),
    );
}

// Boot stack and handshake state for the AP being started.
var ap_boot_stack: [0x1000]u8 = undefined;
var startup_lock = kernel.lib.Spinlock.init();
var ap_booted: bool = false;

/// Spins until the AP sets ap_booted under the startup lock.
fn waitUntilBooted() void {
    while (true) {
        const held = startup_lock.acquire();
        defer held.release();
        if (ap_booted) break;
    }
}

/// First Zig code executed by a freshly started AP: load GDT/IDT, set up
/// segments, then signal the BSP and halt.
fn apEntry() callconv(.C) noreturn {
    x86.main_gdt.load();
    x86.set_ds(x86.null_entry.raw);
    x86.set_es(x86.null_entry.raw);
    x86.set_fs(x86.null_entry.raw);
    x86.set_gs(x86.null_entry.raw);
    x86.set_ss(x86.kernel_data.raw);
    x86.main_gdt.reload_cs(x86.kernel_code);
    x86.main_idt.load();
    //GSBASE.write(@ptrToInt(&boot_cpu_gsstruct));
    {
        const held = startup_lock.acquire();
        defer held.release();
        ap_booted = true;
    }
    x86.hang();
}

/// Patches the ENTRYPOINT slot with the address of apEntry.
fn patchEntrypoint(buffer: []u8, offset: u64) void {
    const entry = @ptrToInt(apEntry);
    std.mem.writeIntSliceLittle(
        u64,
        buffer[offset .. offset + @sizeOf(u64)],
        @truncate(u64, entry),
    );
}

/// Patches the STACK slot with the top of the AP boot stack.
fn patchRspValue(buffer: []u8, offset: u64) void {
    const rsp_value = @ptrToInt(&ap_boot_stack) + @sizeOf(@TypeOf(ap_boot_stack));
    std.mem.writeIntSliceLittle(
        u64,
        buffer[offset .. offset + @sizeOf(u64)],
        @truncate(u64, rsp_value),
    );
}

/// Applies a section-relative relocation: the trampoline is linked at 0 and
/// runs at TRAMPOLINE_BASE, so the base is added to the addend.
fn patchSectionRel(buffer: []u8, offset: u64, addend: i64, typ: RelocType) void {
    switch (typ) {
        .R_AMD64_16 => {
            const val: u32 = TRAMPOLINE_BASE + @intCast(u32, addend);
            std.mem.writeIntSliceLittle(
                u16,
                buffer[offset .. offset + @sizeOf(u16)],
                @truncate(u16, val),
            );
        },
        .R_AMD64_32 => {
            const val: u32 = TRAMPOLINE_BASE + @intCast(u32, addend);
            std.mem.writeIntSliceLittle(
                u32,
                buffer[offset .. offset + @sizeOf(u32)],
                @truncate(u32, val),
            );
        },
        .R_AMD64_64 => {
            const val: u64 = TRAMPOLINE_BASE + @intCast(u64, addend);
            std.mem.writeIntSliceLittle(
                u64,
                buffer[offset .. offset + @sizeOf(u64)],
                val,
            );
        },
        _ => @panic("Unimplemented"),
    }
}

/// Walks .rela.smp_trampoline and patches the copied trampoline in `buffer`:
/// well-known symbols (KERNEL_CR3, STACK, ENTRYPOINT) get their runtime
/// values, unnamed symbols get section-relative fixups.
fn relocateStartupCode(buffer: []u8) void {
    var relocations = x86.trampoline.getSectionData(".rela.smp_trampoline").?;
    while (relocations.len > 0) : (relocations = relocations[@sizeOf(elf.Elf64_Rela)..]) {
        // Copy out the Rela record to avoid unaligned access into the blob.
        var rela: elf.Elf64_Rela = undefined;
        std.mem.copy(u8, std.mem.asBytes(&rela), relocations[0..@sizeOf(@TypeOf(rela))]);
        const offset = rela.r_offset;
        const typ = @intToEnum(RelocType, rela.r_type());
        const symbol = x86.trampoline.getSymbol(rela.r_sym());
        const name = x86.trampoline.getString(symbol.?.st_name);
        const has_name = name != null;
        const eql = std.mem.eql;
        // zig bug: variable has to be hoisted outside {}
        // https://github.com/ziglang/zig/issues/7467
        const escaped_name = if (name) |nam| std.fmt.fmtSliceEscapeLower(nam) else std.fmt.fmtSliceEscapeLower("(null)");
        logger.debug("{} of symbol {} at {}\n", .{ typ, escaped_name, rela.r_offset });
        if (has_name and eql(u8, name.?, "KERNEL_CR3") and typ == .R_AMD64_32) {
            patchCr3Value(buffer, offset);
        } else if (has_name and eql(u8, name.?, "STACK") and typ == .R_AMD64_64) {
            patchRspValue(buffer, offset);
        } else if (has_name and eql(u8, name.?, "ENTRYPOINT") and typ == .R_AMD64_64) {
            patchEntrypoint(buffer, offset);
        } else if (has_name and eql(u8, name.?, "")) {
            patchSectionRel(buffer, offset, rela.r_addend, typ);
        } else {
            logger.log("Unknown relocation\n", .{});
            logger.log("{}\n", .{symbol});
            logger.log("{}\n", .{rela});
        }
    }
}

/// Wakes one AP using the INIT + SIPI + SIPI sequence. `start_page` is the
/// 4KiB page number the AP begins executing at in real mode.
fn wakeUpCpu(lapic: *const MMIORegion, lapic_id: u8, start_page: u8) void {
    // TODO add sleeping between INIT and SIPI per the MP spec timing.
    logger.debug("Sending INIT\n", .{});
    apic.sendCommand(lapic, lapic_id, 0b101 << 8 | 1 << 14);

    // SIPI command
    const command = init: {
        var v: u20 = 0;
        // vector
        v |= start_page;
        // SIPI
        v |= 0b110 << 8;
        break :init v;
    };
    logger.debug("Sending SIPI 1/2\n", .{});
    apic.sendCommand(lapic, lapic_id, command);
    logger.debug("Sending SIPI 2/2\n", .{});
    apic.sendCommand(lapic, lapic_id, command);
}

/// Iterates the ACPI MADT, yielding only Local APIC entries.
const LapicIterator = struct {
    madt_it: x86.acpi.MADTIterator,

    pub fn next(self: *@This()) ?*const x86.acpi.MADTLapic {
        if (self.madt_it.next()) |header| {
            const typ = lib.intToEnumSafe(x86.acpi.MADTEntryType, header.entry_type);
            // Unknown entry type: skip it.
            if (typ == null) return self.next();
            switch (typ.?) {
                .LocalApic => {
                    return @ptrCast(*const x86.acpi.MADTLapic, header);
                },
                // Recognized non-LAPIC entry (IOAPIC, overrides, ...): skip
                // it and keep scanning. The old code returned null here,
                // which silently dropped every LAPIC listed after the first
                // non-LAPIC entry in the MADT.
                else => return self.next(),
            }
        }
        return null;
    }

    pub fn init(madt_it: x86.acpi.MADTIterator) LapicIterator {
        return .{ .madt_it = madt_it };
    }
};

fn iterLapic() LapicIterator {
    var it = x86.acpi.iterMADT();
    return LapicIterator.init(it);
}

/// Boots all application processors: maps the trampoline page, copies and
/// relocates the startup code, then wakes each non-boot LAPIC in turn and
/// waits for its handshake.
pub fn init() void {
    const PAGE_SIZE = 0x1000;
    // `try` is illegal in a void-returning function (the old code did not
    // compile); handle the error explicitly instead.
    const phys_start = allocateTrampoline() catch {
        logger.err("Failed to allocate AP trampoline\n", .{});
        return;
    };
    const start_page: u8 = @truncate(u8, (phys_start.value / PAGE_SIZE));
    const virt_start = mm.VirtualAddress.new(TRAMPOLINE_BASE);
    const trampoline = mm.kernel_vm.map_memory(
        virt_start,
        phys_start,
        PAGE_SIZE,
        mm.VirtualMemory.Protection.RWX,
    ) catch {
        // Old code omitted the (required) args tuple here.
        logger.err("Failed to map AP memory", .{});
        return;
    };
    defer mm.kernel_vm.unmap(trampoline) catch {
        @panic("Failed to unmap AP memory");
    };

    const startup_code = x86.trampoline.getSectionData(".smp_trampoline").?;
    const page_bytes = trampoline.as_bytes();
    // Validate the size *before* slicing: the old code sliced page_bytes to
    // startup_code.len first (out-of-bounds if too big) and then compared
    // the slice's length against itself, which was always false.
    if (startup_code.len > page_bytes.len) {
        @panic("Trampoline size more than one page long!");
    }
    const buffer = page_bytes[0..startup_code.len];
    std.mem.copy(u8, buffer, startup_code);

    logger.log("Performing AP startup code relocation\n", .{});
    relocateStartupCode(buffer);

    var it = iterLapic();
    const apic_id = x86.apic.getLapicId();
    while (it.next()) |lapic| {
        // Reset the handshake flag before waking the next AP.
        {
            const held = startup_lock.acquire();
            defer held.release();
            ap_booted = false;
        }
        // Don't try to wake ourselves (the bootstrap processor).
        if (apic_id != lapic.acpi_processor_uid) {
            wakeUpCpu(&apic.lapic, lapic.acpi_processor_uid, start_page);
            waitUntilBooted();
            logger.info("CPU{} up\n", .{lapic.acpi_processor_uid});
        }
    }
}
kernel/arch/x86/smp.zig
const std = @import("std");
const Allocator = std.mem.Allocator;
const math = std.math;
const assert = std.debug.assert;
const log = @import("./log.zig").log;
const render = @import("./render.zig");
const Pixel = render.Pixel;
const Canvas = render.Canvas;
const math_utils = @import("./math_utils.zig");
const Point = math_utils.Point;
const Line = math_utils.Line;

/// Geographic coordinate in degrees. Packed so it can be shared with the
/// JS/wasm side as raw bytes.
pub const Coord = packed struct {
    latitude: f32,
    longitude: f32,
};

/// Equatorial sky coordinate in degrees (right ascension / declination).
pub const SkyCoord = packed struct {
    right_ascension: f32 = 0,
    declination: f32 = 0,

    /// Converts this sky coordinate to the geographic coordinate directly
    /// beneath it at `observer_timestamp` (Unix millis): latitude is the
    /// declination, longitude is RA minus the sidereal rotation term,
    /// normalized into [-180, 180].
    pub fn getCoord(sky_coord: SkyCoord, observer_timestamp: i64) Coord {
        // Milliseconds between the Unix epoch and the J2000 epoch.
        const j2000_offset_millis = 949_428_000_000;
        const days_since_j2000 = @intToFloat(f64, observer_timestamp - j2000_offset_millis) / 86400000.0;
        // Same rotation formula as ObserverPosition.localSiderealTime (minus
        // the observer longitude term) -- keep the two in sync.
        // NOTE(review): `15 * observer_timestamp` multiplies raw millis;
        // classic LST formulas use 15 * UT hours. Consistent with
        // localSiderealTime below, but worth confirming.
        var longitude = sky_coord.right_ascension - (100.46 + (0.985647 * days_since_j2000) + @intToFloat(f64, 15 * observer_timestamp));
        // Wrap into [-180, 180] degrees.
        longitude = math_utils.floatMod(longitude, 360);
        if (longitude < -180) {
            longitude += 360;
        } else if (longitude > 180) {
            longitude -= 360;
        }
        return Coord{ .latitude = sky_coord.declination, .longitude = @floatCast(f32, longitude) };
    }
};

/// An observer's location and the time of observation (Unix millis).
pub const ObserverPosition = struct {
    latitude: f32,
    longitude: f32,
    timestamp: i64,

    /// Local sidereal time, in degrees (not normalized).
    pub fn localSiderealTime(pos: ObserverPosition) f64 {
        const j2000_offset_millis = 949_428_000_000;
        const days_since_j2000 = @intToFloat(f64, pos.timestamp - j2000_offset_millis) / 86_400_000.0;
        return 100.46 + (0.985647 * days_since_j2000) + @floatCast(f64, pos.longitude) + @intToFloat(f64, 15 * pos.timestamp);
    }
};

/// A single star record. Packed to match the binary star catalog layout.
pub const Star = packed struct {
    right_ascension: f32,
    declination: f32,
    brightness: f32,
    spec_type: SpectralType,

    /// Color by spectral class, with alpha scaled by brightness
    /// (brightness + 0.15, clamped to [0, 1], mapped to 0..255).
    pub fn getColor(star: Star) Pixel {
        var base_color = star.spec_type.getColor();
        base_color.a = blk: {
            const brightness = star.brightness + 0.15;
            break :blk if (brightness >= 1.0) 255 else if (brightness <= 0) 0 else @floatToInt(u8, brightness * 255.0);
        };
        return base_color;
    }
};

pub const Constellation = struct {
    /// Iterates the boundary polygon as closed edges: consecutive pairs,
    /// plus a final edge from the last vertex back to the first.
    pub const BoundaryIter = struct {
        constellation: Constellation,
        boundary_index: usize = 0,

        pub fn next(iter: *BoundaryIter) ?[2]SkyCoord {
            if (iter.boundary_index >= iter.constellation.boundaries.len) {
                return null;
            }
            // Last vertex: close the polygon by connecting back to index 0.
            if (iter.boundary_index == iter.constellation.boundaries.len - 1) {
                const result = [2]SkyCoord{ iter.constellation.boundaries[iter.boundary_index], iter.constellation.boundaries[0] };
                iter.boundary_index += 1;
                return result;
            }
            const result = [2]SkyCoord{ iter.constellation.boundaries[iter.boundary_index], iter.constellation.boundaries[iter.boundary_index + 1] };
            iter.boundary_index += 1;
            return result;
        }
    };

    /// Line segments of the constellation figure (drawn pairwise).
    asterism: []SkyCoord,
    /// Vertices of the constellation's boundary polygon.
    boundaries: []SkyCoord,
    is_zodiac: bool,

    pub fn boundary_iter(self: Constellation) BoundaryIter {
        return .{ .constellation = self };
    }

    /// Spherical centroid of the boundary polygon: average the unit vectors
    /// of all vertices, then convert back to RA/dec.
    pub fn centroid(constellation: Constellation) SkyCoord {
        var x: f32 = 0;
        var y: f32 = 0;
        var z: f32 = 0;
        for (constellation.boundaries) |b| {
            // convert to radians
            const ra_rad = b.right_ascension * (math.pi / 180.0);
            const dec_rad = b.declination * (math.pi / 180.0);
            x += math.cos(dec_rad) * math.cos(ra_rad);
            y += math.cos(dec_rad) * math.sin(ra_rad);
            z += math.sin(dec_rad);
        }
        x /= @intToFloat(f32, constellation.boundaries.len);
        y /= @intToFloat(f32, constellation.boundaries.len);
        z /= @intToFloat(f32, constellation.boundaries.len);
        const central_long = math.atan2(f32, y, x);
        const central_sqrt = math.sqrt(x * x + y * y);
        const central_lat = math.atan2(f32, z, central_sqrt);
        return SkyCoord{ .right_ascension = central_long * (180.0 / math.pi), .declination = central_lat * (180.0 / math.pi) };
    }
};

// @todo It's possible that the reason for the contellation flickering is that the constellations at the end of the list
// aren't getting drawn before the next draw cycle starts. That could explain why they happen towards the middle of the screen,
// since the constellations are ordered in a roughly clockwise-by-longitude way. Needs more investigation though

/// A great-circle path from `start` to `end`, sampled at `num_waypoints`
/// evenly spaced points. All stored angles are in radians except the
/// waypoints, which are converted back to degrees.
pub fn GreatCircle(comptime num_waypoints: usize) type {
    return struct {
        const Self = @This();

        start: Coord,
        end: Coord,
        distance: f32,
        course_angle: f32,
        waypoints: [num_waypoints]Coord = undefined,

        pub fn init(start: Coord, end: Coord) Self {
            const start_radians = Coord{ .latitude = math_utils.degToRad(start.latitude), .longitude = math_utils.degToRadLong(start.longitude) };
            const end_radians = Coord{ .latitude = math_utils.degToRad(end.latitude), .longitude = math_utils.degToRadLong(end.longitude) };
            // Angular distance via the spherical law of cosines.
            const distance = blk: {
                const long_diff = end_radians.longitude - start_radians.longitude;
                const cos_d = math.sin(start_radians.latitude) * math.sin(end_radians.latitude) + math.cos(start_radians.latitude) * math.cos(end_radians.latitude) * math.cos(long_diff);
                break :blk math_utils.boundedACos(cos_d) catch 0;
            };
            // Initial bearing from start toward end.
            const course_angle = blk: {
                var cos_c = (math.sin(end_radians.latitude) - math.sin(start_radians.latitude) * math.cos(distance)) / (math.cos(start_radians.latitude) * math.sin(distance));
                break :blk math_utils.boundedACos(cos_c) catch 0;
            };
            var great_circle = Self{
                .start = start_radians,
                .end = end_radians,
                .distance = distance,
                .course_angle = course_angle,
            };
            // Whether the path heads in the direction of decreasing longitude.
            const negative_dir = great_circle.end.longitude < great_circle.start.longitude and great_circle.end.longitude > (great_circle.start.longitude - math.pi);
            const waypoint_inc: f32 = great_circle.distance / @intToFloat(f32, num_waypoints);
            var waypoints: [num_waypoints]Coord = undefined;
            for (waypoints) |*waypoint, i| {
                const waypoint_dist = @intToFloat(f32, i + 1) * waypoint_inc;
                // Latitude of the point waypoint_dist along the circle.
                const lat = blk: {
                    const sin_lat_x = math.sin(great_circle.start.latitude) * math.cos(waypoint_dist) + math.cos(great_circle.start.latitude) * math.sin(waypoint_dist) * math.cos(great_circle.course_angle);
                    break :blk math_utils.boundedASin(sin_lat_x) catch 0;
                };
                // Longitude offset relative to the start point.
                const rel_long = blk: {
                    const cos_long_x = (math.cos(waypoint_dist) - math.sin(great_circle.start.latitude) * math.sin(lat)) / (math.cos(great_circle.start.latitude) * math.cos(lat));
                    break :blk math_utils.boundedACos(cos_long_x) catch 0;
                };
                const long = if (negative_dir) great_circle.start.longitude - rel_long else great_circle.start.longitude + rel_long;
                waypoint.* = Coord{ .latitude = math_utils.radToDeg(lat), .longitude = math_utils.radToDegLong(long) };
            }
            great_circle.waypoints = waypoints;
            return great_circle;
        }
    };
}

/// Stellar spectral classification (hottest to coolest).
pub const SpectralType = enum(u8) {
    /// > 30,000 K
    O,
    /// 10,000 K <> 30,000 K
    B,
    /// 7,500 K <> 10,000 K
    A,
    /// 6,000 K <> 7,500 K
    F,
    /// 5,200 K <> 6,000 K
    G,
    /// 3,700 K <> 5,200 K
    K,
    /// 2,400 K <> 3,700 K
    M,

    /// Approximate display color for each class.
    pub fn getColor(spec: SpectralType) Pixel {
        return switch (spec) {
            // Blue
            .O => Pixel.rgb(2, 89, 156),
            // Blue-white
            .B => Pixel.rgb(131, 195, 222),
            // White
            .A => Pixel.rgb(255, 255, 255),
            // Yellow-white
            .F => Pixel.rgb(249, 250, 192),
            // Yellow
            .G => Pixel.rgb(253, 255, 133),
            // Orange
            .K => Pixel.rgb(255, 142, 61),
            // Red
            .M => Pixel.rgb(207, 32, 23)
        };
    }
};

/// Projects and draws a single star if it lands inside the canvas circle.
pub fn projectStar(canvas: *Canvas, star: Star, observer_pos: ObserverPosition) void {
    const point = canvas.coordToPoint(
        SkyCoord{ .right_ascension = star.right_ascension, .declination = star.declination },
        observer_pos,
        true
    ) orelse return;

    if (canvas.isInsideCircle(point)) {
        canvas.setPixelAt(point, star.getColor());
    }
}

/// Draws a constellation's boundary polygon edges. Edges with an
/// unprojectable endpoint are skipped.
pub fn projectConstellationGrid(canvas: *Canvas, constellation: Constellation, color: Pixel, line_width: u32, observer_pos: ObserverPosition) void {
    var iter = constellation.boundary_iter();
    while (iter.next()) |bound| {
        const point_a = canvas.coordToPoint(bound[0], observer_pos, false) orelse continue;
        const point_b = canvas.coordToPoint(bound[1], observer_pos, false) orelse continue;
        canvas.drawLine(Line{ .a = point_a, .b = point_b }, color, line_width);
    }
}

/// Draws the constellation figure; asterism entries are consumed pairwise
/// as independent line segments.
pub fn projectConstellationAsterism(canvas: *Canvas, constellation: Constellation, color: Pixel, line_width: u32, observer_pos: ObserverPosition) void {
    var branch_index: usize = 0;
    while (branch_index < constellation.asterism.len - 1) : (branch_index += 2) {
        const point_a = canvas.coordToPoint(constellation.asterism[branch_index], observer_pos, false) orelse continue;
        const point_b = canvas.coordToPoint(constellation.asterism[branch_index + 1], observer_pos, false) orelse continue;
        canvas.drawLine(Line{ .a = point_a, .b = point_b }, color, line_width);
    }
}

/// Draws the RA/dec graticule: meridians every 15 degrees of RA and
/// parallels every 15 degrees of declination, sampled at 0.1-degree steps.
/// The zero lines get a highlight color.
pub fn drawSkyGrid(canvas: Canvas, observer_pos: ObserverPosition) void {
    const grid_color = Pixel.rgba(91, 101, 117, 180);
    const grid_zero_color = Pixel.rgba(176, 98, 65, 225);

    var base_right_ascension: f32 = 0;
    while (base_right_ascension < 360) : (base_right_ascension += 15) {
        var declination: f32 = -90;
        while (declination <= 90) : (declination += 0.1) {
            const point = canvas.coordToPoint(.{ .right_ascension = base_right_ascension, .declination = declination }, observer_pos, true);
            if (point) |p| {
                if (canvas.isInsideCircle(p)) {
                    if (base_right_ascension == 0) {
                        canvas.setPixelAt(p, grid_zero_color);
                    } else {
                        canvas.setPixelAt(p, grid_color);
                    }
                }
            }
        }
    }

    var base_declination: f32 = -90;
    while (base_declination <= 90) : (base_declination += 15) {
        var right_ascension: f32 = 0;
        while (right_ascension <= 360) : (right_ascension += 0.1) {
            const point = canvas.coordToPoint(.{ .right_ascension = right_ascension, .declination = base_declination }, observer_pos, true);
            if (point) |p| {
                if (canvas.isInsideCircle(p)) {
                    if (base_declination == 0) {
                        canvas.setPixelAt(p, grid_zero_color);
                    } else {
                        canvas.setPixelAt(p, grid_color);
                    }
                }
            }
        }
    }
}

/// Point-in-polygon hit test: returns the index of the constellation whose
/// boundary contains `point`, or null. Uses a two-sided ray cast (left and
/// right); the point is inside only when both rays cross the boundary an
/// odd number of times within the canvas circle.
pub fn getConstellationAtPoint(canvas: *Canvas, point: Point, constellations: []Constellation, observer_pos: ObserverPosition) ?usize {
    if (!canvas.isInsideCircle(point)) return null;

    // Get a ray projected from the point to the right side of the canvas
    const point_ray_right = Line{ .a = point, .b = Point{ .x = @intToFloat(f32, canvas.settings.width), .y = point.y } };
    // Get a ray projected from the point to the left side of the canvas
    const point_ray_left = Line{ .a = point, .b = Point{ .x = -@intToFloat(f32, canvas.settings.width), .y = point.y } };

    for (constellations) |c, constellation_index| {
        if (canvas.settings.zodiac_only and !c.is_zodiac) continue;
        // var b_index: usize = 0;
        var num_intersections_right: u32 = 0;
        var num_intersections_left: u32 = 0;
        // Loop over all of the boundaries and count how many times both rays intersect with the boundary line
        // If they intersect inside the canvas circle, then add that to the left or right intersection counter
        var iter = c.boundary_iter();
        while (iter.next()) |bound| {
            const b_a = canvas.coordToPoint(bound[0], observer_pos, false) orelse continue;
            const b_b = canvas.coordToPoint(bound[1], observer_pos, false) orelse continue;
            const bound_line = Line{ .a = b_a, .b = b_b };
            if (point_ray_right.segmentIntersection(bound_line)) |inter_point| {
                if (canvas.isInsideCircle(inter_point)) {
                    num_intersections_right += 1;
                }
            }
            if (point_ray_left.segmentIntersection(bound_line)) |inter_point| {
                if (canvas.isInsideCircle(inter_point)) {
                    num_intersections_left += 1;
                }
            }
        }
        // If there are an odd number of intersections on the left and right side of the point, then the point
        // is inside the shape
        if (num_intersections_left % 2 == 1 and num_intersections_right % 2 == 1) {
            return constellation_index;
        }
    }

    return null;
}

/// Translates a screen-space drag gesture into a new relative geographic
/// coordinate, treating the drag as motion along a great circle whose
/// angular length is `drag_speed` degrees.
pub fn dragAndMove(drag_start_x: f32, drag_start_y: f32, drag_end_x: f32, drag_end_y: f32, drag_speed: f32) Coord {
    const dist_x = drag_end_x - drag_start_x;
    const dist_y = drag_end_y - drag_start_y;

    // Angle between the starting point and the end point
    // Usually atan2 is used with the parameters in the reverse order (atan2(y, x)).
    // The order here (x, y) is intentional, since otherwise horizontal drags would result in vertical movement
    // and vice versa
    // @todo Maybe hack to fix issue with backwards display? See getProjectedCoord
    const dist_phi = -math.atan2(f32, dist_x, dist_y);

    // drag_distance is the angular distance between the starting location and the result location after a single drag
    // Higher = move more with smaller cursor movements, and vice versa
    const drag_distance: f32 = math_utils.degToRad(drag_speed);

    // Calculate asin(new_latitude), and clamp the result between [-1, 1]
    var sin_lat_x = math.sin(drag_distance) * math.cos(dist_phi);
    if (sin_lat_x > 1.0) {
        sin_lat_x = 1.0;
    } else if (sin_lat_x < -1.0) {
        sin_lat_x = -1.0;
    }
    const new_latitude = math.asin(sin_lat_x);

    // Calculate acos(new_relative_longitude) and clamp the result between [-1, 1]
    var cos_long_x = math.cos(drag_distance) / math.cos(new_latitude);
    if (cos_long_x > 1.0) {
        cos_long_x = 1.0;
    } else if (cos_long_x < -1.0) {
        cos_long_x = -1.0;
    }
    var new_relative_longitude = math_utils.radToDegLong(math.acos(cos_long_x));
    // Mirror the longitude step when the drag pointed in the negative direction.
    new_relative_longitude = if (dist_phi < 0.0) -new_relative_longitude else new_relative_longitude;

    return .{
        .latitude = math_utils.radToDeg(new_latitude),
        .longitude = new_relative_longitude,
    };
}
night-math/src/star_math.zig
const std = @import("std");
const root = @import("./dns.zig");

const Name = root.Name;
const ResourceType = root.ResourceType;
const ResourceClass = root.ResourceClass;

/// DNS response codes (RCODE), as carried in the 4-bit field of the header.
pub const ResponseCode = enum(u4) {
    NoError = 0,
    FormatError = 1,
    ServFail = 2,
    NameError = 3,
    NotImplemented = 4,
    Refused = 5,
};

/// Describes the header of a DNS packet.
pub const Header = packed struct {
    /// The ID of the packet. Replies to a packet MUST have the same ID.
    id: u16 = 0,

    /// Query/Response flag
    /// Defines if this is a response packet or not.
    is_response: bool = false,

    /// TODO convert to enum
    opcode: i4 = 0,

    /// Authoritative Answer flag
    /// Only valid in response packets. Specifies if the server
    /// replying is an authority for the domain name.
    aa_flag: bool = false,

    /// TC flag - TrunCation.
    /// If the packet was truncated.
    truncated: bool = false,

    /// RD flag - Recursion Desired.
    /// Must be copied to a response packet. If set, the server
    /// handling the request can pursue the query recursively.
    wanted_recursion: bool = false,

    /// RA flag - Recursion Available
    /// Whether recursive query support is available on the server.
    recursion_available: bool = false,

    /// DO NOT USE. RFC1035 has not assigned anything to the Z bits
    z: u3 = 0,

    /// Response code.
    response_code: ResponseCode = .NoError,

    /// Amount of questions in the packet.
    question_length: u16 = 0,

    /// Amount of answers in the packet.
    answer_length: u16 = 0,

    /// Amount of nameservers in the packet.
    nameserver_length: u16 = 0,

    /// Amount of additional records in the packet.
    additional_length: u16 = 0,
};

/// A single entry of the question section of a packet.
pub const Question = struct {
    name: Name,
    typ: ResourceType,
    class: ResourceClass,
};

/// DNS resource
pub const Resource = struct {
    name: Name,
    typ: ResourceType,
    class: ResourceClass,
    ttl: i32,

    /// Opaque Resource Data.
    /// Parsing of the data in this is done by a separate package, dns.rdata
    opaque_rdata: []const u8,

    /// Give the size, in bytes, of the binary representation of a resource.
    pub fn size(self: @This()) usize {
        var res_size: usize = 0;

        // name for the resource
        res_size += self.name.size();

        // typ, class, ttl = 3 * u16
        // rdata length is u32
        //
        // TODO(!!!): what size is rdata length actually?
        // synchronize this with serialization.

        res_size += @sizeOf(u16) * 3;
        res_size += @sizeOf(u32);

        // then add the rest of the rdata section
        res_size += self.opaque_rdata.len * @sizeOf(u8);

        return res_size;
    }

    /// Serialize this resource: name, typ, class, ttl, then a u16 rdata
    /// length followed by the rdata bytes.
    pub fn serialize(self: @This(), serializer: anytype) !void {
        try serializer.serialize(self.name);
        try serializer.serialize(self.typ);
        try serializer.serialize(self.class);
        try serializer.serialize(self.ttl);

        // not doing the cast means it gets serialized as an usize.
        try serializer.serialize(@intCast(u16, self.opaque_rdata.len));
        try serializer.serialize(self.opaque_rdata);
    }
};

// Memory-pool list types used by DeserializationContext below.
const ByteList = std.ArrayList(u8);
const StringList = std.ArrayList([]u8);
const ManyStringList = std.ArrayList([][]const u8);

/// Owns all memory allocated while deserializing a packet:
/// - label_pool: every label (and rdata buffer) allocated
/// - name_pool: the per-name label-slice buffers
/// - packet_list: a copy of every byte read off the wire, needed so that
///   DNS compression pointers can be resolved against the whole message.
/// All of it is freed in one shot by deinit().
pub const DeserializationContext = struct {
    allocator: *std.mem.Allocator,
    label_pool: StringList,
    name_pool: ManyStringList,
    packet_list: ByteList,

    const Self = @This();

    pub fn init(allocator: *std.mem.Allocator) Self {
        return Self{
            .allocator = allocator,
            .label_pool = StringList.init(allocator),
            .name_pool = ManyStringList.init(allocator),
            .packet_list = ByteList.init(allocator),
        };
    }

    /// Free every pooled allocation and the pools themselves.
    pub fn deinit(self: *Self) void {
        for (self.label_pool.items) |label| {
            self.allocator.free(label);
        }
        self.label_pool.deinit();

        for (self.name_pool.items) |item| {
            self.allocator.free(item);
        }
        self.name_pool.deinit();

        self.packet_list.deinit();
    }

    /// Allocate a label buffer of `length` bytes whose lifetime is tied to
    /// this context (freed in deinit()).
    pub fn newLabel(self: *Self, length: usize) ![]u8 {
        var newly_allocated = try self.allocator.alloc(u8, length);
        // keep track of newly allocated label for deinitting
        try self.label_pool.append(newly_allocated);
        return newly_allocated;
    }
};

/// One parsed component of a DNS name on the wire:
/// a full label, a compression pointer (already resolved to a Name), or the
/// terminating null octet.
const LabelComponent = union(enum) {
    Full: []const u8,
    Pointer: Name,
    Null: void,
};

/// Wraps a reader so that every byte read is also appended to
/// ctx.packet_list; compression-pointer resolution later indexes into that
/// copy of the full message.
/// NOTE(review): `comptime ReaderType: anytype` - presumably `type` was
/// meant; confirm against the Zig version this targets.
fn WrapperReader(comptime ReaderType: anytype) type {
    return struct {
        underlying_reader: ReaderType,
        ctx: *DeserializationContext,

        const Self = @This();

        pub fn init(underlying_reader: ReaderType, ctx: *DeserializationContext) Self {
            return .{
                .underlying_reader = underlying_reader,
                .ctx = ctx,
            };
        }

        /// Read from the underlying reader, mirroring the bytes into
        /// ctx.packet_list before returning.
        pub fn read(self: *Self, buffer: []u8) !usize {
            const bytes_read = try self.underlying_reader.read(buffer);
            const bytes = buffer[0..bytes_read];
            try self.ctx.packet_list.writer().writeAll(bytes);
            return bytes_read;
        }

        pub const Error = ReaderType.Error || error{OutOfMemory};
        pub const Reader = std.io.Reader(*Self, Error, read);
        pub fn reader(self: *Self) Reader {
            return Reader{ .context = self };
        }
    };
}

/// A whole DNS message: header plus the four record sections.
pub const Packet = struct {
    header: Header,
    questions: []Question,
    answers: []Resource,
    nameservers: []Resource,
    additionals: []Resource,

    const Self = @This();

    /// Sum of the serialized sizes of all four sections (header excluded).
    fn sliceSizes(self: Self) usize {
        var pkt_size: usize = 0;

        for (self.questions) |question| {
            pkt_size += question.name.size();

            // add both type and class (both u16's)
            pkt_size += @sizeOf(u16);
            pkt_size += @sizeOf(u16);
        }

        for (self.answers) |resource| {
            pkt_size += resource.size();
        }
        for (self.nameservers) |resource| {
            pkt_size += resource.size();
        }
        for (self.additionals) |resource| {
            pkt_size += resource.size();
        }

        return pkt_size;
    }

    /// Returns the size in bytes of the binary representation of the packet.
pub fn size(self: Self) usize { return @sizeOf(Header) + self.sliceSizes(); } fn serializeResourceList(self: Self, serializer: anytype, resource_list: []Resource) !void { for (resource_list) |resource| { try serializer.serialize(resource); } } pub fn serialize(self: Self, serializer: anytype) !void { std.debug.assert(self.header.question_length == self.questions.len); std.debug.assert(self.header.answer_length == self.answers.len); std.debug.assert(self.header.nameserver_length == self.nameservers.len); std.debug.assert(self.header.additional_length == self.additionals.len); try serializer.serialize(self.header); for (self.questions) |question| { try serializer.serialize(question.name); try serializer.serialize(question.typ); try serializer.serialize(question.class); } try self.serializeResourceList(serializer, self.answers); try self.serializeResourceList(serializer, self.nameservers); try self.serializeResourceList(serializer, self.additionals); } fn unfoldPointer( first_offset_component: u8, deserializer: anytype, ctx: *DeserializationContext, /// Buffer that holds the memory for the dns name name_buffer: [][]const u8, name_index: usize, ) anyerror!Name { // we need to read another u8 and merge both that and first_offset_component // into a u16 we can use as an offset in the entire packet, etc. // the final offset is actually 14 bits, the first two are identification // of the offset itself. const second_offset_component = try deserializer.deserialize(u8); // merge them together var offset: u16 = (first_offset_component << 7) | second_offset_component; // set first two bits of ptr_offset to zero as they're the // pointer prefix bits (which are always 1, which brings problems) offset &= ~@as(u16, 1 << 15); offset &= ~@as(u16, 1 << 14); // RFC1035 says: // // The OFFSET field specifies an offset from // the start of the message (i.e., the first octet of the ID field in the // domain header). A zero offset specifies the first byte of the ID field, // etc. 
// this mechanism requires us to hold the entire packet in memory // // one guarantee we have is that pointers can't reference packet // offsets in the past (oh than god that makes *some* sense!) // to make this work with nicer safety guarantees, we slice the // packet bytes we know of, starting on that offset, and ending in the // first zero octet we find. // // if offset is X, // then our slice starts at X and ends at X+n, as follows: // // ... [ 0] ... // | | // X X+n // // we just need to calculate n by using indexOf to find the null octet // // TODO a way to hold the memory we deserialized, maybe a custom // wrapper Reader that allocates and stores the bytes it read? i think // we already have that kind of thing in std, but i need more time var offset_size_opt = std.mem.indexOf(u8, ctx.packet_list.items[offset..], "\x00"); if (offset_size_opt == null) return error.ParseFail; var offset_size = offset_size_opt.?; // from our slice, we need to read a name from it. we do it via // creating a FixedBufferStream, extracting a reader from it, creating // a deserializer, and feeding that to readName. const label_data = ctx.packet_list.items[offset .. offset + (offset_size + 1)]; const T = std.io.FixedBufferStream([]const u8); const InnerDeserializer = std.io.Deserializer(.Big, .Bit, T.Reader); var stream = T{ .buffer = label_data, .pos = 0, }; var new_deserializer = InnerDeserializer.init(stream.reader()); // TODO: no name buffer available here. 
i think we can create a // NameDeserializationContext which holds both the name buffer AND // the index so we could keep appending new labels to it return Self.readName(&new_deserializer, ctx, name_buffer, name_index); } /// Deserialize a LabelComponent, which can be: /// - a pointer /// - a label /// - a null octet fn readLabel( deserializer: anytype, ctx: *DeserializationContext, name_buffer: [][]const u8, name_index: usize, ) !LabelComponent { // pointers, in the binary representation of a byte, are as follows // 1 1 B B B B B B | B B B B B B B B // they are two bytes length, but to identify one, you check if the // first two bits are 1 and 1 respectively. // // then you read the rest, and turn it into an offset (without the // starting bits!!!) // // to prevent inefficiencies, we just read a single bite, see if it // has the starting bits, and then we chop it off, merging with the // next byte. pointer offsets are 14 bits long // // when it isn't a pointer, its a length for a given label, and that // length can only be a single byte. // // if the length is 0, its a null octet var possible_length = try deserializer.deserialize(u8); if (possible_length == 0) return LabelComponent{ .Null = {} }; // RFC1035: // since the label must begin with two zero bits because // labels are restricted to 63 octets or less. var bit1 = (possible_length & (1 << 7)) != 0; var bit2 = (possible_length & (1 << 6)) != 0; if (bit1 and bit2) { // its a pointer! var name = try Self.unfoldPointer( possible_length, deserializer, ctx, name_buffer, name_index, ); return LabelComponent{ .Pointer = name }; } else { // those must be 0 std.debug.assert((!bit1) and (!bit2)); // the next <possible_length> bytes contain a full label. 
// // we use the label pool so we can give a bigger lifetime var label = try ctx.newLabel(possible_length); var index: usize = 0; while (index < possible_length) : (index += 1) { label[index] = try deserializer.deserialize(u8); } return LabelComponent{ .Full = label }; } } /// Deserializes a DNS Name pub fn readName( deserializer: anytype, ctx: *DeserializationContext, name_buffer: [][]const u8, name_index: ?usize, ) !Name { var buffer_index: usize = name_index orelse 0; // RFC1035, 4.1.4 Message Compression: // The compression scheme allows a domain name in a message to be // represented as either: // // - a sequence of labels ending in a zero octet // - a pointer // - a sequence of labels ending with a pointer // // == // // All three of those must end in some way of // name_buffer[buffer_index] = something; // since thats where our result will go. // keep attempting to get labels off the deserializer and // filling the name_buffer. // // if it ends in 0, be done // if its a pointer, follow pointer // if it ends in a pointer, follow pointer // else, fill label while (true) { var component: LabelComponent = try Self.readLabel(deserializer, ctx, name_buffer, buffer_index); switch (component) { .Full => |label| { name_buffer[buffer_index] = label; buffer_index += 1; }, .Pointer => |name| return name, .Null => break, } } return Name{ .labels = name_buffer[0..(buffer_index)] }; } /// (almost) Deserialize an RDATA section. This only deserializes to a slice of u8. /// Parsing of RDATA sections are in their own dns.rdata module. fn deserializeRData( self: *Self, deserializer: anytype, ctx: *DeserializationContext, ) ![]const u8 { var rdata_length = try deserializer.deserialize(u16); var opaque_rdata = try ctx.allocator.alloc(u8, rdata_length); // TODO create dedicated pool for this? 
try ctx.label_pool.append(opaque_rdata); var i: u16 = 0; while (i < rdata_length) : (i += 1) { opaque_rdata[i] = try deserializer.deserialize(u8); } return opaque_rdata; } fn deserializeResourceList( self: *Self, deserializer: anytype, ctx: *DeserializationContext, length: usize, resource_list: *[]Resource, ) !void { var list = std.ArrayList(Resource).init(ctx.allocator); var i: usize = 0; while (i < length) : (i += 1) { // TODO name buffer stuff var name_buffer = try ctx.allocator.alloc([]u8, 128); try ctx.name_pool.append(name_buffer); var name = try Self.readName(deserializer, ctx, name_buffer, null); var typ = try deserializer.deserialize(u16); var class = try deserializer.deserialize(u16); var ttl = try deserializer.deserialize(i32); // rdlength and rdata are under deserializeRData var opaque_rdata = try self.deserializeRData(deserializer, ctx); var resource = Resource{ .name = name, .typ = try std.meta.intToEnum(ResourceType, typ), .class = try std.meta.intToEnum(ResourceClass, class), .ttl = ttl, .opaque_rdata = opaque_rdata, }; try list.append(resource); } resource_list.* = list.items; } pub fn readInto( self: *Self, upstream_reader: anytype, ctx: *DeserializationContext, ) !void { const WrapperReaderType = WrapperReader(@TypeOf(upstream_reader)); var wrapper_reader = WrapperReaderType.init(upstream_reader, ctx); var reader = wrapper_reader.reader(); const DeserializerType = std.io.Deserializer(.Big, .Bit, @TypeOf(reader)); var deserializer = DeserializerType.init(reader); self.header = try deserializer.deserialize(Header); var questions = std.ArrayList(Question).init(ctx.allocator); var i: usize = 0; while (i < self.header.question_length) { var name_buffer = try ctx.allocator.alloc([]u8, 128); try ctx.name_pool.append(name_buffer); var name = try Self.readName(&deserializer, ctx, name_buffer, null); var qtype = try deserializer.deserialize(u16); var qclass = try deserializer.deserialize(u16); var question = Question{ .name = name, .typ = try 
std.meta.intToEnum(ResourceType, qtype), .class = try std.meta.intToEnum(ResourceClass, qclass), }; try questions.append(question); i += 1; } self.questions = questions.items; try self.deserializeResourceList(&deserializer, ctx, self.header.answer_length, &self.answers); try self.deserializeResourceList(&deserializer, ctx, self.header.nameserver_length, &self.nameservers); try self.deserializeResourceList(&deserializer, ctx, self.header.additional_length, &self.additionals); } };
src/pkg2/packet.zig
const Module = @This(); const std = @import("std"); const Compilation = @import("Compilation.zig"); const mem = std.mem; const Allocator = std.mem.Allocator; const ArrayListUnmanaged = std.ArrayListUnmanaged; const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const TypedValue = @import("TypedValue.zig"); const assert = std.debug.assert; const log = std.log.scoped(.module); const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const Target = std.Target; const Package = @import("Package.zig"); const link = @import("link.zig"); const ir = @import("ir.zig"); const zir = @import("zir.zig"); const Inst = ir.Inst; const Body = ir.Body; const ast = std.zig.ast; const trace = @import("tracy.zig").trace; const astgen = @import("astgen.zig"); const zir_sema = @import("zir_sema.zig"); /// General-purpose allocator. Used for both temporary and long-term storage. gpa: *Allocator, comp: *Compilation, /// Where our incremental compilation metadata serialization will go. zig_cache_artifact_directory: Compilation.Directory, /// Pointer to externally managed resource. `null` if there is no zig file being compiled. root_pkg: *Package, /// Module owns this resource. /// The `Scope` is either a `Scope.ZIRModule` or `Scope.File`. root_scope: *Scope, /// It's rare for a decl to be exported, so we save memory by having a sparse map of /// Decl pointers to details about them being exported. /// The Export memory is owned by the `export_owners` table; the slice itself is owned by this table. decl_exports: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{}, /// We track which export is associated with the given symbol name for quick /// detection of symbol collisions. symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{}, /// This models the Decls that perform exports, so that `decl_exports` can be updated when a Decl /// is modified. 
Note that the key of this table is not the Decl being exported, but the Decl that /// is performing the export of another Decl. /// This table owns the Export memory. export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{}, /// Maps fully qualified namespaced names to the Decl struct for them. decl_table: std.ArrayHashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false) = .{}, /// We optimize memory usage for a compilation with no compile errors by storing the /// error messages and mapping outside of `Decl`. /// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator. /// Note that a Decl can succeed but the Fn it represents can fail. In this case, /// a Decl can have a failed_decls entry but have analysis status of success. failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *Compilation.ErrorMsg) = .{}, /// Using a map here for consistency with the other fields here. /// The ErrorMsg memory is owned by the `Scope`, using Module's general purpose allocator. failed_files: std.AutoArrayHashMapUnmanaged(*Scope, *Compilation.ErrorMsg) = .{}, /// Using a map here for consistency with the other fields here. /// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator. failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *Compilation.ErrorMsg) = .{}, next_anon_name_index: usize = 0, /// Candidates for deletion. After a semantic analysis update completes, this list /// contains Decls that need to be deleted if they end up having no references to them. deletion_set: ArrayListUnmanaged(*Decl) = .{}, /// Error tags and their values, tag names are duped with mod.gpa. 
global_error_set: std.StringHashMapUnmanaged(u16) = .{}, /// Keys are fully qualified paths import_table: std.StringArrayHashMapUnmanaged(*Scope.File) = .{}, /// Incrementing integer used to compare against the corresponding Decl /// field to determine whether a Decl's status applies to an ongoing update, or a /// previous analysis. generation: u32 = 0, stage1_flags: packed struct { have_winmain: bool = false, have_wwinmain: bool = false, have_winmain_crt_startup: bool = false, have_wwinmain_crt_startup: bool = false, have_dllmain_crt_startup: bool = false, have_c_main: bool = false, reserved: u2 = 0, } = .{}, pub const Export = struct { options: std.builtin.ExportOptions, /// Byte offset into the file that contains the export directive. src: usize, /// Represents the position of the export, if any, in the output file. link: link.File.Elf.Export, /// The Decl that performs the export. Note that this is *not* the Decl being exported. owner_decl: *Decl, /// The Decl being exported. Note this is *not* the Decl performing the export. exported_decl: *Decl, status: enum { in_progress, failed, /// Indicates that the failure was due to a temporary issue, such as an I/O error /// when writing to the output file. Retrying the export may succeed. failed_retryable, complete, }, }; pub const Decl = struct { /// This name is relative to the containing namespace of the decl. It uses a null-termination /// to save bytes, since there can be a lot of decls in a compilation. The null byte is not allowed /// in symbol names, because executable file formats use null-terminated strings for symbol names. /// All Decls have names, even values that are not bound to a zig namespace. This is necessary for /// mapping them to an address in the output file. /// Memory owned by this decl, using Module's allocator. name: [*:0]const u8, /// The direct parent container of the Decl. This is either a `Scope.Container` or `Scope.ZIRModule`. /// Reference to externally owned memory. 
scope: *Scope, /// The AST Node decl index or ZIR Inst index that contains this declaration. /// Must be recomputed when the corresponding source file is modified. src_index: usize, /// The most recent value of the Decl after a successful semantic analysis. typed_value: union(enum) { never_succeeded: void, most_recent: TypedValue.Managed, }, /// Represents the "shallow" analysis status. For example, for decls that are functions, /// the function type is analyzed with this set to `in_progress`, however, the semantic /// analysis of the function body is performed with this value set to `success`. Functions /// have their own analysis status field. analysis: enum { /// This Decl corresponds to an AST Node that has not been referenced yet, and therefore /// because of Zig's lazy declaration analysis, it will remain unanalyzed until referenced. unreferenced, /// Semantic analysis for this Decl is running right now. This state detects dependency loops. in_progress, /// This Decl might be OK but it depends on another one which did not successfully complete /// semantic analysis. dependency_failure, /// Semantic analysis failure. /// There will be a corresponding ErrorMsg in Module.failed_decls. sema_failure, /// There will be a corresponding ErrorMsg in Module.failed_decls. /// This indicates the failure was something like running out of disk space, /// and attempting semantic analysis again may succeed. sema_failure_retryable, /// There will be a corresponding ErrorMsg in Module.failed_decls. codegen_failure, /// There will be a corresponding ErrorMsg in Module.failed_decls. /// This indicates the failure was something like running out of disk space, /// and attempting codegen again may succeed. codegen_failure_retryable, /// Everything is done. During an update, this Decl may be out of date, depending /// on its dependencies. The `generation` field can be used to determine if this /// completion status occurred before or after a given update. 
complete, /// A Module update is in progress, and this Decl has been flagged as being known /// to require re-analysis. outdated, }, /// This flag is set when this Decl is added to a check_for_deletion set, and cleared /// when removed. deletion_flag: bool, /// Whether the corresponding AST decl has a `pub` keyword. is_pub: bool, /// An integer that can be checked against the corresponding incrementing /// generation field of Module. This is used to determine whether `complete` status /// represents pre- or post- re-analysis. generation: u32, /// Represents the position of the code in the output file. /// This is populated regardless of semantic analysis and code generation. link: link.File.LinkBlock, /// Represents the function in the linked output file, if the `Decl` is a function. /// This is stored here and not in `Fn` because `Decl` survives across updates but /// `Fn` does not. /// TODO Look into making `Fn` a longer lived structure and moving this field there /// to save on memory usage. fn_link: link.File.LinkFn, contents_hash: std.zig.SrcHash, /// The shallow set of other decls whose typed_value could possibly change if this Decl's /// typed_value is modified. dependants: DepsTable = .{}, /// The shallow set of other decls whose typed_value changing indicates that this Decl's /// typed_value may need to be regenerated. 
dependencies: DepsTable = .{}, /// The reason this is not `std.AutoArrayHashMapUnmanaged` is a workaround for /// stage1 compiler giving me: `error: struct 'Module.Decl' depends on itself` pub const DepsTable = std.ArrayHashMapUnmanaged(*Decl, void, std.array_hash_map.getAutoHashFn(*Decl), std.array_hash_map.getAutoEqlFn(*Decl), false); pub fn destroy(self: *Decl, gpa: *Allocator) void { gpa.free(mem.spanZ(self.name)); if (self.typedValueManaged()) |tvm| { tvm.deinit(gpa); } self.dependants.deinit(gpa); self.dependencies.deinit(gpa); gpa.destroy(self); } pub fn src(self: Decl) usize { switch (self.scope.tag) { .container => { const container = @fieldParentPtr(Scope.Container, "base", self.scope); const tree = container.file_scope.contents.tree; // TODO Container should have its own decls() const decl_node = tree.root_node.decls()[self.src_index]; return tree.token_locs[decl_node.firstToken()].start; }, .zir_module => { const zir_module = @fieldParentPtr(Scope.ZIRModule, "base", self.scope); const module = zir_module.contents.module; const src_decl = module.decls[self.src_index]; return src_decl.inst.src; }, .file, .block => unreachable, .gen_zir => unreachable, .local_val => unreachable, .local_ptr => unreachable, .decl => unreachable, } } pub fn fullyQualifiedNameHash(self: Decl) Scope.NameHash { return self.scope.fullyQualifiedNameHash(mem.spanZ(self.name)); } pub fn typedValue(self: *Decl) error{AnalysisFail}!TypedValue { const tvm = self.typedValueManaged() orelse return error.AnalysisFail; return tvm.typed_value; } pub fn value(self: *Decl) error{AnalysisFail}!Value { return (try self.typedValue()).val; } pub fn dump(self: *Decl) void { const loc = std.zig.findLineColumn(self.scope.source.bytes, self.src); std.debug.print("{}:{}:{} name={} status={}", .{ self.scope.sub_file_path, loc.line + 1, loc.column + 1, mem.spanZ(self.name), @tagName(self.analysis), }); if (self.typedValueManaged()) |tvm| { std.debug.print(" ty={} val={}", .{ tvm.typed_value.ty, 
tvm.typed_value.val }); } std.debug.print("\n", .{}); } pub fn typedValueManaged(self: *Decl) ?*TypedValue.Managed { switch (self.typed_value) { .most_recent => |*x| return x, .never_succeeded => return null, } } fn removeDependant(self: *Decl, other: *Decl) void { self.dependants.removeAssertDiscard(other); } fn removeDependency(self: *Decl, other: *Decl) void { self.dependencies.removeAssertDiscard(other); } }; /// Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator. pub const Fn = struct { /// This memory owned by the Decl's TypedValue.Managed arena allocator. analysis: union(enum) { queued: *ZIR, in_progress, /// There will be a corresponding ErrorMsg in Module.failed_decls sema_failure, /// This Fn might be OK but it depends on another Decl which did not successfully complete /// semantic analysis. dependency_failure, success: Body, }, owner_decl: *Decl, /// This memory is temporary and points to stack memory for the duration /// of Fn analysis. pub const Analysis = struct { inner_block: Scope.Block, }; /// Contains un-analyzed ZIR instructions generated from Zig source AST. pub const ZIR = struct { body: zir.Module.Body, arena: std.heap.ArenaAllocator.State, }; /// For debugging purposes. 
pub fn dump(self: *Fn, mod: Module) void { std.debug.print("Module.Function(name={}) ", .{self.owner_decl.name}); switch (self.analysis) { .queued => { std.debug.print("queued\n", .{}); }, .in_progress => { std.debug.print("in_progress\n", .{}); }, else => { std.debug.print("\n", .{}); zir.dumpFn(mod, self); }, } } }; pub const Var = struct { /// if is_extern == true this is undefined init: Value, owner_decl: *Decl, is_extern: bool, is_mutable: bool, is_threadlocal: bool, }; pub const Scope = struct { tag: Tag, pub const NameHash = [16]u8; pub fn cast(base: *Scope, comptime T: type) ?*T { if (base.tag != T.base_tag) return null; return @fieldParentPtr(T, "base", base); } /// Asserts the scope has a parent which is a DeclAnalysis and /// returns the arena Allocator. pub fn arena(self: *Scope) *Allocator { switch (self.tag) { .block => return self.cast(Block).?.arena, .decl => return &self.cast(DeclAnalysis).?.arena.allocator, .gen_zir => return self.cast(GenZIR).?.arena, .local_val => return self.cast(LocalVal).?.gen_zir.arena, .local_ptr => return self.cast(LocalPtr).?.gen_zir.arena, .zir_module => return &self.cast(ZIRModule).?.contents.module.arena.allocator, .file => unreachable, .container => unreachable, } } /// If the scope has a parent which is a `DeclAnalysis`, /// returns the `Decl`, otherwise returns `null`. pub fn decl(self: *Scope) ?*Decl { return switch (self.tag) { .block => self.cast(Block).?.decl, .gen_zir => self.cast(GenZIR).?.decl, .local_val => self.cast(LocalVal).?.gen_zir.decl, .local_ptr => self.cast(LocalPtr).?.gen_zir.decl, .decl => self.cast(DeclAnalysis).?.decl, .zir_module => null, .file => null, .container => null, }; } /// Asserts the scope has a parent which is a ZIRModule or Container and /// returns it. 
pub fn namespace(self: *Scope) *Scope { switch (self.tag) { .block => return self.cast(Block).?.decl.scope, .gen_zir => return self.cast(GenZIR).?.decl.scope, .local_val => return self.cast(LocalVal).?.gen_zir.decl.scope, .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope, .decl => return self.cast(DeclAnalysis).?.decl.scope, .file => return &self.cast(File).?.root_container.base, .zir_module, .container => return self, } } /// Must generate unique bytes with no collisions with other decls. /// The point of hashing here is only to limit the number of bytes of /// the unique identifier to a fixed size (16 bytes). pub fn fullyQualifiedNameHash(self: *Scope, name: []const u8) NameHash { switch (self.tag) { .block => unreachable, .gen_zir => unreachable, .local_val => unreachable, .local_ptr => unreachable, .decl => unreachable, .file => unreachable, .zir_module => return self.cast(ZIRModule).?.fullyQualifiedNameHash(name), .container => return self.cast(Container).?.fullyQualifiedNameHash(name), } } /// Asserts the scope is a child of a File and has an AST tree and returns the tree. pub fn tree(self: *Scope) *ast.Tree { switch (self.tag) { .file => return self.cast(File).?.contents.tree, .zir_module => unreachable, .decl => return self.cast(DeclAnalysis).?.decl.scope.cast(Container).?.file_scope.contents.tree, .block => return self.cast(Block).?.decl.scope.cast(Container).?.file_scope.contents.tree, .gen_zir => return self.cast(GenZIR).?.decl.scope.cast(Container).?.file_scope.contents.tree, .local_val => return self.cast(LocalVal).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree, .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree, .container => return self.cast(Container).?.file_scope.contents.tree, } } /// Asserts the scope is a child of a `GenZIR` and returns it. 
pub fn getGenZIR(self: *Scope) *GenZIR { return switch (self.tag) { .block => unreachable, .gen_zir => self.cast(GenZIR).?, .local_val => return self.cast(LocalVal).?.gen_zir, .local_ptr => return self.cast(LocalPtr).?.gen_zir, .decl => unreachable, .zir_module => unreachable, .file => unreachable, .container => unreachable, }; } /// Asserts the scope has a parent which is a ZIRModule, Contaienr or File and /// returns the sub_file_path field. pub fn subFilePath(base: *Scope) []const u8 { switch (base.tag) { .container => return @fieldParentPtr(Container, "base", base).file_scope.sub_file_path, .file => return @fieldParentPtr(File, "base", base).sub_file_path, .zir_module => return @fieldParentPtr(ZIRModule, "base", base).sub_file_path, .block => unreachable, .gen_zir => unreachable, .local_val => unreachable, .local_ptr => unreachable, .decl => unreachable, } } pub fn unload(base: *Scope, gpa: *Allocator) void { switch (base.tag) { .file => return @fieldParentPtr(File, "base", base).unload(gpa), .zir_module => return @fieldParentPtr(ZIRModule, "base", base).unload(gpa), .block => unreachable, .gen_zir => unreachable, .local_val => unreachable, .local_ptr => unreachable, .decl => unreachable, .container => unreachable, } } pub fn getSource(base: *Scope, module: *Module) ![:0]const u8 { switch (base.tag) { .container => return @fieldParentPtr(Container, "base", base).file_scope.getSource(module), .file => return @fieldParentPtr(File, "base", base).getSource(module), .zir_module => return @fieldParentPtr(ZIRModule, "base", base).getSource(module), .gen_zir => unreachable, .local_val => unreachable, .local_ptr => unreachable, .block => unreachable, .decl => unreachable, } } /// Asserts the scope is a namespace Scope and removes the Decl from the namespace. 
pub fn removeDecl(base: *Scope, child: *Decl) void {
    switch (base.tag) {
        .container => return @fieldParentPtr(Container, "base", base).removeDecl(child),
        .zir_module => return @fieldParentPtr(ZIRModule, "base", base).removeDecl(child),
        .file => unreachable,
        .block => unreachable,
        .gen_zir => unreachable,
        .local_val => unreachable,
        .local_ptr => unreachable,
        .decl => unreachable,
    }
}

/// Asserts the scope is a File or ZIRModule and deinitializes it, then deallocates it.
pub fn destroy(base: *Scope, gpa: *Allocator) void {
    switch (base.tag) {
        .file => {
            const scope_file = @fieldParentPtr(File, "base", base);
            scope_file.deinit(gpa);
            gpa.destroy(scope_file);
        },
        .zir_module => {
            const scope_zir_module = @fieldParentPtr(ZIRModule, "base", base);
            scope_zir_module.deinit(gpa);
            gpa.destroy(scope_zir_module);
        },
        .block => unreachable,
        .gen_zir => unreachable,
        .local_val => unreachable,
        .local_ptr => unreachable,
        .decl => unreachable,
        .container => unreachable,
    }
}

// Hash-map hash function: fold the 128-bit name hash down to 32 bits.
fn name_hash_hash(x: NameHash) u32 {
    return @truncate(u32, @bitCast(u128, x));
}

// Hash-map equality: compare the full 128 bits.
fn name_hash_eql(a: NameHash, b: NameHash) bool {
    return @bitCast(u128, a) == @bitCast(u128, b);
}

pub const Tag = enum {
    /// .zir source code.
    zir_module,
    /// .zig source code.
    file,
    /// struct, enum or union, every .file contains one of these.
    container,
    block,
    decl,
    gen_zir,
    local_val,
    local_ptr,
};

/// A namespace scope for a struct/enum/union; every File owns one root Container.
pub const Container = struct {
    pub const base_tag: Tag = .container;
    base: Scope = Scope{ .tag = base_tag },

    file_scope: *Scope.File,

    /// Direct children of the file.
    decls: std.AutoArrayHashMapUnmanaged(*Decl, void),
    // Type representing this container; freed in deinit below.
    ty: Type,

    pub fn deinit(self: *Container, gpa: *Allocator) void {
        self.decls.deinit(gpa);
        // TODO either Container or File should have an arena for sub_file_path and ty
        gpa.destroy(self.ty.cast(Type.Payload.EmptyStruct).?);
        gpa.free(self.file_scope.sub_file_path);
        self.* = undefined;
    }

    pub fn removeDecl(self: *Container, child: *Decl) void {
        _ = self.decls.remove(child);
    }

    pub fn fullyQualifiedNameHash(self: *Container, name: []const u8) NameHash {
        // TODO container scope qualified names.
        return std.zig.hashSrc(name);
    }
};

/// A .zig source file: tracks its source bytes, parsed AST, and load status.
pub const File = struct {
    pub const base_tag: Tag = .file;
    base: Scope = Scope{ .tag = base_tag },

    /// Relative to the owning package's root_src_dir.
    /// Reference to external memory, not owned by File.
    sub_file_path: []const u8,
    // Untagged by status: `bytes` is only valid when source has been loaded.
    source: union(enum) {
        unloaded: void,
        bytes: [:0]const u8,
    },
    // Bare union: which field is active is tracked by `status` below.
    contents: union {
        not_available: void,
        tree: *ast.Tree,
    },
    status: enum {
        never_loaded,
        unloaded_success,
        unloaded_parse_failure,
        loaded_success,
    },

    root_container: Container,

    /// Frees the parsed tree (if loaded) and the source bytes (if loaded),
    /// leaving the File in an unloaded state.
    pub fn unload(self: *File, gpa: *Allocator) void {
        switch (self.status) {
            .never_loaded,
            .unloaded_parse_failure,
            .unloaded_success,
            => {},

            .loaded_success => {
                self.contents.tree.deinit();
                self.status = .unloaded_success;
            },
        }
        switch (self.source) {
            .bytes => |bytes| {
                gpa.free(bytes);
                self.source = .{ .unloaded = {} };
            },
            .unloaded => {},
        }
    }

    pub fn deinit(self: *File, gpa: *Allocator) void {
        self.root_container.deinit(gpa);
        self.unload(gpa);
        self.* = undefined;
    }

    /// Debugging aid: prints "path:line:column" for a byte offset to stderr.
    /// Asserts source bytes are loaded.
    pub fn dumpSrc(self: *File, src: usize) void {
        const loc = std.zig.findLineColumn(self.source.bytes, src);
        std.debug.print("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 });
    }

    /// Returns the source bytes, reading the file from disk on first call.
    /// The bytes are owned by this File (freed in unload).
    pub fn getSource(self: *File, module: *Module) ![:0]const u8 {
        switch (self.source) {
            .unloaded => {
                const source = try module.root_pkg.root_src_directory.handle.readFileAllocOptions(
                    module.gpa,
                    self.sub_file_path,
                    std.math.maxInt(u32),
                    null,
                    1,
                    0,
                );
                self.source = .{ .bytes = source };
                return source;
            },
            .bytes => |bytes| return bytes,
        }
    }
};

/// A .zir source file: like File, but contents are a parsed zir.Module.
pub const ZIRModule = struct {
    pub const base_tag: Tag = .zir_module;
    base: Scope = Scope{ .tag = base_tag },
    /// Relative to the owning package's root_src_dir.
    /// Reference to external memory, not owned by ZIRModule.
    sub_file_path: []const u8,
    source: union(enum) {
        unloaded: void,
        bytes: [:0]const u8,
    },
    // Bare union: which field is active is tracked by `status` below.
    contents: union {
        not_available: void,
        module: *zir.Module,
    },
    status: enum {
        never_loaded,
        unloaded_success,
        unloaded_parse_failure,
        unloaded_sema_failure,
        loaded_sema_failure,
        loaded_success,
    },

    /// Even though .zir files only have 1 module, this set is still needed
    /// because of anonymous Decls, which can exist in the global set, but
    /// not this one.
    decls: ArrayListUnmanaged(*Decl),

    /// Frees the parsed module (if loaded) and source bytes (if loaded).
    /// Preserves whether the module previously failed sema, via the status.
    pub fn unload(self: *ZIRModule, gpa: *Allocator) void {
        switch (self.status) {
            .never_loaded,
            .unloaded_parse_failure,
            .unloaded_sema_failure,
            .unloaded_success,
            => {},

            .loaded_success => {
                self.contents.module.deinit(gpa);
                gpa.destroy(self.contents.module);
                self.contents = .{ .not_available = {} };
                self.status = .unloaded_success;
            },
            .loaded_sema_failure => {
                self.contents.module.deinit(gpa);
                gpa.destroy(self.contents.module);
                self.contents = .{ .not_available = {} };
                self.status = .unloaded_sema_failure;
            },
        }
        switch (self.source) {
            .bytes => |bytes| {
                gpa.free(bytes);
                self.source = .{ .unloaded = {} };
            },
            .unloaded => {},
        }
    }

    pub fn deinit(self: *ZIRModule, gpa: *Allocator) void {
        self.decls.deinit(gpa);
        self.unload(gpa);
        self.* = undefined;
    }

    // Linear scan; decls order is not meaningful so swapRemove is fine.
    pub fn removeDecl(self: *ZIRModule, child: *Decl) void {
        for (self.decls.items) |item, i| {
            if (item == child) {
                _ = self.decls.swapRemove(i);
                return;
            }
        }
    }

    /// Debugging aid: prints "path:line:column" for a byte offset to stderr.
    /// Asserts source bytes are loaded.
    pub fn dumpSrc(self: *ZIRModule, src: usize) void {
        const loc = std.zig.findLineColumn(self.source.bytes, src);
        std.debug.print("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 });
    }

    /// Returns the source bytes, reading the file from disk on first call.
    pub fn getSource(self: *ZIRModule, module: *Module) ![:0]const u8 {
        switch (self.source) {
            .unloaded => {
                const source = try module.root_pkg.root_src_directory.handle.readFileAllocOptions(
                    module.gpa,
                    self.sub_file_path,
                    std.math.maxInt(u32),
                    null,
                    1,
                    0,
                );
                self.source = .{ .bytes = source };
                return source;
            },
            .bytes => |bytes| return bytes,
        }
    }

    pub fn fullyQualifiedNameHash(self: *ZIRModule, name: []const u8) NameHash {
        // ZIR modules only have 1 file with all decls global in the same namespace.
        return std.zig.hashSrc(name);
    }
};

/// This is a temporary structure, references to it are valid only
/// during semantic analysis of the block.
pub const Block = struct {
    pub const base_tag: Tag = .block;
    base: Scope = Scope{ .tag = base_tag },
    parent: ?*Block,
    func: ?*Fn,
    decl: *Decl,
    instructions: ArrayListUnmanaged(*Inst),
    /// Points to the arena allocator of DeclAnalysis
    arena: *Allocator,
    label: ?Label = null,
    is_comptime: bool,

    pub const Label = struct {
        zir_block: *zir.Inst.Block,
        results: ArrayListUnmanaged(*Inst),
        block_inst: *Inst.Block,
    };
};

/// This is a temporary structure, references to it are valid only
/// during semantic analysis of the decl.
pub const DeclAnalysis = struct {
    pub const base_tag: Tag = .decl;
    base: Scope = Scope{ .tag = base_tag },
    decl: *Decl,
    arena: std.heap.ArenaAllocator,
};

/// This is a temporary structure, references to it are valid only
/// during semantic analysis of the decl.
pub const GenZIR = struct {
    pub const base_tag: Tag = .gen_zir;
    base: Scope = Scope{ .tag = base_tag },
    /// Parents can be: `GenZIR`, `ZIRModule`, `File`
    parent: *Scope,
    decl: *Decl,
    arena: *Allocator,
    /// The first N instructions in a function body ZIR are arg instructions.
    instructions: std.ArrayListUnmanaged(*zir.Inst) = .{},
    label: ?Label = null,

    pub const Label = struct {
        token: ast.TokenIndex,
        block_inst: *zir.Inst.Block,
        result_loc: astgen.ResultLoc,
    };
};

/// This is always a `const` local and importantly the `inst` is a value type, not a pointer.
/// This structure lives as long as the AST generation of the Block
/// node that contains the variable.
pub const LocalVal = struct { pub const base_tag: Tag = .local_val; base: Scope = Scope{ .tag = base_tag }, /// Parents can be: `LocalVal`, `LocalPtr`, `GenZIR`. parent: *Scope, gen_zir: *GenZIR, name: []const u8, inst: *zir.Inst, }; /// This could be a `const` or `var` local. It has a pointer instead of a value. /// This structure lives as long as the AST generation of the Block /// node that contains the variable. pub const LocalPtr = struct { pub const base_tag: Tag = .local_ptr; base: Scope = Scope{ .tag = base_tag }, /// Parents can be: `LocalVal`, `LocalPtr`, `GenZIR`. parent: *Scope, gen_zir: *GenZIR, name: []const u8, ptr: *zir.Inst, }; }; pub const InnerError = error{ OutOfMemory, AnalysisFail }; pub fn deinit(self: *Module) void { const gpa = self.gpa; self.zig_cache_artifact_directory.handle.close(); self.deletion_set.deinit(gpa); for (self.decl_table.items()) |entry| { entry.value.destroy(gpa); } self.decl_table.deinit(gpa); for (self.failed_decls.items()) |entry| { entry.value.destroy(gpa); } self.failed_decls.deinit(gpa); for (self.failed_files.items()) |entry| { entry.value.destroy(gpa); } self.failed_files.deinit(gpa); for (self.failed_exports.items()) |entry| { entry.value.destroy(gpa); } self.failed_exports.deinit(gpa); for (self.decl_exports.items()) |entry| { const export_list = entry.value; gpa.free(export_list); } self.decl_exports.deinit(gpa); for (self.export_owners.items()) |entry| { freeExportList(gpa, entry.value); } self.export_owners.deinit(gpa); self.symbol_exports.deinit(gpa); self.root_scope.destroy(gpa); var it = self.global_error_set.iterator(); while (it.next()) |entry| { gpa.free(entry.key); } self.global_error_set.deinit(gpa); for (self.import_table.items()) |entry| { entry.value.base.destroy(gpa); } self.import_table.deinit(gpa); } fn freeExportList(gpa: *Allocator, export_list: []*Export) void { for (export_list) |exp| { gpa.free(exp.options.name); gpa.destroy(exp); } gpa.free(export_list); } pub fn ensureDeclAnalyzed(self: 
*Module, decl: *Decl) InnerError!void { const tracy = trace(@src()); defer tracy.end(); const subsequent_analysis = switch (decl.analysis) { .in_progress => unreachable, .sema_failure, .sema_failure_retryable, .codegen_failure, .dependency_failure, .codegen_failure_retryable, => return error.AnalysisFail, .complete => return, .outdated => blk: { log.debug("re-analyzing {}\n", .{decl.name}); // The exports this Decl performs will be re-discovered, so we remove them here // prior to re-analysis. self.deleteDeclExports(decl); // Dependencies will be re-discovered, so we remove them here prior to re-analysis. for (decl.dependencies.items()) |entry| { const dep = entry.key; dep.removeDependant(decl); if (dep.dependants.items().len == 0 and !dep.deletion_flag) { // We don't perform a deletion here, because this Decl or another one // may end up referencing it before the update is complete. dep.deletion_flag = true; try self.deletion_set.append(self.gpa, dep); } } decl.dependencies.clearRetainingCapacity(); break :blk true; }, .unreferenced => false, }; const type_changed = if (self.root_scope.cast(Scope.ZIRModule)) |zir_module| try zir_sema.analyzeZirDecl(self, decl, zir_module.contents.module.decls[decl.src_index]) else self.astGenAndAnalyzeDecl(decl) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => return error.AnalysisFail, else => { try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1); self.failed_decls.putAssumeCapacityNoClobber(decl, try Compilation.ErrorMsg.create( self.gpa, decl.src(), "unable to analyze: {}", .{@errorName(err)}, )); decl.analysis = .sema_failure_retryable; return error.AnalysisFail; }, }; if (subsequent_analysis) { // We may need to chase the dependants and re-analyze them. // However, if the decl is a function, and the type is the same, we do not need to. 
if (type_changed or decl.typed_value.most_recent.typed_value.val.tag() != .function) { for (decl.dependants.items()) |entry| { const dep = entry.key; switch (dep.analysis) { .unreferenced => unreachable, .in_progress => unreachable, .outdated => continue, // already queued for update .dependency_failure, .sema_failure, .sema_failure_retryable, .codegen_failure, .codegen_failure_retryable, .complete, => if (dep.generation != self.generation) { try self.markOutdatedDecl(dep); }, } } } } } fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool { const tracy = trace(@src()); defer tracy.end(); const container_scope = decl.scope.cast(Scope.Container).?; const tree = try self.getAstTree(container_scope); const ast_node = tree.root_node.decls()[decl.src_index]; switch (ast_node.tag) { .FnProto => { const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", ast_node); decl.analysis = .in_progress; // This arena allocator's memory is discarded at the end of this function. It is used // to determine the type of the function, and hence the type of the decl, which is needed // to complete the Decl analysis. 
var fn_type_scope_arena = std.heap.ArenaAllocator.init(self.gpa); defer fn_type_scope_arena.deinit(); var fn_type_scope: Scope.GenZIR = .{ .decl = decl, .arena = &fn_type_scope_arena.allocator, .parent = decl.scope, }; defer fn_type_scope.instructions.deinit(self.gpa); decl.is_pub = fn_proto.getVisibToken() != null; const body_node = fn_proto.getBodyNode() orelse return self.failTok(&fn_type_scope.base, fn_proto.fn_token, "TODO implement extern functions", .{}); const param_decls = fn_proto.params(); const param_types = try fn_type_scope.arena.alloc(*zir.Inst, param_decls.len); const fn_src = tree.token_locs[fn_proto.fn_token].start; const type_type = try astgen.addZIRInstConst(self, &fn_type_scope.base, fn_src, .{ .ty = Type.initTag(.type), .val = Value.initTag(.type_type), }); const type_type_rl: astgen.ResultLoc = .{ .ty = type_type }; for (param_decls) |param_decl, i| { const param_type_node = switch (param_decl.param_type) { .any_type => |node| return self.failNode(&fn_type_scope.base, node, "TODO implement anytype parameter", .{}), .type_expr => |node| node, }; param_types[i] = try astgen.expr(self, &fn_type_scope.base, type_type_rl, param_type_node); } if (fn_proto.getVarArgsToken()) |var_args_token| { return self.failTok(&fn_type_scope.base, var_args_token, "TODO implement var args", .{}); } if (fn_proto.getLibName()) |lib_name| { return self.failNode(&fn_type_scope.base, lib_name, "TODO implement function library name", .{}); } if (fn_proto.getAlignExpr()) |align_expr| { return self.failNode(&fn_type_scope.base, align_expr, "TODO implement function align expression", .{}); } if (fn_proto.getSectionExpr()) |sect_expr| { return self.failNode(&fn_type_scope.base, sect_expr, "TODO implement function section expression", .{}); } if (fn_proto.getCallconvExpr()) |callconv_expr| { return self.failNode( &fn_type_scope.base, callconv_expr, "TODO implement function calling convention expression", .{}, ); } const return_type_expr = switch (fn_proto.return_type) { 
.Explicit => |node| node, .InferErrorSet => |node| return self.failNode(&fn_type_scope.base, node, "TODO implement inferred error sets", .{}), .Invalid => |tok| return self.failTok(&fn_type_scope.base, tok, "unable to parse return type", .{}), }; const return_type_inst = try astgen.expr(self, &fn_type_scope.base, type_type_rl, return_type_expr); const fn_type_inst = try astgen.addZIRInst(self, &fn_type_scope.base, fn_src, zir.Inst.FnType, .{ .return_type = return_type_inst, .param_types = param_types, }, .{}); // We need the memory for the Type to go into the arena for the Decl var decl_arena = std.heap.ArenaAllocator.init(self.gpa); errdefer decl_arena.deinit(); const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State); var block_scope: Scope.Block = .{ .parent = null, .func = null, .decl = decl, .instructions = .{}, .arena = &decl_arena.allocator, .is_comptime = false, }; defer block_scope.instructions.deinit(self.gpa); const fn_type = try zir_sema.analyzeBodyValueAsType(self, &block_scope, fn_type_inst, .{ .instructions = fn_type_scope.instructions.items, }); const new_func = try decl_arena.allocator.create(Fn); const fn_payload = try decl_arena.allocator.create(Value.Payload.Function); const fn_zir = blk: { // This scope's arena memory is discarded after the ZIR generation // pass completes, and semantic analysis of it completes. var gen_scope_arena = std.heap.ArenaAllocator.init(self.gpa); errdefer gen_scope_arena.deinit(); var gen_scope: Scope.GenZIR = .{ .decl = decl, .arena = &gen_scope_arena.allocator, .parent = decl.scope, }; defer gen_scope.instructions.deinit(self.gpa); // We need an instruction for each parameter, and they must be first in the body. 
try gen_scope.instructions.resize(self.gpa, fn_proto.params_len); var params_scope = &gen_scope.base; for (fn_proto.params()) |param, i| { const name_token = param.name_token.?; const src = tree.token_locs[name_token].start; const param_name = tree.tokenSlice(name_token); // TODO: call identifierTokenString const arg = try gen_scope_arena.allocator.create(zir.Inst.Arg); arg.* = .{ .base = .{ .tag = .arg, .src = src, }, .positionals = .{ .name = param_name, }, .kw_args = .{}, }; gen_scope.instructions.items[i] = &arg.base; const sub_scope = try gen_scope_arena.allocator.create(Scope.LocalVal); sub_scope.* = .{ .parent = params_scope, .gen_zir = &gen_scope, .name = param_name, .inst = &arg.base, }; params_scope = &sub_scope.base; } const body_block = body_node.cast(ast.Node.Block).?; try astgen.blockExpr(self, params_scope, body_block); if (gen_scope.instructions.items.len == 0 or !gen_scope.instructions.items[gen_scope.instructions.items.len - 1].tag.isNoReturn()) { const src = tree.token_locs[body_block.rbrace].start; _ = try astgen.addZIRNoOp(self, &gen_scope.base, src, .returnvoid); } const fn_zir = try gen_scope_arena.allocator.create(Fn.ZIR); fn_zir.* = .{ .body = .{ .instructions = try gen_scope.arena.dupe(*zir.Inst, gen_scope.instructions.items), }, .arena = gen_scope_arena.state, }; break :blk fn_zir; }; new_func.* = .{ .analysis = .{ .queued = fn_zir }, .owner_decl = decl, }; fn_payload.* = .{ .func = new_func }; var prev_type_has_bits = false; var type_changed = true; if (decl.typedValueManaged()) |tvm| { prev_type_has_bits = tvm.typed_value.ty.hasCodeGenBits(); type_changed = !tvm.typed_value.ty.eql(fn_type); tvm.deinit(self.gpa); } decl_arena_state.* = decl_arena.state; decl.typed_value = .{ .most_recent = .{ .typed_value = .{ .ty = fn_type, .val = Value.initPayload(&fn_payload.base), }, .arena = decl_arena_state, }, }; decl.analysis = .complete; decl.generation = self.generation; if (fn_type.hasCodeGenBits()) { // We don't fully codegen the decl until 
later, but we do need to reserve a global // offset table index for it. This allows us to codegen decls out of dependency order, // increasing how many computations can be done in parallel. try self.comp.bin_file.allocateDeclIndexes(decl); try self.comp.work_queue.writeItem(.{ .codegen_decl = decl }); } else if (prev_type_has_bits) { self.comp.bin_file.freeDecl(decl); } if (fn_proto.getExternExportInlineToken()) |maybe_export_token| { if (tree.token_ids[maybe_export_token] == .Keyword_export) { const export_src = tree.token_locs[maybe_export_token].start; const name_loc = tree.token_locs[fn_proto.getNameToken().?]; const name = tree.tokenSliceLoc(name_loc); // The scope needs to have the decl in it. try self.analyzeExport(&block_scope.base, export_src, name, decl); } } return type_changed; }, .VarDecl => { const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", ast_node); decl.analysis = .in_progress; // We need the memory for the Type to go into the arena for the Decl var decl_arena = std.heap.ArenaAllocator.init(self.gpa); errdefer decl_arena.deinit(); const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State); var block_scope: Scope.Block = .{ .parent = null, .func = null, .decl = decl, .instructions = .{}, .arena = &decl_arena.allocator, .is_comptime = true, }; defer block_scope.instructions.deinit(self.gpa); decl.is_pub = var_decl.getVisibToken() != null; const is_extern = blk: { const maybe_extern_token = var_decl.getExternExportToken() orelse break :blk false; if (tree.token_ids[maybe_extern_token] != .Keyword_extern) break :blk false; if (var_decl.getInitNode()) |some| { return self.failNode(&block_scope.base, some, "extern variables have no initializers", .{}); } break :blk true; }; if (var_decl.getLibName()) |lib_name| { assert(is_extern); return self.failNode(&block_scope.base, lib_name, "TODO implement function library name", .{}); } const is_mutable = tree.token_ids[var_decl.mut_token] == .Keyword_var; const 
is_threadlocal = if (var_decl.getThreadLocalToken()) |some| blk: { if (!is_mutable) { return self.failTok(&block_scope.base, some, "threadlocal variable cannot be constant", .{}); } break :blk true; } else false; assert(var_decl.getComptimeToken() == null); if (var_decl.getAlignNode()) |align_expr| { return self.failNode(&block_scope.base, align_expr, "TODO implement function align expression", .{}); } if (var_decl.getSectionNode()) |sect_expr| { return self.failNode(&block_scope.base, sect_expr, "TODO implement function section expression", .{}); } const var_info: struct { ty: Type, val: ?Value } = if (var_decl.getInitNode()) |init_node| vi: { var gen_scope_arena = std.heap.ArenaAllocator.init(self.gpa); defer gen_scope_arena.deinit(); var gen_scope: Scope.GenZIR = .{ .decl = decl, .arena = &gen_scope_arena.allocator, .parent = decl.scope, }; defer gen_scope.instructions.deinit(self.gpa); const init_result_loc: astgen.ResultLoc = if (var_decl.getTypeNode()) |type_node| rl: { const src = tree.token_locs[type_node.firstToken()].start; const type_type = try astgen.addZIRInstConst(self, &gen_scope.base, src, .{ .ty = Type.initTag(.type), .val = Value.initTag(.type_type), }); const var_type = try astgen.expr(self, &gen_scope.base, .{ .ty = type_type }, type_node); break :rl .{ .ty = var_type }; } else .none; const src = tree.token_locs[init_node.firstToken()].start; const init_inst = try astgen.expr(self, &gen_scope.base, init_result_loc, init_node); var inner_block: Scope.Block = .{ .parent = null, .func = null, .decl = decl, .instructions = .{}, .arena = &gen_scope_arena.allocator, .is_comptime = true, }; defer inner_block.instructions.deinit(self.gpa); try zir_sema.analyzeBody(self, &inner_block.base, .{ .instructions = gen_scope.instructions.items }); // The result location guarantees the type coercion. const analyzed_init_inst = init_inst.analyzed_inst.?; // The is_comptime in the Scope.Block guarantees the result is comptime-known. 
const val = analyzed_init_inst.value().?; const ty = try analyzed_init_inst.ty.copy(block_scope.arena); break :vi .{ .ty = ty, .val = try val.copy(block_scope.arena), }; } else if (!is_extern) { return self.failTok(&block_scope.base, var_decl.firstToken(), "variables must be initialized", .{}); } else if (var_decl.getTypeNode()) |type_node| vi: { // Temporary arena for the zir instructions. var type_scope_arena = std.heap.ArenaAllocator.init(self.gpa); defer type_scope_arena.deinit(); var type_scope: Scope.GenZIR = .{ .decl = decl, .arena = &type_scope_arena.allocator, .parent = decl.scope, }; defer type_scope.instructions.deinit(self.gpa); const src = tree.token_locs[type_node.firstToken()].start; const type_type = try astgen.addZIRInstConst(self, &type_scope.base, src, .{ .ty = Type.initTag(.type), .val = Value.initTag(.type_type), }); const var_type = try astgen.expr(self, &type_scope.base, .{ .ty = type_type }, type_node); const ty = try zir_sema.analyzeBodyValueAsType(self, &block_scope, var_type, .{ .instructions = type_scope.instructions.items, }); break :vi .{ .ty = ty, .val = null, }; } else { return self.failTok(&block_scope.base, var_decl.firstToken(), "unable to infer variable type", .{}); }; if (is_mutable and !var_info.ty.isValidVarType(is_extern)) { return self.failTok(&block_scope.base, var_decl.firstToken(), "variable of type '{}' must be const", .{var_info.ty}); } var type_changed = true; if (decl.typedValueManaged()) |tvm| { type_changed = !tvm.typed_value.ty.eql(var_info.ty); tvm.deinit(self.gpa); } const new_variable = try decl_arena.allocator.create(Var); const var_payload = try decl_arena.allocator.create(Value.Payload.Variable); new_variable.* = .{ .owner_decl = decl, .init = var_info.val orelse undefined, .is_extern = is_extern, .is_mutable = is_mutable, .is_threadlocal = is_threadlocal, }; var_payload.* = .{ .variable = new_variable }; decl_arena_state.* = decl_arena.state; decl.typed_value = .{ .most_recent = .{ .typed_value = .{ .ty = 
var_info.ty, .val = Value.initPayload(&var_payload.base), }, .arena = decl_arena_state, }, }; decl.analysis = .complete; decl.generation = self.generation; if (var_decl.getExternExportToken()) |maybe_export_token| { if (tree.token_ids[maybe_export_token] == .Keyword_export) { const export_src = tree.token_locs[maybe_export_token].start; const name_loc = tree.token_locs[var_decl.name_token]; const name = tree.tokenSliceLoc(name_loc); // The scope needs to have the decl in it. try self.analyzeExport(&block_scope.base, export_src, name, decl); } } return type_changed; }, .Comptime => { const comptime_decl = @fieldParentPtr(ast.Node.Comptime, "base", ast_node); decl.analysis = .in_progress; // A comptime decl does not store any value so we can just deinit this arena after analysis is done. var analysis_arena = std.heap.ArenaAllocator.init(self.gpa); defer analysis_arena.deinit(); var gen_scope: Scope.GenZIR = .{ .decl = decl, .arena = &analysis_arena.allocator, .parent = decl.scope, }; defer gen_scope.instructions.deinit(self.gpa); _ = try astgen.comptimeExpr(self, &gen_scope.base, .none, comptime_decl.expr); var block_scope: Scope.Block = .{ .parent = null, .func = null, .decl = decl, .instructions = .{}, .arena = &analysis_arena.allocator, .is_comptime = true, }; defer block_scope.instructions.deinit(self.gpa); _ = try zir_sema.analyzeBody(self, &block_scope.base, .{ .instructions = gen_scope.instructions.items, }); decl.analysis = .complete; decl.generation = self.generation; return true; }, .Use => @panic("TODO usingnamespace decl"), else => unreachable, } } fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void { try depender.dependencies.ensureCapacity(self.gpa, depender.dependencies.items().len + 1); try dependee.dependants.ensureCapacity(self.gpa, dependee.dependants.items().len + 1); depender.dependencies.putAssumeCapacity(dependee, {}); dependee.dependants.putAssumeCapacity(depender, {}); } fn getSrcModule(self: *Module, root_scope: 
*Scope.ZIRModule) !*zir.Module { switch (root_scope.status) { .never_loaded, .unloaded_success => { try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1); const source = try root_scope.getSource(self); var keep_zir_module = false; const zir_module = try self.gpa.create(zir.Module); defer if (!keep_zir_module) self.gpa.destroy(zir_module); zir_module.* = try zir.parse(self.gpa, source); defer if (!keep_zir_module) zir_module.deinit(self.gpa); if (zir_module.error_msg) |src_err_msg| { self.failed_files.putAssumeCapacityNoClobber( &root_scope.base, try Compilation.ErrorMsg.create(self.gpa, src_err_msg.byte_offset, "{}", .{src_err_msg.msg}), ); root_scope.status = .unloaded_parse_failure; return error.AnalysisFail; } root_scope.status = .loaded_success; root_scope.contents = .{ .module = zir_module }; keep_zir_module = true; return zir_module; }, .unloaded_parse_failure, .unloaded_sema_failure, => return error.AnalysisFail, .loaded_success, .loaded_sema_failure => return root_scope.contents.module, } } fn getAstTree(self: *Module, container_scope: *Scope.Container) !*ast.Tree { const tracy = trace(@src()); defer tracy.end(); const root_scope = container_scope.file_scope; switch (root_scope.status) { .never_loaded, .unloaded_success => { try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1); const source = try root_scope.getSource(self); var keep_tree = false; const tree = try std.zig.parse(self.gpa, source); defer if (!keep_tree) tree.deinit(); if (tree.errors.len != 0) { const parse_err = tree.errors[0]; var msg = std.ArrayList(u8).init(self.gpa); defer msg.deinit(); try parse_err.render(tree.token_ids, msg.outStream()); const err_msg = try self.gpa.create(Compilation.ErrorMsg); err_msg.* = .{ .msg = msg.toOwnedSlice(), .byte_offset = tree.token_locs[parse_err.loc()].start, }; self.failed_files.putAssumeCapacityNoClobber(&root_scope.base, err_msg); root_scope.status = .unloaded_parse_failure; return 
error.AnalysisFail; } root_scope.status = .loaded_success; root_scope.contents = .{ .tree = tree }; keep_tree = true; return tree; }, .unloaded_parse_failure => return error.AnalysisFail, .loaded_success => return root_scope.contents.tree, } } pub fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void { const tracy = trace(@src()); defer tracy.end(); // We may be analyzing it for the first time, or this may be // an incremental update. This code handles both cases. const tree = try self.getAstTree(container_scope); const decls = tree.root_node.decls(); try self.comp.work_queue.ensureUnusedCapacity(decls.len); try container_scope.decls.ensureCapacity(self.gpa, decls.len); // Keep track of the decls that we expect to see in this file so that // we know which ones have been deleted. var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa); defer deleted_decls.deinit(); try deleted_decls.ensureCapacity(container_scope.decls.items().len); for (container_scope.decls.items()) |entry| { deleted_decls.putAssumeCapacityNoClobber(entry.key, {}); } for (decls) |src_decl, decl_i| { if (src_decl.cast(ast.Node.FnProto)) |fn_proto| { // We will create a Decl for it regardless of analysis status. const name_tok = fn_proto.getNameToken() orelse { @panic("TODO missing function name"); }; const name_loc = tree.token_locs[name_tok]; const name = tree.tokenSliceLoc(name_loc); const name_hash = container_scope.fullyQualifiedNameHash(name); const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl)); if (self.decl_table.get(name_hash)) |decl| { // Update the AST Node index of the decl, even if its contents are unchanged, it may // have been re-ordered. 
decl.src_index = decl_i; if (deleted_decls.remove(decl) == null) { decl.analysis = .sema_failure; const err_msg = try Compilation.ErrorMsg.create(self.gpa, tree.token_locs[name_tok].start, "redefinition of '{}'", .{decl.name}); errdefer err_msg.destroy(self.gpa); try self.failed_decls.putNoClobber(self.gpa, decl, err_msg); } else { if (!srcHashEql(decl.contents_hash, contents_hash)) { try self.markOutdatedDecl(decl); decl.contents_hash = contents_hash; } else switch (self.comp.bin_file.tag) { .coff => { // TODO Implement for COFF }, .elf => if (decl.fn_link.elf.len != 0) { // TODO Look into detecting when this would be unnecessary by storing enough state // in `Decl` to notice that the line number did not change. self.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl }); }, .macho => { // TODO Implement for MachO }, .c, .wasm => {}, } } } else { const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash); container_scope.decls.putAssumeCapacity(new_decl, {}); if (fn_proto.getExternExportInlineToken()) |maybe_export_token| { if (tree.token_ids[maybe_export_token] == .Keyword_export) { self.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); } } } } else if (src_decl.castTag(.VarDecl)) |var_decl| { const name_loc = tree.token_locs[var_decl.name_token]; const name = tree.tokenSliceLoc(name_loc); const name_hash = container_scope.fullyQualifiedNameHash(name); const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl)); if (self.decl_table.get(name_hash)) |decl| { // Update the AST Node index of the decl, even if its contents are unchanged, it may // have been re-ordered. 
decl.src_index = decl_i; if (deleted_decls.remove(decl) == null) { decl.analysis = .sema_failure; const err_msg = try Compilation.ErrorMsg.create(self.gpa, name_loc.start, "redefinition of '{}'", .{decl.name}); errdefer err_msg.destroy(self.gpa); try self.failed_decls.putNoClobber(self.gpa, decl, err_msg); } else if (!srcHashEql(decl.contents_hash, contents_hash)) { try self.markOutdatedDecl(decl); decl.contents_hash = contents_hash; } } else { const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash); container_scope.decls.putAssumeCapacity(new_decl, {}); if (var_decl.getExternExportToken()) |maybe_export_token| { if (tree.token_ids[maybe_export_token] == .Keyword_export) { self.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); } } } } else if (src_decl.castTag(.Comptime)) |comptime_node| { const name_index = self.getNextAnonNameIndex(); const name = try std.fmt.allocPrint(self.gpa, "__comptime_{}", .{name_index}); defer self.gpa.free(name); const name_hash = container_scope.fullyQualifiedNameHash(name); const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl)); const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash); container_scope.decls.putAssumeCapacity(new_decl, {}); self.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl }); } else if (src_decl.castTag(.ContainerField)) |container_field| { log.err("TODO: analyze container field", .{}); } else if (src_decl.castTag(.TestDecl)) |test_decl| { log.err("TODO: analyze test decl", .{}); } else if (src_decl.castTag(.Use)) |use_decl| { log.err("TODO: analyze usingnamespace decl", .{}); } else { unreachable; } } // Handle explicitly deleted decls from the source code. Not to be confused // with when we delete decls because they are no longer referenced. 
for (deleted_decls.items()) |entry| { log.debug("noticed '{}' deleted from source\n", .{entry.key.name}); try self.deleteDecl(entry.key); } } pub fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void { // We may be analyzing it for the first time, or this may be // an incremental update. This code handles both cases. const src_module = try self.getSrcModule(root_scope); try self.comp.work_queue.ensureUnusedCapacity(src_module.decls.len); try root_scope.decls.ensureCapacity(self.gpa, src_module.decls.len); var exports_to_resolve = std.ArrayList(*zir.Decl).init(self.gpa); defer exports_to_resolve.deinit(); // Keep track of the decls that we expect to see in this file so that // we know which ones have been deleted. var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa); defer deleted_decls.deinit(); try deleted_decls.ensureCapacity(self.decl_table.items().len); for (self.decl_table.items()) |entry| { deleted_decls.putAssumeCapacityNoClobber(entry.value, {}); } for (src_module.decls) |src_decl, decl_i| { const name_hash = root_scope.fullyQualifiedNameHash(src_decl.name); if (self.decl_table.get(name_hash)) |decl| { deleted_decls.removeAssertDiscard(decl); if (!srcHashEql(src_decl.contents_hash, decl.contents_hash)) { try self.markOutdatedDecl(decl); decl.contents_hash = src_decl.contents_hash; } } else { const new_decl = try self.createNewDecl( &root_scope.base, src_decl.name, decl_i, name_hash, src_decl.contents_hash, ); root_scope.decls.appendAssumeCapacity(new_decl); if (src_decl.inst.cast(zir.Inst.Export)) |export_inst| { try exports_to_resolve.append(src_decl); } } } for (exports_to_resolve.items) |export_decl| { _ = try zir_sema.resolveZirDecl(self, &root_scope.base, export_decl); } // Handle explicitly deleted decls from the source code. Not to be confused // with when we delete decls because they are no longer referenced. 
// (Continues from the previous line: delete every decl that disappeared from
// the new source.)
for (deleted_decls.items()) |entry| {
    log.debug("noticed '{}' deleted from source\n", .{entry.key.name});
    try self.deleteDecl(entry.key);
}
}

/// Fully removes `decl` from the Module: unregisters it from its namespace and
/// from `decl_table`, detaches its dependency edges (queueing now-unreferenced
/// dependencies for possible deletion and marking dependants outdated), deletes
/// its exports, frees its codegen state, and destroys it.
pub fn deleteDecl(self: *Module, decl: *Decl) !void {
    try self.deletion_set.ensureCapacity(self.gpa, self.deletion_set.items.len + decl.dependencies.items().len);

    // Remove from the namespace it resides in. In the case of an anonymous Decl it will
    // not be present in the set, and this does nothing.
    decl.scope.removeDecl(decl);

    log.debug("deleting decl '{}'\n", .{decl.name});
    const name_hash = decl.fullyQualifiedNameHash();
    self.decl_table.removeAssertDiscard(name_hash);
    // Remove itself from its dependencies, because we are about to destroy the decl pointer.
    for (decl.dependencies.items()) |entry| {
        const dep = entry.key;
        dep.removeDependant(decl);
        if (dep.dependants.items().len == 0 and !dep.deletion_flag) {
            // We don't recursively perform a deletion here, because during the update,
            // another reference to it may turn up.
            dep.deletion_flag = true;
            self.deletion_set.appendAssumeCapacity(dep);
        }
    }
    // Anything that depends on this deleted decl certainly needs to be re-analyzed.
    for (decl.dependants.items()) |entry| {
        const dep = entry.key;
        dep.removeDependency(decl);
        if (dep.analysis != .outdated) {
            // TODO Move this failure possibility to the top of the function.
            try self.markOutdatedDecl(dep);
        }
    }
    // Drop any error message attached to the decl before destroying it.
    if (self.failed_decls.remove(decl)) |entry| {
        entry.value.destroy(self.gpa);
    }
    self.deleteDeclExports(decl);
    self.comp.bin_file.freeDecl(decl);
    decl.destroy(self.gpa);
}

/// Delete all the Export objects that are caused by this Decl. Re-analysis of
/// this Decl will cause them to be re-created (or not).
fn deleteDeclExports(self: *Module, decl: *Decl) void {
    const kv = self.export_owners.remove(decl) orelse return;

    for (kv.value) |exp| {
        if (self.decl_exports.getEntry(exp.exported_decl)) |decl_exports_kv| {
            // Remove exports with owner_decl matching the regenerating decl.
// (Continues deleteDeclExports: compact the exported decl's export list,
// removing entries owned by the decl being deleted.)
const list = decl_exports_kv.value;
var i: usize = 0;
var new_len = list.len;
while (i < new_len) {
    if (list[i].owner_decl == decl) {
        // Shift the tail left over the removed entry; do not advance `i`,
        // since a new element now occupies this slot.
        mem.copyBackwards(*Export, list[i..], list[i + 1 .. new_len]);
        new_len -= 1;
    } else {
        i += 1;
    }
}
decl_exports_kv.value = self.gpa.shrink(list, new_len);
if (new_len == 0) {
    self.decl_exports.removeAssertDiscard(exp.exported_decl);
}
}
// Remove the export's symbol from the linker output, where supported.
if (self.comp.bin_file.cast(link.File.Elf)) |elf| {
    elf.deleteExport(exp.link);
}
if (self.failed_exports.remove(exp)) |entry| {
    entry.value.destroy(self.gpa);
}
_ = self.symbol_exports.remove(exp.options.name);
self.gpa.free(exp.options.name);
self.gpa.destroy(exp);
}
self.gpa.free(kv.value);
}

/// Runs semantic analysis on the queued ZIR body of `func`, transitioning its
/// analysis state from `queued` through `in_progress` to `success` with the
/// resulting instructions stored in the Decl's arena.
pub fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
    const tracy = trace(@src());
    defer tracy.end();

    // Use the Decl's arena for function memory.
    var arena = decl.typed_value.most_recent.arena.?.promote(self.gpa);
    defer decl.typed_value.most_recent.arena.?.* = arena.state;
    var inner_block: Scope.Block = .{
        .parent = null,
        .func = func,
        .decl = decl,
        .instructions = .{},
        .arena = &arena.allocator,
        .is_comptime = false,
    };
    defer inner_block.instructions.deinit(self.gpa);

    // The queued ZIR is consumed here; its arena is freed when analysis ends.
    const fn_zir = func.analysis.queued;
    defer fn_zir.arena.promote(self.gpa).deinit();
    func.analysis = .{ .in_progress = {} };
    log.debug("set {} to in_progress\n", .{decl.name});

    try zir_sema.analyzeBody(self, &inner_block.base, fn_zir.body);

    const instructions = try arena.allocator.dupe(*Inst, inner_block.instructions.items);
    func.analysis = .{ .success = .{ .instructions = instructions } };
    log.debug("set {} to success\n", .{decl.name});
}

/// Marks `decl` as outdated, queues it for re-analysis, and discards any stale
/// error message attached to it.
fn markOutdatedDecl(self: *Module, decl: *Decl) !void {
    log.debug("mark {} outdated\n", .{decl.name});
    try self.comp.work_queue.writeItem(.{ .analyze_decl = decl });
    if (self.failed_decls.remove(decl)) |entry| {
        entry.value.destroy(self.gpa);
    }
    decl.analysis = .outdated;
}

/// Heap-allocates a blank, unreferenced Decl whose linker state matches the
/// active binary format. The caller is responsible for setting the name and
/// registering it (see createNewDecl).
fn allocateNewDecl(
    self: *Module,
    scope: *Scope,
    src_index: usize,
    contents_hash: std.zig.SrcHash,
) !*Decl {
    const new_decl
// (Continues allocateNewDecl.)
= try self.gpa.create(Decl);
new_decl.* = .{
    .name = "",
    .scope = scope.namespace(),
    .src_index = src_index,
    .typed_value = .{ .never_succeeded = {} },
    .analysis = .unreferenced,
    .deletion_flag = false,
    .contents_hash = contents_hash,
    // Per-format linker bookkeeping for the decl's data.
    .link = switch (self.comp.bin_file.tag) {
        .coff => .{ .coff = link.File.Coff.TextBlock.empty },
        .elf => .{ .elf = link.File.Elf.TextBlock.empty },
        .macho => .{ .macho = link.File.MachO.TextBlock.empty },
        .c => .{ .c = {} },
        .wasm => .{ .wasm = {} },
    },
    // Per-format linker bookkeeping for the decl's function body, if any.
    .fn_link = switch (self.comp.bin_file.tag) {
        .coff => .{ .coff = {} },
        .elf => .{ .elf = link.File.Elf.SrcFn.empty },
        .macho => .{ .macho = link.File.MachO.SrcFn.empty },
        .c => .{ .c = {} },
        .wasm => .{ .wasm = null },
    },
    .generation = 0,
    .is_pub = false,
};
return new_decl;
}

/// Allocates a new Decl, gives it a duplicated NUL-terminated copy of
/// `decl_name`, and registers it in `decl_table` under `name_hash`.
/// Asserts that no decl with that name hash already exists.
fn createNewDecl(
    self: *Module,
    scope: *Scope,
    decl_name: []const u8,
    src_index: usize,
    name_hash: Scope.NameHash,
    contents_hash: std.zig.SrcHash,
) !*Decl {
    try self.decl_table.ensureCapacity(self.gpa, self.decl_table.items().len + 1);
    const new_decl = try self.allocateNewDecl(scope, src_index, contents_hash);
    errdefer self.gpa.destroy(new_decl);
    new_decl.name = try mem.dupeZ(self.gpa, u8, decl_name);
    self.decl_table.putAssumeCapacityNoClobber(name_hash, new_decl);
    return new_decl;
}

/// Get error value for error tag `name`.
/// Returns the existing entry for `name` in the global error set, or inserts a
/// new one whose value is the next index (`count() - 1` after insertion). The
/// key is duplicated so the caller's slice need not outlive the table.
pub fn getErrorValue(self: *Module, name: []const u8) !std.StringHashMapUnmanaged(u16).Entry {
    const gop = try self.global_error_set.getOrPut(self.gpa, name);
    if (gop.found_existing)
        return gop.entry.*;

    errdefer self.global_error_set.removeAssertDiscard(name);
    gop.entry.key = try self.gpa.dupe(u8, name);
    gop.entry.value = @intCast(u16, self.global_error_set.count() - 1);
    return gop.entry.*;
}

/// Returns the enclosing function block, or fails with a compile error when
/// `scope` is not inside a function body.
pub fn requireFunctionBlock(self: *Module, scope: *Scope, src: usize) !*Scope.Block {
    return scope.cast(Scope.Block) orelse
        return self.fail(scope, src, "instruction illegal outside function body", .{});
}

/// Like requireFunctionBlock, but additionally fails when the block is being
/// evaluated at comptime (runtime instructions are illegal there).
pub fn requireRuntimeBlock(self: *Module, scope: *Scope, src: usize) !*Scope.Block {
    const block = try self.requireFunctionBlock(scope, src);
    if (block.is_comptime) {
        return self.fail(scope, src, "unable to resolve comptime value", .{});
    }
    return block;
}

/// Resolves `base` to a defined comptime value, failing when it is not
/// comptime-known.
pub fn resolveConstValue(self: *Module, scope: *Scope, base: *Inst) !Value {
    return (try self.resolveDefinedValue(scope, base)) orelse
        return self.fail(scope, base.src, "unable to resolve comptime value", .{});
}

/// Returns the comptime value of `base` if it has one, failing when that value
/// is `undefined`; returns null for runtime-known values.
pub fn resolveDefinedValue(self: *Module, scope: *Scope, base: *Inst) !?Value {
    if (base.value()) |val| {
        if (val.isUndef()) {
            return self.fail(scope, base.src, "use of undefined value here causes undefined behavior", .{});
        }
        return val;
    }
    return null;
}

/// Creates an Export for `exported_decl` under `borrowed_symbol_name` (which is
/// duplicated), records it in the export tables, and informs the linker.
/// Symbol-name collisions and linker failures are recorded in `failed_exports`
/// rather than returned as errors.
pub fn analyzeExport(self: *Module, scope: *Scope, src: usize, borrowed_symbol_name: []const u8, exported_decl: *Decl) !void {
    try self.ensureDeclAnalyzed(exported_decl);
    const typed_value = exported_decl.typed_value.most_recent.typed_value;
    // Only functions can be exported so far.
    switch (typed_value.ty.zigTypeTag()) {
        .Fn => {},
        else => return self.fail(scope, src, "unable to export type '{}'", .{typed_value.ty}),
    }

    try self.decl_exports.ensureCapacity(self.gpa, self.decl_exports.items().len + 1);
    try self.export_owners.ensureCapacity(self.gpa, self.export_owners.items().len + 1);

    const new_export = try self.gpa.create(Export);
    errdefer self.gpa.destroy(new_export);

    const symbol_name = try
// (Continues analyzeExport.)
self.gpa.dupe(u8, borrowed_symbol_name);
errdefer self.gpa.free(symbol_name);

const owner_decl = scope.decl().?;

new_export.* = .{
    .options = .{ .name = symbol_name },
    .src = src,
    .link = .{},
    .owner_decl = owner_decl,
    .exported_decl = exported_decl,
    .status = .in_progress,
};

// Add to export_owners table.
const eo_gop = self.export_owners.getOrPutAssumeCapacity(owner_decl);
if (!eo_gop.found_existing) {
    eo_gop.entry.value = &[0]*Export{};
}
eo_gop.entry.value = try self.gpa.realloc(eo_gop.entry.value, eo_gop.entry.value.len + 1);
eo_gop.entry.value[eo_gop.entry.value.len - 1] = new_export;
// Undo the append if a later step errors out.
errdefer eo_gop.entry.value = self.gpa.shrink(eo_gop.entry.value, eo_gop.entry.value.len - 1);

// Add to exported_decl table.
const de_gop = self.decl_exports.getOrPutAssumeCapacity(exported_decl);
if (!de_gop.found_existing) {
    de_gop.entry.value = &[0]*Export{};
}
de_gop.entry.value = try self.gpa.realloc(de_gop.entry.value, de_gop.entry.value.len + 1);
de_gop.entry.value[de_gop.entry.value.len - 1] = new_export;
errdefer de_gop.entry.value = self.gpa.shrink(de_gop.entry.value, de_gop.entry.value.len - 1);

// A second export of the same symbol name is a collision: record the failure
// on this export and keep going.
if (self.symbol_exports.get(symbol_name)) |_| {
    try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1);
    self.failed_exports.putAssumeCapacityNoClobber(new_export, try Compilation.ErrorMsg.create(
        self.gpa,
        src,
        "exported symbol collision: {}",
        .{symbol_name},
    ));
    // TODO: add a note
    new_export.status = .failed;
    return;
}

try self.symbol_exports.putNoClobber(self.gpa, symbol_name, new_export);
// Tell the linker; OOM propagates, any other linker error is recorded as a
// retryable export failure instead of aborting analysis.
self.comp.bin_file.updateDeclExports(self, exported_decl, de_gop.entry.value) catch |err| switch (err) {
    error.OutOfMemory => return error.OutOfMemory,
    else => {
        try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1);
        self.failed_exports.putAssumeCapacityNoClobber(new_export, try Compilation.ErrorMsg.create(
            self.gpa,
            src,
            "unable to export: {}",
            .{@errorName(err)},
        ));
        new_export.status = .failed_retryable;
    },
};
}

pub fn
addNoOp(
    self: *Module,
    block: *Scope.Block,
    src: usize,
    ty: Type,
    comptime tag: Inst.Tag,
) !*Inst {
    // Allocate the concrete instruction type for `tag` in the block's arena
    // and append it to the block.
    const inst = try block.arena.create(tag.Type());
    inst.* = .{
        .base = .{
            .tag = tag,
            .ty = ty,
            .src = src,
        },
    };
    try block.instructions.append(self.gpa, &inst.base);
    return &inst.base;
}

/// Appends a single-operand instruction to `block` and returns it.
pub fn addUnOp(
    self: *Module,
    block: *Scope.Block,
    src: usize,
    ty: Type,
    tag: Inst.Tag,
    operand: *Inst,
) !*Inst {
    const inst = try block.arena.create(Inst.UnOp);
    inst.* = .{
        .base = .{
            .tag = tag,
            .ty = ty,
            .src = src,
        },
        .operand = operand,
    };
    try block.instructions.append(self.gpa, &inst.base);
    return &inst.base;
}

/// Appends a two-operand instruction to `block` and returns it.
pub fn addBinOp(
    self: *Module,
    block: *Scope.Block,
    src: usize,
    ty: Type,
    tag: Inst.Tag,
    lhs: *Inst,
    rhs: *Inst,
) !*Inst {
    const inst = try block.arena.create(Inst.BinOp);
    inst.* = .{
        .base = .{
            .tag = tag,
            .ty = ty,
            .src = src,
        },
        .lhs = lhs,
        .rhs = rhs,
    };
    try block.instructions.append(self.gpa, &inst.base);
    return &inst.base;
}

/// Appends a function-argument instruction named `name` to `block`.
pub fn addArg(self: *Module, block: *Scope.Block, src: usize, ty: Type, name: [*:0]const u8) !*Inst {
    const inst = try block.arena.create(Inst.Arg);
    inst.* = .{
        .base = .{
            .tag = .arg,
            .ty = ty,
            .src = src,
        },
        .name = name,
    };
    try block.instructions.append(self.gpa, &inst.base);
    return &inst.base;
}

/// Appends a `br` (break-to-block with operand) instruction to `scope_block`;
/// its type is `noreturn` since control leaves the current block.
pub fn addBr(
    self: *Module,
    scope_block: *Scope.Block,
    src: usize,
    target_block: *Inst.Block,
    operand: *Inst,
) !*Inst {
    const inst = try scope_block.arena.create(Inst.Br);
    inst.* = .{
        .base = .{
            .tag = .br,
            .ty = Type.initTag(.noreturn),
            .src = src,
        },
        .operand = operand,
        .block = target_block,
    };
    try scope_block.instructions.append(self.gpa, &inst.base);
    return &inst.base;
}

/// Appends a conditional-branch instruction with the given then/else bodies.
pub fn addCondBr(
    self: *Module,
    block: *Scope.Block,
    src: usize,
    condition: *Inst,
    then_body: ir.Body,
    else_body: ir.Body,
) !*Inst {
    const inst = try block.arena.create(Inst.CondBr);
    inst.* = .{
        .base = .{
            .tag = .condbr,
            .ty = Type.initTag(.noreturn),
            .src = src,
        },
        .condition = condition,
        .then_body = then_body,
        .else_body = else_body,
    };
    try
// (Continues addCondBr.)
block.instructions.append(self.gpa, &inst.base);
return &inst.base;
}

/// Appends a call instruction to `block` and returns it.
pub fn addCall(
    self: *Module,
    block: *Scope.Block,
    src: usize,
    ty: Type,
    func: *Inst,
    args: []const *Inst,
) !*Inst {
    const inst = try block.arena.create(Inst.Call);
    inst.* = .{
        .base = .{
            .tag = .call,
            .ty = ty,
            .src = src,
        },
        .func = func,
        .args = args,
    };
    try block.instructions.append(self.gpa, &inst.base);
    return &inst.base;
}

/// Creates a comptime-constant instruction carrying `typed_value`. Note it is
/// allocated in the scope's arena and NOT appended to any block.
pub fn constInst(self: *Module, scope: *Scope, src: usize, typed_value: TypedValue) !*Inst {
    const const_inst = try scope.arena().create(Inst.Constant);
    const_inst.* = .{
        .base = .{
            .tag = Inst.Constant.base_tag,
            .ty = typed_value.ty,
            .src = src,
        },
        .val = typed_value.val,
    };
    return &const_inst.base;
}

/// Constant whose value is the type `ty` (of type `type`).
pub fn constType(self: *Module, scope: *Scope, src: usize, ty: Type) !*Inst {
    return self.constInst(scope, src, .{
        .ty = Type.initTag(.type),
        .val = try ty.toValue(scope.arena()),
    });
}

/// The `void` constant.
pub fn constVoid(self: *Module, scope: *Scope, src: usize) !*Inst {
    return self.constInst(scope, src, .{
        .ty = Type.initTag(.void),
        .val = Value.initTag(.void_value),
    });
}

/// A `noreturn`-typed constant holding the unreachable value.
pub fn constNoReturn(self: *Module, scope: *Scope, src: usize) !*Inst {
    return self.constInst(scope, src, .{
        .ty = Type.initTag(.noreturn),
        .val = Value.initTag(.unreachable_value),
    });
}

/// An `undefined` constant of type `ty`.
pub fn constUndef(self: *Module, scope: *Scope, src: usize, ty: Type) !*Inst {
    return self.constInst(scope, src, .{
        .ty = ty,
        .val = Value.initTag(.undef),
    });
}

/// A `bool` constant for `v`.
pub fn constBool(self: *Module, scope: *Scope, src: usize, v: bool) !*Inst {
    return self.constInst(scope, src, .{
        .ty = Type.initTag(.bool),
        .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)],
    });
}

/// An integer constant of type `ty` from an unsigned 64-bit value.
pub fn constIntUnsigned(self: *Module, scope: *Scope, src: usize, ty: Type, int: u64) !*Inst {
    const int_payload = try scope.arena().create(Value.Payload.Int_u64);
    int_payload.* = .{ .int = int };
    return self.constInst(scope, src, .{
        .ty = ty,
        .val = Value.initPayload(&int_payload.base),
    });
}

/// An integer constant of type `ty` from a signed 64-bit value.
pub fn constIntSigned(self: *Module, scope:
*Scope, src: usize, ty: Type, int: i64) !*Inst {
    const int_payload = try scope.arena().create(Value.Payload.Int_i64);
    int_payload.* = .{ .int = int };
    return self.constInst(scope, src, .{
        .ty = ty,
        .val = Value.initPayload(&int_payload.base),
    });
}

/// An integer constant of type `ty` from a big integer. Values that fit in
/// u64/i64 are routed to the small-int constructors; otherwise the big int's
/// limbs are referenced directly in a big-int payload.
pub fn constIntBig(self: *Module, scope: *Scope, src: usize, ty: Type, big_int: BigIntConst) !*Inst {
    const val_payload = if (big_int.positive) blk: {
        if (big_int.to(u64)) |x| {
            return self.constIntUnsigned(scope, src, ty, x);
        } else |err| switch (err) {
            error.NegativeIntoUnsigned => unreachable,
            error.TargetTooSmall => {}, // handled below
        }
        const big_int_payload = try scope.arena().create(Value.Payload.IntBigPositive);
        big_int_payload.* = .{ .limbs = big_int.limbs };
        break :blk &big_int_payload.base;
    } else blk: {
        if (big_int.to(i64)) |x| {
            return self.constIntSigned(scope, src, ty, x);
        } else |err| switch (err) {
            error.NegativeIntoUnsigned => unreachable,
            error.TargetTooSmall => {}, // handled below
        }
        const big_int_payload = try scope.arena().create(Value.Payload.IntBigNegative);
        big_int_payload.* = .{ .limbs = big_int.limbs };
        break :blk &big_int_payload.base;
    };

    return self.constInst(scope, src, .{
        .ty = ty,
        .val = Value.initPayload(val_payload),
    });
}

/// Creates an anonymous Decl (named "<parent>__anon_<n>") holding `typed_value`,
/// whose memory lives in `decl_arena`. The arena's state is captured into the
/// decl so the decl owns that memory from here on.
pub fn createAnonymousDecl(
    self: *Module,
    scope: *Scope,
    decl_arena: *std.heap.ArenaAllocator,
    typed_value: TypedValue,
) !*Decl {
    const name_index = self.getNextAnonNameIndex();
    const scope_decl = scope.decl().?;
    const name = try std.fmt.allocPrint(self.gpa, "{}__anon_{}", .{ scope_decl.name, name_index });
    defer self.gpa.free(name);
    const name_hash = scope.namespace().fullyQualifiedNameHash(name);
    // Anonymous decls have no meaningful source hash.
    const src_hash: std.zig.SrcHash = undefined;
    const new_decl = try self.createNewDecl(scope, name, scope_decl.src_index, name_hash, src_hash);
    const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);

    decl_arena_state.* = decl_arena.state;
    new_decl.typed_value = .{
        .most_recent = .{
            .typed_value = typed_value,
            .arena = decl_arena_state,
        },
    };
// (Continues createAnonymousDecl: the decl is born fully analyzed.)
new_decl.analysis = .complete;
new_decl.generation = self.generation;

// TODO: This generates the Decl into the machine code file if it is of a type that is non-zero size.
// We should be able to further improve the compiler to not omit Decls which are only referenced at
// compile-time and not runtime.
if (typed_value.ty.hasCodeGenBits()) {
    try self.comp.bin_file.allocateDeclIndexes(new_decl);
    try self.comp.work_queue.writeItem(.{ .codegen_decl = new_decl });
}

return new_decl;
}

/// Returns a fresh index for naming anonymous decls; atomic so it is safe to
/// call from concurrent analysis.
fn getNextAnonNameIndex(self: *Module) usize {
    return @atomicRmw(usize, &self.next_anon_name_index, .Add, 1, .Monotonic);
}

/// Looks up `ident_name` in the namespace enclosing `scope`, returning the
/// Decl if one exists.
pub fn lookupDeclName(self: *Module, scope: *Scope, ident_name: []const u8) ?*Decl {
    const namespace = scope.namespace();
    const name_hash = namespace.fullyQualifiedNameHash(ident_name);
    return self.decl_table.get(name_hash);
}

/// Produces a pointer-to-decl constant for `decl`, recording a dependency edge
/// from the referencing decl and ensuring `decl` is analyzed first. Variables
/// are routed through analyzeVarRef instead.
pub fn analyzeDeclRef(self: *Module, scope: *Scope, src: usize, decl: *Decl) InnerError!*Inst {
    const scope_decl = scope.decl().?;
    try self.declareDeclDependency(scope_decl, decl);
    self.ensureDeclAnalyzed(decl) catch |err| {
        // Propagate the failure, marking whoever referenced the decl as a
        // dependency failure.
        if (scope.cast(Scope.Block)) |block| {
            if (block.func) |func| {
                func.analysis = .dependency_failure;
            } else {
                block.decl.analysis = .dependency_failure;
            }
        } else {
            scope_decl.analysis = .dependency_failure;
        }
        return err;
    };

    const decl_tv = try decl.typedValue();
    if (decl_tv.val.tag() == .variable) {
        return self.analyzeVarRef(scope, src, decl_tv);
    }
    const ty = try self.simplePtrType(scope, src, decl_tv.ty, false, .One);
    const val_payload = try scope.arena().create(Value.Payload.DeclRef);
    val_payload.* = .{ .decl = decl };

    return self.constInst(scope, src, .{
        .ty = ty,
        .val = Value.initPayload(&val_payload.base),
    });
}

/// Produces a pointer to a variable. Immutable non-extern variables fold to a
/// comptime ref of their init value; otherwise a runtime varptr instruction
/// is emitted.
fn analyzeVarRef(self: *Module, scope: *Scope, src: usize, tv: TypedValue) InnerError!*Inst {
    const variable = tv.val.cast(Value.Payload.Variable).?.variable;

    const ty = try self.simplePtrType(scope, src, tv.ty, variable.is_mutable, .One);
    if (!variable.is_mutable and !variable.is_extern) {
        const val_payload =
// (Continues analyzeVarRef: comptime case.)
try scope.arena().create(Value.Payload.RefVal);
val_payload.* = .{ .val = variable.init };
return self.constInst(scope, src, .{
    .ty = ty,
    .val = Value.initPayload(&val_payload.base),
});
}

// Runtime case: emit a varptr instruction.
const b = try self.requireRuntimeBlock(scope, src);
const inst = try b.arena.create(Inst.VarPtr);
inst.* = .{
    .base = .{
        .tag = .varptr,
        .ty = ty,
        .src = src,
    },
    .variable = variable,
};
try b.instructions.append(self.gpa, &inst.base);
return &inst.base;
}

/// Loads the value pointed to by `ptr`: folded at comptime when the pointer has
/// a known value, otherwise emitted as a runtime `load` instruction.
pub fn analyzeDeref(self: *Module, scope: *Scope, src: usize, ptr: *Inst, ptr_src: usize) InnerError!*Inst {
    const elem_ty = switch (ptr.ty.zigTypeTag()) {
        .Pointer => ptr.ty.elemType(),
        else => return self.fail(scope, ptr_src, "expected pointer, found '{}'", .{ptr.ty}),
    };
    if (ptr.value()) |val| {
        return self.constInst(scope, src, .{
            .ty = elem_ty,
            .val = try val.pointerDeref(scope.arena()),
        });
    }

    const b = try self.requireRuntimeBlock(scope, src);
    return self.addUnOp(b, src, elem_ty, .load, ptr);
}

/// Resolves `decl_name` in scope and produces a decl reference, failing when
/// the name does not resolve.
pub fn analyzeDeclRefByName(self: *Module, scope: *Scope, src: usize, decl_name: []const u8) InnerError!*Inst {
    const decl = self.lookupDeclName(scope, decl_name) orelse
        return self.fail(scope, src, "decl '{}' not found", .{decl_name});
    return self.analyzeDeclRef(scope, src, decl);
}

/// Whether runtime safety checks should be emitted, based on the optimize mode.
pub fn wantSafety(self: *Module, scope: *Scope) bool {
    // TODO take into account scope's safety overrides
    return switch (self.optimizeMode()) {
        .Debug => true,
        .ReleaseSafe => true,
        .ReleaseFast => false,
        .ReleaseSmall => false,
    };
}

/// Tests an optional for null (or non-null when `invert_logic`); folded at
/// comptime when the operand value is known.
pub fn analyzeIsNull(
    self: *Module,
    scope: *Scope,
    src: usize,
    operand: *Inst,
    invert_logic: bool,
) InnerError!*Inst {
    if (operand.value()) |opt_val| {
        const is_null = opt_val.isNull();
        const bool_value = if (invert_logic) !is_null else is_null;
        return self.constBool(scope, src, bool_value);
    }
    const b = try self.requireRuntimeBlock(scope, src);
    const inst_tag: Inst.Tag = if (invert_logic) .isnonnull else .isnull;
    return self.addUnOp(b, src, Type.initTag(.bool), inst_tag, operand);
}

pub fn analyzeIsErr(self: *Module,
scope: *Scope, src: usize, operand: *Inst) InnerError!*Inst {
    // Not implemented yet; always a compile error.
    return self.fail(scope, src, "TODO implement analysis of iserr", .{});
}

/// Computes the result type of a slice operation `array_ptr[start..end :sentinel]`.
/// Slicing itself is not implemented yet — after validating the operands and
/// computing the return type, this still fails with a TODO error.
pub fn analyzeSlice(self: *Module, scope: *Scope, src: usize, array_ptr: *Inst, start: *Inst, end_opt: ?*Inst, sentinel_opt: ?*Inst) InnerError!*Inst {
    const ptr_child = switch (array_ptr.ty.zigTypeTag()) {
        .Pointer => array_ptr.ty.elemType(),
        else => return self.fail(scope, src, "expected pointer, found '{}'", .{array_ptr.ty}),
    };

    var array_type = ptr_child;
    const elem_type = switch (ptr_child.zigTypeTag()) {
        .Array => ptr_child.elemType(),
        .Pointer => blk: {
            if (ptr_child.isSinglePointer()) {
                // *[N]T may be sliced; any other single-item pointer may not.
                if (ptr_child.elemType().zigTypeTag() == .Array) {
                    array_type = ptr_child.elemType();
                    break :blk ptr_child.elemType().elemType();
                }
                return self.fail(scope, src, "slice of single-item pointer", .{});
            }
            break :blk ptr_child.elemType();
        },
        else => return self.fail(scope, src, "slice of non-array type '{}'", .{ptr_child}),
    };

    // A sentinel, when given, must be a comptime-known value of the element type.
    const slice_sentinel = if (sentinel_opt) |sentinel| blk: {
        const casted = try self.coerce(scope, elem_type, sentinel);
        break :blk try self.resolveConstValue(scope, casted);
    } else null;

    var return_ptr_size: std.builtin.TypeInfo.Pointer.Size = .Slice;
    var return_elem_type = elem_type;
    if (end_opt) |end| {
        if (end.value()) |end_val| {
            if (start.value()) |start_val| {
                // Both bounds comptime-known: result is *[len]T instead of a slice.
                const start_u64 = start_val.toUnsignedInt();
                const end_u64 = end_val.toUnsignedInt();
                if (start_u64 > end_u64) {
                    return self.fail(scope, src, "out of bounds slice", .{});
                }
                const len = end_u64 - start_u64;
                // Slicing to the very end of a sentinel-terminated array keeps
                // the array's own sentinel.
                const array_sentinel = if (array_type.zigTypeTag() == .Array and end_u64 == array_type.arrayLen())
                    array_type.sentinel()
                else
                    slice_sentinel;
                return_elem_type = try self.arrayType(scope, len, array_sentinel, elem_type);
                return_ptr_size = .One;
            }
        }
    }
    const return_type = try self.ptrType(
        scope,
        src,
        return_elem_type,
        if (end_opt == null) slice_sentinel else null,
        0, // TODO alignment
        0,
        0,
        !ptr_child.isConstPtr(),
        ptr_child.isAllowzeroPtr(),
// (Continues analyzeSlice: final ptrType arguments. Note the computed
// return_type is not yet used — the function still fails with a TODO below.)
ptr_child.isVolatilePtr(),
return_ptr_size,
);

return self.fail(scope, src, "TODO implement analysis of slice", .{});
}

/// Resolves an `@import` target: returns the cached file scope if the file was
/// imported before, otherwise creates a new Scope.File, analyzes its root
/// container (recording any analysis failures), and caches it in import_table.
pub fn analyzeImport(self: *Module, scope: *Scope, src: usize, target_string: []const u8) !*Scope.File {
    // TODO if (package_table.get(target_string)) |pkg|
    if (self.import_table.get(target_string)) |some| {
        return some;
    }

    // TODO check for imports outside of pkg path
    if (false) return error.ImportOutsidePkgPath;

    // TODO Scope.Container arena for ty and sub_file_path
    const struct_payload = try self.gpa.create(Type.Payload.EmptyStruct);
    errdefer self.gpa.destroy(struct_payload);
    const file_scope = try self.gpa.create(Scope.File);
    errdefer self.gpa.destroy(file_scope);
    const file_path = try self.gpa.dupe(u8, target_string);
    errdefer self.gpa.free(file_path);

    struct_payload.* = .{ .scope = &file_scope.root_container };
    file_scope.* = .{
        .sub_file_path = file_path,
        .source = .{ .unloaded = {} },
        .contents = .{ .not_available = {} },
        .status = .never_loaded,
        .root_container = .{
            .file_scope = file_scope,
            .decls = .{},
            .ty = Type.initPayload(&struct_payload.base),
        },
    };
    // Analysis failures inside the imported file are reported through the
    // error tables, not through this call.
    self.analyzeContainer(&file_scope.root_container) catch |err| switch (err) {
        error.AnalysisFail => {
            assert(self.comp.totalErrorCount() != 0);
        },
        else => |e| return e,
    };
    try self.import_table.put(self.gpa, file_scope.sub_file_path, file_scope);
    return file_scope;
}

/// Asserts that lhs and rhs types are both numeric.
/// Analyzes a comparison between two numeric operands.
/// Comptime-known operands are folded to a constant bool; otherwise both
/// operands are coerced to a common type and a runtime compare instruction
/// is emitted. Asserts both operand types are numeric (see doc comment above).
pub fn cmpNumeric(
    self: *Module,
    scope: *Scope,
    src: usize,
    lhs: *Inst,
    rhs: *Inst,
    op: std.math.CompareOperator,
) !*Inst {
    assert(lhs.ty.isNumeric());
    assert(rhs.ty.isNumeric());

    const lhs_ty_tag = lhs.ty.zigTypeTag();
    const rhs_ty_tag = rhs.ty.zigTypeTag();

    // Vector/vector comparisons must agree on length; vector/scalar mixes are errors.
    if (lhs_ty_tag == .Vector and rhs_ty_tag == .Vector) {
        if (lhs.ty.arrayLen() != rhs.ty.arrayLen()) {
            return self.fail(scope, src, "vector length mismatch: {} and {}", .{
                lhs.ty.arrayLen(), rhs.ty.arrayLen(),
            });
        }
        return self.fail(scope, src, "TODO implement support for vectors in cmpNumeric", .{});
    } else if (lhs_ty_tag == .Vector or rhs_ty_tag == .Vector) {
        return self.fail(scope, src, "mixed scalar and vector operands to comparison operator: '{}' and '{}'", .{
            lhs.ty, rhs.ty,
        });
    }

    // Both operands comptime-known: fold to a constant bool.
    if (lhs.value()) |lhs_val| {
        if (rhs.value()) |rhs_val| {
            return self.constBool(scope, src, Value.compare(lhs_val, op, rhs_val));
        }
    }

    // TODO handle comparisons against lazy zero values
    // Some values can be compared against zero without being runtime known or without forcing
    // a full resolution of their value, for example `@sizeOf(@Frame(function))` is known to
    // always be nonzero, and we benefit from not forcing the full evaluation and stack frame layout
    // of this function if we don't need to.

    // It must be a runtime comparison.
    const b = try self.requireRuntimeBlock(scope, src);
    // For floats, emit a float comparison instruction.
    const lhs_is_float = switch (lhs_ty_tag) {
        .Float, .ComptimeFloat => true,
        else => false,
    };
    const rhs_is_float = switch (rhs_ty_tag) {
        .Float, .ComptimeFloat => true,
        else => false,
    };
    if (lhs_is_float and rhs_is_float) {
        // Implicit cast the smaller one to the larger one.
        const dest_type = x: {
            // A comptime_float operand always adopts the other side's concrete float type.
            if (lhs_ty_tag == .ComptimeFloat) {
                break :x rhs.ty;
            } else if (rhs_ty_tag == .ComptimeFloat) {
                break :x lhs.ty;
            }
            if (lhs.ty.floatBits(self.getTarget()) >= rhs.ty.floatBits(self.getTarget())) {
                break :x lhs.ty;
            } else {
                break :x rhs.ty;
            }
        };
        const casted_lhs = try self.coerce(scope, dest_type, lhs);
        const casted_rhs = try self.coerce(scope, dest_type, rhs);
        return self.addBinOp(b, src, dest_type, Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
    }
    // For mixed unsigned integer sizes, implicit cast both operands to the larger integer.
    // For mixed signed and unsigned integers, implicit cast both operands to a signed
    // integer with + 1 bit.
    // For mixed floats and integers, extract the integer part from the float, cast that to
    // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float,
    // add/subtract 1.
    const lhs_is_signed = if (lhs.value()) |lhs_val|
        lhs_val.compareWithZero(.lt)
    else
        (lhs.ty.isFloat() or lhs.ty.isSignedInt());
    const rhs_is_signed = if (rhs.value()) |rhs_val|
        rhs_val.compareWithZero(.lt)
    else
        (rhs.ty.isFloat() or rhs.ty.isSignedInt());
    const dest_int_is_signed = lhs_is_signed or rhs_is_signed;

    var dest_float_type: ?Type = null;

    // Compute the number of two's-complement bits needed to hold the lhs.
    var lhs_bits: usize = undefined;
    if (lhs.value()) |lhs_val| {
        if (lhs_val.isUndef())
            return self.constUndef(scope, src, Type.initTag(.bool));
        const is_unsigned = if (lhs_is_float) x: {
            var bigint_space: Value.BigIntSpace = undefined;
            var bigint = try lhs_val.toBigInt(&bigint_space).toManaged(self.gpa);
            defer bigint.deinit();
            const zcmp = lhs_val.orderAgainstZero();
            if (lhs_val.floatHasFraction()) {
                // A fractional value can never equal an integer exactly.
                switch (op) {
                    .eq => return self.constBool(scope, src, false),
                    .neq => return self.constBool(scope, src, true),
                    else => {},
                }
                // Widen by one toward the sign so ordering comparisons stay correct
                // after dropping the fraction.
                if (zcmp == .lt) {
                    try bigint.addScalar(bigint.toConst(), -1);
                } else {
                    try bigint.addScalar(bigint.toConst(), 1);
                }
            }
            lhs_bits = bigint.toConst().bitCountTwosComp();
            break :x (zcmp != .lt);
        } else x: {
            lhs_bits = lhs_val.intBitCountTwosComp();
            break :x (lhs_val.orderAgainstZero() != .lt);
        };
        // An unsigned value needs one extra bit when the destination is signed.
        lhs_bits += @boolToInt(is_unsigned and dest_int_is_signed);
    } else if (lhs_is_float) {
        dest_float_type = lhs.ty;
    } else {
        const int_info = lhs.ty.intInfo(self.getTarget());
        lhs_bits = int_info.bits + @boolToInt(!int_info.signed and dest_int_is_signed);
    }

    // Same computation, mirrored for the rhs.
    var rhs_bits: usize = undefined;
    if (rhs.value()) |rhs_val| {
        if (rhs_val.isUndef())
            return self.constUndef(scope, src, Type.initTag(.bool));
        const is_unsigned = if (rhs_is_float) x: {
            var bigint_space: Value.BigIntSpace = undefined;
            var bigint = try rhs_val.toBigInt(&bigint_space).toManaged(self.gpa);
            defer bigint.deinit();
            const zcmp = rhs_val.orderAgainstZero();
            if (rhs_val.floatHasFraction()) {
                switch (op) {
                    .eq => return self.constBool(scope, src, false),
                    .neq => return self.constBool(scope, src, true),
                    else => {},
                }
                if (zcmp == .lt) {
                    try bigint.addScalar(bigint.toConst(), -1);
                } else {
                    try bigint.addScalar(bigint.toConst(), 1);
                }
            }
            rhs_bits = bigint.toConst().bitCountTwosComp();
            break :x (zcmp != .lt);
        } else x: {
            rhs_bits = rhs_val.intBitCountTwosComp();
            break :x (rhs_val.orderAgainstZero() != .lt);
        };
        rhs_bits += @boolToInt(is_unsigned and dest_int_is_signed);
    } else if (rhs_is_float) {
        dest_float_type = rhs.ty;
    } else {
        const int_info = rhs.ty.intInfo(self.getTarget());
        rhs_bits = int_info.bits + @boolToInt(!int_info.signed and dest_int_is_signed);
    }

    // Pick the common type: a float type if either side was runtime float,
    // otherwise the smallest integer type wide enough for both sides.
    const dest_type = if (dest_float_type) |ft| ft else blk: {
        const max_bits = std.math.max(lhs_bits, rhs_bits);
        const casted_bits = std.math.cast(u16, max_bits) catch |err| switch (err) {
            error.Overflow => return self.fail(scope, src, "{} exceeds maximum integer bit count", .{max_bits}),
        };
        break :blk try self.makeIntType(scope, dest_int_is_signed, casted_bits);
    };
    const casted_lhs = try self.coerce(scope, dest_type, lhs);
    const casted_rhs = try self.coerce(scope, dest_type, rhs);

    return self.addBinOp(b, src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
}

/// Wraps a value of type T into an optional ?T.
fn wrapOptional(self: *Module,
scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
    // Comptime-known payload: the Value representation carries over unchanged.
    if (inst.value()) |val| {
        return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
    }

    const b = try self.requireRuntimeBlock(scope, inst.src);
    return self.addUnOp(b, inst.src, dest_type, .wrap_optional, inst);
}

/// Creates an integer type with the given signedness and bit width,
/// allocated in the scope's arena.
fn makeIntType(self: *Module, scope: *Scope, signed: bool, bits: u16) !Type {
    if (signed) {
        const int_payload = try scope.arena().create(Type.Payload.IntSigned);
        int_payload.* = .{ .bits = bits };
        return Type.initPayload(&int_payload.base);
    } else {
        const int_payload = try scope.arena().create(Type.Payload.IntUnsigned);
        int_payload.* = .{ .bits = bits };
        return Type.initPayload(&int_payload.base);
    }
}

/// Determines the common type of a list of peer instructions (e.g. the
/// results of the branches of an if/switch). noreturn and undefined peers
/// are skipped; same-signedness integers and floats resolve to the wider type.
pub fn resolvePeerTypes(self: *Module, scope: *Scope, instructions: []*Inst) !Type {
    if (instructions.len == 0)
        return Type.initTag(.noreturn);

    if (instructions.len == 1)
        return instructions[0].ty;

    // Scan pairwise, keeping the "winning" (wider / more concrete) type so far.
    var prev_inst = instructions[0];
    for (instructions[1..]) |next_inst| {
        if (next_inst.ty.eql(prev_inst.ty))
            continue;
        if (next_inst.ty.zigTypeTag() == .NoReturn)
            continue;
        if (prev_inst.ty.zigTypeTag() == .NoReturn) {
            prev_inst = next_inst;
            continue;
        }
        if (next_inst.ty.zigTypeTag() == .Undefined)
            continue;
        if (prev_inst.ty.zigTypeTag() == .Undefined) {
            prev_inst = next_inst;
            continue;
        }
        if (prev_inst.ty.isInt() and
            next_inst.ty.isInt() and
            prev_inst.ty.isSignedInt() == next_inst.ty.isSignedInt())
        {
            if (prev_inst.ty.intInfo(self.getTarget()).bits < next_inst.ty.intInfo(self.getTarget()).bits) {
                prev_inst = next_inst;
            }
            continue;
        }
        if (prev_inst.ty.isFloat() and next_inst.ty.isFloat()) {
            if (prev_inst.ty.floatBits(self.getTarget()) < next_inst.ty.floatBits(self.getTarget())) {
                prev_inst = next_inst;
            }
            continue;
        }

        // TODO error notes pointing out each type
        return self.fail(scope, next_inst.src, "incompatible types: '{}' and '{}'", .{ prev_inst.ty, next_inst.ty });
    }

    return prev_inst.ty;
}

/// Coerces `inst` to `dest_type`, returning a (possibly new) instruction of
/// the destination type, or a compile error if no coercion applies.
/// Handles, in order: identity, in-memory bitcast, undefined, null-to-optional,
/// T-to-?T, *[N]T-to-[]T, comptime numbers, integer widening, float widening.
pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
    // If the types are the same, we can return the operand.
    if (dest_type.eql(inst.ty))
        return inst;

    const in_memory_result = coerceInMemoryAllowed(dest_type, inst.ty);
    if (in_memory_result == .ok) {
        return self.bitcast(scope, dest_type, inst);
    }

    // undefined to anything
    if (inst.value()) |val| {
        if (val.isUndef() or inst.ty.zigTypeTag() == .Undefined) {
            return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
        }
    }
    assert(inst.ty.zigTypeTag() != .Undefined);

    // null to ?T
    if (dest_type.zigTypeTag() == .Optional and inst.ty.zigTypeTag() == .Null) {
        return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = Value.initTag(.null_value) });
    }

    // T to ?T
    if (dest_type.zigTypeTag() == .Optional) {
        var buf: Type.Payload.PointerSimple = undefined;
        const child_type = dest_type.optionalChild(&buf);
        if (child_type.eql(inst.ty)) {
            return self.wrapOptional(scope, dest_type, inst);
        } else if (try self.coerceNum(scope, child_type, inst)) |some| {
            return self.wrapOptional(scope, dest_type, some);
        }
    }

    // *[N]T to []T
    // Only allowed when constness does not weaken (mut ptr may become const slice).
    if (inst.ty.isSinglePointer() and dest_type.isSlice() and
        (!inst.ty.isConstPtr() or dest_type.isConstPtr()))
    {
        const array_type = inst.ty.elemType();
        const dst_elem_type = dest_type.elemType();
        if (array_type.zigTypeTag() == .Array and
            coerceInMemoryAllowed(dst_elem_type, array_type.elemType()) == .ok)
        {
            return self.coerceArrayPtrToSlice(scope, dest_type, inst);
        }
    }

    // comptime known number to other number
    if (try self.coerceNum(scope, dest_type, inst)) |some|
        return some;

    // integer widening
    if (inst.ty.zigTypeTag() == .Int and dest_type.zigTypeTag() == .Int) {
        assert(inst.value() == null); // handled above

        const src_info = inst.ty.intInfo(self.getTarget());
        const dst_info = dest_type.intInfo(self.getTarget());
        // NOTE(review): the second clause checks `src_info.signed and !dst_info.signed`,
        // i.e. signed -> unsigned, but the comment below describes unsigned -> signed.
        // Looks like the condition may be inverted — verify against intent.
        if ((src_info.signed == dst_info.signed and dst_info.bits >= src_info.bits) or
            // small enough unsigned ints can get casted to large enough signed ints
            (src_info.signed and !dst_info.signed and dst_info.bits > src_info.bits))
        {
            const b = try self.requireRuntimeBlock(scope, inst.src);
            return self.addUnOp(b, inst.src, dest_type, .intcast, inst);
        }
    }

    // float widening
    if (inst.ty.zigTypeTag() == .Float and dest_type.zigTypeTag() == .Float) {
        assert(inst.value() == null); // handled above

        const src_bits = inst.ty.floatBits(self.getTarget());
        const dst_bits = dest_type.floatBits(self.getTarget());
        if (dst_bits >= src_bits) {
            const b = try self.requireRuntimeBlock(scope, inst.src);
            return self.addUnOp(b, inst.src, dest_type, .floatcast, inst);
        }
    }

    return self.fail(scope, inst.src, "expected {}, found {}", .{ dest_type, inst.ty });
}

/// Attempts to coerce a comptime-known numeric value to `dest_type`.
/// Returns null when `inst` is not comptime-known or no numeric rule applies;
/// returns a compile error when the value cannot be represented.
pub fn coerceNum(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !?*Inst {
    const val = inst.value() orelse return null;
    const src_zig_tag = inst.ty.zigTypeTag();
    const dst_zig_tag = dest_type.zigTypeTag();
    if (dst_zig_tag == .ComptimeInt or dst_zig_tag == .Int) {
        if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
            if (val.floatHasFraction()) {
                return self.fail(scope, inst.src, "fractional component prevents float value {} from being casted to type '{}'", .{ val, inst.ty });
            }
            return self.fail(scope, inst.src, "TODO float to int", .{});
        } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
            if (!val.intFitsInType(dest_type, self.getTarget())) {
                return self.fail(scope, inst.src, "type {} cannot represent integer value {}", .{ inst.ty, val });
            }
            return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
        }
    } else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) {
        if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
            // Lossy float casts are compile errors at comptime.
            const res = val.floatCast(scope.arena(), dest_type, self.getTarget()) catch |err| switch (err) {
                error.Overflow => return self.fail(
                    scope,
                    inst.src,
                    "cast of value {} to type '{}' loses information",
                    .{ val, dest_type },
                ),
                error.OutOfMemory => return error.OutOfMemory,
            };
            return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = res });
        } else if (src_zig_tag ==
.ComptimeInt) {
            return self.fail(scope, inst.src, "TODO int to float", .{});
        }
    }
    return null;
}

/// Analyzes a store through a pointer: coerces the value to the pointee type
/// and emits a runtime store instruction. Stores to zero-bit types are no-ops.
pub fn storePtr(self: *Module, scope: *Scope, src: usize, ptr: *Inst, uncasted_value: *Inst) !*Inst {
    if (ptr.ty.isConstPtr())
        return self.fail(scope, src, "cannot assign to constant", .{});

    const elem_ty = ptr.ty.elemType();
    const value = try self.coerce(scope, elem_ty, uncasted_value);
    // A type with exactly one possible value needs no runtime store.
    if (elem_ty.onePossibleValue() != null)
        return self.constVoid(scope, src);

    // TODO handle comptime pointer writes
    // TODO handle if the element type requires comptime

    const b = try self.requireRuntimeBlock(scope, src);
    return self.addBinOp(b, src, Type.initTag(.void), .store, ptr, value);
}

/// Reinterprets `inst` as `dest_type` without changing its bits.
pub fn bitcast(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
    if (inst.value()) |val| {
        // Keep the comptime Value representation; take the new type.
        return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
    }
    // TODO validate the type size and other compile errors
    const b = try self.requireRuntimeBlock(scope, inst.src);
    return self.addUnOp(b, inst.src, dest_type, .bitcast, inst);
}

/// Coerces a pointer-to-array into a slice of the element type.
/// Only the comptime path is implemented so far.
fn coerceArrayPtrToSlice(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
    if (inst.value()) |val| {
        // The comptime Value representation is compatible with both types.
        return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
    }
    return self.fail(scope, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{});
}

/// Records a compile error at `src` and returns error.AnalysisFail.
/// All semantic-analysis errors funnel through here.
pub fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, args: anytype) InnerError {
    @setCold(true);
    const err_msg = try Compilation.ErrorMsg.create(self.gpa, src, format, args);
    return self.failWithOwnedErrorMsg(scope, src, err_msg);
}

/// Like `fail`, but locates the error at a token index.
pub fn failTok(
    self: *Module,
    scope: *Scope,
    token_index: ast.TokenIndex,
    comptime format: []const u8,
    args: anytype,
) InnerError {
    @setCold(true);
    const src = scope.tree().token_locs[token_index].start;
    return self.fail(scope, src, format, args);
}

/// Like `fail`, but locates the error at an AST node's first token.
pub fn failNode(
    self: *Module,
    scope: *Scope,
    ast_node: *ast.Node,
    comptime format: []const u8,
    args: anytype,
) InnerError {
    @setCold(true);
    const src = scope.tree().token_locs[ast_node.firstToken()].start;
    return self.fail(scope, src, format, args);
}

/// Takes ownership of `err_msg`, marks the failing decl/file according to the
/// scope kind, and returns error.AnalysisFail.
fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Compilation.ErrorMsg) InnerError {
    {
        // Reserve capacity first so the putAssumeCapacity calls below cannot fail;
        // if reserving fails, the message must still be freed.
        errdefer err_msg.destroy(self.gpa);
        try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
        try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1);
    }
    switch (scope.tag) {
        .decl => {
            const decl = scope.cast(Scope.DeclAnalysis).?.decl;
            decl.analysis = .sema_failure;
            decl.generation = self.generation;
            self.failed_decls.putAssumeCapacityNoClobber(decl, err_msg);
        },
        .block => {
            const block = scope.cast(Scope.Block).?;
            if (block.func) |func| {
                func.analysis = .sema_failure;
            } else {
                block.decl.analysis = .sema_failure;
                block.decl.generation = self.generation;
            }
            self.failed_decls.putAssumeCapacityNoClobber(block.decl, err_msg);
        },
        .gen_zir => {
            const gen_zir = scope.cast(Scope.GenZIR).?;
            gen_zir.decl.analysis = .sema_failure;
            gen_zir.decl.generation = self.generation;
            self.failed_decls.putAssumeCapacityNoClobber(gen_zir.decl, err_msg);
        },
        .local_val => {
            const gen_zir = scope.cast(Scope.LocalVal).?.gen_zir;
            gen_zir.decl.analysis = .sema_failure;
            gen_zir.decl.generation = self.generation;
            self.failed_decls.putAssumeCapacityNoClobber(gen_zir.decl, err_msg);
        },
        .local_ptr => {
            const gen_zir = scope.cast(Scope.LocalPtr).?.gen_zir;
            gen_zir.decl.analysis = .sema_failure;
            gen_zir.decl.generation = self.generation;
            self.failed_decls.putAssumeCapacityNoClobber(gen_zir.decl, err_msg);
        },
        .zir_module => {
            const zir_module = scope.cast(Scope.ZIRModule).?;
            zir_module.status = .loaded_sema_failure;
            self.failed_files.putAssumeCapacityNoClobber(scope, err_msg);
        },
        // These scope kinds never reach semantic analysis failure directly.
        .file => unreachable,
        .container => unreachable,
    }
    return error.AnalysisFail;
}

/// Result of checking whether two types share an in-memory representation.
const InMemoryCoercionResult = enum {
    ok,
    no_match,
};

/// Returns .ok when a value of `src_type` may be reinterpreted as `dest_type`
/// without changing bits. Currently only exact type equality qualifies.
fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult {
    if (dest_type.eql(src_type))
        return .ok;

    // TODO: implement more of this function
    return .no_match;
}

/// Compares two source hashes by value.
fn srcHashEql(a: std.zig.SrcHash, b: std.zig.SrcHash) bool {
    return @bitCast(u128, a) == @bitCast(u128, b);
}

/// Adds two integer Values of arbitrary precision. Caller owns the result's
/// limb allocation.
pub fn intAdd(allocator: *Allocator, lhs: Value, rhs: Value) !Value {
    // TODO is this a performance issue? maybe we should try the operation without
    // resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
    var rhs_space: Value.BigIntSpace = undefined;
    const lhs_bigint = lhs.toBigInt(&lhs_space);
    const rhs_bigint = rhs.toBigInt(&rhs_space);
    // A sum needs at most max(len) + 1 limbs.
    const limbs = try allocator.alloc(
        std.math.big.Limb,
        std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
    );
    var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
    result_bigint.add(lhs_bigint, rhs_bigint);
    const result_limbs = result_bigint.limbs[0..result_bigint.len];

    // Wrap the limbs in the sign-appropriate Value payload.
    const val_payload = if (result_bigint.positive) blk: {
        const val_payload = try allocator.create(Value.Payload.IntBigPositive);
        val_payload.* = .{ .limbs = result_limbs };
        break :blk &val_payload.base;
    } else blk: {
        const val_payload = try allocator.create(Value.Payload.IntBigNegative);
        val_payload.* = .{ .limbs = result_limbs };
        break :blk &val_payload.base;
    };

    return Value.initPayload(val_payload);
}

/// Subtracts two integer Values of arbitrary precision (lhs - rhs).
/// Caller owns the result's limb allocation.
pub fn intSub(allocator: *Allocator, lhs: Value, rhs: Value) !Value {
    // TODO is this a performance issue? maybe we should try the operation without
    // resorting to BigInt first.
    var lhs_space: Value.BigIntSpace = undefined;
    var rhs_space: Value.BigIntSpace = undefined;
    const lhs_bigint = lhs.toBigInt(&lhs_space);
    const rhs_bigint = rhs.toBigInt(&rhs_space);
    const limbs = try allocator.alloc(
        std.math.big.Limb,
        std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
    );
    var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
    result_bigint.sub(lhs_bigint, rhs_bigint);
    const result_limbs = result_bigint.limbs[0..result_bigint.len];

    const val_payload = if (result_bigint.positive) blk: {
        const val_payload = try allocator.create(Value.Payload.IntBigPositive);
        val_payload.* = .{ .limbs = result_limbs };
        break :blk &val_payload.base;
    } else blk: {
        const val_payload = try allocator.create(Value.Payload.IntBigNegative);
        val_payload.* = .{ .limbs = result_limbs };
        break :blk &val_payload.base;
    };

    return Value.initPayload(val_payload);
}

/// Adds two comptime float Values at the precision of `float_type`
/// (comptime_float computes at 128 bits). f16 and f128 are TODO.
pub fn floatAdd(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs: Value, rhs: Value) !Value {
    var bit_count = switch (float_type.tag()) {
        .comptime_float => 128,
        else => float_type.floatBits(self.getTarget()),
    };

    const allocator = scope.arena();
    const val_payload = switch (bit_count) {
        16 => {
            return self.fail(scope, src, "TODO Implement addition for soft floats", .{});
        },
        32 => blk: {
            const lhs_val = lhs.toFloat(f32);
            const rhs_val = rhs.toFloat(f32);
            const val_payload = try allocator.create(Value.Payload.Float_32);
            val_payload.* = .{ .val = lhs_val + rhs_val };
            break :blk &val_payload.base;
        },
        64 => blk: {
            const lhs_val = lhs.toFloat(f64);
            const rhs_val = rhs.toFloat(f64);
            const val_payload = try allocator.create(Value.Payload.Float_64);
            val_payload.* = .{ .val = lhs_val + rhs_val };
            break :blk &val_payload.base;
        },
        128 => {
            return self.fail(scope, src, "TODO Implement addition for big floats", .{});
        },
        else => unreachable,
    };

    return Value.initPayload(val_payload);
}

/// Subtracts two comptime float Values (lhs - rhs) at the precision of
/// `float_type`. Mirrors `floatAdd`; f16 and f128 are TODO.
pub fn floatSub(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs: Value, rhs:
Value) !Value {
    var bit_count = switch (float_type.tag()) {
        .comptime_float => 128,
        else => float_type.floatBits(self.getTarget()),
    };

    const allocator = scope.arena();
    const val_payload = switch (bit_count) {
        16 => {
            // NOTE(review): "substraction" in these messages is a typo for "subtraction"
            // (runtime string, left untouched here).
            return self.fail(scope, src, "TODO Implement substraction for soft floats", .{});
        },
        32 => blk: {
            const lhs_val = lhs.toFloat(f32);
            const rhs_val = rhs.toFloat(f32);
            const val_payload = try allocator.create(Value.Payload.Float_32);
            val_payload.* = .{ .val = lhs_val - rhs_val };
            break :blk &val_payload.base;
        },
        64 => blk: {
            const lhs_val = lhs.toFloat(f64);
            const rhs_val = rhs.toFloat(f64);
            const val_payload = try allocator.create(Value.Payload.Float_64);
            val_payload.* = .{ .val = lhs_val - rhs_val };
            break :blk &val_payload.base;
        },
        128 => {
            return self.fail(scope, src, "TODO Implement substraction for big floats", .{});
        },
        else => unreachable,
    };

    return Value.initPayload(val_payload);
}

/// Constructs a pointer/slice type with default alignment and no sentinel,
/// allocated in the scope's arena. `[]const u8` uses a shared static tag.
pub fn simplePtrType(self: *Module, scope: *Scope, src: usize, elem_ty: Type, mutable: bool, size: std.builtin.TypeInfo.Pointer.Size) Allocator.Error!Type {
    if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) {
        return Type.initTag(.const_slice_u8);
    }
    // TODO stage1 type inference bug
    const T = Type.Tag;

    const type_payload = try scope.arena().create(Type.Payload.PointerSimple);
    type_payload.* = .{
        .base = .{
            .tag = switch (size) {
                .One => if (mutable) T.single_mut_pointer else T.single_const_pointer,
                .Many => if (mutable) T.many_mut_pointer else T.many_const_pointer,
                .C => if (mutable) T.c_mut_pointer else T.c_const_pointer,
                .Slice => if (mutable) T.mut_slice else T.const_slice,
            },
        },
        .pointee_type = elem_ty,
    };
    return Type.initPayload(&type_payload.base);
}

/// Constructs a fully-specified pointer type (sentinel, alignment, bit range,
/// allowzero, volatile), allocated in the scope's arena.
pub fn ptrType(
    self: *Module,
    scope: *Scope,
    src: usize,
    elem_ty: Type,
    sentinel: ?Value,
    @"align": u32,
    bit_offset: u16,
    host_size: u16,
    mutable: bool,
    @"allowzero": bool,
    @"volatile": bool,
    size: std.builtin.TypeInfo.Pointer.Size,
) Allocator.Error!Type {
    // A bit-pointer's offset must lie inside its host integer.
    assert(host_size == 0 or bit_offset < host_size * 8);

    // TODO check if type can be represented by simplePtrType
    const type_payload = try scope.arena().create(Type.Payload.Pointer);
    type_payload.* = .{
        .pointee_type = elem_ty,
        .sentinel = sentinel,
        .@"align" = @"align",
        .bit_offset = bit_offset,
        .host_size = host_size,
        .@"allowzero" = @"allowzero",
        .mutable = mutable,
        .@"volatile" = @"volatile",
        .size = size,
    };
    return Type.initPayload(&type_payload.base);
}

/// Constructs ?T for the given child type. Single pointers get a dedicated
/// payload kind so the optional can use the null pointer as its null value.
pub fn optionalType(self: *Module, scope: *Scope, child_type: Type) Allocator.Error!Type {
    return Type.initPayload(switch (child_type.tag()) {
        .single_const_pointer => blk: {
            const payload = try scope.arena().create(Type.Payload.PointerSimple);
            payload.* = .{
                .base = .{ .tag = .optional_single_const_pointer },
                .pointee_type = child_type.elemType(),
            };
            break :blk &payload.base;
        },
        .single_mut_pointer => blk: {
            const payload = try scope.arena().create(Type.Payload.PointerSimple);
            payload.* = .{
                .base = .{ .tag = .optional_single_mut_pointer },
                .pointee_type = child_type.elemType(),
            };
            break :blk &payload.base;
        },
        else => blk: {
            const payload = try scope.arena().create(Type.Payload.Optional);
            payload.* = .{
                .child_type = child_type,
            };
            break :blk &payload.base;
        },
    });
}

/// Constructs an array type [len(:sentinel)]elem_type, using compact payloads
/// for the common `[N]u8` and `[N:0]u8` cases.
pub fn arrayType(self: *Module, scope: *Scope, len: u64, sentinel: ?Value, elem_type: Type) Allocator.Error!Type {
    if (elem_type.eql(Type.initTag(.u8))) {
        if (sentinel) |some| {
            if (some.eql(Value.initTag(.zero))) {
                const payload = try scope.arena().create(Type.Payload.Array_u8_Sentinel0);
                payload.* = .{
                    .len = len,
                };
                return Type.initPayload(&payload.base);
            }
        } else {
            const payload = try scope.arena().create(Type.Payload.Array_u8);
            payload.* = .{
                .len = len,
            };
            return Type.initPayload(&payload.base);
        }
    }

    if (sentinel) |some| {
        const payload = try scope.arena().create(Type.Payload.ArraySentinel);
        payload.* = .{
            .len = len,
            .sentinel = some,
            .elem_type = elem_type,
        };
        return Type.initPayload(&payload.base);
    }

    const payload = try scope.arena().create(Type.Payload.Array);
    payload.* = .{
        .len = len,
        .elem_type = elem_type,
    };
    return Type.initPayload(&payload.base);
}

/// Constructs the error union type `error_set!payload`, with a static tag for
/// the common `anyerror!void`.
pub fn errorUnionType(self: *Module, scope: *Scope, error_set: Type, payload: Type) Allocator.Error!Type {
    assert(error_set.zigTypeTag() == .ErrorSet);
    if (error_set.eql(Type.initTag(.anyerror)) and payload.eql(Type.initTag(.void))) {
        return Type.initTag(.anyerror_void_error_union);
    }

    const result = try scope.arena().create(Type.Payload.ErrorUnion);
    result.* = .{
        .error_set = error_set,
        .payload = payload,
    };
    return Type.initPayload(&result.base);
}

/// Constructs the type `anyframe->return_type`.
pub fn anyframeType(self: *Module, scope: *Scope, return_type: Type) Allocator.Error!Type {
    const result = try scope.arena().create(Type.Payload.AnyFrame);
    result.* = .{
        .return_type = return_type,
    };
    return Type.initPayload(&result.base);
}

/// Debug helper: prints one instruction with its type and source location.
pub fn dumpInst(self: *Module, scope: *Scope, inst: *Inst) void {
    const zir_module = scope.namespace();
    const source = zir_module.getSource(self) catch @panic("dumpInst failed to get source");
    const loc = std.zig.findLineColumn(source, inst.src);
    if (inst.tag == .constant) {
        std.debug.print("constant ty={} val={} src={}:{}:{}\n", .{
            inst.ty,
            inst.castTag(.constant).?.val,
            zir_module.subFilePath(),
            loc.line + 1,
            loc.column + 1,
        });
    } else if (inst.deaths == 0) {
        std.debug.print("{} ty={} src={}:{}:{}\n", .{
            @tagName(inst.tag),
            inst.ty,
            zir_module.subFilePath(),
            loc.line + 1,
            loc.column + 1,
        });
    } else {
        std.debug.print("{} ty={} deaths={b} src={}:{}:{}\n", .{
            @tagName(inst.tag),
            inst.ty,
            inst.deaths,
            zir_module.subFilePath(),
            loc.line + 1,
            loc.column + 1,
        });
    }
}

/// Identifies which safety check tripped; used when emitting panic branches.
pub const PanicId = enum {
    unreach,
    unwrap_null,
};

/// Emits `if (!ok) @panic(...)` as IR: a block containing a condbr whose
/// then-branch breaks out (the ok path) and whose else-branch panics.
pub fn addSafetyCheck(mod: *Module, parent_block: *Scope.Block, ok: *Inst, panic_id: PanicId) !void {
    const block_inst = try parent_block.arena.create(Inst.Block);
    block_inst.* = .{
        .base = .{
            .tag = Inst.Block.base_tag,
            .ty = Type.initTag(.void),
            .src = ok.src,
        },
        .body = .{
            .instructions = try parent_block.arena.alloc(*Inst, 1), // Only need space for the condbr.
        },
    };

    const ok_body: ir.Body = .{
        .instructions = try parent_block.arena.alloc(*Inst, 1), // Only need space for the brvoid.
    };
    const brvoid = try parent_block.arena.create(Inst.BrVoid);
    brvoid.* = .{
        .base = .{
            .tag = .brvoid,
            .ty = Type.initTag(.noreturn),
            .src = ok.src,
        },
        .block = block_inst,
    };
    ok_body.instructions[0] = &brvoid.base;

    // Build the failing branch in a temporary sub-block, then copy its
    // instructions into the parent's arena.
    var fail_block: Scope.Block = .{
        .parent = parent_block,
        .func = parent_block.func,
        .decl = parent_block.decl,
        .instructions = .{},
        .arena = parent_block.arena,
        .is_comptime = parent_block.is_comptime,
    };
    defer fail_block.instructions.deinit(mod.gpa);

    _ = try mod.safetyPanic(&fail_block, ok.src, panic_id);
    const fail_body: ir.Body = .{ .instructions = try parent_block.arena.dupe(*Inst, fail_block.instructions.items) };

    const condbr = try parent_block.arena.create(Inst.CondBr);
    condbr.* = .{
        .base = .{
            .tag = .condbr,
            .ty = Type.initTag(.noreturn),
            .src = ok.src,
        },
        .condition = ok,
        .then_body = ok_body,
        .else_body = fail_body,
    };
    block_inst.body.instructions[0] = &condbr.base;

    try parent_block.instructions.append(mod.gpa, &block_inst.base);
}

/// Emits the panic sequence for a failed safety check.
pub fn safetyPanic(mod: *Module, block: *Scope.Block, src: usize, panic_id: PanicId) !*Inst {
    // TODO Once we have a panic function to call, call it here instead of breakpoint.
    _ = try mod.addNoOp(block, src, Type.initTag(.void), .breakpoint);
    return mod.addNoOp(block, src, Type.initTag(.noreturn), .unreach);
}

/// Returns the compilation's target machine.
pub fn getTarget(self: Module) Target {
    return self.comp.bin_file.options.target;
}

/// Returns the compilation's optimization mode.
pub fn optimizeMode(self: Module) std.builtin.Mode {
    return self.comp.bin_file.options.optimize_mode;
}
src/Module.zig
const std = @import("std");
const clap = @import("clap");
const version = @import("version");
const zzz = @import("zzz");
const api = @import("api.zig");
const Project = @import("Project.zig");
const Lockfile = @import("Lockfile.zig");
const Dependency = @import("Dependency.zig");
const DependencyTree = @import("DependencyTree.zig");
usingnamespace @import("common.zig");

const Allocator = std.mem.Allocator;

/// Bundles everything a fetch produces: open project/lock files, the parsed
/// project and lockfile, and the generated dependency trees. `deinit` saves
/// the lockfile and releases everything in reverse acquisition order.
const FetchContext = struct {
    project_file: std.fs.File,
    lock_file: std.fs.File,

    project: Project,
    lockfile: Lockfile,
    dep_tree: *DependencyTree,
    build_dep_tree: *DependencyTree,

    fn deinit(self: *FetchContext) void {
        // Best-effort save; a failed save must not prevent cleanup.
        self.lockfile.save(self.lock_file) catch {};

        self.build_dep_tree.destroy();
        self.dep_tree.destroy();
        self.lockfile.deinit();
        self.project.deinit();

        // TODO: delete lockfile if it doesn't have anything in it
        self.lock_file.close();
        self.project_file.close();
    }
};

/// Reads gyro.zzz and gyro.lock, builds both dependency trees, and fetches
/// all locked packages. Caller must `deinit` the returned context.
pub fn fetchImpl(allocator: *Allocator) !FetchContext {
    const project_file = std.fs.cwd().openFile(
        "gyro.zzz",
        .{ .read = true },
    ) catch |err| {
        // A missing project file gets a friendly message instead of a raw error.
        return if (err == error.FileNotFound) blk: {
            std.log.err("Missing gyro.zzz project file", .{});
            break :blk error.Explained;
        } else err;
    };
    errdefer project_file.close();

    // truncate = false: keep any existing lock contents for parsing below.
    const lock_file = try std.fs.cwd().createFile(
        "gyro.lock",
        .{ .truncate = false, .read = true },
    );
    errdefer lock_file.close();

    var project = try Project.fromFile(allocator, project_file);
    errdefer project.deinit();

    var lockfile = try Lockfile.fromFile(allocator, lock_file);
    errdefer lockfile.deinit();

    const dep_tree = try DependencyTree.generate(
        allocator,
        &lockfile,
        project.dependencies,
    );
    errdefer dep_tree.destroy();

    const build_dep_tree = try DependencyTree.generate(
        allocator,
        &lockfile,
        project.build_dependencies,
    );
    errdefer build_dep_tree.destroy();

    try lockfile.fetchAll();
    return FetchContext{
        .project_file = project_file,
        .lock_file = lock_file,

        .project = project,
        .lockfile = lockfile,
        .dep_tree = dep_tree,
        .build_dep_tree = build_dep_tree,
    };
}

/// `gyro fetch`: fetch all dependencies, then save the lockfile on teardown.
pub fn fetch(allocator: *Allocator) !void {
    var ctx = try fetchImpl(allocator);
    defer ctx.deinit();
}

/// `gyro update`: discard the lockfile and re-fetch everything fresh.
pub fn update(allocator: *Allocator) !void {
    try std.fs.cwd().deleteFile("gyro.lock");
    try fetch(allocator);
}

/// Shape of the JSON emitted by `zig env`.
const EnvInfo = struct {
    zig_exe: []const u8,
    lib_dir: []const u8,
    std_dir: []const u8,
    global_cache_dir: []const u8,
    version: []const u8,
};

/// `gyro build`: fetch dependencies, then run the project's build.zig through
/// a copied build_runner with the gyro dependency package injected.
pub fn build(allocator: *Allocator, args: *clap.args.OsIterator) !void {
    var ctx = try fetchImpl(allocator);
    defer ctx.deinit();

    std.fs.cwd().access("build.zig", .{ .read = true }) catch |err| {
        return if (err == error.FileNotFound) blk: {
            std.log.err("no build.zig in current working directory", .{});
            break :blk error.Explained;
        } else err;
    };

    // NOTE(review): `fifo` appears unused beyond init/deinit — candidate for removal.
    var fifo = std.fifo.LinearFifo(u8, .{ .Dynamic = {} }).init(allocator);
    defer fifo.deinit();

    // Ask the installed compiler for its paths via `zig env`.
    const result = try std.ChildProcess.exec(.{
        .allocator = allocator,
        .argv = &[_][]const u8{ "zig", "env" },
    });
    defer {
        allocator.free(result.stdout);
        allocator.free(result.stderr);
    }

    switch (result.term) {
        .Exited => |val| {
            if (val != 0) {
                std.log.err("zig compiler returned error code: {}", .{val});
                return error.Explained;
            }
        },
        .Signal => |sig| {
            std.log.err("zig compiler interrupted by signal: {}", .{sig});
            return error.Explained;
        },
        else => return error.UnknownTerm,
    }

    const parse_opts = std.json.ParseOptions{ .allocator = allocator };
    const env = try std.json.parse(
        EnvInfo,
        &std.json.TokenStream.init(result.stdout),
        parse_opts,
    );
    defer std.json.parseFree(EnvInfo, env, parse_opts);

    // Copy the stock build runner next to the project so we can extend it.
    const path = try std.fs.path.join(
        allocator,
        &[_][]const u8{ env.std_dir, "special" },
    );
    defer allocator.free(path);

    var special_dir = try std.fs.openDirAbsolute(
        path,
        .{ .access_sub_paths = true },
    );
    defer special_dir.close();

    try special_dir.copyFile(
        "build_runner.zig",
        std.fs.cwd(),
        "build_runner.zig",
        .{},
    );
    defer std.fs.cwd().deleteFile("build_runner.zig") catch {};

    // TODO: configurable local cache
    const pkgs = try ctx.build_dep_tree.assemblePkgs(std.build.Pkg{
        .name = "gyro",
        .path = "deps.zig",
});

    var arena = std.heap.ArenaAllocator.init(allocator);
    defer arena.deinit();

    const b = try std.build.Builder.create(
        &arena.allocator,
        env.zig_exe,
        ".",
        "zig-cache",
        env.global_cache_dir,
    );
    defer b.destroy();

    // Generate deps.zig for the project's own build.zig to import.
    const deps_file = try std.fs.cwd().createFile("deps.zig", .{ .truncate = true });
    errdefer std.fs.cwd().deleteFile("deps.zig") catch {};
    defer deps_file.close();

    try ctx.dep_tree.printZig(deps_file.writer());

    b.resolveInstallPrefix();
    const runner = b.addExecutable("build", "build_runner.zig");
    runner.addPackage(std.build.Pkg{
        .name = "@build",
        .path = "build.zig",
        .dependencies = pkgs,
    });

    // The runner re-receives the same compiler paths, then any user args.
    const run_cmd = runner.run();
    run_cmd.addArgs(&[_][]const u8{
        env.zig_exe,
        ".",
        "zig-cache",
        env.global_cache_dir,
    });

    while (try args.next()) |arg| run_cmd.addArg(arg);
    b.default_step.dependOn(&run_cmd.step);
    if (b.validateUserInputDidItFail()) {
        return error.UserInputFailed;
    }

    b.make(&[_][]const u8{"install"}) catch |err| {
        switch (err) {
            error.UncleanExit => {
                std.log.err("Compiler had an unclean exit", .{});
                return error.Explained;
            },
            else => return err,
        }
    };
}

/// `gyro package`: bundle the named packages (or all of them when `names` is
/// empty) into `output_dir` (or the cwd when null).
pub fn package(
    allocator: *Allocator,
    output_dir: ?[]const u8,
    names: []const []const u8,
) !void {
    const file = try std.fs.cwd().openFile("gyro.zzz", .{ .read = true });
    defer file.close();

    var project = try Project.fromFile(allocator, file);
    defer project.deinit();

    if (project.packages.count() == 0) {
        std.log.err("there are no packages to package!", .{});
        return error.Explained;
    }

    // Validate every requested name before doing any work.
    var found_not_pkg = false;
    for (names) |name|
        if (!project.contains(name)) {
            std.log.err("{s} is not a package", .{name});
            found_not_pkg = true;
        };

    if (found_not_pkg) return error.Explained;

    var write_dir = try std.fs.cwd().openDir(
        if (output_dir) |output| output else ".",
        .{ .iterate = true, .access_sub_paths = true },
    );
    defer write_dir.close();

    var read_dir = try std.fs.cwd().openDir(".", .{ .iterate = true });
    defer read_dir.close();

    if (names.len > 0) {
        for (names) |name| try project.get(name).?.bundle(read_dir, write_dir);
    } else {
        var it = project.iterator();
        while (it.next()) |pkg| try pkg.bundle(read_dir, write_dir);
    }
}

/// Writes one `key: "value"` line to the zzz output when `json_key` exists as
/// a string in the JSON object `root`.
fn maybePrintKey(
    json_key: []const u8,
    zzz_key: []const u8,
    root: anytype,
    writer: anytype,
) !void {
    if (root.get(json_key)) |val| {
        switch (val) {
            .String => |str| try writer.print("  {s}: \"{s}\"\n", .{ zzz_key, str }),
            else => {},
        }
    }
}

/// `gyro init`: create a fresh gyro.zzz, optionally pre-populated from a
/// GitHub repo's metadata (description, license, url, topics) when `link`
/// is a "user/repo" reference.
pub fn init(
    allocator: *Allocator,
    link: ?[]const u8,
) !void {
    const file = std.fs.cwd().createFile("gyro.zzz", .{ .exclusive = true }) catch |err| {
        return if (err == error.PathAlreadyExists) blk: {
            std.log.err("gyro.zzz already exists", .{});
            break :blk error.Explained;
        } else err;
    };
    errdefer std.fs.cwd().deleteFile("gyro.zzz") catch {};
    defer file.close();

    // No link given: leave an empty project file.
    const info = try parseUserRepo(link orelse return);

    var repo_tree = try api.getGithubRepo(allocator, info.user, info.repo);
    defer repo_tree.deinit();

    var topics_tree = try api.getGithubTopics(allocator, info.user, info.repo);
    defer topics_tree.deinit();

    if (repo_tree.root != .Object or topics_tree.root != .Object) {
        std.log.err("Invalid JSON response from Github", .{});
        return error.Explained;
    }

    const repo_root = repo_tree.root.Object;
    const topics_root = topics_tree.root.Object;
    const writer = file.writer();
    try writer.print(
        \\pkgs:
        \\  {s}:
        \\    version: 0.0.0
        \\
    , .{try normalizeName(info.repo)});

    try maybePrintKey("description", "description", repo_root, writer);

    // pretty gross ngl
    if (repo_root.get("license")) |license| {
        switch (license) {
            .Object => |obj| {
                if (obj.get("spdx_id")) |spdx| {
                    switch (spdx) {
                        .String => |id| {
                            try writer.print("    license: {s}\n", .{id});
                        },
                        else => {},
                    }
                }
            },
            else => {},
        }
    }

    try maybePrintKey("html_url", "source_url", repo_root, writer);

    // Copy over repo topics as tags, skipping anything containing "zig".
    if (topics_root.get("names")) |topics| {
        switch (topics) {
            .Array => |arr| {
                if (arr.items.len > 0) {
                    try writer.print("    tags:\n", .{});
                    for (arr.items) |topic| {
                        switch (topic) {
                            .String => |str| if (std.mem.indexOf(u8, str, "zig") == null) {
                                try writer.print("      {s}\n", .{str});
                            },
                            else => {},
                        }
                    }
                }
            },
else => {}, } } try writer.print( \\ \\ root: src/main.zig \\ files: \\ README.md \\ LICENSE \\ , .{}); } pub fn add(allocator: *Allocator, targets: []const []const u8, build_deps: bool, github: bool) !void { const repository = api.default_repo; var arena = std.heap.ArenaAllocator.init(allocator); defer arena.deinit(); const file = try std.fs.cwd().createFile("gyro.zzz", .{ .truncate = false, .read = true, .exclusive = false, }); defer file.close(); const text = try file.reader().readAllAlloc(&arena.allocator, std.math.maxInt(usize)); var tree = zzz.ZTree(1, 1000){}; var root = try tree.appendText(text); const deps_key = if (build_deps) "build_deps" else "deps"; var deps = zFindChild(root, deps_key) orelse try tree.addNode(root, .{ .String = deps_key }); var aliases = std.StringHashMap(void).init(allocator); defer aliases.deinit(); var it = ZChildIterator.init(deps); while (it.next()) |dep_node| { var dep = try Dependency.fromZNode(dep_node); try aliases.put(dep.alias, {}); } // TODO: needs to be prettier later for (targets) |target| { const info = try parseUserRepo(target); if (aliases.contains(try normalizeName(info.repo))) { std.log.err("'{s}' alias exists in gyro.zzz", .{info.repo}); return error.Explained; } } for (targets) |target| { const info = try parseUserRepo(target); const dep = if (github) blk: { var value_tree = try api.getGithubRepo(&arena.allocator, info.user, info.repo); if (value_tree.root != .Object) { std.log.err("Invalid JSON response from Github", .{}); return error.Explained; } const root_json = value_tree.root.Object; const default_branch = if (root_json.get("default_branch")) |val| switch (val) { .String => |str| str, else => "main", } else "main"; std.log.debug("default_branch: {s}", .{default_branch}); const text_opt = try api.getGithubGyroFile( &arena.allocator, info.user, info.repo, try api.getHeadCommit(&arena.allocator, info.user, info.repo, default_branch), ); const root_file = if (text_opt) |t| get_root: { const project = try 
Project.fromText(&arena.allocator, t); var ret: []const u8 = default_root; if (project.packages.count() == 1) ret = project.packages.iterator().next().?.value.root orelse default_root; break :get_root ret; } else default_root; break :blk Dependency{ .alias = try normalizeName(info.repo), .src = .{ .github = .{ .user = info.user, .repo = info.repo, .ref = default_branch, .root = root_file, }, }, }; } else blk: { const latest = try api.getLatest(&arena.allocator, repository, info.user, info.repo, null); var buf = try arena.allocator.alloc(u8, 80); var stream = std.io.fixedBufferStream(buf); try stream.writer().print("^{}", .{latest}); break :blk Dependency{ .alias = info.repo, .src = .{ .pkg = .{ .user = info.user, .name = info.repo, .version = version.Range{ .min = latest, .kind = .caret, }, .repository = api.default_repo, .ver_str = stream.getWritten(), }, }, }; }; try dep.addToZNode(&arena, &tree, deps, false); } try file.seekTo(0); try root.stringifyPretty(file.writer()); }
src/commands.zig
const std = @import("std");
const mem = std.mem;
const log = std.log.scoped(.c);
const Writer = std.ArrayList(u8).Writer;

const link = @import("../link.zig");
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
const Inst = @import("../ir.zig").Inst;
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
const C = link.File.C;
const Decl = Module.Decl;
const trace = @import("../tracy.zig").trace;

/// Whether an emitted C local gets a `const` qualifier.
const Mutability = enum { Const, Mut };

/// How a ZIR instruction's result is referred to in the emitted C source.
pub const CValue = union(enum) {
    none: void,
    /// Index into local_names
    local: usize,
    /// Index into local_names, but take the address.
    local_ref: usize,
    /// A constant instruction, to be rendered inline.
    constant: *Inst,
    /// Index into the parameters
    arg: usize,
    /// By-value
    decl: *Decl,
};

pub const CValueMap = std.AutoHashMap(*Inst, CValue);

/// This data is available when outputting .c code for a Module.
/// It is not available when generating .h file.
pub const Object = struct {
    dg: DeclGen,
    gpa: *mem.Allocator,
    /// Accumulated C source text for the current function body.
    code: std.ArrayList(u8),
    /// Maps ZIR instructions to the C expression that holds their result.
    value_map: CValueMap,
    next_arg_index: usize = 0,
    next_local_index: usize = 0,

    /// Resolve the C expression for an instruction: constants render inline,
    /// everything else must already be in value_map.
    fn resolveInst(o: *Object, inst: *Inst) !CValue {
        if (inst.value()) |_| {
            return CValue{ .constant = inst };
        }
        return o.value_map.get(inst).?; // Instruction does not dominate all uses!
    }

    /// Reserve the next local index (t0, t1, ...) without emitting anything.
    fn allocLocalValue(o: *Object) CValue {
        const result = o.next_local_index;
        o.next_local_index += 1;
        return .{ .local = result };
    }

    /// Reserve a local and emit its C declaration (type + name, no terminator).
    fn allocLocal(o: *Object, ty: Type, mutability: Mutability) !CValue {
        const local_value = o.allocLocalValue();
        try o.renderTypeAndName(o.code.writer(), ty, local_value, mutability);
        return local_value;
    }

    /// Emit one level of indentation (bodies are only ever one level deep here).
    fn indent(o: *Object) !void {
        const indent_size = 4;
        const indent_level = 1;
        const indent_amt = indent_size * indent_level;
        try o.code.writer().writeByteNTimes(' ', indent_amt);
    }

    /// Write the C spelling of a CValue (tN, &tN, aN, decl name, or an inlined constant).
    fn writeCValue(o: *Object, writer: Writer, c_value: CValue) !void {
        switch (c_value) {
            .none => unreachable,
            .local => |i| return writer.print("t{d}", .{i}),
            .local_ref => |i| return writer.print("&t{d}", .{i}),
            .constant => |inst| return o.dg.renderValue(writer, inst.ty, inst.value().?),
            .arg => |i| return writer.print("a{d}", .{i}),
            .decl => |decl| return writer.writeAll(mem.span(decl.name)),
        }
    }

    /// Emit a C declarator for `name` of type `ty`. Array types peel into a
    /// `[N]` suffix after the name, C-style; the element type is printed first.
    fn renderTypeAndName(
        o: *Object,
        writer: Writer,
        ty: Type,
        name: CValue,
        mutability: Mutability,
    ) error{ OutOfMemory, AnalysisFail }!void {
        var suffix = std.ArrayList(u8).init(o.gpa);
        defer suffix.deinit();

        var render_ty = ty;
        while (render_ty.zigTypeTag() == .Array) {
            // A sentinel (e.g. null terminator) occupies one extra element in C.
            const sentinel_bit = @boolToInt(render_ty.sentinel() != null);
            const c_len = render_ty.arrayLen() + sentinel_bit;
            try suffix.writer().print("[{d}]", .{c_len});
            render_ty = render_ty.elemType();
        }

        try o.dg.renderType(writer, render_ty);

        const const_prefix = switch (mutability) {
            .Const => "const ",
            .Mut => "",
        };
        try writer.print(" {s}", .{const_prefix});
        try o.writeCValue(writer, name);
        try writer.writeAll(suffix.items);
    }
};

/// This data is available both when outputting .c code and when outputting an .h file.
pub const DeclGen = struct {
    module: *Module,
    decl: *Decl,
    /// Forward declarations emitted at the top of the .c / into the .h file.
    fwd_decl: std.ArrayList(u8),
    error_msg: ?*Compilation.ErrorMsg,

    /// Record a codegen error for this decl and return AnalysisFail.
    fn fail(dg: *DeclGen, src: usize, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
        dg.error_msg = try Compilation.ErrorMsg.create(dg.module.gpa, src, format, args);
        return error.AnalysisFail;
    }

    /// Render `val` of type `t` as a C expression.
    fn renderValue(
        dg: *DeclGen,
        writer: Writer,
        t: Type,
        val: Value,
    ) error{ OutOfMemory, AnalysisFail }!void {
        switch (t.zigTypeTag()) {
            .Int => {
                if (t.isSignedInt())
                    return writer.print("{d}", .{val.toSignedInt()});
                return writer.print("{d}", .{val.toUnsignedInt()});
            },
            .Pointer => switch (val.tag()) {
                .undef, .zero => try writer.writeAll("0"),
                .one => try writer.writeAll("1"),
                .decl_ref => {
                    const decl = val.castTag(.decl_ref).?.data;

                    // Determine if we must pointer cast.
                    const decl_tv = decl.typed_value.most_recent.typed_value;
                    if (t.eql(decl_tv.ty)) {
                        try writer.print("&{s}", .{decl.name});
                    } else {
                        try writer.writeAll("(");
                        try dg.renderType(writer, t);
                        try writer.print(")&{s}", .{decl.name});
                    }
                },
                .function => {
                    const func = val.castTag(.function).?.data;
                    try writer.print("{s}", .{func.owner_decl.name});
                },
                .extern_fn => {
                    const decl = val.castTag(.extern_fn).?.data;
                    try writer.print("{s}", .{decl.name});
                },
                else => |e| return dg.fail(
                    dg.decl.src(),
                    "TODO: C backend: implement Pointer value {s}",
                    .{@tagName(e)},
                ),
            },
            .Array => {
                // First try specific tag representations for more efficiency.
                switch (val.tag()) {
                    .undef, .empty_struct_value, .empty_array => try writer.writeAll("{}"),
                    .bytes => {
                        const bytes = val.castTag(.bytes).?.data;
                        // TODO: make our own C string escape instead of using std.zig.fmtEscapes
                        try writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)});
                    },
                    else => {
                        // Fall back to generic implementation.
                        var arena = std.heap.ArenaAllocator.init(dg.module.gpa);
                        defer arena.deinit();

                        try writer.writeAll("{");
                        var index: usize = 0;
                        const len = t.arrayLen();
                        const elem_ty = t.elemType();
                        while (index < len) : (index += 1) {
                            if (index != 0) try writer.writeAll(",");
                            const elem_val = try val.elemValue(&arena.allocator, index);
                            try dg.renderValue(writer, elem_ty, elem_val);
                        }
                        // The sentinel, when present, is rendered as a trailing element.
                        if (t.sentinel()) |sentinel_val| {
                            if (index != 0) try writer.writeAll(",");
                            try dg.renderValue(writer, elem_ty, sentinel_val);
                        }
                        try writer.writeAll("}");
                    },
                }
            },
            else => |e| return dg.fail(dg.decl.src(), "TODO: C backend: implement value {s}", .{
                @tagName(e),
            }),
        }
    }

    /// Emit `ret_ty name(params)` for this decl; `static` unless exported.
    fn renderFunctionSignature(dg: *DeclGen, w: Writer, is_global: bool) !void {
        if (!is_global) {
            try w.writeAll("static ");
        }

        const tv = dg.decl.typed_value.most_recent.typed_value;
        try dg.renderType(w, tv.ty.fnReturnType());

        const decl_name = mem.span(dg.decl.name);
        try w.print(" {s}(", .{decl_name});

        var param_len = tv.ty.fnParamLen();
        if (param_len == 0)
            try w.writeAll("void")
        else {
            var index: usize = 0;
            while (index < param_len) : (index += 1) {
                if (index > 0) {
                    try w.writeAll(", ");
                }
                try dg.renderType(w, tv.ty.fnParamType(index));
                try w.print(" a{d}", .{index});
            }
        }
        try w.writeByte(')');
    }

    /// Render a Zig type as C source text.
    fn renderType(dg: *DeclGen, w: Writer, t: Type) error{ OutOfMemory, AnalysisFail }!void {
        switch (t.zigTypeTag()) {
            .NoReturn => {
                try w.writeAll("zig_noreturn void");
            },
            .Void => try w.writeAll("void"),
            .Bool => try w.writeAll("bool"),
            .Int => {
                switch (t.tag()) {
                    .u8 => try w.writeAll("uint8_t"),
                    .i8 => try w.writeAll("int8_t"),
                    .u16 => try w.writeAll("uint16_t"),
                    .i16 => try w.writeAll("int16_t"),
                    .u32 => try w.writeAll("uint32_t"),
                    .i32 => try w.writeAll("int32_t"),
                    .u64 => try w.writeAll("uint64_t"),
                    .i64 => try w.writeAll("int64_t"),
                    .usize => try w.writeAll("uintptr_t"),
                    .isize => try w.writeAll("intptr_t"),
                    .c_short => try w.writeAll("short"),
                    .c_ushort => try w.writeAll("unsigned short"),
                    .c_int => try w.writeAll("int"),
                    .c_uint => try w.writeAll("unsigned int"),
                    .c_long => try w.writeAll("long"),
                    .c_ulong => try w.writeAll("unsigned long"),
                    .c_longlong => try w.writeAll("long long"),
                    .c_ulonglong => try w.writeAll("unsigned long long"),
                    .int_signed, .int_unsigned => {
                        // Arbitrary-width ints round up to the next fixed-size C int.
                        // NOTE: the "i" prefix here presumably pairs with zig.h
                        // typedefs (iint8_t etc.) for signed types — confirm
                        // against the C runtime header.
                        const info = t.intInfo(dg.module.getTarget());
                        const sign_prefix = switch (info.signedness) {
                            .signed => "i",
                            .unsigned => "",
                        };
                        inline for (.{ 8, 16, 32, 64, 128 }) |nbits| {
                            if (info.bits <= nbits) {
                                try w.print("{s}int{d}_t", .{ sign_prefix, nbits });
                                break;
                            }
                        } else {
                            return dg.fail(dg.decl.src(), "TODO: C backend: implement integer types larger than 128 bits", .{});
                        }
                    },
                    else => unreachable,
                }
            },
            .Pointer => {
                if (t.isSlice()) {
                    return dg.fail(dg.decl.src(), "TODO: C backend: implement slices", .{});
                } else {
                    try dg.renderType(w, t.elemType());
                    try w.writeAll(" *");
                    if (t.isConstPtr()) {
                        try w.writeAll("const ");
                    }
                    if (t.isVolatilePtr()) {
                        try w.writeAll("volatile ");
                    }
                }
            },
            .Array => {
                // Arrays decay to element pointers in this position; the length
                // suffix is handled by renderTypeAndName.
                try dg.renderType(w, t.elemType());
                try w.writeAll(" *");
            },
            else => |e| return dg.fail(dg.decl.src(), "TODO: C backend: implement type {s}", .{
                @tagName(e),
            }),
        }
    }

    /// A function is global (non-static) when it is extern or exported.
    fn functionIsGlobal(dg: *DeclGen, tv: TypedValue) bool {
        switch (tv.val.tag()) {
            .extern_fn => return true,
            .function => {
                const func = tv.val.castTag(.function).?.data;
                return dg.module.decl_exports.contains(func.owner_decl);
            },
            else => unreachable,
        }
    }
};

/// Generate the C source for one decl: a function body, an extern prototype,
/// or a static variable with its initializer.
pub fn genDecl(o: *Object) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const tv = o.dg.decl.typed_value.most_recent.typed_value;
    if (tv.val.castTag(.function)) |func_payload| {
        const is_global = o.dg.functionIsGlobal(tv);
        const fwd_decl_writer = o.dg.fwd_decl.writer();
        if (is_global) {
            try fwd_decl_writer.writeAll("ZIG_EXTERN_C ");
        }
        try o.dg.renderFunctionSignature(fwd_decl_writer, is_global);
        try fwd_decl_writer.writeAll(";\n");

        const func: *Module.Fn = func_payload.data;
        const instructions = func.body.instructions;
        const writer = o.code.writer();
        try writer.writeAll("\n");
        try o.dg.renderFunctionSignature(writer, is_global);

        if (instructions.len == 0) {
            try writer.writeAll(" {}\n");
            return;
        }

        try writer.writeAll(" {");
        try writer.writeAll("\n");

        // Lower each instruction, remembering where its result lives.
        for (instructions) |inst| {
            const result_value = switch (inst.tag) {
                .add => try genBinOp(o, inst.castTag(.add).?, " + "),
                .alloc => try genAlloc(o, inst.castTag(.alloc).?),
                .arg => genArg(o),
                .assembly => try genAsm(o, inst.castTag(.assembly).?),
                .block => try genBlock(o, inst.castTag(.block).?),
                .bitcast => try genBitcast(o, inst.castTag(.bitcast).?),
                .breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?),
                .call => try genCall(o, inst.castTag(.call).?),
                .cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "),
                .cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "),
                .cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "),
                .cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "),
                .cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "),
                .cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "),
                .dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?),
                .intcast => try genIntCast(o, inst.castTag(.intcast).?),
                .load => try genLoad(o, inst.castTag(.load).?),
                .ret => try genRet(o, inst.castTag(.ret).?),
                .retvoid => try genRetVoid(o),
                .store => try genStore(o, inst.castTag(.store).?),
                .sub => try genBinOp(o, inst.castTag(.sub).?, " - "),
                .unreach => try genUnreach(o, inst.castTag(.unreach).?),
                else => |e| return o.dg.fail(o.dg.decl.src(), "TODO: C backend: implement codegen for {}", .{e}),
            };
            switch (result_value) {
                .none => {},
                else => try o.value_map.putNoClobber(inst, result_value),
            }
        }

        try writer.writeAll("}\n");
    } else if (tv.val.tag() == .extern_fn) {
        const writer = o.code.writer();
        try writer.writeAll("ZIG_EXTERN_C ");
        try o.dg.renderFunctionSignature(writer, true);
        try writer.writeAll(";\n");
    } else {
        const writer = o.code.writer();
        try writer.writeAll("static ");

        // TODO ask the Decl if it is const
        // https://github.com/ziglang/zig/issues/7582
        const decl_c_value: CValue = .{ .decl = o.dg.decl };
        try o.renderTypeAndName(writer, tv.ty, decl_c_value, .Mut);

        try writer.writeAll(" = ");
        try o.dg.renderValue(writer, tv.ty, tv.val);
        try writer.writeAll(";\n");
    }
}

/// Emit only the forward declaration of a function decl (for .h output).
pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void {
    const tracy = trace(@src());
    defer tracy.end();

    const tv = dg.decl.typed_value.most_recent.typed_value;
    const writer = dg.fwd_decl.writer();

    switch (tv.ty.zigTypeTag()) {
        .Fn => {
            const is_global = dg.functionIsGlobal(tv);
            if (is_global) {
                try writer.writeAll("ZIG_EXTERN_C ");
            }
            try dg.renderFunctionSignature(writer, is_global);
            try dg.fwd_decl.appendSlice(";\n");
        },
        else => {},
    }
}

/// alloc: declare backing storage; the result is the storage's address.
fn genAlloc(o: *Object, alloc: *Inst.NoOp) !CValue {
    const writer = o.code.writer();

    // First line: the variable used as data storage.
    try o.indent();
    const elem_type = alloc.base.ty.elemType();
    const mutability: Mutability = if (alloc.base.ty.isConstPtr()) .Const else .Mut;
    const local = try o.allocLocal(elem_type, mutability);
    try writer.writeAll(";\n");

    return CValue{ .local_ref = local.local };
}

/// arg: parameters are emitted as a0, a1, ... in signature order.
fn genArg(o: *Object) CValue {
    const i = o.next_arg_index;
    o.next_arg_index += 1;
    return .{ .arg = i };
}

fn genRetVoid(o: *Object) !CValue {
    try o.indent();
    try o.code.writer().print("return;\n", .{});
    return CValue.none;
}

/// load: `t = *ptr;` — loading through the address of a local elides the deref.
fn genLoad(o: *Object, inst: *Inst.UnOp) !CValue {
    const operand = try o.resolveInst(inst.operand);
    const writer = o.code.writer();
    try o.indent();
    const local = try o.allocLocal(inst.base.ty, .Const);
    switch (operand) {
        .local_ref => |i| {
            const wrapped: CValue = .{ .local = i };
            try writer.writeAll(" = ");
            try o.writeCValue(writer, wrapped);
            try writer.writeAll(";\n");
        },
        else => {
            try writer.writeAll(" = *");
            try o.writeCValue(writer, operand);
            try writer.writeAll(";\n");
        },
    }
    return local;
}

fn genRet(o: *Object, inst: *Inst.UnOp) !CValue {
    const operand = try o.resolveInst(inst.operand);
    try o.indent();
    const writer = o.code.writer();
    try writer.writeAll("return ");
    try o.writeCValue(writer, operand);
    try writer.writeAll(";\n");
    return CValue.none;
}

/// intcast: `t = (T)from;`
fn genIntCast(o: *Object, inst: *Inst.UnOp) !CValue {
    if (inst.base.isUnused())
        return CValue.none;

    const from = try o.resolveInst(inst.operand);
    try o.indent();
    const writer = o.code.writer();
    const local = try o.allocLocal(inst.base.ty, .Const);
    try writer.writeAll(" = (");
    try o.dg.renderType(writer, inst.base.ty);
    try writer.writeAll(")");
    try o.writeCValue(writer, from);
    try writer.writeAll(";\n");
    return local;
}

fn genStore(o: *Object, inst: *Inst.BinOp) !CValue {
    // *a = b;
    const dest_ptr = try o.resolveInst(inst.lhs);
    const src_val = try o.resolveInst(inst.rhs);

    try o.indent();
    const writer = o.code.writer();
    switch (dest_ptr) {
        .local_ref => |i| {
            // Storing through &local writes the local directly.
            const dest: CValue = .{ .local = i };
            try o.writeCValue(writer, dest);
            try writer.writeAll(" = ");
            try o.writeCValue(writer, src_val);
            try writer.writeAll(";\n");
        },
        else => {
            try writer.writeAll("*");
            try o.writeCValue(writer, dest_ptr);
            try writer.writeAll(" = ");
            try o.writeCValue(writer, src_val);
            try writer.writeAll(";\n");
        },
    }
    return CValue.none;
}

/// Shared lowering for binary operators: `t = lhs <op> rhs;`
fn genBinOp(o: *Object, inst: *Inst.BinOp, operator: []const u8) !CValue {
    if (inst.base.isUnused())
        return CValue.none;

    const lhs = try o.resolveInst(inst.lhs);
    const rhs = try o.resolveInst(inst.rhs);

    try o.indent();
    const writer = o.code.writer();
    const local = try o.allocLocal(inst.base.ty, .Const);

    try writer.writeAll(" = ");
    try o.writeCValue(writer, lhs);
    try writer.writeAll(operator);
    try o.writeCValue(writer, rhs);
    try writer.writeAll(";\n");

    return local;
}

/// call: only direct calls to known decls are supported; the result is
/// captured in a local unless unused (then cast to void when non-void).
fn genCall(o: *Object, inst: *Inst.Call) !CValue {
    if (inst.func.castTag(.constant)) |func_inst| {
        const fn_decl = if (func_inst.val.castTag(.extern_fn)) |extern_fn|
            extern_fn.data
        else if (func_inst.val.castTag(.function)) |func_payload|
            func_payload.data.owner_decl
        else
            unreachable;

        const fn_ty = fn_decl.typed_value.most_recent.typed_value.ty;
        const ret_ty = fn_ty.fnReturnType();
        const unused_result = inst.base.isUnused();
        var result_local: CValue = .none;

        try o.indent();
        const writer = o.code.writer();
        if (unused_result) {
            if (ret_ty.hasCodeGenBits()) {
                try writer.print("(void)", .{});
            }
        } else {
            result_local = try o.allocLocal(ret_ty, .Const);
            try writer.writeAll(" = ");
        }

        const fn_name = mem.spanZ(fn_decl.name);
        try writer.print("{s}(", .{fn_name});
        if (inst.args.len != 0) {
            for (inst.args) |arg, i| {
                if (i > 0) {
                    try writer.writeAll(", ");
                }
                if (arg.value()) |val| {
                    try o.dg.renderValue(writer, arg.ty, val);
                } else {
                    const val = try o.resolveInst(arg);
                    try o.writeCValue(writer, val);
                }
            }
        }
        try writer.writeAll(");\n");
        return result_local;
    } else {
        return o.dg.fail(o.dg.decl.src(), "TODO: C backend: implement function pointers", .{});
    }
}

fn genDbgStmt(o: *Object, inst: *Inst.NoOp) !CValue {
    // TODO emit #line directive here with line number and filename
    return CValue.none;
}

fn genBlock(o: *Object, inst: *Inst.Block) !CValue {
    return o.dg.fail(o.dg.decl.src(), "TODO: C backend: implement blocks", .{});
}

/// bitcast: pointer-to-pointer uses a C cast; anything else round-trips
/// through memcpy into an uninitialized local of the target type.
fn genBitcast(o: *Object, inst: *Inst.UnOp) !CValue {
    const operand = try o.resolveInst(inst.operand);
    const writer = o.code.writer();
    try o.indent();
    if (inst.base.ty.zigTypeTag() == .Pointer and
        inst.operand.ty.zigTypeTag() == .Pointer)
    {
        const local = try o.allocLocal(inst.base.ty, .Const);
        try writer.writeAll(" = (");
        try o.dg.renderType(writer, inst.base.ty);
        try writer.writeAll(")");
        try o.writeCValue(writer, operand);
        try writer.writeAll(";\n");
        return local;
    }
    const local = try o.allocLocal(inst.base.ty, .Mut);
    try writer.writeAll(";\n");
    try o.indent();
    try writer.writeAll("memcpy(&");
    try o.writeCValue(writer, local);
    try writer.writeAll(", &");
    try o.writeCValue(writer, operand);
    try writer.writeAll(", sizeof ");
    try o.writeCValue(writer, local);
    try writer.writeAll(");\n");
    return local;
}

fn genBreakpoint(o: *Object, inst: *Inst.NoOp) !CValue {
    try o.indent();
    try o.code.writer().writeAll("zig_breakpoint();\n");
    return CValue.none;
}

fn genUnreach(o: *Object, inst: *Inst.NoOp) !CValue {
    try o.indent();
    try o.code.writer().writeAll("zig_unreachable();\n");
    return CValue.none;
}

/// assembly: supports only explicit-register inputs like "{rax}"; each input
/// is pinned via a `register ... __asm__("reg")` temporary before the asm.
fn genAsm(o: *Object, as: *Inst.Assembly) !CValue {
    if (as.base.isUnused() and !as.is_volatile)
        return CValue.none;

    const writer = o.code.writer();
    for (as.inputs) |i, index| {
        if (i[0] == '{' and i[i.len - 1] == '}') {
            const reg = i[1 .. i.len - 1];
            const arg = as.args[index];
            const arg_c_value = try o.resolveInst(arg);
            try o.indent();
            try writer.writeAll("register ");
            try o.dg.renderType(writer, arg.ty);
            try writer.print(" {s}_constant __asm__(\"{s}\") = ", .{ reg, reg });
            try o.writeCValue(writer, arg_c_value);
            try writer.writeAll(";\n");
        } else {
            return o.dg.fail(o.dg.decl.src(), "TODO non-explicit inline asm regs", .{});
        }
    }
    try o.indent();
    const volatile_string: []const u8 = if (as.is_volatile) "volatile " else "";
    try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, as.asm_source });
    if (as.output) |_| {
        return o.dg.fail(o.dg.decl.src(), "TODO inline asm output", .{});
    }
    if (as.inputs.len > 0) {
        if (as.output == null) {
            // No outputs: emit the empty output-constraint slot.
            try writer.writeAll(" :");
        }
        try writer.writeAll(": ");
        for (as.inputs) |i, index| {
            if (i[0] == '{' and i[i.len - 1] == '}') {
                const reg = i[1 .. i.len - 1];
                const arg = as.args[index];
                if (index > 0) {
                    try writer.writeAll(", ");
                }
                try writer.print("\"r\"({s}_constant)", .{reg});
            } else {
                // This is blocked by the earlier test
                unreachable;
            }
        }
    }
    try writer.writeAll(");\n");
    if (as.base.isUnused())
        return CValue.none;
    return o.dg.fail(o.dg.decl.src(), "TODO: C backend: inline asm expression result used", .{});
}
src/codegen/c.zig
const std = @import("std");

/// Platform mutex: SRW lock on Windows, os_unfair_lock on Darwin,
/// pthread mutex when libc is linked, otherwise a futex-based lock on Linux.
/// Note libc is checked before linux, so glibc/musl Linux uses PosixLock.
pub const Lock = if (std.builtin.os.tag == .windows)
    WindowsLock
else if (std.Target.current.isDarwin())
    DarwinLock
else if (std.builtin.link_libc)
    PosixLock
else if (std.builtin.os.tag == .linux)
    LinuxLock
else
    @compileError("Platform not supported");

/// Thin wrapper over Windows slim reader/writer locks (exclusive mode only).
const WindowsLock = struct {
    lock: std.os.windows.SRWLOCK = std.os.windows.SRWLOCK_INIT,

    pub fn deinit(self: *@This()) void {
        self.* = undefined;
    }

    pub fn tryAcquire(self: *@This()) bool {
        return std.os.windows.kernel32.TryAcquireSRWLockExclusive(&self.lock) != 0;
    }

    pub fn acquire(self: *@This()) void {
        std.os.windows.kernel32.AcquireSRWLockExclusive(&self.lock);
    }

    pub fn release(self: *@This()) void {
        std.os.windows.kernel32.ReleaseSRWLockExclusive(&self.lock);
    }
};

/// Thin wrapper over pthread_mutex_t.
const PosixLock = struct {
    mutex: std.c.pthread_mutex_t = std.c.PTHREAD_MUTEX_INITIALIZER,

    pub fn deinit(self: *@This()) void {
        // Destroy may report an error; deallocation must not fail, so ignore it.
        _ = std.c.pthread_mutex_destroy(&self.mutex);
    }

    pub fn tryAcquire(self: *@This()) bool {
        return std.c.pthread_mutex_trylock(&self.mutex) == 0;
    }

    pub fn acquire(self: *@This()) void {
        std.debug.assert(std.c.pthread_mutex_lock(&self.mutex) == 0);
    }

    pub fn release(self: *@This()) void {
        std.debug.assert(std.c.pthread_mutex_unlock(&self.mutex) == 0);
    }
};

/// Thin wrapper over Darwin's os_unfair_lock.
/// NOTE(review): the lock is declared here as a bare u32 — assumes this matches
/// the layout of os_unfair_lock_s; confirm against the macOS headers.
const DarwinLock = struct {
    oul: u32 = 0,

    pub fn deinit(self: *@This()) void {
        self.* = undefined;
    }

    pub fn tryAcquire(self: *@This()) bool {
        return os_unfair_lock_trylock(&self.oul);
    }

    pub fn acquire(self: *@This()) void {
        os_unfair_lock_lock(&self.oul);
    }

    pub fn release(self: *@This()) void {
        os_unfair_lock_unlock(&self.oul);
    }

    extern "c" fn os_unfair_lock_lock(oul: *u32) callconv(.C) void;
    extern "c" fn os_unfair_lock_unlock(oul: *u32) callconv(.C) void;
    extern "c" fn os_unfair_lock_trylock(oul: *u32) callconv(.C) bool;
};

/// Futex-based lock with three states: unlocked, locked (no waiters observed),
/// and contended (a releaser must wake a waiter).
const LinuxLock = struct {
    state: State = .unlocked,

    const Futex = @import("./Futex.zig").Futex;

    const State = enum(u32) {
        unlocked,
        locked,
        contended,
    };

    pub fn deinit(self: *@This()) void {
        self.* = undefined;
    }

    pub fn tryAcquire(self: *@This()) bool {
        return @cmpxchgStrong(
            State,
            &self.state,
            .unlocked,
            .locked,
            .Acquire,
            .Monotonic,
        ) == null;
    }

    pub fn acquire(self: *@This()) void {
        // Fast path: unconditional swap to .locked; any other previous state
        // means we must take the slow path.
        const state = @atomicRmw(State, &self.state, .Xchg, .locked, .Acquire);
        if (state != .unlocked) {
            self.acquireSlow(state);
        }
    }

    fn acquireSlow(self: *@This(), current_state: State) void {
        @setCold(true);

        var adaptive_spin: usize = 0;
        // Once contention is observed we must acquire as .contended so the
        // eventual releaser knows to wake waiters.
        var new_state = current_state;
        while (true) {
            var state = @atomicLoad(State, &self.state, .Monotonic);
            if (state == .unlocked) {
                state = @cmpxchgStrong(
                    State,
                    &self.state,
                    .unlocked,
                    new_state,
                    .Acquire,
                    .Monotonic,
                ) orelse return;
            }

            // Spin briefly while no one has marked the lock contended.
            if (state != .contended and Futex.yield(adaptive_spin)) {
                adaptive_spin +%= 1;
                continue;
            }

            new_state = .contended;
            if (state != .contended) {
                // Mark contended; if the lock happened to be released in the
                // meantime, the swap acquired it for us.
                state = @atomicRmw(State, &self.state, .Xchg, .contended, .Acquire);
                if (state == .unlocked) {
                    return;
                }
            }

            adaptive_spin = 0;
            Futex.wait(
                @ptrCast(*const u32, &self.state),
                @enumToInt(State.contended),
                null,
            ) catch unreachable;
        }
    }

    pub fn release(self: *@This()) void {
        switch (@atomicRmw(State, &self.state, .Xchg, .unlocked, .Release)) {
            .unlocked => unreachable, // released a lock that was not held
            .locked => {},
            .contended => self.releaseSlow(),
        }
    }

    fn releaseSlow(self: *@This()) void {
        @setCold(true);
        // Wake a single waiter; it will re-mark the lock contended if needed.
        Futex.wake(@ptrCast(*const u32, &self.state), 1);
    }
};
src/runtime/Lock.zig
const Allocator = std.mem.Allocator; const crc = std.hash.crc; const FormatInterface = @import("../format_interface.zig").FormatInterface; const ImageFormat = image.ImageFormat; const ImageReader = image.ImageReader; const ImageInfo = image.ImageInfo; const ImageSeekStream = image.ImageSeekStream; const PixelFormat = @import("../pixel_format.zig").PixelFormat; const color = @import("../color.zig"); const errors = @import("../errors.zig"); const image = @import("../image.zig"); const std = @import("std"); const utils = @import("../utils.zig"); const PNGMagicHeader = "\x89PNG\x0D\x0A\x1A\x0A"; pub const ColorType = enum(u8) { Grayscale = 0, Truecolor = 2, Indexed = 3, GrayscaleAlpha = 4, TruecolorAlpha = 6, const Self = @This(); pub fn channelCount(self: Self) u8 { return switch (self) { .Grayscale => 1, .Truecolor => 3, .Indexed => 1, .GrayscaleAlpha => 2, .TruecolorAlpha => 4, }; } }; pub const FilterType = enum(u8) { None, Sub, Up, Average, Paeth, }; pub const InterlaceMethod = enum(u8) { Standard, Adam7, }; pub const IHDR = packed struct { width: u32, height: u32, bit_depth: u8, color_type: ColorType, compression_method: u8, filter_method: u8, interlace_method: InterlaceMethod, pub const ChunkType = "IHDR"; pub const ChunkID = utils.toMagicNumberBig(ChunkType); const Self = @This(); pub fn deinit(self: Self, allocator: *Allocator) void { _ = self; _ = allocator; } pub fn read(self: *Self, allocator: *Allocator, read_buffer: []u8) !bool { _ = allocator; var stream = std.io.StreamSource{ .buffer = std.io.fixedBufferStream(read_buffer) }; self.* = try utils.readStructBig(stream.reader(), Self); return true; } }; pub const PLTE = struct { palette: []color.Color, pub const ChunkType = "PLTE"; pub const ChunkID = utils.toMagicNumberBig(ChunkType); const Self = @This(); pub fn deinit(self: Self, allocator: *Allocator) void { allocator.free(self.palette); } pub fn read(self: *Self, header: IHDR, allocator: *Allocator, read_buffer: []u8) !bool { _ = header; if 
(read_buffer.len % 3 != 0) { return errors.PngError.InvalidPalette; } self.palette = try allocator.alloc(color.Color, read_buffer.len / 3); var palette_index: usize = 0; var buffer_index: usize = 0; while (buffer_index < read_buffer.len) { self.palette[palette_index].R = color.toColorFloat(read_buffer[buffer_index]); self.palette[palette_index].G = color.toColorFloat(read_buffer[buffer_index + 1]); self.palette[palette_index].B = color.toColorFloat(read_buffer[buffer_index + 2]); self.palette[palette_index].A = 1.0; palette_index += 1; buffer_index += 3; } return true; } }; pub const IDAT = struct { data: []u8 = undefined, pub const ChunkType = "IDAT"; pub const ChunkID = utils.toMagicNumberBig(ChunkType); const Self = @This(); pub fn deinit(self: Self, allocator: *Allocator) void { allocator.free(self.data); } pub fn read(self: *Self, header: IHDR, allocator: *Allocator, read_buffer: []u8) !bool { _ = header; _ = allocator; self.data = read_buffer; return false; } }; pub const IEND = packed struct { pub const ChunkType = "IEND"; pub const ChunkID = utils.toMagicNumberBig(ChunkType); const Self = @This(); pub fn deinit(self: Self, allocator: *Allocator) void { _ = self; _ = allocator; } pub fn read(self: *Self, header: IHDR, allocator: *Allocator, read_buffer: []u8) !bool { _ = self; _ = header; _ = allocator; _ = read_buffer; return true; } }; pub const gAMA = packed struct { iGamma: u32, pub const ChunkType = "gAMA"; pub const ChunkID = utils.toMagicNumberBig(ChunkType); const Self = @This(); pub fn deinit(self: Self, allocator: *Allocator) void { _ = self; _ = allocator; } pub fn read(self: *Self, header: IHDR, allocator: *Allocator, read_buffer: []u8) !bool { _ = header; _ = allocator; var stream = std.io.fixedBufferStream(read_buffer); self.iGamma = try stream.reader().readIntBig(u32); return true; } pub fn toGammaExponent(self: Self) f32 { return @intToFloat(f32, self.iGamma) / 100000.0; } }; pub const bKGD = packed struct { // TODO: Use a union(enum) once 
Zig support a union(enum) inside another union(enum) color: enum(u8) { Grayscale, Palette, TrueColor, }, grayscale: u16, palette: u8, red: u16, green: u16, blue: u16, pub const ChunkType = "bKGD"; pub const ChunkID = utils.toMagicNumberBig(ChunkType); const Self = @This(); pub fn deinit(self: Self, allocator: *Allocator) void { _ = self; _ = allocator; } pub fn read(self: *Self, header: IHDR, allocator: *Allocator, read_buffer: []u8) !bool { _ = allocator; var stream = std.io.fixedBufferStream(read_buffer); switch (header.color_type) { .Grayscale, .GrayscaleAlpha => { self.color = .Grayscale; self.grayscale = try stream.reader().readIntBig(u16); }, .Indexed => { self.color = .Palette; self.palette = try stream.reader().readIntBig(u8); }, .Truecolor, .TruecolorAlpha => { self.color = .TrueColor; self.red = try stream.reader().readIntBig(u16); self.green = try stream.reader().readIntBig(u16); self.blue = try stream.reader().readIntBig(u16); }, } return true; } }; pub const ChunkVariant = union(enum) { PLTE: PLTE, IDAT: IDAT, gAMA: gAMA, bKGD: bKGD, const Self = @This(); pub fn deinit(self: Self, allocator: *Allocator) void { switch (self) { .PLTE => |instance| instance.deinit(allocator), .IDAT => |instance| instance.deinit(allocator), .gAMA => |instance| instance.deinit(allocator), .bKGD => |instance| instance.deinit(allocator), } } pub fn getChunkID(self: Self) u32 { return switch (self) { .PLTE => |instance| @field(@TypeOf(instance), "ChunkID"), .IDAT => |instance| @field(@TypeOf(instance), "ChunkID"), .gAMA => |instance| @field(@TypeOf(instance), "ChunkID"), .bKGD => |instance| @field(@TypeOf(instance), "ChunkID"), }; } }; const ChunkAllowed = enum { OneOrMore, OnlyOne, ZeroOrOne, ZeroOrMore, }; const ChunkInfo = struct { chunk_type: type, allowed: ChunkAllowed, store: bool, }; const AllChunks = [_]ChunkInfo{ .{ .chunk_type = IHDR, .allowed = .OnlyOne, .store = false, }, .{ .chunk_type = PLTE, .allowed = .ZeroOrOne, .store = true, }, .{ .chunk_type = bKGD, 
.allowed = .ZeroOrOne, .store = true, }, .{ .chunk_type = IDAT, .allowed = .OneOrMore, .store = true, }, .{ .chunk_type = gAMA, .allowed = .ZeroOrOne, .store = true, }, .{ .chunk_type = IEND, .allowed = .OnlyOne, .store = false, }, }; fn validBitDepths(color_type: ColorType) []const u8 { return switch (color_type) { .Grayscale => &[_]u8{ 1, 2, 4, 8, 16 }, .Truecolor => &[_]u8{ 8, 16 }, .Indexed => &[_]u8{ 1, 2, 4, 8 }, .GrayscaleAlpha => &[_]u8{ 8, 16 }, .TruecolorAlpha => &[_]u8{ 8, 16 }, }; } /// Implement filtering defined by https://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters const PngFilter = struct { context: []u8 = undefined, index: usize = 0, line_stride: usize = 0, pixel_stride: usize = 0, const Self = @This(); pub fn init(allocator: *Allocator, line_stride: usize, bit_depth: usize) !Self { const context = try allocator.alloc(u8, line_stride * 2); std.mem.set(u8, context[0..], 0); return Self{ .context = context, .line_stride = line_stride, .pixel_stride = if (bit_depth >= 8) bit_depth / 8 else 1, }; } pub fn deinit(self: Self, allocator: *Allocator) void { allocator.free(self.context); } pub fn decode(self: *Self, filter_type: FilterType, input: []const u8) !void { const current_start_position = self.startPosition(); const previous_start_position: usize = if (self.index < self.line_stride) 0 else ((self.index - self.line_stride) % self.context.len); var current_row = self.context[current_start_position..(current_start_position + self.line_stride)]; var previous_row = self.context[previous_start_position..(previous_start_position + self.line_stride)]; switch (filter_type) { .None => { std.mem.copy(u8, current_row, input); }, .Sub => { var i: usize = 0; while (i < input.len) : (i += 1) { const a = self.getA(i, current_row, previous_row); current_row[i] = input[i] +% a; } }, .Up => { var i: usize = 0; while (i < input.len) : (i += 1) { const b = self.getB(i, current_row, previous_row); current_row[i] = input[i] +% b; } }, .Average => { var i: usize = 0; 
while (i < input.len) : (i += 1) {
                    const a = @intToFloat(f64, self.getA(i, current_row, previous_row));
                    const b = @intToFloat(f64, self.getB(i, current_row, previous_row));

                    const result: u8 = @intCast(u8, @floatToInt(u16, std.math.floor((a + b) / 2.0)) & 0xFF);

                    current_row[i] = input[i] +% result;
                }
            },
            .Paeth => {
                // Recon(x) = Filt(x) + PaethPredictor(a, b, c), mod 256.
                var i: usize = 0;
                while (i < input.len) : (i += 1) {
                    const a = self.getA(i, current_row, previous_row);
                    const b = self.getB(i, current_row, previous_row);
                    const c = self.getC(i, current_row, previous_row);

                    const source = input[i];
                    const predictor = try paethPredictor(a, b, c);
                    // Widen to u16 before adding so the sum cannot trap; keep the low byte.
                    const result = @intCast(u8, (@as(u16, source) + @as(u16, predictor)) & 0xFF);

                    current_row[i] = result;
                }
            },
        }
        // Advance the ring so the row just written becomes "previous" next call.
        self.index += self.line_stride;
    }

    /// View of the most recently decoded scanline inside the ring buffer.
    pub fn getSlice(self: Self) []u8 {
        const start = self.startPosition();
        return self.context[start..(start + self.line_stride)];
    }

    fn startPosition(self: Self) usize {
        return self.index % self.context.len;
    }

    // a = reconstructed byte one pixel to the left (0 at the start of the row).
    inline fn getA(self: Self, index: usize, current_row: []const u8, previous_row: []const u8) u8 {
        _ = previous_row;
        if (index >= self.pixel_stride) {
            return current_row[index - self.pixel_stride];
        } else {
            return 0;
        }
    }

    // b = reconstructed byte directly above.
    inline fn getB(self: Self, index: usize, current_row: []const u8, previous_row: []const u8) u8 {
        _ = self;
        _ = current_row;
        return previous_row[index];
    }

    // c = reconstructed byte above and one pixel to the left (0 at the row start).
    inline fn getC(self: Self, index: usize, current_row: []const u8, previous_row: []const u8) u8 {
        _ = current_row;
        if (index >= self.pixel_stride) {
            return previous_row[index - self.pixel_stride];
        } else {
            return 0;
        }
    }

    // Paeth predictor (PNG spec section 9.4): pick whichever of a, b, c is
    // closest to p = a + b - c, preferring a, then b, then c on ties.
    // The error union exists only because std.math.absInt can fail on minInt.
    fn paethPredictor(a: u8, b: u8, c: u8) !u8 {
        const large_a = @intCast(isize, a);
        const large_b = @intCast(isize, b);
        const large_c = @intCast(isize, c);

        const p = large_a + large_b - large_c;
        const pa = try std.math.absInt(p - large_a);
        const pb = try std.math.absInt(p - large_b);
        const pc = try std.math.absInt(p - large_c);

        if (pa <= pb and pa <= pc) {
            return @intCast(u8, large_a & 0xFF);
        } else if (pb <= pc) {
            return @intCast(u8, large_b & 0xFF);
        } else {
return @intCast(u8, large_c & 0xFF);
        }
    }
};

// Remember, PNG uses network byte order (aka Big Endian)
// TODO: Proper validation of chunk order and count
pub const PNG = struct {
    header: IHDR = undefined,
    // Stored ancillary/data chunks (see AllChunks for which are kept).
    chunks: std.ArrayList(ChunkVariant) = undefined,
    allocator: *Allocator = undefined,

    // Mutable state threaded through the decompress / defilter / unpack passes.
    const DecompressionContext = struct {
        pixels: *color.ColorStorage = undefined,
        pixels_index: usize = 0,
        compressed_data: std.ArrayList(u8) = undefined,
        filter: PngFilter = undefined,
        x: usize = 0,
        y: usize = 0,
    };

    const Self = @This();

    pub fn init(allocator: *Allocator) Self {
        return Self{
            .chunks = std.ArrayList(ChunkVariant).init(allocator),
            .allocator = allocator,
        };
    }

    /// Release every stored chunk, then the chunk list itself.
    pub fn deinit(self: Self) void {
        for (self.chunks.items) |chunk| {
            chunk.deinit(self.allocator);
        }

        self.chunks.deinit();
    }

    /// Type-erased vtable consumed by the generic image front-end.
    pub fn formatInterface() FormatInterface {
        return FormatInterface{
            .format = @ptrCast(FormatInterface.FormatFn, format),
            .formatDetect = @ptrCast(FormatInterface.FormatDetectFn, formatDetect),
            .readForImage = @ptrCast(FormatInterface.ReadForImageFn, readForImage),
            .writeForImage = @ptrCast(FormatInterface.WriteForImageFn, writeForImage),
        };
    }

    pub fn format() ImageFormat {
        return ImageFormat.Png;
    }

    /// True when the stream starts with the 8-byte PNG signature.
    pub fn formatDetect(reader: ImageReader, seek_stream: ImageSeekStream) !bool {
        _ = seek_stream;
        var magic_number_buffer: [8]u8 = undefined;
        _ = try reader.read(magic_number_buffer[0..]);

        return std.mem.eql(u8, magic_number_buffer[0..], PNGMagicHeader);
    }

    /// Map the parsed IHDR (color type + bit depth) to a storage PixelFormat.
    pub fn pixelFormat(self: Self) !PixelFormat {
        switch (self.header.color_type) {
            .Grayscale => {
                return switch (self.header.bit_depth) {
                    1 => PixelFormat.Grayscale1,
                    2 => PixelFormat.Grayscale2,
                    4 => PixelFormat.Grayscale4,
                    8 => PixelFormat.Grayscale8,
                    16 => PixelFormat.Grayscale16,
                    else => return errors.ImageError.UnsupportedPixelFormat,
                };
            },
            .Truecolor => {
                return switch (self.header.bit_depth) {
                    8 => PixelFormat.Rgb24,
                    16 => PixelFormat.Rgb48,
                    else => return errors.ImageError.UnsupportedPixelFormat,
                };
            },
            .Indexed => {
                return switch
(self.header.bit_depth) {
                    1 => PixelFormat.Bpp1,
                    2 => PixelFormat.Bpp2,
                    4 => PixelFormat.Bpp4,
                    8 => PixelFormat.Bpp8,
                    else => return errors.ImageError.UnsupportedPixelFormat,
                };
            },
            .GrayscaleAlpha => {
                return switch (self.header.bit_depth) {
                    8 => PixelFormat.Grayscale8Alpha,
                    16 => PixelFormat.Grayscale16Alpha,
                    else => return errors.ImageError.UnsupportedPixelFormat,
                };
            },
            .TruecolorAlpha => {
                return switch (self.header.bit_depth) {
                    8 => PixelFormat.Rgba32,
                    16 => PixelFormat.Rgba64,
                    else => return errors.ImageError.UnsupportedPixelFormat,
                };
            },
        }
    }

    /// Linear scan for the first stored chunk whose ID matches chunk_type
    /// (e.g. "PLTE"); null when absent.
    pub fn findFirstChunk(self: Self, chunk_type: []const u8) ?ChunkVariant {
        const chunk_id = utils.toMagicNumberBig(chunk_type);

        for (self.chunks.items) |chunk| {
            if (chunk.getChunkID() == chunk_id) {
                return chunk;
            }
        }

        return null;
    }

    /// Convenience accessor for the palette chunk, if one was stored.
    pub fn getPalette(self: Self) ?PLTE {
        const palette_variant_opt = self.findFirstChunk(PLTE.ChunkType);
        if (palette_variant_opt) |variant| {
            return variant.PLTE;
        }

        return null;
    }

    /// Convenience accessor for the background-color chunk, if one was stored.
    pub fn getBackgroundColorChunk(self: Self) ?bKGD {
        const bkgd_variant_opt = self.findFirstChunk(bKGD.ChunkType);
        if (bkgd_variant_opt) |variant| {
            return variant.bKGD;
        }

        return null;
    }

    /// Entry point used through FormatInterface: decode into pixels_opt and
    /// report the image dimensions from the parsed header.
    pub fn readForImage(allocator: *Allocator, reader: ImageReader, seek_stream: ImageSeekStream, pixels_opt: *?color.ColorStorage) !ImageInfo {
        var png = PNG.init(allocator);
        defer png.deinit();

        try png.read(reader, seek_stream, pixels_opt);

        var image_info = ImageInfo{};
        image_info.width = png.header.width;
        image_info.height = png.header.height;

        return image_info;
    }

    /// PNG encoding is not implemented; this stub keeps the interface complete.
    pub fn writeForImage(allocator: *Allocator, write_stream: image.ImageWriterStream, seek_stream: ImageSeekStream, pixels: color.ColorStorage, save_info: image.ImageSaveInfo) !void {
        _ = allocator;
        _ = write_stream;
        _ = seek_stream;
        _ = pixels;
        _ = save_info;
    }

    /// Decode a complete PNG stream: verify the signature, read all chunks,
    /// validate the bit depth, allocate pixel storage, copy the palette for
    /// indexed images, then inflate and defilter the concatenated IDAT data.
    pub fn read(self: *Self, reader: ImageReader, seek_stream: ImageSeekStream, pixels_opt: *?color.ColorStorage) !void {
        _ = seek_stream;
        var magic_number_buffer: [8]u8 = undefined;
        _ = try
reader.read(magic_number_buffer[0..]);
        if (!std.mem.eql(u8, magic_number_buffer[0..], PNGMagicHeader)) {
            return errors.ImageError.InvalidMagicHeader;
        }

        // readChunk returns false once IEND is seen.
        while (try self.readChunk(reader)) {}

        if (!self.validateBitDepth()) {
            return errors.PngError.InvalidBitDepth;
        }

        const pixel_format = try self.pixelFormat();

        pixels_opt.* = try color.ColorStorage.init(self.allocator, pixel_format, self.header.width * self.header.height);

        if (pixels_opt.*) |*pixels| {
            // Indexed images: copy the PLTE palette into the pixel storage.
            if (self.header.color_type == .Indexed) {
                if (self.getPalette()) |palette_chunk| {
                    switch (pixels.*) {
                        .Bpp1 => |instance| {
                            std.mem.copy(color.Color, instance.palette, palette_chunk.palette);
                        },
                        .Bpp2 => |instance| {
                            std.mem.copy(color.Color, instance.palette, palette_chunk.palette);
                        },
                        .Bpp4 => |instance| {
                            std.mem.copy(color.Color, instance.palette, palette_chunk.palette);
                        },
                        .Bpp8 => |instance| {
                            std.mem.copy(color.Color, instance.palette, palette_chunk.palette);
                        },
                        else => {
                            return error.NotIndexedPixelFormat;
                        },
                    }
                }
            }

            var decompression_context = DecompressionContext{};
            decompression_context.pixels = pixels;
            decompression_context.compressed_data = std.ArrayList(u8).init(self.allocator);
            defer decompression_context.compressed_data.deinit();

            // Concatenate all IDAT chunks into a single buffer
            for (self.chunks.items) |chunk| {
                if (chunk.getChunkID() == IDAT.ChunkID) {
                    try decompression_context.compressed_data.appendSlice(chunk.IDAT.data);
                }
            }

            try self.readPixelsFromCompressedData(&decompression_context);
        } else {
            return errors.ImageError.UnsupportedPixelFormat;
        }
    }

    /// Read one chunk (length, type, payload, CRC) from the stream, verify the
    /// CRC over type + payload, and dispatch to the matching chunk parser.
    /// Returns false once IEND is reached, true to keep reading.
    fn readChunk(self: *Self, reader: ImageReader) !bool {
        const chunk_size = try reader.readIntBig(u32);

        var chunk_type: [4]u8 = undefined;
        _ = try reader.read(chunk_type[0..]);

        var read_buffer = try self.allocator.alloc(u8, chunk_size);
        // Freed below unless a parser keeps a reference into it; errdefer
        // covers the error paths in between.
        errdefer self.allocator.free(read_buffer);

        _ = try reader.read(read_buffer);

        const read_crc = try reader.readIntBig(u32);

        // Per the spec, the CRC covers the chunk type and data, not the length.
        var crc_hash = crc.Crc32.init();
        crc_hash.update(chunk_type[0..]);
        crc_hash.update(read_buffer[0..]);
const computed_crc = crc_hash.final();
        if (computed_crc != read_crc) {
            return errors.PngError.InvalidCRC;
        }

        var found = false;
        var deallocate_buffer = true;
        var continue_reading = true;

        const read_chunk_id = utils.toMagicNumberBig(chunk_type[0..]);

        // TODO: fix the bug in Zig to make this work
        // inline for (AllChunks) |chunkInfo| {
        //     const typeChunkID = @field(chunkInfo.chunk_type, "ChunkID");
        //     if (read_chunk_id == typeChunkID) {
        //         found = true;
        //         if (read_chunk_id == IHDR.ChunkID) {
        //             deallocate_buffer = try self.header.read(self.allocator, read_buffer);
        //         } else if (read_chunk_id == IEND.ChunkID) {
        //             continue_reading = false;
        //         } else if (chunkInfo.store) {
        //             const final_chunk = try self.chunks.addOne();
        //             final_chunk.* = @unionInit(ChunkVariant, @typeName(chunkInfo.chunk_type), undefined);
        //             deallocate_buffer = try @field(final_chunk, @typeName(chunkInfo.chunk_type)).read(self.header, self.allocator, read_buffer);
        //         }
        //         break;
        //     }
        // }

        // Remove this when the code below works
        // Each parser's read() returns whether read_buffer may be freed
        // (i.e. it kept no reference into the buffer).
        switch (read_chunk_id) {
            IHDR.ChunkID => {
                deallocate_buffer = try self.header.read(self.allocator, read_buffer);
                found = true;
            },
            IEND.ChunkID => {
                continue_reading = false;
                found = true;
            },
            PLTE.ChunkID => {
                const plte_chunk = try self.chunks.addOne();
                plte_chunk.* = @unionInit(ChunkVariant, PLTE.ChunkType, undefined);
                deallocate_buffer = try @field(plte_chunk, PLTE.ChunkType).read(self.header, self.allocator, read_buffer);
                found = true;
            },
            bKGD.ChunkID => {
                const bkgd_chunk = try self.chunks.addOne();
                bkgd_chunk.* = @unionInit(ChunkVariant, bKGD.ChunkType, undefined);
                deallocate_buffer = try @field(bkgd_chunk, bKGD.ChunkType).read(self.header, self.allocator, read_buffer);
                found = true;
            },
            gAMA.ChunkID => {
                const gamma_chunk = try self.chunks.addOne();
                gamma_chunk.* = @unionInit(ChunkVariant, gAMA.ChunkType, undefined);
                deallocate_buffer = try @field(gamma_chunk, gAMA.ChunkType).read(self.header, self.allocator, read_buffer);
                found = true;
            },
            IDAT.ChunkID => {
                const
data_chunk = try self.chunks.addOne(); data_chunk.* = @unionInit(ChunkVariant, IDAT.ChunkType, undefined); deallocate_buffer = try @field(data_chunk, IDAT.ChunkType).read(self.header, self.allocator, read_buffer); found = true; }, else => {}, } if (deallocate_buffer) { self.allocator.free(read_buffer); } const chunk_is_critical = (chunk_type[0] & (1 << 5)) == 0; if (chunk_is_critical and !found) { return errors.PngError.InvalidChunk; } return continue_reading; } fn readPixelsFromCompressedData(self: Self, context: *DecompressionContext) !void { var data_stream = std.io.fixedBufferStream(context.compressed_data.items); var uncompress_stream = try std.compress.zlib.zlibStream(self.allocator, data_stream.reader()); defer uncompress_stream.deinit(); const final_data = try uncompress_stream.reader().readAllAlloc(self.allocator, std.math.maxInt(usize)); defer self.allocator.free(final_data); var final_data_stream = std.io.fixedBufferStream(final_data); switch (self.header.interlace_method) { .Standard => { const line_stride = ((self.header.width * self.header.bit_depth + 7) / 8) * self.header.color_type.channelCount(); context.filter = try PngFilter.init(self.allocator, line_stride, self.header.bit_depth * self.header.color_type.channelCount()); defer context.filter.deinit(self.allocator); try self.readPixelsNonInterlaced(context, &final_data_stream, &final_data_stream.reader()); }, .Adam7 => try self.readPixelsInterlaced(context, &final_data_stream, &final_data_stream.reader()), } } fn readPixelsNonInterlaced(self: Self, context: *DecompressionContext, pixel_stream_source: anytype, pixel_stream: anytype) !void { var scanline = try self.allocator.alloc(u8, context.filter.line_stride); defer self.allocator.free(scanline); var pixel_current_pos = try pixel_stream_source.getPos(); const pixel_end_pos = try pixel_stream_source.getEndPos(); const pixels_length = context.pixels.len(); while (pixel_current_pos < pixel_end_pos and context.pixels_index < pixels_length) { const 
filter_type = try pixel_stream.readByte(); _ = try pixel_stream.readAll(scanline); const filter_slice = context.filter.getSlice(); try context.filter.decode(@intToEnum(FilterType, filter_type), scanline); var index: usize = 0; var x: usize = 0; switch (context.pixels.*) { .Grayscale1 => |data| { while (index < filter_slice.len) : (index += 1) { const current_byte = filter_slice[index]; var bit: usize = 0; while (context.pixels_index < pixels_length and x < self.header.width and bit < 8) { data[context.pixels_index].value = @intCast(u1, (current_byte >> @intCast(u3, (7 - bit))) & 1); x += 1; bit += 1; context.pixels_index += 1; } } }, .Grayscale2 => |data| { while (index < filter_slice.len) : (index += 1) { const current_byte = filter_slice[index]; var bit: usize = 1; while (context.pixels_index < pixels_length and x < self.header.width and bit < 8) { data[context.pixels_index].value = @intCast(u2, (current_byte >> @intCast(u3, (7 - bit))) & 0b00000011); x += 1; bit += 2; context.pixels_index += 1; } } }, .Grayscale4 => |data| { while (index < filter_slice.len) : (index += 1) { const current_byte = filter_slice[index]; var bit: usize = 3; while (context.pixels_index < pixels_length and x < self.header.width and bit < 8) { data[context.pixels_index].value = @intCast(u4, (current_byte >> @intCast(u3, (7 - bit))) & 0b00001111); x += 1; bit += 4; context.pixels_index += 1; } } }, .Grayscale8 => |data| { while (index < filter_slice.len and context.pixels_index < pixels_length and x < self.header.width) { data[context.pixels_index].value = filter_slice[index]; index += 1; x += 1; context.pixels_index += 1; } }, .Grayscale16 => |data| { while (index < filter_slice.len and context.pixels_index < pixels_length and x < self.header.width) { const read_value = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &filter_slice[index])); data[context.pixels_index].value = read_value; index += 2; x += 1; context.pixels_index += 1; } }, .Rgb24 => |data| { var count: usize = 0; const 
count_end = filter_slice.len; while (count < count_end and context.pixels_index < pixels_length and x < self.header.width) { data[context.pixels_index].R = filter_slice[count]; data[context.pixels_index].G = filter_slice[count + 1]; data[context.pixels_index].B = filter_slice[count + 2]; count += 3; x += 1; context.pixels_index += 1; } }, .Rgb48 => |data| { var count: usize = 0; const count_end = filter_slice.len; while (count < count_end and context.pixels_index < pixels_length and x < self.header.width) { data[context.pixels_index].R = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &filter_slice[count])); data[context.pixels_index].G = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &filter_slice[count + 2])); data[context.pixels_index].B = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &filter_slice[count + 4])); count += 6; x += 1; context.pixels_index += 1; } }, .Bpp1 => |indexed| { while (index < filter_slice.len) : (index += 1) { const current_byte = filter_slice[index]; var bit: usize = 0; while (context.pixels_index < pixels_length and x < self.header.width and bit < 8) { indexed.indices[context.pixels_index] = @intCast(u1, (current_byte >> @intCast(u3, (7 - bit))) & 1); x += 1; bit += 1; context.pixels_index += 1; } } }, .Bpp2 => |indexed| { while (index < filter_slice.len) : (index += 1) { const current_byte = filter_slice[index]; var bit: usize = 1; while (context.pixels_index < pixels_length and x < self.header.width and bit < 8) { indexed.indices[context.pixels_index] = @intCast(u2, (current_byte >> @intCast(u3, (7 - bit))) & 0b00000011); x += 1; bit += 2; context.pixels_index += 1; } } }, .Bpp4 => |indexed| { while (index < filter_slice.len) : (index += 1) { const current_byte = filter_slice[index]; var bit: usize = 3; while (context.pixels_index < pixels_length and x < self.header.width and bit < 8) { indexed.indices[context.pixels_index] = @intCast(u4, (current_byte >> @intCast(u3, (7 - bit))) & 0b00001111); x += 1; bit += 4; context.pixels_index += 
1; } } }, .Bpp8 => |indexed| { while (index < filter_slice.len and context.pixels_index < pixels_length and x < self.header.width) { indexed.indices[context.pixels_index] = filter_slice[index]; index += 1; x += 1; context.pixels_index += 1; } }, .Grayscale8Alpha => |grey_alpha| { var count: usize = 0; const count_end = filter_slice.len; while (count < count_end and context.pixels_index < pixels_length and x < self.header.width) { grey_alpha[context.pixels_index].value = filter_slice[count]; grey_alpha[context.pixels_index].alpha = filter_slice[count + 1]; count += 2; x += 1; context.pixels_index += 1; } }, .Grayscale16Alpha => |grey_alpha| { var count: usize = 0; const count_end = filter_slice.len; while (count < count_end and context.pixels_index < pixels_length and x < self.header.width) { grey_alpha[context.pixels_index].value = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &filter_slice[count])); grey_alpha[context.pixels_index].alpha = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &filter_slice[count + 2])); count += 4; x += 1; context.pixels_index += 1; } }, .Rgba32 => |data| { var count: usize = 0; const count_end = filter_slice.len; while (count < count_end and context.pixels_index < pixels_length and x < self.header.width) { data[context.pixels_index].R = filter_slice[count]; data[context.pixels_index].G = filter_slice[count + 1]; data[context.pixels_index].B = filter_slice[count + 2]; data[context.pixels_index].A = filter_slice[count + 3]; count += 4; x += 1; context.pixels_index += 1; } }, .Rgba64 => |data| { var count: usize = 0; const count_end = filter_slice.len; while (count < count_end and context.pixels_index < pixels_length and x < self.header.width) { data[context.pixels_index].R = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &filter_slice[count])); data[context.pixels_index].G = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &filter_slice[count + 2])); data[context.pixels_index].B = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, 
&filter_slice[count + 4])); data[context.pixels_index].A = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &filter_slice[count + 6])); count += 8; x += 1; context.pixels_index += 1; } }, else => { return errors.ImageError.UnsupportedPixelFormat; }, } pixel_current_pos = try pixel_stream_source.getPos(); } } const InterlacedStartingWidth = [7]usize{ 0, 4, 0, 2, 0, 1, 0 }; const InterlacedStartingHeight = [7]usize{ 0, 0, 4, 0, 2, 0, 1 }; const InterlacedWidthIncrement = [7]usize{ 8, 8, 4, 4, 2, 2, 1 }; const InterlacedHeightIncrement = [7]usize{ 8, 8, 8, 4, 4, 2, 2 }; const InterlacedBlockWidth = [7]usize{ 8, 4, 4, 2, 2, 1, 1 }; const InterlacedBlockHeight = [7]usize{ 8, 8, 4, 4, 2, 2, 1 }; fn adam7Width(self: Self, pass: usize) usize { return switch (pass) { 0 => (self.header.width + 7) / 8, 1 => (self.header.width + 3) / 8, 2 => (self.header.width + 3) / 4, 3 => (self.header.width + 1) / 4, 4 => (self.header.width + 1) / 2, 5 => self.header.width / 2, 6 => self.header.width, else => unreachable, }; } fn adam7Height(self: Self, pass: usize) usize { return switch (pass) { 0 => (self.header.height + 7) / 8, 1 => (self.header.height + 7) / 8, 2 => (self.header.height + 3) / 8, 3 => (self.header.height + 3) / 4, 4 => (self.header.height + 1) / 4, 5 => (self.header.height + 1) / 2, 6 => self.header.height / 2, else => unreachable, }; } fn readPixelsInterlaced(self: Self, context: *DecompressionContext, pixel_stream_source: anytype, pixel_stream: anytype) !void { var pixel_current_pos = try pixel_stream_source.getPos(); const pixel_end_pos = try pixel_stream_source.getEndPos(); _ = pixel_current_pos; _ = pixel_end_pos; const pixel_stride = self.header.bit_depth * self.header.color_type.channelCount(); const bytes_per_pixel = std.math.max(1, pixel_stride / 8); const bit_per_bytes = bytes_per_pixel * 8; var current_pass: usize = 0; while (current_pass < 7) : (current_pass += 1) { const current_pass_width = self.adam7Width(current_pass); const current_pass_height = 
self.adam7Height(current_pass); if (current_pass_width == 0 or current_pass_height == 0) { continue; } const line_stride = ((current_pass_width * self.header.bit_depth * self.header.color_type.channelCount()) + 7) / 8; context.filter = try PngFilter.init(self.allocator, line_stride, pixel_stride); defer context.filter.deinit(self.allocator); var scanline = try self.allocator.alloc(u8, context.filter.line_stride); defer self.allocator.free(scanline); context.y = InterlacedStartingHeight[current_pass]; var current_line: usize = 0; while (current_line < current_pass_height) : (current_line += 1) { const filter_type = try pixel_stream.readByte(); _ = try pixel_stream.readAll(scanline); const filter_slice = context.filter.getSlice(); try context.filter.decode(@intToEnum(FilterType, filter_type), scanline); var slice_index: usize = 0; var pixel_index: usize = 0; var bit_index: usize = 0; context.x = InterlacedStartingWidth[current_pass]; while (slice_index < filter_slice.len and context.x < self.header.width and pixel_index < current_pass_width) { const block_width = std.math.min(InterlacedBlockWidth[current_pass], if (context.x < self.header.width) self.header.width - context.x else self.header.width); const block_height = std.math.min(InterlacedBlockHeight[current_pass], if (context.y < self.header.height) self.header.height - context.y else self.header.height); try self.writePixelInterlaced(filter_slice[slice_index..], pixel_index, context, block_width, block_height); pixel_index += 1; bit_index += pixel_stride; if ((bit_index % bit_per_bytes) == 0) { slice_index += bytes_per_pixel; } context.x += InterlacedWidthIncrement[current_pass]; } context.y += InterlacedHeightIncrement[current_pass]; } } } fn writePixelInterlaced(self: Self, bytes: []const u8, pixel_index: usize, context: *DecompressionContext, block_width: usize, block_height: usize) !void { switch (context.pixels.*) { .Grayscale1 => |data| { const bit = (pixel_index & 0b111); const value = @intCast(u1, 
(bytes[0] >> @intCast(u3, 7 - bit)) & 1); var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < data.len) { data[data_index].value = value; } } } } }, .Grayscale2 => |data| { const bit = (pixel_index & 0b011) * 2 + 1; const value = @intCast(u2, (bytes[0] >> @intCast(u3, (7 - bit))) & 0b00000011); var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < data.len) { data[data_index].value = value; } } } } }, .Grayscale4 => |data| { const bit = (pixel_index & 0b1) * 4 + 3; const value = @intCast(u4, (bytes[0] >> @intCast(u3, (7 - bit))) & 0b00001111); var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < data.len) { data[data_index].value = value; } } } } }, .Grayscale8 => |data| { const value = bytes[0]; var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < data.len) { data[data_index].value = 
value; } } } } }, .Grayscale16 => |data| { const value = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, bytes)); var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < data.len) { data[data_index].value = value; } } } } }, .Rgb24 => |data| { const pixel = color.Rgb24{ .R = bytes[0], .G = bytes[1], .B = bytes[2], }; var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < data.len) { data[data_index] = pixel; } } } } }, .Rgb48 => |data| { const pixel = color.Rgb48{ .R = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &bytes[0])), .G = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &bytes[2])), .B = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &bytes[4])), }; var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < data.len) { data[data_index] = pixel; } } } } }, .Bpp1 => |indexed| { const bit = (pixel_index & 0b111); const value = @intCast(u1, (bytes[0] >> @intCast(u3, 7 - bit)) & 1); var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while 
(width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < indexed.indices.len) { indexed.indices[data_index] = value; } } } } }, .Bpp2 => |indexed| { const bit = (pixel_index & 0b011) * 2 + 1; const value = @intCast(u2, (bytes[0] >> @intCast(u3, (7 - bit))) & 0b00000011); var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < indexed.indices.len) { indexed.indices[data_index] = value; } } } } }, .Bpp4 => |indexed| { const bit = (pixel_index & 0b1) * 4 + 3; const value = @intCast(u4, (bytes[0] >> @intCast(u3, (7 - bit))) & 0b00001111); var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < indexed.indices.len) { indexed.indices[data_index] = value; } } } } }, .Bpp8 => |indexed| { const value = bytes[0]; var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < indexed.indices.len) { indexed.indices[data_index] = value; } } } } }, .Grayscale8Alpha => |grey_alpha| { const value = color.Grayscale8Alpha{ .value = bytes[0], .alpha = bytes[1], }; var height: usize = 0; while (height < block_height) : 
(height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < grey_alpha.len) { grey_alpha[data_index] = value; } } } } }, .Grayscale16Alpha => |grey_alpha| { const value = color.Grayscale16Alpha{ .value = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &bytes[0])), .alpha = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &bytes[2])), }; var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < grey_alpha.len) { grey_alpha[data_index] = value; } } } } }, .Rgba32 => |data| { const pixel = color.Rgba32{ .R = bytes[0], .G = bytes[1], .B = bytes[2], .A = bytes[3], }; var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < data.len) { data[data_index] = pixel; } } } } }, .Rgba64 => |data| { const pixel = color.Rgba64{ .R = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &bytes[0])), .G = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &bytes[2])), .B = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &bytes[4])), .A = std.mem.readIntBig(u16, @ptrCast(*const [2]u8, &bytes[6])), }; var height: usize = 0; while (height < block_height) : (height += 1) { if ((context.y + height) < self.header.height) { var width: usize = 0; var scanline = (context.y + height) * 
self.header.width; while (width < block_width) : (width += 1) { const data_index = scanline + context.x + width; if ((context.x + width) < self.header.width and data_index < data.len) { data[data_index] = pixel; } } } } }, else => { return errors.ImageError.UnsupportedPixelFormat; }, } } fn validateBitDepth(self: Self) bool { const valid_bit_depths = validBitDepths(self.header.color_type); for (valid_bit_depths) |bitDepth| { if (self.header.bit_depth == bitDepth) { return true; } } return false; } };
// src/formats/png.zig
const std = @import("std");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;

const bu = @import("bits_utils.zig");
const ddec = @import("dict_decoder.zig");
const deflate_const = @import("deflate_const.zig");
const mu = @import("mem_utils.zig");

const max_match_offset = deflate_const.max_match_offset;
const end_block_marker = deflate_const.end_block_marker;

const max_code_len = 16; // max length of Huffman code

// The next three numbers come from the RFC section 3.2.7, with the
// additional proviso in section 3.2.5 which implies that distance codes
// 30 and 31 should never occur in compressed data.
const max_num_lit = 286;
const max_num_dist = 30;
const num_codes = 19; // number of codes in Huffman meta-code

// Byte offset at which corrupt input was last detected, for error reporting.
var corrupt_input_error_offset: u64 = undefined;

const InflateError = error{
    CorruptInput, // A CorruptInput error reports the presence of corrupt input at a given offset.
    BadInternalState, // A BadInternalState reports an error in the flate code itself.
    BadReaderState, // An error was encountered while accessing the inner reader
    UnexpectedEndOfStream,
    EndOfStreamWithNoError,
};

// The data structure for decoding Huffman tables is based on that of
// zlib. There is a lookup table of a fixed bit width (huffman_chunk_bits),
// For codes smaller than the table width, there are multiple entries
// (each combination of trailing bits has the same value). For codes
// larger than the table width, the table contains a link to an overflow
// table. The width of each entry in the link table is the maximum code
// size minus the chunk width.
//
// Note that you can do a lookup in the table even without all bits
// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
// have the property that shorter codes come before longer ones, the
// bit length estimate in the result is a lower bound on the actual
// number of bits
//
// See the following:
// https://github.com/madler/zlib/raw/master/doc/algorithm.txt

// chunk & 15 is number of bits
// chunk >> 4 is value, including table link
const huffman_chunk_bits = 9;
const huffman_num_chunks = 1 << huffman_chunk_bits; // 512
const huffman_count_mask = 15; // 0b1111
const huffman_value_shift = 4;

const HuffmanDecoder = struct {
    const Self = @This();

    allocator: Allocator = undefined,

    min: u32 = 0, // the minimum code length
    chunks: [huffman_num_chunks]u16 = [1]u16{0} ** huffman_num_chunks, // chunks as described above
    links: [][]u16 = undefined, // overflow links
    link_mask: u32 = 0, // mask the width of the link table
    initialized: bool = false,
    sub_chunks: ArrayList(u32) = undefined,

    // Initialize Huffman decoding tables from array of code lengths.
    // Following this function, self is guaranteed to be initialized into a complete
    // tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
    // degenerate case where the tree has only a single symbol with length 1. Empty
    // trees are permitted.
    // Returns false when the code-length sequence cannot form a valid tree.
    fn init(self: *Self, allocator: Allocator, lengths: []u32) !bool {
        // Sanity enables additional runtime tests during Huffman
        // table construction. It's intended to be used during
        // development and debugging
        const sanity = false;

        // Reuse of a previously-initialized decoder: reset to defaults first.
        if (self.min != 0) {
            self.* = HuffmanDecoder{};
        }

        self.allocator = allocator;

        // Count number of codes of each length,
        // compute min and max length.
        var count: [max_code_len]u32 = [1]u32{0} ** max_code_len;
        var min: u32 = 0;
        var max: u32 = 0;
        for (lengths) |n| {
            if (n == 0) {
                continue;
            }
            if (min == 0) {
                min = n;
            }
            min = @minimum(n, min);
            max = @maximum(n, max);
            count[n] += 1;
        }

        // Empty tree. The decompressor.huffSym function will fail later if the tree
        // is used. Technically, an empty tree is only valid for the HDIST tree and
        // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
        // is guaranteed to fail since it will attempt to use the tree to decode the
        // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
        // guaranteed to fail later since the compressed data section must be
        // composed of at least one symbol (the end-of-block marker).
        if (max == 0) {
            return true;
        }

        // Assign canonical Huffman codes: first code of each length (RFC 1951 3.2.2).
        var next_code: [max_code_len]u32 = [1]u32{0} ** max_code_len;
        var code: u32 = 0;
        {
            var i = min;
            while (i <= max) : (i += 1) {
                code <<= 1;
                next_code[i] = code;
                code += count[i];
            }
        }

        // Check that the coding is complete (i.e., that we've
        // assigned all 2-to-the-max possible bit sequences).
        // Exception: To be compatible with zlib, we also need to
        // accept degenerate single-code codings. See also
        // TestDegenerateHuffmanCoding.
        if (code != @as(u32, 1) << @intCast(u5, max) and !(code == 1 and max == 1)) {
            return false;
        }

        self.min = min;
        if (max > huffman_chunk_bits) {
            var num_links = @as(u32, 1) << @intCast(u5, max - huffman_chunk_bits);
            self.link_mask = @intCast(u32, num_links - 1);

            // create link tables
            var link = next_code[huffman_chunk_bits + 1] >> 1;
            self.links = try self.allocator.alloc([]u16, huffman_num_chunks - link);
            self.sub_chunks = ArrayList(u32).init(self.allocator);
            self.initialized = true;
            var j = @intCast(u32, link);
            while (j < huffman_num_chunks) : (j += 1) {
                var reverse = @intCast(u32, bu.bitReverse(u16, @intCast(u16, j), 16));
                reverse >>= @intCast(u32, 16 - huffman_chunk_bits);
                var off = j - @intCast(u32, link);
                if (sanity) {
                    // check we are not overwriting an existing chunk
                    assert(self.chunks[reverse] == 0);
                }
                // Indirect chunk: value field is the link-table index, count field
                // is huffman_chunk_bits + 1 (i.e. "longer than the primary table").
                self.chunks[reverse] = @intCast(u16, off << huffman_value_shift | (huffman_chunk_bits + 1));
                self.links[off] = try self.allocator.alloc(u16, num_links);
                if (sanity) {
                    // initialize to a known invalid chunk code (0) to see if we overwrite
                    // this value later on
                    mem.set(u16, self.links[off], 0);
                }
                try self.sub_chunks.append(off);
            }
        }

        // Fill the tables: for each symbol, write its chunk at every table slot
        // whose low bits match the (bit-reversed) code.
        for (lengths) |n, li| {
            if (n == 0) {
                continue;
            }
            var ncode = next_code[n];
            next_code[n] += 1;
            var chunk = @intCast(u16, (li << huffman_value_shift) | n);
            var reverse = @intCast(u16, bu.bitReverse(u16, @intCast(u16, ncode), 16));
            reverse >>= @intCast(u4, 16 - n);
            if (n <= huffman_chunk_bits) {
                var off = reverse;
                while (off < self.chunks.len) : (off += @as(u16, 1) << @intCast(u4, n)) {
                    // We should never need to overwrite
                    // an existing chunk. Also, 0 is
                    // never a valid chunk, because the
                    // lower 4 "count" bits should be
                    // between 1 and 15.
                    if (sanity) {
                        assert(self.chunks[off] == 0);
                    }
                    self.chunks[off] = chunk;
                }
            } else {
                var j = reverse & (huffman_num_chunks - 1);
                if (sanity) {
                    // Expect an indirect chunk
                    assert(self.chunks[j] & huffman_count_mask == huffman_chunk_bits + 1);
                    // Longer codes should have been
                    // associated with a link table above.
                }
                var value = self.chunks[j] >> huffman_value_shift;
                var link_tab = self.links[value];
                reverse >>= huffman_chunk_bits;
                var off = reverse;
                while (off < link_tab.len) : (off += @as(u16, 1) << @intCast(u4, n - huffman_chunk_bits)) {
                    if (sanity) {
                        // check we are not overwriting an existing chunk
                        assert(link_tab[off] == 0);
                    }
                    link_tab[off] = @intCast(u16, chunk);
                }
            }
        }

        if (sanity) {
            // Above we've sanity checked that we never overwrote
            // an existing entry. Here we additionally check that
            // we filled the tables completely.
            for (self.chunks) |chunk, i| {
                // As an exception, in the degenerate
                // single-code case, we allow odd
                // chunks to be missing.
                if (code == 1 and i % 2 == 1) {
                    continue;
                }

                // Assert we are not missing a chunk.
                // All chunks should have been written once
                // thus losing their initial value of 0
                assert(chunk != 0);
            }

            if (self.initialized) {
                for (self.links) |link_tab| {
                    for (link_tab) |chunk| {
                        // Assert we are not missing a chunk.
                        assert(chunk != 0);
                    }
                }
            }
        }

        return true;
    }

    /// Release all allocated memory.
    pub fn deinit(self: *Self) void {
        if (self.initialized and self.links.len > 0) {
            for (self.sub_chunks.items) |off| {
                self.allocator.free(self.links[off]);
            }
            self.allocator.free(self.links);
            self.sub_chunks.deinit();
            self.initialized = false;
        }
    }
};

// Lazily-built decoder for the fixed Huffman tables of RFC 1951 section 3.2.6.
// NOTE(review): module-level cache; built once and shared by all decompressors
// created afterwards — not thread-safe during first initialization.
var fixed_huffman_decoder: ?HuffmanDecoder = null;

fn fixedHuffmanDecoderInit(allocator: Allocator) !HuffmanDecoder {
    if (fixed_huffman_decoder != null) {
        return fixed_huffman_decoder.?;
    }

    // These come from the RFC section 3.2.6.
    var bits: [288]u32 = undefined;
    var i: u32 = 0;
    while (i < 144) : (i += 1) {
        bits[i] = 8;
    }
    while (i < 256) : (i += 1) {
        bits[i] = 9;
    }
    while (i < 280) : (i += 1) {
        bits[i] = 7;
    }
    while (i < 288) : (i += 1) {
        bits[i] = 8;
    }

    fixed_huffman_decoder = HuffmanDecoder{};
    _ = try fixed_huffman_decoder.?.init(allocator, &bits);
    return fixed_huffman_decoder.?;
}

// Which phase a suspended `step` resumes in: reading symbols (.init) or
// copying a pending back-reference out of the dictionary (.dict).
const DecompressorState = enum {
    init,
    dict,
};

/// Returns a new Decompressor that can be used to read the uncompressed version of `reader`.
/// `dictionary` is optional and initializes the Decompressor with a preset dictionary.
/// The returned Decompressor behaves as if the uncompressed data stream started with the given
/// dictionary, which has already been read. Use the same `dictionary` as the compressor used to
/// compress the data.
/// This decompressor may use at most 300 KiB of heap memory from the provided allocator.
/// The uncompressed data will be written into the provided buffer, see `reader()` and `read()`.
pub fn decompressor(allocator: Allocator, reader: anytype, dictionary: ?[]const u8) !Decompressor(@TypeOf(reader)) {
    return Decompressor(@TypeOf(reader)).init(allocator, reader, dictionary);
}

pub fn Decompressor(comptime ReaderType: type) type {
    return struct {
        const Self = @This();

        pub const Error = ReaderType.Error || error{EndOfStream} || InflateError || Allocator.Error;
        pub const Reader = io.Reader(*Self, Error, read);

        allocator: Allocator,

        // Input source.
inner_reader: ReaderType,
        roffset: u64, // bytes consumed from inner_reader so far (used for error offsets)

        // Input bits, in top of b.
        b: u32,
        nb: u32, // number of valid bits currently held in b

        // Huffman decoders for literal/length, distance.
        hd1: HuffmanDecoder,
        hd2: HuffmanDecoder,

        // Length arrays used to define Huffman codes.
        bits: *[max_num_lit + max_num_dist]u32,
        codebits: *[num_codes]u32,

        // Output history, buffer.
        dict: ddec.DictDecoder,

        // Temporary buffer (avoids repeated allocation).
        buf: [4]u8,

        // Next step in the decompression,
        // and decompression state.
        step: fn (*Self) Error!void,
        step_state: DecompressorState,
        final: bool, // true once the BFINAL block header bit has been seen
        err: ?Error, // sticky error; once set, read() keeps returning it
        to_read: []u8, // decompressed bytes flushed from dict, pending delivery to read()
        // Huffman states for the lit/length values
        hl: ?*HuffmanDecoder,
        // Huffman states for the distance values.
        hd: ?*HuffmanDecoder,
        copy_len: u32,
        copy_dist: u32,

        /// Returns a Reader that reads compressed data from an underlying reader and outputs
        /// uncompressed data.
        pub fn reader(self: *Self) Reader {
            return .{ .context = self };
        }

        fn init(allocator: Allocator, in_reader: ReaderType, dict: ?[]const u8) !Self {
            fixed_huffman_decoder = try fixedHuffmanDecoderInit(allocator);

            var bits = try allocator.create([max_num_lit + max_num_dist]u32);
            var codebits = try allocator.create([num_codes]u32);

            var dd = ddec.DictDecoder{};
            try dd.init(allocator, max_match_offset, dict);

            return Self{
                .allocator = allocator,

                // Input source.
                .inner_reader = in_reader,
                .roffset = 0,

                // Input bits, in top of b.
                .b = 0,
                .nb = 0,

                // Huffman decoders for literal/length, distance.
                .hd1 = HuffmanDecoder{},
                .hd2 = HuffmanDecoder{},

                // Length arrays used to define Huffman codes.
                .bits = bits,
                .codebits = codebits,

                // Output history, buffer.
                .dict = dd,

                // Temporary buffer (avoids repeated allocation).
                .buf = [_]u8{0} ** 4,

                // Next step in the decompression and decompression state.
                .step = nextBlock,
                .step_state = .init,
                .final = false,
                .err = null,
                .to_read = &[0]u8{},
                .hl = null,
                .hd = null,
                .copy_len = 0,
                .copy_dist = 0,
            };
        }

        /// Release all allocated memory.
        pub fn deinit(self: *Self) void {
            self.hd2.deinit();
            self.hd1.deinit();
            self.dict.deinit();
            self.allocator.destroy(self.codebits);
            self.allocator.destroy(self.bits);
        }

        // Read the 3-bit block header (BFINAL + BTYPE, RFC 1951 section 3.2.3)
        // and dispatch to the matching block decoder.
        fn nextBlock(self: *Self) Error!void {
            while (self.nb < 1 + 2) {
                self.moreBits() catch |e| {
                    self.err = e;
                    return e;
                };
            }
            self.final = self.b & 1 == 1;
            self.b >>= 1;
            var typ = self.b & 3;
            self.b >>= 2;
            self.nb -= 1 + 2;
            switch (typ) {
                0 => try self.dataBlock(),
                1 => {
                    // compressed, fixed Huffman tables
                    self.hl = &fixed_huffman_decoder.?;
                    self.hd = null;
                    try self.huffmanBlock();
                },
                2 => {
                    // compressed, dynamic Huffman tables
                    self.hd2.deinit();
                    self.hd1.deinit();
                    try self.readHuffman();
                    self.hl = &self.hd1;
                    self.hd = &self.hd2;
                    try self.huffmanBlock();
                },
                else => {
                    // 3 is reserved.
                    corrupt_input_error_offset = self.roffset;
                    self.err = InflateError.CorruptInput;
                    return InflateError.CorruptInput;
                },
            }
        }

        /// Reads compressed data from the underlying reader and outputs uncompressed data into
        /// `output`.
        pub fn read(self: *Self, output: []u8) Error!usize {
            while (true) {
                // First drain any already-decompressed bytes.
                if (self.to_read.len > 0) {
                    var n = mu.copy(output, self.to_read);
                    self.to_read = self.to_read[n..];
                    if (self.to_read.len == 0 and
                        self.err != null)
                    {
                        if (self.err.? == InflateError.EndOfStreamWithNoError) {
                            return n;
                        }
                        return self.err.?;
                    }
                    return n;
                }
                if (self.err != null) {
                    if (self.err.? == InflateError.EndOfStreamWithNoError) {
                        return 0;
                    }
                    return self.err.?;
                }

                // Advance the state machine; it may fill self.to_read.
                self.step(self) catch |e| {
                    self.err = e;
                    if (self.to_read.len == 0) {
                        self.to_read = self.dict.readFlush(); // Flush what's left in case of error
                    }
                };
            }
        }

        // Returns the sticky error, or null if the stream ended cleanly.
        pub fn close(self: *Self) ?Error {
            if (self.err == Error.EndOfStreamWithNoError) {
                return null;
            }
            return self.err;
        }

        // RFC 1951 section 3.2.7.
        // Compression with dynamic Huffman codes

        const code_order = [_]u32{ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };

        // Parse the dynamic Huffman table definition preceding a type-2 block
        // and build hd1 (lit/length) and hd2 (distance).
        fn readHuffman(self: *Self) Error!void {
            // HLIT[5], HDIST[5], HCLEN[4].
            while (self.nb < 5 + 5 + 4) {
                try self.moreBits();
            }
            var nlit = @intCast(u32, self.b & 0x1F) + 257;
            if (nlit > max_num_lit) {
                corrupt_input_error_offset = self.roffset;
                self.err = InflateError.CorruptInput;
                return InflateError.CorruptInput;
            }
            self.b >>= 5;
            var ndist = @intCast(u32, self.b & 0x1F) + 1;
            if (ndist > max_num_dist) {
                corrupt_input_error_offset = self.roffset;
                self.err = InflateError.CorruptInput;
                return InflateError.CorruptInput;
            }
            self.b >>= 5;
            var nclen = @intCast(u32, self.b & 0xF) + 4;
            // num_codes is 19, so nclen is always valid.
            self.b >>= 4;
            self.nb -= 5 + 5 + 4;

            // (HCLEN+4)*3 bits: code lengths in the magic code_order order.
            var i: u32 = 0;
            while (i < nclen) : (i += 1) {
                while (self.nb < 3) {
                    try self.moreBits();
                }
                self.codebits[code_order[i]] = @intCast(u32, self.b & 0x7);
                self.b >>= 3;
                self.nb -= 3;
            }
            i = nclen;
            while (i < code_order.len) : (i += 1) {
                self.codebits[code_order[i]] = 0;
            }
            if (!try self.hd1.init(self.allocator, self.codebits[0..])) {
                corrupt_input_error_offset = self.roffset;
                self.err = InflateError.CorruptInput;
                return InflateError.CorruptInput;
            }

            // HLIT + 257 code lengths, HDIST + 1 code lengths,
            // using the code length Huffman code.
            i = 0;
            var n = nlit + ndist;
            while (i < n) {
                var x = try self.huffSym(&self.hd1);
                if (x < 16) {
                    // Actual length.
                    self.bits[i] = x;
                    i += 1;
                    continue;
                }
                // Repeat previous length or zero.
                var rep: u32 = 0;
                var nb: u32 = 0;
                var b: u32 = 0;
                switch (x) {
                    16 => {
                        rep = 3;
                        nb = 2;
                        if (i == 0) {
                            corrupt_input_error_offset = self.roffset;
                            self.err = InflateError.CorruptInput;
                            return InflateError.CorruptInput;
                        }
                        b = self.bits[i - 1];
                    },
                    17 => {
                        rep = 3;
                        nb = 3;
                        b = 0;
                    },
                    18 => {
                        rep = 11;
                        nb = 7;
                        b = 0;
                    },
                    else => return error.BadInternalState, // unexpected length code
                }
                while (self.nb < nb) {
                    try self.moreBits();
                }
                rep += @intCast(u32, self.b & (@as(u32, 1) << @intCast(u5, nb)) - 1);
                self.b >>= @intCast(u5, nb);
                self.nb -= nb;
                if (i + rep > n) {
                    corrupt_input_error_offset = self.roffset;
                    self.err = InflateError.CorruptInput;
                    return InflateError.CorruptInput;
                }
                var j: u32 = 0;
                while (j < rep) : (j += 1) {
                    self.bits[i] = b;
                    i += 1;
                }
            }

            if (!try self.hd1.init(self.allocator, self.bits[0..nlit]) or
                !try self.hd2.init(self.allocator, self.bits[nlit .. nlit + ndist]))
            {
                corrupt_input_error_offset = self.roffset;
                self.err = InflateError.CorruptInput;
                return InflateError.CorruptInput;
            }

            // As an optimization, we can initialize the min bits to read at a time
            // for the HLIT tree to the length of the EOB marker since we know that
            // every block must terminate with one. This preserves the property that
            // we never read any extra bytes after the end of the DEFLATE stream.
            if (self.hd1.min < self.bits[end_block_marker]) {
                self.hd1.min = self.bits[end_block_marker];
            }

            return;
        }

        // Decode a single Huffman block.
        // hl and hd are the Huffman states for the lit/length values
        // and the distance values, respectively. If hd == null, using the
        // fixed distance encoding associated with fixed Huffman blocks.
        fn huffmanBlock(self: *Self) Error!void {
            while (true) {
                switch (self.step_state) {
                    .init => {
                        // Read literal and/or (length, distance) according to RFC section 3.2.3.
                        var v = try self.huffSym(self.hl.?);
                        var n: u32 = 0; // number of bits extra
                        var length: u32 = 0;
                        switch (v) {
                            0...255 => {
                                // Plain literal byte.
                                self.dict.writeByte(@intCast(u8, v));
                                if (self.dict.availWrite() == 0) {
                                    // Window full: flush and suspend here.
                                    self.to_read = self.dict.readFlush();
                                    self.step = huffmanBlock;
                                    self.step_state = .init;
                                    return;
                                }
                                self.step_state = .init;
                                continue;
                            },
                            256 => {
                                // End-of-block marker.
                                self.finishBlock();
                                return;
                            },
                            // otherwise, reference to older data
                            257...264 => {
                                length = v - (257 - 3);
                                n = 0;
                            },
                            265...268 => {
                                length = v * 2 - (265 * 2 - 11);
                                n = 1;
                            },
                            269...272 => {
                                length = v * 4 - (269 * 4 - 19);
                                n = 2;
                            },
                            273...276 => {
                                length = v * 8 - (273 * 8 - 35);
                                n = 3;
                            },
                            277...280 => {
                                length = v * 16 - (277 * 16 - 67);
                                n = 4;
                            },
                            281...284 => {
                                length = v * 32 - (281 * 32 - 131);
                                n = 5;
                            },
                            max_num_lit - 1 => { // 285
                                length = 258;
                                n = 0;
                            },
                            else => {
                                corrupt_input_error_offset = self.roffset;
                                self.err = InflateError.CorruptInput;
                                return InflateError.CorruptInput;
                            },
                        }
                        if (n > 0) {
                            while (self.nb < n) {
                                try self.moreBits();
                            }
                            length += @intCast(u32, self.b) & ((@as(u32, 1) << @intCast(u5, n)) - 1);
                            self.b >>= @intCast(u5, n);
                            self.nb -= n;
                        }

                        var dist: u32 = 0;
                        if (self.hd == null) {
                            // Fixed block: distance codes are 5 bits, stored bit-reversed.
                            while (self.nb < 5) {
                                try self.moreBits();
                            }
                            dist = @intCast(
                                u32,
                                bu.bitReverse(u8, @intCast(u8, (self.b & 0x1F) << 3), 8),
                            );
                            self.b >>= 5;
                            self.nb -= 5;
                        } else {
                            dist = try self.huffSym(self.hd.?);
                        }

                        switch (dist) {
                            0...3 => dist += 1,
                            4...max_num_dist - 1 => { // 4...29
                                var nb = @intCast(u32, dist - 2) >> 1;
                                // have 1 bit in bottom of dist, need nb more.
                                var extra = (dist & 1) << @intCast(u5, nb);
                                while (self.nb < nb) {
                                    try self.moreBits();
                                }
                                extra |= @intCast(u32, self.b & (@as(u32, 1) << @intCast(u5, nb)) - 1);
                                self.b >>= @intCast(u5, nb);
                                self.nb -= nb;
                                dist = (@as(u32, 1) << @intCast(u5, nb + 1)) + 1 + extra;
                            },
                            else => {
                                corrupt_input_error_offset = self.roffset;
                                self.err = InflateError.CorruptInput;
                                return InflateError.CorruptInput;
                            },
                        }

                        // No check on length; encoding can be prescient.
                        if (dist > self.dict.histSize()) {
                            corrupt_input_error_offset = self.roffset;
                            self.err = InflateError.CorruptInput;
                            return InflateError.CorruptInput;
                        }

                        self.copy_len = length;
                        self.copy_dist = dist;
                        self.step_state = .dict;
                    },

                    .dict => {
                        // Perform a backwards copy according to RFC section 3.2.3.
                        var cnt = self.dict.tryWriteCopy(self.copy_dist, self.copy_len);
                        if (cnt == 0) {
                            cnt = self.dict.writeCopy(self.copy_dist, self.copy_len);
                        }
                        self.copy_len -= cnt;

                        if (self.dict.availWrite() == 0 or self.copy_len > 0) {
                            self.to_read = self.dict.readFlush();
                            self.step = huffmanBlock; // We need to continue this work
                            self.step_state = .dict;
                            return;
                        }
                        self.step_state = .init;
                    },
                }
            }
        }

        // Copy a single uncompressed data block from input to output.
        fn dataBlock(self: *Self) Error!void {
            // Uncompressed.
            // Discard current half-byte.
            self.nb = 0;
            self.b = 0;

            // Length then ones-complement of length.
            var nr: u32 = 4;
            self.inner_reader.readNoEof(self.buf[0..nr]) catch {
                self.err = InflateError.UnexpectedEndOfStream;
                return InflateError.UnexpectedEndOfStream;
            };
            self.roffset += @intCast(u64, nr);
            var n = @intCast(u32, self.buf[0]) | @intCast(u32, self.buf[1]) << 8;
            var nn = @intCast(u32, self.buf[2]) | @intCast(u32, self.buf[3]) << 8;
            if (@intCast(u16, nn) != @truncate(u16, ~n)) {
                corrupt_input_error_offset = self.roffset;
                self.err = InflateError.CorruptInput;
                return InflateError.CorruptInput;
            }

            if (n == 0) {
                self.to_read = self.dict.readFlush();
                self.finishBlock();
                return;
            }

            self.copy_len = n;
            try self.copyData();
        }

        // copyData copies self.copy_len bytes from the underlying reader into self.hist.
        // It pauses for reads when self.hist is full.
        fn copyData(self: *Self) Error!void {
            var buf = self.dict.writeSlice();
            if (buf.len > self.copy_len) {
                buf = buf[0..self.copy_len];
            }

            var cnt = try self.inner_reader.read(buf);
            if (cnt < buf.len) {
                self.err = InflateError.UnexpectedEndOfStream;
            }
            self.roffset += @intCast(u64, cnt);
            self.copy_len -= @intCast(u32, cnt);
            self.dict.writeMark(@intCast(u32, cnt));
            if (self.err != null) {
                return InflateError.UnexpectedEndOfStream;
            }

            if (self.dict.availWrite() == 0 or self.copy_len > 0) {
                self.to_read = self.dict.readFlush();
                self.step = copyData;
                return;
            }
            self.finishBlock();
        }

        // End of the current block; if it was the final one, mark the whole
        // stream finished (EndOfStreamWithNoError) after flushing the window.
        fn finishBlock(self: *Self) void {
            if (self.final) {
                if (self.dict.availRead() > 0) {
                    self.to_read = self.dict.readFlush();
                }
                self.err = InflateError.EndOfStreamWithNoError;
            }
            self.step = nextBlock;
        }

        // Refill the bit buffer with one more byte from the inner reader.
        fn moreBits(self: *Self) InflateError!void {
            var c = self.inner_reader.readByte() catch |e| {
                if (e == error.EndOfStream) {
                    return InflateError.UnexpectedEndOfStream;
                }
                return InflateError.BadReaderState;
            };
            self.roffset += 1;
            self.b |= @as(u32, c) << @intCast(u5, self.nb);
            self.nb += 8;
            return;
        }

        // Read the next Huffman-encoded symbol according to h.
        fn huffSym(self: *Self, h: *HuffmanDecoder) InflateError!u32 {
            // Since a HuffmanDecoder can be empty or be composed of a degenerate tree
            // with single element, huffSym must error on these two edge cases. In both
            // cases, the chunks slice will be 0 for the invalid sequence, leading it
            // satisfy the n == 0 check below.
            var n: u32 = h.min;

            // Optimization. Go compiler isn't smart enough to keep self.b, self.nb in registers,
            // but is smart enough to keep local variables in registers, so use nb and b,
            // inline call to moreBits and reassign b, nb back to self on return.
            var nb = self.nb;
            var b = self.b;
            while (true) {
                while (nb < n) {
                    var c = self.inner_reader.readByte() catch |e| {
                        self.b = b;
                        self.nb = nb;
                        if (e == error.EndOfStream) {
                            return error.UnexpectedEndOfStream;
                        }
                        return InflateError.BadReaderState;
                    };
                    self.roffset += 1;
                    b |= @intCast(u32, c) << @intCast(u5, nb & 31);
                    nb += 8;
                }
                var chunk = h.chunks[b & (huffman_num_chunks - 1)];
                n = @intCast(u32, chunk & huffman_count_mask);
                if (n > huffman_chunk_bits) {
                    // Code longer than the primary table: follow the overflow link.
                    chunk = h.links[chunk >> huffman_value_shift][(b >> huffman_chunk_bits) & h.link_mask];
                    n = @intCast(u32, chunk & huffman_count_mask);
                }
                if (n <= nb) {
                    if (n == 0) {
                        self.b = b;
                        self.nb = nb;
                        corrupt_input_error_offset = self.roffset;
                        self.err = InflateError.CorruptInput;
                        return InflateError.CorruptInput;
                    }
                    self.b = b >> @intCast(u5, n & 31);
                    self.nb = nb - n;
                    return @intCast(u32, chunk >> huffman_value_shift);
                }
            }
        }

        /// Replaces the inner reader and dictionary with new_reader and new_dict.
        /// new_reader must be of the same type as the reader being replaced.
pub fn reset(s: *Self, new_reader: ReaderType, new_dict: ?[]const u8) !void {
            s.inner_reader = new_reader;
            s.step = nextBlock;
            s.err = null;

            s.dict.deinit();
            try s.dict.init(s.allocator, max_match_offset, new_dict);
            return;
        }
    };
}

// tests
const expect = std.testing.expect;
const expectError = std.testing.expectError;
const io = std.io;
const testing = std.testing;

test "truncated input" {
    const TruncatedTest = struct {
        input: []const u8,
        output: []const u8,
    };

    const tests = [_]TruncatedTest{
        .{ .input = "\x00", .output = "" },
        .{ .input = "\x00\x0c", .output = "" },
        .{ .input = "\x00\x0c\x00", .output = "" },
        .{ .input = "\x00\x0c\x00\xf3\xff", .output = "" },
        .{ .input = "\x00\x0c\x00\xf3\xffhello", .output = "hello" },
        .{ .input = "\x00\x0c\x00\xf3\xffhello, world", .output = "hello, world" },
        .{ .input = "\x02", .output = "" },
        .{ .input = "\xf2H\xcd", .output = "He" },
        .{ .input = "\xf2H͙0a\u{0084}\t", .output = "Hel\x90\x90\x90\x90\x90" },
        .{ .input = "\xf2H͙0a\u{0084}\t\x00", .output = "Hel\x90\x90\x90\x90\x90" },
    };

    for (tests) |t| {
        var r = io.fixedBufferStream(t.input).reader();
        var z = try decompressor(testing.allocator, r, null);
        defer z.deinit();
        var zr = z.reader();

        var output = [1]u8{0} ** 12;
        try expectError(error.UnexpectedEndOfStream, zr.readAll(&output));
        try expect(mem.eql(u8, output[0..t.output.len], t.output));
    }
}

test "Go non-regression test for 9842" {
    // See https://golang.org/issue/9842

    const Test = struct {
        err: ?anyerror,
        input: []const u8,
    };

    const tests = [_]Test{
        .{ .err = error.UnexpectedEndOfStream, .input = ("\x95\x90=o\xc20\x10\x86\xf30") },
        .{ .err = error.CorruptInput, .input = ("\x950\x00\x0000000") },

        // Huffman.construct errors

        // lencode
        .{ .err = error.CorruptInput, .input = ("\x950000") },
        .{ .err = error.CorruptInput, .input = ("\x05000") },
        // hlen
        .{ .err = error.CorruptInput, .input = ("\x05\xea\x01\t\x00\x00\x00\x01\x00\\\xbf.\t\x00") },
        // hdist
        .{ .err = error.CorruptInput, .input = ("\x05\xe0\x01A\x00\x00\x00\x00\x10\\\xbf.") },

        // like the "empty distance alphabet" test but for ndist instead of nlen
        .{ .err = error.CorruptInput, .input = ("\x05\xe0\x01\t\x00\x00\x00\x00\x10\\\xbf\xce") },
        .{ .err = null, .input = "\x15\xe0\x01\t\x00\x00\x00\x00\x10\\\xbf.0" },
    };

    for (tests) |t| {
        const reader = std.io.fixedBufferStream(t.input).reader();
        var decomp = try decompressor(testing.allocator, reader, null);
        defer decomp.deinit();

        var output: [10]u8 = undefined;
        if (t.err != null) {
            try expectError(t.err.?, decomp.reader().read(&output));
        } else {
            _ = try decomp.reader().read(&output);
        }
    }
}

test "inflate A Tale of Two Cities (1859) intro" {
    const compressed = [_]u8{
        0x74, 0xeb, 0xcd, 0x0d, 0x80, 0x20, 0x0c, 0x47, 0x71, 0xdc, 0x9d, 0xa2, 0x03, 0xb8, 0x88,
        0x63, 0xf0, 0xf1, 0x47, 0x9a, 0x00, 0x35, 0xb4, 0x86, 0xf5, 0x0d, 0x27, 0x63, 0x82, 0xe7,
        0xdf, 0x7b, 0x87, 0xd1, 0x70, 0x4a, 0x96, 0x41, 0x1e, 0x6a, 0x24, 0x89, 0x8c, 0x2b, 0x74,
        0xdf, 0xf8, 0x95, 0x21, 0xfd, 0x8f, 0xdc, 0x89, 0x09, 0x83, 0x35, 0x4a, 0x5d, 0x49, 0x12,
        0x29, 0xac, 0xb9, 0x41, 0xbf, 0x23, 0x2e, 0x09, 0x79, 0x06, 0x1e, 0x85, 0x91, 0xd6, 0xc6,
        0x2d, 0x74, 0xc4, 0xfb, 0xa1, 0x7b, 0x0f, 0x52, 0x20, 0x84, 0x61, 0x28, 0x0c, 0x63, 0xdf,
        0x53, 0xf4, 0x00, 0x1e, 0xc3, 0xa5, 0x97, 0x88, 0xf4, 0xd9, 0x04, 0xa5, 0x2d, 0x49, 0x54,
        0xbc, 0xfd, 0x90, 0xa5, 0x0c, 0xae, 0xbf, 0x3f, 0x84, 0x77, 0x88, 0x3f, 0xaf, 0xc0, 0x40,
        0xd6, 0x5b, 0x14, 0x8b, 0x54, 0xf6, 0x0f, 0x9b, 0x49, 0xf7, 0xbf, 0xbf, 0x36, 0x54, 0x5a,
        0x0d, 0xe6, 0x3e, 0xf0, 0x9e, 0x29, 0xcd, 0xa1, 0x41, 0x05, 0x36, 0x48, 0x74, 0x4a, 0xe9,
        0x46, 0x66, 0x2a, 0x19, 0x17, 0xf4, 0x71, 0x8e, 0xcb, 0x15, 0x5b, 0x57, 0xe4, 0xf3, 0xc7,
        0xe7, 0x1e, 0x9d, 0x50, 0x08, 0xc3, 0x50, 0x18, 0xc6, 0x2a, 0x19, 0xa0, 0xdd, 0xc3, 0x35,
        0x82, 0x3d, 0x6a, 0xb0, 0x34, 0x92, 0x16, 0x8b, 0xdb, 0x1b, 0xeb, 0x7d, 0xbc, 0xf8, 0x16,
        0xf8, 0xc2, 0xe1, 0xaf, 0x81, 0x7e, 0x58, 0xf4, 0x9f, 0x74, 0xf8, 0xcd, 0x39, 0xd3, 0xaa,
        0x0f, 0x26, 0x31, 0xcc, 0x8d, 0x9a, 0xd2, 0x04, 0x3e, 0x51, 0xbe, 0x7e, 0xbc, 0xc5, 0x27,
        0x3d, 0xa5, 0xf3, 0x15, 0x63, 0x94, 0x42, 0x75, 0x53, 0x6b, 0x61, 0xc8, 0x01, 0x13, 0x4d,
        0x23, 0xba, 0x2a, 0x2d, 0x6c, 0x94, 0x65, 0xc7, 0x4b, 0x86, 0x9b, 0x25, 0x3e, 0xba, 0x01,
        0x10, 0x84, 0x81, 0x28, 0x80, 0x55, 0x1c, 0xc0, 0xa5, 0xaa, 0x36, 0xa6, 0x09, 0xa8, 0xa1,
        0x85, 0xf9, 0x7d, 0x45, 0xbf, 0x80, 0xe4, 0xd1, 0xbb, 0xde, 0xb9, 0x5e, 0xf1, 0x23, 0x89,
        0x4b, 0x00, 0xd5, 0x59, 0x84, 0x85, 0xe3, 0xd4, 0xdc, 0xb2, 0x66, 0xe9, 0xc1, 0x44, 0x0b,
        0x1e, 0x84, 0xec, 0xe6, 0xa1, 0xc7, 0x42, 0x6a, 0x09, 0x6d, 0x9a, 0x5e, 0x70, 0xa2, 0x36,
        0x94, 0x29, 0x2c, 0x85, 0x3f, 0x24, 0x39, 0xf3, 0xae, 0xc3, 0xca, 0xca, 0xaf, 0x2f, 0xce,
        0x8e, 0x58, 0x91, 0x00, 0x25, 0xb5, 0xb3, 0xe9, 0xd4, 0xda, 0xef, 0xfa, 0x48, 0x7b, 0x3b,
        0xe2, 0x63, 0x12, 0x00, 0x00, 0x20, 0x04, 0x80, 0x70, 0x36, 0x8c, 0xbd, 0x04, 0x71, 0xff,
        0xf6, 0x0f, 0x66, 0x38, 0xcf, 0xa1, 0x39, 0x11, 0x0f,
    };

    const expected =
        \\It was the best of times,
        \\it was the worst of times,
        \\it was the age of wisdom,
        \\it was the age of foolishness,
        \\it was the epoch of belief,
        \\it was the epoch of incredulity,
        \\it was the season of Light,
        \\it was the season of Darkness,
        \\it was the spring of hope,
        \\it was the winter of despair,
        \\
        \\we had everything before us, we had nothing before us, we were all going direct to Heaven, we were all going direct the other way---in short, the period was so far like the present period, that some of its noisiest authorities insisted on its being received, for good or for evil, in the superlative degree of comparison only.
        \\
    ;

    const reader = std.io.fixedBufferStream(&compressed).reader();
    var decomp = try decompressor(testing.allocator, reader, null);
    defer decomp.deinit();

    var got: [700]u8 = undefined;
    var got_len = try decomp.reader().read(&got);
    try expect(got_len == 616);

    try expect(mem.eql(u8, got[0..expected.len], expected));
}

test "lengths overflow" {
    // malformed final dynamic block, tries to write 321 code lengths (MAXCODES is 316)
    // f dy  hlit hdist hclen 16  17  18   0 (18)    x138 (18)    x138 (18)     x39 (16) x6
    // 1 10 11101 11101 0000 010 010 010 010 (11) 1111111 (11) 1111111 (11) 0011100 (01) 11
    const stream = [_]u8{
        0b11101101, 0b00011101, 0b00100100, 0b11101001, 0b11111111, 0b11111111, 0b00111001,
        0b00001110,
    };

    try expectError(error.CorruptInput, decompress(stream[0..]));
}

test "empty distance alphabet" {
    // dynamic block with empty distance alphabet is valid if only literals and end of data symbol are used
    // f dy  hlit hdist hclen 16  17  18   0   8   7   9   6  10   5  11   4  12   3  13   2  14   1  15 (18)    x128 (18)    x128 (1)  ( 0) (256)
    // 1 10 00000 00000  1111 000 000 010 010 000 000 000 000 000 000 000 000 000 000 000 000 000 001 000 (11) 1110101 (11) 1110101 (0) (10)  (0)
    const stream = [_]u8{
        0b00000101, 0b11100000, 0b00000001, 0b00001001, 0b00000000, 0b00000000, 0b00000000,
        0b00000000, 0b00010000, 0b01011100, 0b10111111, 0b00101110,
    };

    try decompress(stream[0..]);
}

test "distance past beginning of output stream" {
    // f fx ('A')      ('B')      ('C')      <len=4,   dist=4> (end)
    // 1 01 (01110001) (01110010) (01110011) (0000010) (00011) (0000000)
    const stream = [_]u8{ 0b01110011, 0b01110100, 0b01110010, 0b00000110, 0b01100001, 0b00000000 };

    try std.testing.expectError(error.CorruptInput, decompress(stream[0..]));
}

test "fuzzing" {
    const compressed = [_]u8{
        0x0a, 0x08, 0x50, 0xeb, 0x25, 0x05, 0xfc, 0x30, 0x0b, 0x0a, 0x08, 0x50, 0xeb, 0x25, 0x05,
    } ++ [_]u8{0xe1} ** 15 ++ [_]u8{0x30} ++ [_]u8{0xe1} ** 1481;
    try expectError(error.UnexpectedEndOfStream, decompress(&compressed));
    // see https://github.com/ziglang/zig/issues/9842
    try expectError(error.UnexpectedEndOfStream, decompress("\x95\x90=o\xc20\x10\x86\xf30"));
    try expectError(error.CorruptInput, decompress("\x950\x00\x0000000"));
    // Huffman errors
    // lencode
    try expectError(error.CorruptInput, decompress("\x950000"));
    try expectError(error.CorruptInput, decompress("\x05000"));
    // hlen
    try expectError(error.CorruptInput, decompress("\x05\xea\x01\t\x00\x00\x00\x01\x00\\\xbf.\t\x00"));
    // hdist
    try expectError(error.CorruptInput, decompress("\x05\xe0\x01A\x00\x00\x00\x00\x10\\\xbf."));

    // like the "empty distance alphabet" test but for ndist instead of nlen
    try expectError(error.CorruptInput, decompress("\x05\xe0\x01\t\x00\x00\x00\x00\x10\\\xbf\xce"));
    try decompress("\x15\xe0\x01\t\x00\x00\x00\x00\x10\\\xbf.0");
}

// Test helper: decompress `input` fully, discarding the output; errors propagate.
fn decompress(input: []const u8) !void {
    const allocator = testing.allocator;
    const reader = std.io.fixedBufferStream(input).reader();
    var decomp = try decompressor(allocator, reader, null);
    defer decomp.deinit();
    var output = try decomp.reader().readAllAlloc(allocator, math.maxInt(usize));
    defer std.testing.allocator.free(output);
}
// file: lib/std/compress/deflate/decompressor.zig (next file in this concatenated dump)
const std = @import("../std.zig");

/// A protocol is an interface identified by a GUID.
pub const protocols = @import("uefi/protocols.zig");

/// Status codes returned by EFI interfaces
pub const Status = @import("uefi/status.zig").Status;
pub const tables = @import("uefi/tables.zig");

/// The memory type to allocate when using the pool
/// Defaults to .LoaderData, the default data allocation type
/// used by UEFI applications to allocate pool memory.
pub var efi_pool_memory_type: tables.MemoryType = .LoaderData;
pub const pool_allocator = @import("uefi/pool_allocator.zig").pool_allocator;
pub const raw_pool_allocator = @import("uefi/pool_allocator.zig").raw_pool_allocator;

/// The EFI image's handle that is passed to its entry point.
pub var handle: Handle = undefined;

/// A pointer to the EFI System Table that is passed to the EFI image's entry point.
pub var system_table: *tables.SystemTable = undefined;

/// A handle to an event structure.
pub const Event = *opaque {};

pub const MacAddress = extern struct {
    address: [32]u8,
};

pub const Ipv4Address = extern struct {
    address: [4]u8,
};

pub const Ipv6Address = extern struct {
    address: [16]u8,
};

/// GUIDs must be align(8)
pub const Guid = extern struct {
    time_low: u32,
    time_mid: u16,
    time_high_and_version: u16,
    clock_seq_high_and_reserved: u8,
    clock_seq_low: u8,
    node: [6]u8,

    /// Format GUID into hexadecimal lowercase xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
    pub fn format(
        self: @This(),
        comptime f: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = options;
        if (f.len == 0) {
            const fmt = std.fmt.fmtSliceHexLower;

            // The first three fields are stored little-endian in memory; byte-swap
            // so the hex dump reads most-significant byte first (see the
            // "GUID formatting" test below for the expected canonical text).
            const time_low = @byteSwap(u32, self.time_low);
            const time_mid = @byteSwap(u16, self.time_mid);
            const time_high_and_version = @byteSwap(u16, self.time_high_and_version);

            return std.fmt.format(writer, "{:0>8}-{:0>4}-{:0>4}-{:0>2}{:0>2}-{:0>12}", .{
                fmt(std.mem.asBytes(&time_low)),
                fmt(std.mem.asBytes(&time_mid)),
                fmt(std.mem.asBytes(&time_high_and_version)),
                fmt(std.mem.asBytes(&self.clock_seq_high_and_reserved)),
                fmt(std.mem.asBytes(&self.clock_seq_low)),
                fmt(std.mem.asBytes(&self.node)),
            });
        } else {
            @compileError("Unknown format character: '" ++ f ++ "'");
        }
    }

    /// Field-by-field equality of two GUIDs.
    pub fn eql(a: std.os.uefi.Guid, b: std.os.uefi.Guid) bool {
        return a.time_low == b.time_low and
            a.time_mid == b.time_mid and
            a.time_high_and_version == b.time_high_and_version and
            a.clock_seq_high_and_reserved == b.clock_seq_high_and_reserved and
            a.clock_seq_low == b.clock_seq_low and
            std.mem.eql(u8, &a.node, &b.node);
    }
};

/// An EFI Handle represents a collection of related interfaces.
pub const Handle = *opaque {};

/// This structure represents time information.
pub const Time = extern struct {
    /// 1900 - 9999
    year: u16,

    /// 1 - 12
    month: u8,

    /// 1 - 31
    day: u8,

    /// 0 - 23
    hour: u8,

    /// 0 - 59
    minute: u8,

    /// 0 - 59
    second: u8,

    /// 0 - 999999999
    nanosecond: u32,

    /// The time's offset in minutes from UTC.
    /// Allowed values are -1440 to 1440 or unspecified_timezone
    timezone: i16,

    // NOTE(review): in a Zig packed struct the first field is the least
    // significant bit, so this layout places in_daylight at bit 6 and
    // adjust_daylight at bit 7. The UEFI spec defines
    // EFI_TIME_ADJUST_DAYLIGHT = 0x01 and EFI_TIME_IN_DAYLIGHT = 0x02
    // (bits 0 and 1) — verify this bit order against the target firmware.
    daylight: packed struct {
        _pad1: u6,

        /// If true, the time has been adjusted for daylight savings time.
        in_daylight: bool,

        /// If true, the time is affected by daylight savings time.
        adjust_daylight: bool,
    },

    /// Time is to be interpreted as local time
    pub const unspecified_timezone: i16 = 0x7ff;
};

/// Capabilities of the clock device
pub const TimeCapabilities = extern struct {
    /// Resolution in Hz
    resolution: u32,

    /// Accuracy in an error rate of 1e-6 parts per million.
    accuracy: u32,

    /// If true, a time set operation clears the device's time below the resolution level.
    sets_to_zero: bool,
};

/// File Handle as specified in the EFI Shell Spec
pub const FileHandle = *opaque {};

test "GUID formatting" {
    var bytes = [_]u8{ 137, 60, 203, 50, 128, 128, 124, 66, 186, 19, 80, 73, 135, 59, 194, 135 };
    var guid = @bitCast(Guid, bytes);
    var str = try std.fmt.allocPrint(std.testing.allocator, "{}", .{guid});
    defer std.testing.allocator.free(str);
    try std.testing.expect(std.mem.eql(u8, str, "32cb3c89-8080-427c-ba13-5049873bc287"));
}
lib/std/os/uefi.zig
const std = @import("std");

const QueueCacheConfig = struct {
    T: type,
    Owner: type,
    next_name: []const u8 = "next_in_cache",
    prev_name: []const u8 = "prev_in_cache",
};

/// Intrusive FIFO cache of nodes. disposeFn is used to drop nodes from the cache.
/// disposeFn does not have to hold exclusive ownership of the cache while it is running
/// (e.g. if the QueueCache is protected by a mutex, disposeFn can drop the mutex and
/// acquire it back before returning; thread safety is still guaranteed because
/// insertions are rejected while a dispose is in flight — see reject_new_counter).
/// disposeFn is called whenever there are too many nodes in the queue (see the
/// max_count field) or if the flush method was called.
/// The cached type must have two optional-pointer link fields (`?*T`); their names are
/// specified by the cfg.next_name and cfg.prev_name parameters.
/// disposeFn accepts an owner parameter that can be used to pass context.
pub fn QueueCache(
    comptime cfg: QueueCacheConfig,
    comptime disposeFn: fn (*cfg.Owner, *cfg.T) void,
) type {
    return struct {
        /// Most recently enqueued node; its next link is always null.
        head: ?*cfg.T = null,
        /// Least recently enqueued node; its prev link is always null.
        tail: ?*cfg.T = null,
        /// Number of nodes currently linked into the cache.
        count: usize = 0,
        /// Maximum number of nodes; null means unbounded.
        max_count: ?usize = null,
        /// While non-zero, enqueue disposes new nodes instead of inserting them.
        reject_new_counter: usize = 0,

        /// Unlink and return the oldest node (the tail), or null if the cache is empty.
        fn dequeue(self: *@This()) ?*cfg.T {
            if (self.tail) |tail| {
                self.cut(tail);
                return tail;
            }
            return null;
        }

        /// Evict old nodes until there is room for one more. Returns false if the
        /// cache has no place for the node at all (max_count == 0); the caller then
        /// disposes the node directly.
        fn disposeIfNeeded(self: *@This(), owner: *cfg.Owner) bool {
            if (self.max_count) |max| {
                while (self.count == max) {
                    const node_to_delete = self.dequeue() orelse return false;
                    // Prevent insertions in cache while this thread executes disposeFn.
                    self.reject_new_counter += 1;
                    disposeFn(owner, node_to_delete);
                    self.reject_new_counter -= 1;
                }
            }
            return true;
        }

        /// Clear cache, disposing every node. Use on OOM errors.
        pub fn flush(self: *@This(), owner: *cfg.Owner) void {
            // Prevent insertions in cache while flush does its work.
            self.reject_new_counter += 1;
            while (self.dequeue()) |node| {
                disposeFn(owner, node);
            }
            self.reject_new_counter -= 1;
        }

        /// Enqueue node at the head of the cache, evicting old nodes if needed.
        /// If insertions are currently rejected or capacity is zero, the node is
        /// disposed immediately instead of being inserted.
        pub fn enqueue(self: *@This(), owner: *cfg.Owner, node: *cfg.T) void {
            // If a flushing operation is in progress, turn the node down.
            if (self.reject_new_counter != 0) {
                disposeFn(owner, node);
                return;
            }
            // If cache size is 0, dispose node directly.
            if (!self.disposeIfNeeded(owner)) {
                disposeFn(owner, node);
                return;
            }
            @field(node, cfg.next_name) = null;
            @field(node, cfg.prev_name) = self.head;
            if (self.head) |head| {
                @field(head, cfg.next_name) = node;
            } else {
                self.tail = node;
            }
            self.head = node;
            self.count += 1;
        }

        /// Cut node from the cache without disposing it.
        pub fn cut(self: *@This(), node: *cfg.T) void {
            const prev = @field(node, cfg.prev_name);
            const next = @field(node, cfg.next_name);
            if (next) |next_nonnull| {
                if (prev) |prev_nonnull| {
                    // Interior node: link the two neighbours to each other.
                    @field(prev_nonnull, cfg.next_name) = next_nonnull;
                    // BUGFIX: this previously assigned next_nonnull to its own prev
                    // link (self-reference), corrupting the list; the successor must
                    // point back at the predecessor.
                    @field(next_nonnull, cfg.prev_name) = prev_nonnull;
                } else {
                    // Node is the tail.
                    @field(next_nonnull, cfg.prev_name) = null;
                    self.tail = next_nonnull;
                }
            } else {
                if (prev) |prev_nonnull| {
                    // Node is the head.
                    @field(prev_nonnull, cfg.next_name) = null;
                    self.head = prev_nonnull;
                } else {
                    // Node was the only element.
                    self.head = null;
                    self.tail = null;
                }
            }
            if (comptime std.debug.runtime_safety) {
                // Poison the links so stale pointers are caught early in safe builds.
                @field(node, cfg.prev_name) = null;
                @field(node, cfg.next_name) = null;
            }
            self.count -= 1;
        }
    };
}
cache.zig
//! Neural-network module utilities: parameter collection over a tree of modules
//! (ParameterCollection / ParameterCollector) plus Dense and MLP layers.
const std = @import("std");
const array = @import("array.zig");
const Array = array.Array;
const tensor = @import("tensor.zig");
const Tensor = tensor.Tensor;
const funcs = @import("funcs.zig");
const optim = @import("optim.zig");

/// A named parameter: `path` is an owned dotted path (e.g. "fc1.weight")
/// built by ParameterCollector.addParameter; `value` is the parameter tensor.
pub const Parameter = struct {
    path: []const u8,
    value: Tensor,
};

/// Growable array of Parameters. Owns each entry's `path` string and the
/// backing slice; does not own the tensors themselves.
pub const ParameterCollection = struct {
    data: []Parameter,
    count: u64,
    alc: *std.mem.Allocator,

    const Self = @This();

    // Starts with capacity 1; append doubles capacity when full.
    fn init(alc: *std.mem.Allocator) !Self {
        var data = try alc.alloc(Parameter, 1);
        return Self{.data=data, .alc=alc, .count=0};
    }

    // Frees every stored path copy, then the backing slice.
    fn deinit(self: *Self) void {
        var i : usize = 0;
        while (i < self.count) : (i += 1) {
            self.alc.free(self.data[i].path);
        }
        self.alc.free(self.data);
    }

    // Appends a copy of `path` (caller keeps ownership of its own buffer)
    // together with `value`, growing the array by doubling when full.
    fn append(self: *Self, path: []u8, value: Tensor) !void {
        if (self.data.len == self.count) {
            var new_data = try self.alc.alloc(Parameter, self.count*2);
            std.mem.copy(Parameter, new_data, self.data);
            self.alc.free(self.data);
            self.data = new_data;
        }
        var path_copy = try self.alc.alloc(u8, path.len);
        std.mem.copy(u8, path_copy, path);
        self.data[self.count] = Parameter{.path=path_copy, .value=value};
        self.count += 1;
    }
};

/// Walks a module tree and records every parameter with its dotted path.
/// Each nesting level is a stack-allocated Collector whose `parent` pointer
/// forms a chain back to the root; all levels share one heap-allocated
/// ParameterCollection. Only the root's deinit should be called.
pub const ParameterCollector = struct {
    const Self = @This();

    prefix: []const u8,
    collection: *ParameterCollection,
    parent: ?*const Self,
    alc: *std.mem.Allocator,

    /// Creates the root collector (empty prefix, no parent) and its shared collection.
    pub fn init(alc: *std.mem.Allocator) !Self {
        var collection = try alc.create(ParameterCollection);
        collection.* = try ParameterCollection.init(alc);
        return Self{.prefix="", .collection=collection, .alc=alc, .parent=null};
    }

    pub fn deinit(self: *Self) void {
        self.collection.deinit();
        self.alc.destroy(self.collection);
    }

    /// Recurses into the sub-module field `name`, which must itself define
    /// collectParameters(ParameterCollector).
    pub fn collectParameters(self: *const Self, obj: anytype, comptime name: []const u8) !void {
        try @field(obj, name).collectParameters(self.withPrefix(name));
    }

    /// Recurses into each element of the slice field `name`, using the
    /// prefix "name[index]" for each element.
    /// NOTE(review): the formatted prefix lives in a 1024-byte stack buffer;
    /// bufPrint errors (propagated by `try`) if it does not fit.
    pub fn collectSliceParameters(self: *const Self, obj: anytype, comptime name: []const u8) !void {
        var buf : [1024]u8 = undefined;
        var slice = @field(obj, name);
        for (slice) |item, index| {
            var prefix = try std.fmt.bufPrint(&buf, "{}[{}]", .{name, index});
            try item.collectParameters(self.withPrefix(prefix));
        }
    }

    /// Records the field `name` of `obj` as a parameter, building its full
    /// dotted path from the parent chain (root prefix is empty and excluded).
    pub fn addParameter(self: *const Self, obj: anytype, comptime name: []const u8) !void {
        var value = @field(obj, name);
        // traverse parent chain to build full path
        // First pass: measure — each non-root level contributes "prefix." .
        var cur : *const Self = self;
        var path_len : u64 = 0;
        while (cur.parent != null) {
            path_len += cur.prefix.len + 1;
            cur = cur.parent.?;
        }
        path_len += name.len;
        var path = try self.alc.alloc(u8, path_len);
        // Second pass: fill the buffer right-to-left, starting with `name`,
        // then "." + prefix for each level from innermost to outermost.
        cur = self;
        var offset: u64 = path_len;
        offset -= name.len;
        std.mem.copy(u8, path[offset..], name);
        while (cur.parent != null) {
            offset -= 1;
            path[offset] = '.';
            offset -= cur.prefix.len;
            std.mem.copy(u8, path[offset..], cur.prefix);
            cur = cur.parent.?;
        }
        if (offset != 0) {
            @panic("Incorrect offset calculation");
        }
        try self.collection.append(path, value);
        // append made its own copy, so the scratch path can be freed here.
        self.alc.free(path);
    }

    /// Returns a view of the collected parameters (owned by the collection).
    pub fn getParameters(self: *const Self) []Parameter {
        return self.collection.data[0..self.collection.count];
    }

    /// Returns a child collector one level deeper; `prefix` must outlive it.
    pub fn withPrefix(self: *const Self, prefix: []const u8) Self {
        return Self{.prefix=prefix, .collection=self.collection, .alc=self.alc, .parent=self};
    }
};

/// Fully-connected layer: y = x * weight + bias.
pub const Dense = struct {
    weight: Tensor,
    bias: Tensor,

    const Self = @This();

    /// Allocates weight (in_features x out_features) with Kaiming-uniform init
    /// and bias (out_features) uniform in +/- 1/sqrt(in_features).
    pub fn init(alc: *std.mem.Allocator, rng: *std.rand.Random, in_features: u64, out_features: u64) !Self {
        var weight = try tensor.zerosAlloc(alc, .f32, &[_]u64{in_features, out_features}, tensor.REQUIRES_GRAD);
        var bias = try tensor.zerosAlloc(alc, .f32, &[_]u64{out_features}, tensor.REQUIRES_GRAD);
        try funcs.kaimingUniform(alc, weight.data, rng);
        var high = try array.expr(alc, "1 ./ (in_features .^ 0.5)", .{.in_features=in_features});
        defer high.release();
        var low = try array.uminusAlloc(alc, high);
        defer low.release();
        array.fillUniform(bias.data, rng, low, high);
        return Self{.weight=weight, .bias=bias};
    }

    pub fn deinit(self: *Self) void {
        self.weight.release();
        self.bias.release();
    }

    /// Registers "weight" and "bias" with the collector.
    pub fn collectParameters(self: Self, pc: ParameterCollector) !void {
        try pc.addParameter(self, "weight");
        try pc.addParameter(self, "bias");
    }

    /// Computes x * weight + bias; caller owns the returned tensor.
    pub fn forward(self: *Self, alc: *std.mem.Allocator, x: Tensor) !Tensor {
        return try tensor.expr(alc, "(x * weight) + bias", .{.x=x, .weight=self.weight, .bias=self.bias});
    }
};

/// Two-layer perceptron: Dense -> ReLU -> Dense.
pub const MLP = struct {
    fc1: Dense,
    fc2: Dense,

    const Self = @This();

    pub fn init(alc: *std.mem.Allocator, rng: *std.rand.Random, input_size: u64, hidden_size: u64, output_size: u64) !Self {
        return Self{.fc1=try Dense.init(alc, rng, input_size, hidden_size), .fc2=try Dense.init(alc, rng, hidden_size, output_size)};
    }

    pub fn deinit(self: *Self) void {
        self.fc1.deinit();
        self.fc2.deinit();
    }

    pub fn collectParameters(self: Self, pc: ParameterCollector) !void {
        try pc.collectParameters(self, "fc1");
        try pc.collectParameters(self, "fc2");
    }

    /// Forward pass; the inner blocks scope the defers so each intermediate
    /// tensor is released as soon as its consumer has run.
    pub fn forward(self: *Self, alc: *std.mem.Allocator, x: Tensor) !Tensor {
        var fc1_out = try self.fc1.forward(alc, x);
        var fc1_act : Tensor = undefined;
        {
            defer fc1_out.release();
            fc1_act = try funcs.relu(alc, fc1_out);
        }
        var fc2_out : Tensor = undefined;
        {
            defer fc1_act.release();
            return try self.fc2.forward(alc, fc1_act);
        }
    }
};

// End-to-end check: deterministic weights, forward, loss, backward, SGD step.
// NOTE(review): in newer Zig, std.testing.expect returns an error union and
// these calls would need `try`; here the result is discarded as written.
test "mlp" {
    var in_features : u64 = 5;
    var hidden_features : u64 = 2;
    var out_features : u64 = 2;
    var gen = std.rand.Xoroshiro128.init(0);
    var mlp = try MLP.init(std.testing.allocator, &gen.random, in_features, hidden_features, out_features);
    defer mlp.deinit();
    // Replace the random init with deterministic values so outputs are exact.
    mlp.fc1.weight.release();
    mlp.fc1.weight = try Tensor.allocWithRange(f32, std.testing.allocator, &[_]u64{in_features, hidden_features}, 0.0, 1.0, tensor.REQUIRES_GRAD);
    mlp.fc1.bias.release();
    mlp.fc1.bias = try Tensor.allocWithValue(f32, std.testing.allocator, &[_]u64{hidden_features}, 0, tensor.REQUIRES_GRAD);
    mlp.fc2.weight.release();
    mlp.fc2.weight = try Tensor.allocWithRange(f32, std.testing.allocator, &[_]u64{hidden_features, out_features}, 0.0, 1.0, tensor.REQUIRES_GRAD);
    mlp.fc2.bias.release();
    mlp.fc2.bias = try Tensor.allocWithValue(f32, std.testing.allocator, &[_]u64{out_features}, 0, tensor.REQUIRES_GRAD);
    var input = try Tensor.allocWithValue(f32, std.testing.allocator, &[_]u64{4, 5}, 1.0, tensor.NO_FLAGS);
    defer input.release();
    var target = try Tensor.allocWithValue(u64, std.testing.allocator, &[_]u64{4}, 0, tensor.NO_FLAGS);
    defer target.release();
    var logits = try mlp.forward(std.testing.allocator, input);
    defer logits.release();
    var output = try funcs.logSoftmax(std.testing.allocator, logits, &[_]u64{1});
    defer output.release();
    std.testing.expect(output.data.get(f32, &[_]u64{0,0}) == -45.0);
    std.testing.expect(output.data.get(f32, &[_]u64{0,1}) == 0.0);
    var loss = try funcs.nllLoss(std.testing.allocator, output, target);
    defer loss.release();
    var grad_output = try tensor.onesLikeAlloc(std.testing.allocator, loss, tensor.NO_FLAGS);
    defer grad_output.release();
    var pc = try ParameterCollector.init(std.testing.allocator);
    defer pc.deinit();
    try mlp.collectParameters(pc);
    var opt = try optim.SGD.init(std.testing.allocator, pc.getParameters(), 0.0);
    defer opt.deinit();
    try opt.zeroGrad();
    try tensor.backwardAlloc(std.testing.allocator, loss, grad_output);
    std.testing.expect(mlp.fc1.weight.grad.?.get(f32, &[_]u64{0,0}) == 1.0);
    std.testing.expect(mlp.fc2.weight.grad.?.get(f32, &[_]u64{0,0}) == -20.0);
    var before = mlp.fc2.weight.data.get(f32, &[_]u64{0,0});
    try opt.step(2);
    var after = mlp.fc2.weight.data.get(f32, &[_]u64{0,0});
    std.testing.expect(after - before == 40.0);
}
src/module.zig
const std = @import("std"); const zig = std.zig; const Ast = zig.Ast; const Node = Ast.Node; const Token = Ast.TokenIndex; const Allocator = std.mem.Allocator; const ArrayList = std.ArrayList; const assert = std.debug.assert; pub const ZigSource = struct { arena: std.heap.ArenaAllocator, decls: []const Decl, pub fn deinit(self: *ZigSource) void { self.arena.deinit(); } }; pub const Decl = union(enum) { Container: ContainerDecl, Fun: FunDecl, }; const ContainerKind = enum { Struct, Enum, Union }; pub const ContainerDecl = struct { name: []const u8, data: ContainerData, }; pub const AnonymousContainerDecl = struct { data: ContainerData, }; pub const ContainerData = struct { kind: ContainerKind, tag_type: ?[]const u8 = null, decls: []const Decl = &.{}, fields: []const ContainerField = &.{}, }; pub const Type = union(enum) { Raw: []const u8, AnonymousContainer: AnonymousContainerDecl, EnumMember: void, }; pub const ContainerField = struct { name: []const u8, type: Type, }; pub const FunDecl = struct { const Param = struct { name: []const u8, type: Type, }; name: []const u8, params: []const Param, return_type: []const u8, is_export: bool, }; pub const ZigType = union { raw: []const u8, signed_integer: struct { size: usize }, unsigned_integer: struct { size: usize }, float: struct { size: usize }, array: ArrayType, ptr: PtrType, }; pub const ArrayType = struct { sentinel: ?[]const u8, elem_type: ZigType, elem_count: usize, }; pub const PtrType = struct { size: std.builtin.TypeInfo.Pointer.Size, }; const ParseError = error{OutOfMemory}; pub fn parseZigSource(allocator: Allocator, source: [:0]const u8) ParseError!ZigSource { var arena = std.heap.ArenaAllocator.init(allocator); errdefer arena.deinit(); var tree = try zig.parse(allocator, source); defer tree.deinit(allocator); var decls = ArrayList(Decl).init(arena.allocator()); errdefer decls.deinit(); const root_decls = tree.rootDecls(); const tags = tree.nodes.items(.tag); const datas = tree.nodes.items(.data); for 
(root_decls) |node| { const node_tag = tags[node]; switch (node_tag) { .simple_var_decl => { const s = try parseVarDecl(arena.allocator(), tree, tree.simpleVarDecl(node)); if (s) |s2| try decls.append(s2); }, .fn_decl => { const fn_proto = datas[node].lhs; switch (tags[fn_proto]) { .fn_proto => { const fn_decl = try parseFnProto(arena.allocator(), tree, tree.fnProto(fn_proto)); if (fn_decl) |decl| try decls.append(decl); }, .fn_proto_one => { var buffer: [1]Node.Index = undefined; const fn_decl = try parseFnProto(arena.allocator(), tree, tree.fnProtoOne(&buffer, fn_proto)); if (fn_decl) |decl| try decls.append(decl); }, .fn_proto_simple => { var buffer: [1]Node.Index = undefined; const fn_decl = try parseFnProto(arena.allocator(), tree, tree.fnProtoSimple(&buffer, fn_proto)); if (fn_decl) |decl| try decls.append(decl); }, .fn_proto_multi => { const fn_decl = try parseFnProto(arena.allocator(), tree, tree.fnProtoMulti(fn_proto)); if (fn_decl) |decl| try decls.append(decl); }, else => { std.log.warn("Unknown fn_proto {}", .{tags[fn_proto]}); }, } }, else => {}, } } return ZigSource{ .arena = arena, .decls = decls.toOwnedSlice(), }; } fn parseFnProto(allocator: Allocator, tree: Ast, fn_proto: Ast.full.FnProto) !?Decl { const token_tags = tree.tokens.items(.tag); const name = try allocator.dupe(u8, tree.tokenSlice(fn_proto.ast.fn_token + 1)); var params = ArrayList(FunDecl.Param).init(allocator); errdefer params.deinit(); var last_param = fn_proto.lparen; while (true) { last_param += 1; switch (token_tags[last_param]) { .r_paren => break, .identifier => { if (token_tags[last_param + 1] == .colon) { const param_name = tree.tokenSlice(last_param); const param_type = tree.tokenSlice(last_param + 2); last_param += 2; try params.append(.{ .name = param_name, .type = .{ .Raw = param_type } }); } }, else => {}, } } const return_token = tree.firstToken(fn_proto.ast.return_type); const return_type = try allocator.dupe(u8, tree.tokenSlice(return_token)); // TODO May have 
multiple tokens here, so we should scan until we hit the fn token probably const is_export = if (fn_proto.extern_export_inline_token) |t| blk: { const t2 = token_tags[t]; break :blk t2 == .keyword_export; } else false; return Decl{ .Fun = .{ .name = name, .params = params.toOwnedSlice(), .return_type = return_type, .is_export = is_export, }, }; } fn extractContainerFields(allocator: Allocator, tree: Ast, container_decl: Ast.full.ContainerDecl) ParseError![]ContainerField { const node_tags = tree.nodes.items(.tag); const main_tokens = tree.nodes.items(.main_token); const token_tags = tree.tokens.items(.tag); const container_members = container_decl.ast.members; var container_fields = ArrayList(ContainerField).init(allocator); errdefer container_fields.deinit(); for (container_members) |member_node| { const member_tag = node_tags[member_node]; if (member_tag != .container_field_init) continue; const container_field_init = tree.containerFieldInit(member_node); const type_main_token = main_tokens[container_field_init.ast.type_expr]; const name_token = container_field_init.ast.name_token; const field_type_tag = node_tags[container_field_init.ast.type_expr]; const field_name = try allocator.dupe(u8, tree.tokenSlice(name_token)); switch (field_type_tag) { .root => { // No type - is enum value always in this case? 
try container_fields.append(.{ .name = field_name, .type = .EnumMember }); }, .identifier => { const field_type = try allocator.dupe(u8, tree.tokenSlice(type_main_token)); try container_fields.append(.{ .name = field_name, .type = .{ .Raw = field_type } }); }, .container_decl, .container_decl_trailing, => { const field_container_decl = tree.containerDecl(container_field_init.ast.type_expr); const field_container_fields = try extractContainerFields(allocator, tree, field_container_decl); const kind: ContainerKind = switch (token_tags[field_container_decl.ast.main_token]) { .keyword_struct => .Struct, .keyword_union => .Union, .keyword_enum => .Enum, else => unreachable, }; try container_fields.append(.{ .name = field_name, .type = .{ .AnonymousContainer = .{ .data = .{ .kind = kind, .fields = field_container_fields, }, }, }, }); }, .container_decl_two, .container_decl_two_trailing, => { var buffer: [2]Node.Index = undefined; const field_container_decl = tree.containerDeclTwo(&buffer, container_field_init.ast.type_expr); const field_container_fields = try extractContainerFields(allocator, tree, field_container_decl); const kind: ContainerKind = switch (token_tags[field_container_decl.ast.main_token]) { .keyword_struct => .Struct, .keyword_union => .Union, .keyword_enum => .Enum, else => unreachable, }; try container_fields.append(.{ .name = field_name, .type = .{ .AnonymousContainer = .{ .data = .{ .kind = kind, .fields = field_container_fields, }, }, }, }); }, .container_decl_arg, .container_decl_arg_trailing, => { const field_container_decl = tree.containerDeclArg(container_field_init.ast.type_expr); const field_container_fields = try extractContainerFields(allocator, tree, field_container_decl); // TODO This is done in multiple cases - refactor const kind: ContainerKind = switch (token_tags[field_container_decl.ast.main_token]) { .keyword_struct => .Struct, .keyword_union => .Union, .keyword_enum => .Enum, else => unreachable, }; try container_fields.append(.{ 
.name = field_name, .type = .{ .AnonymousContainer = .{ .data = .{ .kind = kind, .tag_type = extractTagType(tree, field_container_decl), .fields = field_container_fields, }, }, }, }); }, else => {}, } } return container_fields.toOwnedSlice(); } fn parseContainerDecl(allocator: Allocator, tree: Ast, decl: Ast.full.VarDecl, container_decl: Ast.full.ContainerDecl) !?Decl { const token_tags = tree.tokens.items(.tag); const token_tag = token_tags[container_decl.ast.main_token]; const name = try allocator.dupe(u8, tree.tokenSlice(decl.ast.mut_token + 1)); switch (token_tag) { .keyword_struct => { return Decl{ .Container = .{ .name = name, .data = .{ .kind = .Struct, .tag_type = extractTagType(tree, container_decl), .decls = try extractContainerDecls(allocator, tree, container_decl), .fields = try extractContainerFields(allocator, tree, container_decl), }, }, }; }, .keyword_union => { return Decl{ .Container = .{ .name = name, .data = .{ .kind = .Union, .tag_type = extractTagType(tree, container_decl), .decls = try extractContainerDecls(allocator, tree, container_decl), .fields = try extractContainerFields(allocator, tree, container_decl), }, }, }; }, .keyword_enum => { return Decl{ .Container = .{ .name = name, .data = .{ .kind = .Enum, .tag_type = extractTagType(tree, container_decl), .fields = try extractContainerFields(allocator, tree, container_decl), }, }, }; }, else => {}, } return null; } fn parseVarDecl(allocator: Allocator, tree: Ast, decl: Ast.full.VarDecl) !?Decl { if (decl.ast.init_node == 0) return null; const node_tags = tree.nodes.items(.tag); const init_node_tag = node_tags[decl.ast.init_node]; switch (init_node_tag) { .container_decl, .container_decl_trailing, => { const container_decl = tree.containerDecl(decl.ast.init_node); return try parseContainerDecl(allocator, tree, decl, container_decl); }, .container_decl_two, .container_decl_two_trailing, => { var buffer: [2]Node.Index = undefined; const container_decl = tree.containerDeclTwo(&buffer, 
decl.ast.init_node); return try parseContainerDecl(allocator, tree, decl, container_decl); }, .container_decl_arg, .container_decl_arg_trailing, => { const container_decl = tree.containerDeclArg(decl.ast.init_node); return try parseContainerDecl(allocator, tree, decl, container_decl); }, else => {}, } return null; } fn extractTagType(tree: Ast, container_decl: Ast.full.ContainerDecl) ?[]const u8 { const main_tokens = tree.nodes.items(.main_token); const node_tags = tree.nodes.items(.tag); var tag_type: ?[]const u8 = null; const arg = container_decl.ast.arg; if (arg != 0) { // Are there other cases that we should support? assert(node_tags[arg] == .identifier); const arg_token = main_tokens[container_decl.ast.arg]; tag_type = tree.tokenSlice(arg_token); } return tag_type; } fn extractContainerDecls(allocator: Allocator, tree: Ast, container_decl: Ast.full.ContainerDecl) ParseError![]Decl { const node_tags = tree.nodes.items(.tag); const container_members = container_decl.ast.members; var container_decls = ArrayList(Decl).init(allocator); errdefer container_decls.deinit(); for (container_members) |member_node| { const member_tag = node_tags[member_node]; if (member_tag != .simple_var_decl) continue; const decl = try parseVarDecl(allocator, tree, tree.simpleVarDecl(member_node)); if (decl) |d| try container_decls.append(d); } return container_decls.toOwnedSlice(); } // * Tests const testing = std.testing; const test_allocator = testing.allocator; const expect = testing.expect; const expectEqual = testing.expectEqual; const expectEqualStrings = testing.expectEqualStrings; const expectEqualSlices = testing.expectEqualSlices; test "struct decl" { const source = \\const Test = struct { \\ a: i32, \\}; \\ \\const Test2 = struct { \\ a: i32, \\ b: i32, \\}; \\ \\const Test3 = struct { \\ a: i32, \\ b: i32, \\ c: i32, \\}; ; var result = try parseZigSource(test_allocator, source); defer result.deinit(); try expect(deepEql( result.decls, &.{ .{ .Container = .{ .name = "Test", 
.data = .{ .kind = .Struct, .fields = &.{ .{ .name = "a", .type = .{ .Raw = "i32" } }, }, }, }, }, .{ .Container = .{ .name = "Test2", .data = .{ .kind = .Struct, .fields = &.{ .{ .name = "a", .type = .{ .Raw = "i32" } }, .{ .name = "b", .type = .{ .Raw = "i32" } }, }, }, }, }, .{ .Container = .{ .name = "Test3", .data = .{ .kind = .Struct, .fields = &.{ .{ .name = "a", .type = .{ .Raw = "i32" } }, .{ .name = "b", .type = .{ .Raw = "i32" } }, .{ .name = "c", .type = .{ .Raw = "i32" } }, }, }, }, }, }, )); } test "union decl" { const source = \\const Test = union { \\ a: i32, \\}; \\ \\const Test2 = union { \\ a: i32, \\ b: u32, \\}; \\ \\const Test3 = union { \\ a: i32, \\ b: u32, \\ c: u32, \\}; ; var result = try parseZigSource(test_allocator, source); defer result.deinit(); try expect(deepEql( result.decls, &.{ .{ .Container = .{ .name = "Test", .data = .{ .kind = .Union, .fields = &.{ .{ .name = "a", .type = .{ .Raw = "i32" } }, }, }, }, }, .{ .Container = .{ .name = "Test2", .data = .{ .kind = .Union, .fields = &.{ .{ .name = "a", .type = .{ .Raw = "i32" } }, .{ .name = "b", .type = .{ .Raw = "u32" } }, }, }, }, }, .{ .Container = .{ .name = "Test3", .data = .{ .kind = .Union, .fields = &.{ .{ .name = "a", .type = .{ .Raw = "i32" } }, .{ .name = "b", .type = .{ .Raw = "u32" } }, .{ .name = "c", .type = .{ .Raw = "u32" } }, }, }, }, }, }, )); } test "enum decl" { const source = \\const Test = enum { \\ a, \\}; \\ \\const Test2 = enum { \\ a, \\ b \\}; \\ \\const Test3 = enum { \\ a, \\ b, \\ c, \\}; ; var result = try parseZigSource(test_allocator, source); defer result.deinit(); try expect(deepEql( result.decls, &.{ .{ .Container = .{ .name = "Test", .data = .{ .kind = .Enum, .fields = &.{.{ .name = "a", .type = .EnumMember }}, }, }, }, .{ .Container = .{ .name = "Test2", .data = .{ .kind = .Enum, .fields = &.{ .{ .name = "a", .type = .EnumMember }, .{ .name = "b", .type = .EnumMember } }, }, }, }, .{ .Container = .{ .name = "Test3", .data = .{ .kind = .Enum, 
.fields = &.{ .{ .name = "a", .type = .EnumMember }, .{ .name = "b", .type = .EnumMember }, .{ .name = "c", .type = .EnumMember } }, }, }, }, }, )); } test "enum with explicit tag type" { const source = \\const Test = enum(u8) { a, b, c }; ; var result = try parseZigSource(test_allocator, source); defer result.deinit(); try expect(deepEql( result.decls, &.{ .{ .Container = .{ .name = "Test", .data = .{ .kind = .Enum, .tag_type = "u8", .fields = &.{ .{ .name = "a", .type = .EnumMember }, .{ .name = "b", .type = .EnumMember }, .{ .name = "c", .type = .EnumMember }, }, }, }, }, }, )); } test "anonymous struct" { const source = \\pub const Event = struct { \\ field: struct { x: f32, y: f32 }, \\}; ; var result = try parseZigSource(test_allocator, source); defer result.deinit(); try expect(deepEql( result.decls, &.{ .{ .Container = .{ .name = "Event", .data = .{ .kind = .Struct, .fields = &.{ .{ .name = "field", .type = .{ .AnonymousContainer = .{ .data = .{ .kind = .Struct, .fields = &.{ .{ .name = "x", .type = .{ .Raw = "f32" } }, .{ .name = "y", .type = .{ .Raw = "f32" } }, }, }, }, }, }, }, }, }, }, }, )); } test "anonymous enum" { const source = \\pub const Test = struct { \\ field: enum { yes, no }, \\}; ; var result = try parseZigSource(test_allocator, source); defer result.deinit(); try expect(deepEql( result.decls, &.{ .{ .Container = .{ .name = "Test", .data = .{ .kind = .Struct, .fields = &.{ .{ .name = "field", .type = .{ .AnonymousContainer = .{ .data = .{ .kind = .Enum, .fields = &.{ .{ .name = "yes", .type = .EnumMember }, .{ .name = "no", .type = .EnumMember } }, }, }, }, }, }, }, }, }, }, )); } test "anonymous enum with explicit tag type" { const source = \\pub const Test = struct { \\ field: enum(u8) { yes, no }, \\}; ; var result = try parseZigSource(test_allocator, source); defer result.deinit(); try expect(deepEql( result.decls, &.{ .{ .Container = .{ .name = "Test", .data = .{ .kind = .Struct, .fields = &.{ .{ .name = "field", .type = .{ 
.AnonymousContainer = .{ .data = .{ .kind = .Enum, .tag_type = "u8", .fields = &.{ .{ .name = "yes", .type = .EnumMember }, .{ .name = "no", .type = .EnumMember } }, }, }, }, }, }, }, }, }, }, )); } test "complex union" { const source = \\pub const Event = extern union { \\ quit: void, \\ cursor: struct { x: f32, y: f32 }, \\ input: InputEvent, \\}; \\ \\pub const InputEvent = struct { \\ const Key = enum { \\ mouse_left, \\ mouse_right, \\ }; \\ \\ const Action = enum { press, release }; \\ \\ key: Key, \\ action: Action, \\}; ; var result = try parseZigSource(test_allocator, source); defer result.deinit(); // TODO: is_extern try expect(deepEql( result.decls, &.{ .{ .Container = .{ .name = "Event", .data = .{ .kind = .Union, .fields = &.{ .{ .name = "quit", .type = .{ .Raw = "void" } }, .{ .name = "cursor", .type = .{ .AnonymousContainer = .{ .data = .{ .kind = .Struct, .fields = &.{ .{ .name = "x", .type = .{ .Raw = "f32" } }, .{ .name = "y", .type = .{ .Raw = "f32" } }, }, }, }, }, }, .{ .name = "input", .type = .{ .Raw = "InputEvent" }, }, }, }, }, }, .{ .Container = .{ .name = "InputEvent", .data = .{ .kind = .Struct, .decls = &.{ .{ .Container = .{ .name = "Key", .data = .{ .kind = .Enum, .fields = &.{ .{ .name = "mouse_left", .type = .EnumMember }, .{ .name = "mouse_right", .type = .EnumMember } }, }, }, }, .{ .Container = .{ .name = "Action", .data = .{ .kind = .Enum, .fields = &.{ .{ .name = "press", .type = .EnumMember }, .{ .name = "release", .type = .EnumMember } }, }, }, }, }, .fields = &.{ .{ .name = "key", .type = .{ .Raw = "Key" } }, .{ .name = "action", .type = .{ .Raw = "Action" } }, }, }, }, }, }, )); } test "fn decl simple" { const source = \\fn testfn(a: i32) u32 { \\ return a + 1; \\} ; var result = try parseZigSource(test_allocator, source); defer result.deinit(); try expect(deepEql( result.decls, &.{ .{ .Fun = .{ .name = "testfn", .params = &.{.{ .name = "a", .type = .{ .Raw = "i32" } }}, .return_type = "u32", .is_export = false, }, }, }, 
)); } test "fn decl multi" { const source = \\fn testfn(a: i32, b: i32) u32 { \\ return a + b; \\} ; var result = try parseZigSource(test_allocator, source); defer result.deinit(); try expect(deepEql( result.decls, &.{ .{ .Fun = .{ .name = "testfn", .params = &.{ .{ .name = "a", .type = .{ .Raw = "i32" } }, .{ .name = "b", .type = .{ .Raw = "i32" } }, }, .return_type = "u32", .is_export = false, }, }, }, )); } test "fn decl export" { const source = \\export fn testfn(a: i32) u32 { \\ return a + 1; \\} ; var result = try parseZigSource(test_allocator, source); defer result.deinit(); try expect(deepEql( result.decls, &.{ .{ .Fun = .{ .name = "testfn", .params = &.{.{ .name = "a", .type = .{ .Raw = "i32" } }}, .return_type = "u32", .is_export = true, }, }, }, )); } /// Like std.meta.eql, but follows pointers where possible. fn deepEql(a: anytype, b: @TypeOf(a)) bool { const T = @TypeOf(a); switch (@typeInfo(T)) { .Pointer => |info| { return switch (info.size) { .One => deepEql(a.*, b.*), .Many, .C => a == b, // We don't know how many items are pointed to, so just compare addresses .Slice => { if (a.len != b.len) return false; for (a) |item, index| { if (!deepEql(b[index], item)) { // std.log.warn("not eql {} {}", .{b[index], item}); return false; } } return true; }, }; }, // The rest are copied from std.meta.eql (but calls to deepEql instead) .Struct => |info| { inline for (info.fields) |field_info| { if (!deepEql(@field(a, field_info.name), @field(b, field_info.name))) return false; } return true; }, .ErrorUnion => { if (a) |a_p| { if (b) |b_p| return deepEql(a_p, b_p) else |_| return false; } else |a_e| { if (b) |_| return false else |b_e| return a_e == b_e; } }, .Union => |info| { if (info.tag_type) |UnionTag| { const tag_a = std.meta.activeTag(a); const tag_b = std.meta.activeTag(b); if (tag_a != tag_b) return false; inline for (info.fields) |field_info| { if (@field(UnionTag, field_info.name) == tag_a) { return deepEql(@field(a, field_info.name), @field(b, 
field_info.name)); } } return false; } @compileError("cannot compare untagged union type " ++ @typeName(T)); }, .Array => { if (a.len != b.len) return false; for (a) |e, i| if (!deepEql(e, b[i])) return false; return true; }, .Vector => |info| { var i: usize = 0; while (i < info.len) : (i += 1) { if (!deepEql(a[i], b[i])) return false; } return true; }, .Optional => { if (a == null and b == null) return true; if (a == null or b == null) return false; return deepEql(a.?, b.?); }, else => return a == b, } }
src/main.zig
//! RISC-V machine-mode control and status registers (M-mode CSRs).
//! Each declaration wraps a CSR address and its bit layout via riscv.Csr.
const std = @import("std");
const riscv = @import("../riscv.zig");
const Csr = riscv.Csr;

// Machine Information Registers

/// Vendor ID: JEDEC manufacturer ID split into offset/bank fields.
pub const mvendorid = Csr(0xF11, packed struct { offset: u7, bank: u25, _reserved_32: riscv.unsignedIntegerWithSize(riscv.xlen - 32) });
/// Architecture ID.
pub const marchid = Csr(0xF12, riscv.unsignedIntegerWithSize(riscv.xlen));
/// Implementation ID.
pub const mimpid = Csr(0xF13, riscv.unsignedIntegerWithSize(riscv.xlen));
/// Hardware thread (hart) ID.
pub const mhartid = Csr(0xF14, riscv.unsignedIntegerWithSize(riscv.xlen));

// Machine Trap Setup

/// Machine status register. The layout differs between RV32 and RV64
/// (RV64 adds uxl/sxl/sbe/mbe and moves sd to bit 63).
pub const mstatus = Csr(0x300, if (riscv.arch == .riscv32) packed struct {
    _reserved_0: bool,
    sie: bool,
    _reserved_2: bool,
    mie: bool,
    _reserved_4: bool,
    spie: bool,
    ube: bool,
    mpie: bool,
    spp: u1,
    vs: u2,
    mpp: u2,
    fs: u2,
    xs: u2,
    mprv: bool,
    sum: bool,
    mxr: bool,
    tvm: bool,
    tw: bool,
    tsr: bool,
    _reserved_23: u8,
    sd: bool,
} else if (riscv.arch == .riscv64) packed struct {
    _reserved_0: bool,
    sie: bool,
    _reserved_2: bool,
    mie: bool,
    _reserved_4: bool,
    spie: bool,
    ube: bool,
    mpie: bool,
    spp: u1,
    vs: u2,
    mpp: u2,
    fs: u2,
    xs: u2,
    mprv: bool,
    sum: bool,
    mxr: bool,
    tvm: bool,
    tw: bool,
    tsr: bool,
    _reserved_23: u9,
    uxl: u2,
    sxl: u2,
    sbe: bool,
    mbe: bool,
    _reserved_38: u25,
    sd: bool,
} else @compileError("Unsupported architecture"));

/// ISA and extensions: one flag per extension letter A-Z, plus the
/// machine XLEN encoding in the top two bits.
pub const misa = Csr(0x301, packed struct {
    extensions: packed struct {
        a: bool,
        b: bool,
        c: bool,
        d: bool,
        e: bool,
        f: bool,
        g: bool,
        h: bool,
        i: bool,
        j: bool,
        k: bool,
        l: bool,
        m: bool,
        n: bool,
        o: bool,
        p: bool,
        q: bool,
        r: bool,
        s: bool,
        t: bool,
        u: bool,
        v: bool,
        w: bool,
        x: bool,
        y: bool,
        z: bool,
    },
    _reserved_26: riscv.unsignedIntegerWithSize(riscv.xlen - 28),
    mxl: enum(u2) {
        unknown = 0,
        xlen32 = 1,
        xlen64 = 2,
        xlen128 = 3,
    },
});
/// Machine exception delegation register.
pub const medeleg = Csr(0x302, usize);
/// Machine interrupt delegation register.
pub const mideleg = Csr(0x303, usize);
/// Machine interrupt-enable register.
pub const mie = Csr(0x304, usize);
/// Trap-vector base address: 4-byte-aligned base plus vectoring mode.
pub const mtvec = Csr(0x305, packed struct {
    mode: enum(u2) {
        direct = 0,
        vectored = 1,
    },
    base: riscv.unsignedIntegerWithSize(riscv.xlen - 2),
});
//pub const mcounteren = Csr(0x306);

// Machine Trap Handling

/// Scratch register for machine trap handlers.
pub const mscratch = Csr(0x340, usize);
/// Machine exception program counter.
pub const mepc = Csr(0x341, usize);
/// Trap cause: top bit set for interrupts, remaining bits are the code.
pub const mcause = Csr(0x342, packed struct {
    exceptioncode: riscv.unsignedIntegerWithSize(riscv.xlen - 1),
    interrupt: bool,
});
/// Machine trap value (bad address / instruction).
pub const mtval = Csr(0x343, usize);
/// Machine interrupt-pending register.
pub const mip = Csr(0x344, usize);
src/target/arch/riscv/mcsr.zig
const std = @import("std");

/// Super simple "perfect hash" algorithm
/// Only really useful for switching on strings
/// A string of up to max_bytes is mapped to the integer formed by its bytes
/// (native endian, zero padded); `match` maps unhashable strings to maxInt
/// so they can never collide with a comptime `case` value.
// TODO: can we auto detect and promote the underlying type?
pub fn Swhash(comptime max_bytes: comptime_int) type {
    const T = std.meta.Int(.unsigned, max_bytes * 8);

    return struct {
        /// Runtime side: hash of `string`, or maxInt(T) when it is too long.
        pub fn match(string: []const u8) T {
            return hash(string) orelse std.math.maxInt(T);
        }

        /// Comptime side: hash for use as a switch case; compile error when
        /// the string does not fit in max_bytes.
        pub fn case(comptime string: []const u8) T {
            return hash(string) orelse @compileError("Cannot hash '" ++ string ++ "'");
        }

        fn hash(string: []const u8) ?T {
            if (string.len > max_bytes) return null;
            // Zero-pad so shorter strings hash deterministically.
            var tmp = [_]u8{0} ** max_bytes;
            std.mem.copy(u8, &tmp, string);
            return std.mem.readIntNative(T, &tmp);
        }
    };
}

/// Fixed-slot ring allocator: carves `buffer` into equally sized
/// max_alloc_size slots and hands them out round-robin. When the ring wraps,
/// old allocations are reused without any liveness tracking, so callers must
/// consume data before the ring comes back around. `resize` never moves
/// memory.
// NOTE: implements the function-pointer Allocator interface of older Zig
// (allocFn/resizeFn on a *std.mem.Allocator field).
pub const RingAllocator = struct {
    buffer: []u8,
    alignment: u29,
    max_alloc_size: usize,
    // Index (in slots, not bytes) of the next slot to hand out.
    curr_index: usize = 0,
    allocator: std.mem.Allocator = .{
        .allocFn = alloc,
        .resizeFn = resize,
    },

    /// `max_alloc_size` must be a power of two and must evenly divide
    /// `buffer.len`.
    pub fn init(buffer: []u8, max_alloc_size: usize) RingAllocator {
        std.debug.assert(@popCount(usize, max_alloc_size) == 1);
        std.debug.assert(buffer.len % max_alloc_size == 0);
        return .{
            .buffer = buffer,
            // Effective alignment: limited by both the slot size and the
            // buffer's own address alignment.
            .alignment = @as(u29, 1) << @intCast(std.math.Log2Int(u29), @ctz(usize, max_alloc_size | @ptrToInt(buffer.ptr))),
            .max_alloc_size = max_alloc_size,
        };
    }

    const ShiftSize = std.math.Log2Int(usize);
    // log2(max_alloc_size) — valid because init asserts a power of two.
    fn shiftSize(self: RingAllocator) ShiftSize {
        return @intCast(ShiftSize, @ctz(usize, self.max_alloc_size));
    }

    fn totalSlots(self: RingAllocator) usize {
        return self.buffer.len >> self.shiftSize();
    }

    /// True if `slice` lies entirely inside this allocator's buffer.
    pub fn ownsSlice(self: *const RingAllocator, slice: []u8) bool {
        return @ptrToInt(slice.ptr) >= @ptrToInt(self.buffer.ptr) and
            (@ptrToInt(slice.ptr) + slice.len) <= (@ptrToInt(self.buffer.ptr) + self.buffer.len);
    }

    fn alloc(allocator: *std.mem.Allocator, n: usize, ptr_align: u29, len_align: u29, return_address: usize) error{OutOfMemory}![]u8 {
        const self = @fieldParentPtr(RingAllocator, "allocator", allocator);
        std.debug.assert(ptr_align <= self.alignment);
        // NOTE(review): `>=` rejects n == max_alloc_size even though a slot
        // is exactly that size — looks like it should be `>`; confirm intent.
        if (n >= self.max_alloc_size) {
            return error.OutOfMemory;
        }
        const start = self.curr_index << self.shiftSize();
        self.curr_index += 1;
        if (self.curr_index >= self.totalSlots()) {
            // Wrap around the ring
            self.curr_index = 0;
        }
        // Always hand out the full slot, regardless of requested n.
        return self.buffer[start..][0..self.max_alloc_size];
    }

    fn resize(allocator: *std.mem.Allocator, buf: []u8, buf_align: u29, new_size: usize, len_align: u29, return_address: usize) error{OutOfMemory}!usize {
        const self = @fieldParentPtr(RingAllocator, "allocator", allocator);
        std.debug.assert(self.ownsSlice(buf)); // sanity check
        std.debug.assert(buf_align == 1);
        if (new_size >= self.max_alloc_size) {
            return error.OutOfMemory;
        }
        // Shrinking/growing within the slot never moves memory.
        return new_size;
    }
};
src/util.zig
//! Interface type for pragma handlers: a vtable-like struct of optional
//! callbacks invoked at fixed points of preprocessing and parsing.
const std = @import("std");
const Compilation = @import("Compilation.zig");
const Preprocessor = @import("Preprocessor.zig");
const Parser = @import("Parser.zig");
const TokenIndex = @import("Tree.zig").TokenIndex;
const Pragma = @This();

pub const Error = Compilation.Error || error{ UnknownPragma, StopPreprocessing };

/// Called during Preprocessor.init
beforePreprocess: ?fn (*Pragma, *Compilation) void = null,

/// Called at the beginning of Parser.parse
beforeParse: ?fn (*Pragma, *Compilation) void = null,

/// Called at the end of Parser.parse if a Tree was successfully parsed
afterParse: ?fn (*Pragma, *Compilation) void = null,

/// Called during Compilation.deinit
deinit: fn (*Pragma, *Compilation) void,

/// Called whenever the preprocessor encounters this pragma. `start_idx` is the index
/// within `pp.tokens` of the pragma name token. The pragma end is indicated by a
/// .nl token (which may be generated if the source ends with a pragma with no newline)
/// As an example, given the following line:
///     #pragma GCC diagnostic error "-Wnewline-eof" \n
/// Then pp.tokens.get(start_idx) will return the `GCC` token.
/// Return error.UnknownPragma to emit an `unknown_pragma` diagnostic
/// Return error.StopPreprocessing to stop preprocessing the current file (see once.zig)
preprocessorHandler: ?fn (*Pragma, *Preprocessor, start_idx: TokenIndex) Error!void = null,

/// Called during token pretty-printing (`-E` option). If this returns true, the pragma will
/// be printed; otherwise it will be omitted. start_idx is the index of the pragma name token
preserveTokens: ?fn (*Pragma, *Preprocessor, start_idx: TokenIndex) bool = null,

/// Same as preprocessorHandler except called during parsing
/// The parser's `p.tok_i` field must not be changed
parserHandler: ?fn (*Pragma, *Parser, start_idx: TokenIndex) Compilation.Error!void = null,

/// Concatenates the contents of adjacent string-literal tokens starting at
/// `start_idx` (quotes stripped), optionally wrapped in parentheses, into
/// pp.char_buf and returns the combined bytes. Anything other than the
/// shape `( ... "str" "str" ... )` or bare string literals yields
/// error.ExpectedStringLiteral.
/// NOTE(review): the defer resets char_buf.items.len on return, so the
/// returned slice points past the buffer's logical length — it stays
/// readable only until char_buf is next written to; confirm callers copy.
pub fn pasteTokens(pp: *Preprocessor, start_idx: TokenIndex) ![]const u8 {
    // An immediate newline means the pragma had no argument at all.
    if (pp.tokens.get(start_idx).id == .nl) return error.ExpectedStringLiteral;

    const char_top = pp.char_buf.items.len;
    defer pp.char_buf.items.len = char_top;

    var i: usize = 0;
    var lparen_count: u32 = 0;
    var rparen_count: u32 = 0;
    while (true) : (i += 1) {
        const tok = pp.tokens.get(start_idx + i);
        if (tok.id == .nl) break;
        switch (tok.id) {
            .l_paren => {
                // Every '(' must appear before any other token, i.e. the
                // opening parens form an unbroken prefix of the argument.
                if (lparen_count != i) return error.ExpectedStringLiteral;
                lparen_count += 1;
            },
            .r_paren => rparen_count += 1,
            .string_literal => {
                // A string after any ')' would be outside the parens.
                if (rparen_count != 0) return error.ExpectedStringLiteral;
                const str = pp.expandedSlice(tok);
                // Strip the surrounding quotes before appending.
                try pp.char_buf.appendSlice(str[1 .. str.len - 1]);
            },
            else => return error.ExpectedStringLiteral,
        }
    }
    // Parentheses must balance.
    if (lparen_count != rparen_count) return error.ExpectedStringLiteral;
    return pp.char_buf.items[char_top..];
}

/// Dispatch helper for preserveTokens; defaults to false when unset.
pub fn shouldPreserveTokens(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) bool {
    if (self.preserveTokens) |func| return func(self, pp, start_idx);
    return false;
}

/// Dispatch helper for preprocessorHandler; no-op when unset.
pub fn preprocessorCB(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Error!void {
    if (self.preprocessorHandler) |func| return func(self, pp, start_idx);
}

/// Dispatch helper for parserHandler; asserts the handler leaves the
/// parser's token cursor untouched, per the field's contract.
pub fn parserCB(self: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation.Error!void {
    const tok_index = p.tok_i;
    defer std.debug.assert(tok_index == p.tok_i);
    if (self.parserHandler) |func| return func(self, p, start_idx);
}
src/Pragma.zig
// kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164
// Input x is assumed to be bounded by ~pi/4 in magnitude.
// Input y is the tail of x.
//
// Algorithm
// 1. Since cos(-x) = cos(x), we need only to consider positive x.
// 2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0.
// 3. cos(x) is approximated by a polynomial of degree 14 on [0,pi/4]
//            cos(x) ~ 1 - x*x/2 + C1*x^4 + ... + C6*x^14
//    where the remez error is
//    |cos(x)-(1-.5*x^2+C1*x^4+C2*x^6+C3*x^8+C4*x^10+C5*x^12+C6*x^14)| <= 2^-58
// 4. let r = C1*x^4+C2*x^6+C3*x^8+C4*x^10+C5*x^12+C6*x^14, then
//            cos(x) ~ 1 - x*x/2 + r
//    since cos(x+y) ~ cos(x) - sin(x)*y
//                   ~ cos(x) - x*y,
//    a correction term is necessary in cos(x) and hence
//            cos(x+y) = 1 - (x*x/2 - (r - x*y))
//    For better accuracy, rearrange to
//            cos(x+y) ~ w + (tmp + (r-x*y))
//    where w = 1 - x*x/2 and tmp is a tiny correction term
//    (1 - x*x/2 == w + tmp exactly in infinite precision).
//    The exactness of w + tmp in infinite precision depends on w
//    and tmp having the same precision as x. If they have extra
//    precision due to compiler bugs, then the extra precision is
//    only good provided it is retained in all terms of the final
//    expression for cos(). Retention happens in all cases tested
//    under FreeBSD, so don't pessimize things by forcibly clipping
//    any extra precision in w.
pub fn __cos(x: f64, y: f64) f64 {
    const C1 = 4.16666666666666019037e-02; // 0x3FA55555, 0x5555554C
    const C2 = -1.38888888888741095749e-03; // 0xBF56C16C, 0x16C15177
    const C3 = 2.48015872894767294178e-05; // 0x3EFA01A0, 0x19CB1590
    const C4 = -2.75573143513906633035e-07; // 0xBE927E4F, 0x809C52AD
    const C5 = 2.08757232129817482790e-09; // 0x3E21EE9E, 0xBDB4B1C4
    const C6 = -1.13596475577881948265e-11; // 0xBDA8FAE9, 0xBE8838D4

    const z = x * x;
    const zs = z * z;
    const r = z * (C1 + z * (C2 + z * C3)) + zs * zs * (C4 + z * (C5 + z * C6));
    const hz = 0.5 * z;
    const w = 1.0 - hz;
    // w + tmp + (r - x*y), per step 4 above; keep this exact grouping.
    return w + (((1.0 - w) - hz) + (z * r - x * y));
}

/// Single-precision kernel cos; note the f64 argument (extra precision in,
/// rounded once on the way out).
pub fn __cosdf(x: f64) f32 {
    // |cos(x) - c(x)| < 2**-34.1 (~[-5.37e-11, 5.295e-11]).
    const C0 = -0x1ffffffd0c5e81.0p-54; // -0.499999997251031003120
    const C1 = 0x155553e1053a42.0p-57; // 0.0416666233237390631894
    const C2 = -0x16c087e80f1e27.0p-62; // -0.00138867637746099294692
    const C3 = 0x199342e0ee5069.0p-68; // 0.0000243904487962774090654

    // Try to optimize for parallel evaluation as in __tandf.c.
    const z = x * x;
    const w = z * z;
    const r = C2 + z * C3;
    return @floatCast(f32, ((1.0 + z * C0) + w * C1) + (w * z) * r);
}

// kernel sin function on ~[-pi/4, pi/4] (except on -0), pi/4 ~ 0.7854
// Input x is assumed to be bounded by ~pi/4 in magnitude.
// Input y is the tail of x.
// Input iy indicates whether y is 0. (if iy=0, y assume to be 0).
//
// Algorithm
// 1. Since sin(-x) = -sin(x), we need only to consider positive x.
// 2. Callers must return sin(-0) = -0 without calling here since our
//    odd polynomial is not evaluated in a way that preserves -0.
//    Callers may do the optimization sin(x) ~ x for tiny x.
// 3. sin(x) is approximated by a polynomial of degree 13 on [0,pi/4]
//            sin(x) ~ x + S1*x^3 + ... + S6*x^13
//    where
//    |sin(x)/x - (1+S1*x^2+S2*x^4+S3*x^6+S4*x^8+S5*x^10+S6*x^12)| <= 2^-58
// 4. sin(x+y) = sin(x) + sin'(x')*y
//             ~ sin(x) + (1-x*x/2)*y
//    For better accuracy, let
//            r = x^3*(S2+x^2*(S3+x^2*(S4+x^2*(S5+x^2*S6))))
//    then
//            sin(x) = x + (S1*x^3 + (x^3*(r-y/2)+y))
pub fn __sin(x: f64, y: f64, iy: i32) f64 {
    const S1 = -1.66666666666666324348e-01; // 0xBFC55555, 0x55555549
    const S2 = 8.33333333332248946124e-03; // 0x3F811111, 0x1110F8A6
    const S3 = -1.98412698298579493134e-04; // 0xBF2A01A0, 0x19C161D5
    const S4 = 2.75573137070700676789e-06; // 0x3EC71DE3, 0x57B1FE7D
    const S5 = -2.50507602534068634195e-08; // 0xBE5AE5E6, 0x8A2B9CEB
    const S6 = 1.58969099521155010221e-10; // 0x3DE5D93A, 0x5ACFD57C

    const z = x * x;
    const w = z * z;
    const r = S2 + z * (S3 + z * S4) + z * w * (S5 + z * S6);
    const v = z * x;
    if (iy == 0) {
        // y known to be zero: skip the tail-correction term.
        return x + v * (S1 + z * r);
    } else {
        return x - ((z * (0.5 * y - v * r) - y) - v * S1);
    }
}

/// Single-precision kernel sin; f64 in, f32 out (see __cosdf).
pub fn __sindf(x: f64) f32 {
    // |sin(x)/x - s(x)| < 2**-37.5 (~[-4.89e-12, 4.824e-12]).
    const S1 = -0x15555554cbac77.0p-55; // -0.166666666416265235595
    const S2 = 0x111110896efbb2.0p-59; // 0.0083333293858894631756
    const S3 = -0x1a00f9e2cae774.0p-65; // -0.000198393348360966317347
    const S4 = 0x16cd878c3b46a7.0p-71; // 0.0000027183114939898219064

    // Try to optimize for parallel evaluation as in __tandf.c.
    const z = x * x;
    const w = z * z;
    const r = S3 + z * S4;
    const s = z * x;
    return @floatCast(f32, (x + s * (S1 + z * S2)) + s * w * r);
}

// kernel tan function on ~[-pi/4, pi/4] (except on -0), pi/4 ~ 0.7854
// Input x is assumed to be bounded by ~pi/4 in magnitude.
// Input y is the tail of x.
// Input odd indicates whether tan (if odd = 0) or -1/tan (if odd = 1) is returned.
//
// Algorithm
// 1. Since tan(-x) = -tan(x), we need only to consider positive x.
// 2. Callers must return tan(-0) = -0 without calling here since our
//    odd polynomial is not evaluated in a way that preserves -0.
//    Callers may do the optimization tan(x) ~ x for tiny x.
// 3. tan(x) is approximated by a odd polynomial of degree 27 on [0,0.67434]
//            tan(x) ~ x + T1*x^3 + ... + T13*x^27
//    where
//    |tan(x)/x - (1+T1*x^2+T2*x^4+....+T13*x^26)| <= 2^-59.2
//
//    Note: tan(x+y) = tan(x) + tan'(x)*y
//                   ~ tan(x) + (1+x*x)*y
//    Therefore, for better accuracy in computing tan(x+y), let
//            r = x^3*(T2+x^2*(T3+x^2*(...+x^2*(T12+x^2*T13))))
//    then
//            tan(x+y) = x + (T1*x^3 + (x^3*(r+y)+y))
//
// 4. For x in [0.67434,pi/4], let y = pi/4 - x, then
//    tan(x) = tan(pi/4-y) = (1-tan(y))/(1+tan(y))
//           = 1 - 2*(tan(y) - (tan(y)^2)/(1+tan(y)))
pub fn __tan(x_: f64, y_: f64, odd: bool) f64 {
    var x = x_;
    var y = y_;

    const T = [_]f64{
        3.33333333333334091986e-01, // 3FD55555, 55555563
        1.33333333333201242699e-01, // 3FC11111, 1110FE7A
        5.39682539762260521377e-02, // 3FABA1BA, 1BB341FE
        2.18694882948595424599e-02, // 3F9664F4, 8406D637
        8.86323982359930005737e-03, // 3F8226E3, E96E8493
        3.59207910759131235356e-03, // 3F6D6D22, C9560328
        1.45620945432529025516e-03, // 3F57DBC8, FEE08315
        5.88041240820264096874e-04, // 3F4344D8, F2F26501
        2.46463134818469906812e-04, // 3F3026F7, 1A8D1068
        7.81794442939557092300e-05, // 3F147E88, A03792A6
        7.14072491382608190305e-05, // 3F12B80F, 32F0A7E9
        -1.85586374855275456654e-05, // BEF375CB, DB605373
        2.59073051863633712884e-05, // 3EFB2A70, 74BF7AD4
    };
    const pio4 = 7.85398163397448278999e-01; // 3FE921FB, 54442D18
    const pio4lo = 3.06161699786838301793e-17; // 3C81A626, 33145C07

    var z: f64 = undefined;
    var r: f64 = undefined;
    var v: f64 = undefined;
    var w: f64 = undefined;
    var s: f64 = undefined;
    var a: f64 = undefined;
    var w0: f64 = undefined;
    var a0: f64 = undefined;
    var hx: u32 = undefined;
    var sign: bool = undefined;

    // High word of x, used for the magnitude test below.
    hx = @intCast(u32, @bitCast(u64, x) >> 32);
    const big = (hx & 0x7fffffff) >= 0x3FE59428; // |x| >= 0.6744
    if (big) {
        // Step 4: reduce via tan(x) = tan(pi/4 - y) identity.
        sign = hx >> 31 != 0;
        if (sign) {
            x = -x;
            y = -y;
        }
        x = (pio4 - x) + (pio4lo - y);
        y = 0.0;
    }
    z = x * x;
    w = z * z;
    // Break x^5*(T[1]+x^2*T[2]+...) into
    //   x^5(T[1]+x^4*T[3]+...+x^20*T[11]) +
    //   x^5(x^2*(T[2]+x^4*T[4]+...+x^22*[T12]))
    r = T[1] + w * (T[3] + w * (T[5] + w * (T[7] + w * (T[9] + w * T[11]))));
    v = z * (T[2] + w * (T[4] + w * (T[6] + w * (T[8] + w * (T[10] + w * T[12])))));
    s = z * x;
    r = y + z * (s * (r + v) + y) + s * T[0];
    w = x + r;
    if (big) {
        s = 1 - 2 * @intToFloat(f64, @boolToInt(odd));
        v = s - 2.0 * (x + (r - w * w / (w + s)));
        return if (sign) -v else v;
    }
    if (!odd) {
        return w;
    }
    // -1.0/(x+r) has up to 2ulp error, so compute it accurately
    w0 = w;
    w0 = @bitCast(f64, @bitCast(u64, w0) & 0xffffffff00000000);
    v = r - (w0 - x); // w0+v = r+x
    a = -1.0 / w;
    a0 = a;
    a0 = @bitCast(f64, @bitCast(u64, a0) & 0xffffffff00000000);
    return a0 + a * (1.0 + a0 * w0 + a0 * v);
}

/// Single-precision kernel tan; returns tan(x) when odd is false,
/// -1/tan(x) when odd is true (see __tan).
pub fn __tandf(x: f64, odd: bool) f32 {
    // |tan(x)/x - t(x)| < 2**-25.5 (~[-2e-08, 2e-08]).
    const T = [_]f64{
        0x15554d3418c99f.0p-54, // 0.333331395030791399758
        0x1112fd38999f72.0p-55, // 0.133392002712976742718
        0x1b54c91d865afe.0p-57, // 0.0533812378445670393523
        0x191df3908c33ce.0p-58, // 0.0245283181166547278873
        0x185dadfcecf44e.0p-61, // 0.00297435743359967304927
        0x1362b9bf971bcd.0p-59, // 0.00946564784943673166728
    };

    const z = x * x;
    // Split up the polynomial into small independent terms to give
    // opportunities for parallel evaluation. The chosen splitting is
    // micro-optimized for Athlons (XP, X64). It costs 2 multiplications
    // relative to Horner's method on sequential machines.
    //
    // We add the small terms from lowest degree up for efficiency on
    // non-sequential machines (the lowest degree terms tend to be ready
    // earlier). Apart from this, we don't care about order of
    // operations, and don't need to to care since we have precision to
    // spare. However, the chosen splitting is good for accuracy too,
    // and would give results as accurate as Horner's method if the
    // small terms were added from highest degree down.
    const r = T[4] + z * T[5];
    const t = T[2] + z * T[3];
    const w = z * z;
    const s = z * x;
    const u = T[0] + z * T[1];
    const r0 = (x + s * u) + (s * w) * (t + w * r);
    return @floatCast(f32, if (odd) -1.0 / r0 else r0);
}
lib/std/math/__trig.zig
const std = @import("std");
const Client = @import("requestz").Client;
const Allocator = std.mem.Allocator;
const Headers = @import("http").Headers;

/// Base URL of the Cloudflare v4 REST API; request paths are appended to it.
/// (Was `var`, but it is never mutated.)
const cloudflareApi = "https://api.cloudflare.com/client/v4/";

/// Thin Cloudflare API client authenticating with the email + API-key
/// header pair. All allocations use the allocator passed to `init`
/// (previously several paths used std.testing.allocator, which is only
/// valid inside `zig test`).
pub const Cloudflare = struct {
    allocator: *Allocator,
    email: []const u8,
    apikey: []const u8,
    zoneid: []const u8,

    pub fn init(allocator: *Allocator, email: []const u8, apikey: []const u8, zoneid: []const u8) !Cloudflare {
        return Cloudflare{ .allocator = allocator, .email = email, .apikey = apikey, .zoneid = zoneid };
    }

    pub fn deinit(_: *Cloudflare) void {}

    /// Builds the auth + content-type headers. Caller must deinit.
    fn authHeaders(self: Cloudflare) !Headers {
        var headers = Headers.init(self.allocator);
        errdefer headers.deinit();
        try headers.append("X-Auth-Email", self.email);
        try headers.append("X-Auth-Key", self.apikey);
        try headers.append("Content-type", "application/json");
        return headers;
    }

    /// Joins the API base URL with `cmd`. Caller frees the result.
    fn buildUrl(self: Cloudflare, cmd: []const u8) ![]const u8 {
        const urls = [_][]const u8{ cloudflareApi, cmd };
        return std.mem.join(self.allocator, "", &urls);
    }

    /// HTTP GET of `cmd`. Returns a copy of the response body; the caller
    /// owns it and must free it with `self.allocator`.
    /// (The previous version returned `response.body` *after*
    /// `response.deinit()` had run via defer — a use-after-free.)
    pub fn get(self: Cloudflare, cmd: []const u8) ![]const u8 {
        var headers = try self.authHeaders();
        defer headers.deinit();
        var client = try Client.init(self.allocator);
        defer client.deinit();
        const url = try self.buildUrl(cmd);
        defer self.allocator.free(url);
        var response = try client.get(url, .{ .headers = headers.items() });
        defer response.deinit();
        std.debug.print("body: {s}\n", .{response.body});
        std.debug.print("cmd: {s}\n", .{cmd});
        std.debug.print("url: {s}\n", .{url});
        // Copy before deinit frees the response's storage.
        return self.allocator.dupe(u8, response.body);
    }

    /// HTTP POST of `content` (JSON) to `cmd`. Same ownership contract as `get`.
    pub fn post(self: Cloudflare, cmd: []const u8, content: []const u8) ![]const u8 {
        var headers = try self.authHeaders();
        defer headers.deinit();
        var client = try Client.init(self.allocator);
        defer client.deinit();
        const url = try self.buildUrl(cmd);
        defer self.allocator.free(url);
        var response = try client.post(url, .{ .headers = headers.items(), .content = content });
        defer response.deinit();
        return self.allocator.dupe(u8, response.body);
    }

    /// HTTP PUT of `content` (JSON) to `cmd`. Same ownership contract as `get`.
    pub fn put(self: Cloudflare, cmd: []const u8, content: []const u8) ![]const u8 {
        var headers = try self.authHeaders();
        defer headers.deinit();
        var client = try Client.init(self.allocator);
        defer client.deinit();
        const url = try self.buildUrl(cmd);
        defer self.allocator.free(url);
        var response = try client.put(url, .{ .headers = headers.items(), .content = content });
        defer response.deinit();
        return self.allocator.dupe(u8, response.body);
    }

    /// Queries the DNS records of this zone for `domain` and checks the
    /// API `success` flag.
    /// NOTE(review): ID extraction was never implemented upstream; this
    /// still returns "" unconditionally. The `cmd` and body leaks of the
    /// original are fixed below.
    pub fn getDomainID(self: Cloudflare, domain: []const u8) ![]const u8 {
        const cmds = [_][]const u8{ "zones/", self.zoneid, "/dns_records?name=", domain };
        const cmd = try std.mem.join(self.allocator, "", &cmds);
        defer self.allocator.free(cmd); // was leaked
        const result = try self.get(cmd);
        defer self.allocator.free(result); // we own the body copy now
        var parser = std.json.Parser.init(self.allocator, false);
        defer parser.deinit();
        var jsonTree = try parser.parse(result);
        defer jsonTree.deinit();
        const success = jsonTree.root.Object.get("success").?.Bool;
        if (!success) {
            std.debug.print("cmd:{s}\n", .{cmd});
        }
        return "";
    }
};
src/cloudflare.zig
const std = @import("std");

// First command-line argument: memory dump (defaults to `challenge.bin`)
// Second command-line argument: command
// - If `history`, writes a history file
// - If `explain`, writes commands to stderr
//
// Memory dump is written when stdin ends (C-d or redirect stdin from history
// file)

/// Fixed-capacity value stack for the VM. `head` indexes the current top
/// slot; slot 0 is never read, so head == 0 means empty. No overflow checks.
const Stack = struct {
    head: usize = 0,
    values: [65536]u16 = undefined,

    pub fn push(self: *Stack, value: u16) void {
        self.head += 1;
        self.values[self.head] = value;
    }

    pub fn peek(self: Stack) u16 {
        return self.values[self.head];
    }

    pub fn pop(self: *Stack) u16 {
        const value = self.peek();
        self.values[self.head] = undefined;
        self.head -= 1;
        return value;
    }
};

/// Resolves an operand: values 0..32767 are literals, 32768..32775 name
/// registers R0..R7.
fn arg(memory: anytype, registers: anytype, index: usize) u16 {
    const value = memory[index];
    if (value < 32768) {
        return value;
    } else {
        return registers[value - 32768];
    }
}

/// Renders an operand for `explain` output ("5" or "R3").
/// The returned string is page-allocated and intentionally never freed
/// (debug-only path).
fn explainArg(memory: anytype, index: usize) []u8 {
    const value = memory[index];
    if (value < 32768) {
        return std.fmt.allocPrint(std.heap.page_allocator, "{}", .{value}) catch unreachable;
    } else {
        return std.fmt.allocPrint(std.heap.page_allocator, "R{}", .{value - 32768}) catch unreachable;
    }
}

/// Prints a disassembled instruction (mnemonic + up to 3 operands) when
/// `explain` mode is on; otherwise a no-op.
fn printExplain(cmd: anytype, explain: bool, stream: anytype, memory: anytype, i: usize, args: usize) !void {
    if (!explain) {
        return;
    }
    if (args == 0) {
        try stream.print("{}\n", .{cmd});
    } else if (args == 1) {
        try stream.print("{} {}\n", .{ cmd, explainArg(memory, i + 1) });
    } else if (args == 2) {
        try stream.print("{} {} {}\n", .{ cmd, explainArg(memory, i + 1), explainArg(memory, i + 2) });
    } else if (args == 3) {
        try stream.print("{} {} {} {}\n", .{ cmd, explainArg(memory, i + 1), explainArg(memory, i + 2), explainArg(memory, i + 3) });
    }
}

/// Arithmetic in the VM is modulo 32768 (15-bit).
fn mod(value: u16) u16 {
    return value % 32768;
}

/// Returns command-line argument `i` (0-based, after the program name),
/// or "" when absent.
fn argv(i: u8) []u8 {
    var args = std.process.args();
    var j: u8 = 0;
    while (j <= i) : (j += 1) {
        _ = args.skip();
    }
    const flag = (args.next(std.heap.page_allocator) orelse "") catch "";
    return flag;
}

/// Opens a timestamped `history.<millis>` file when the command is
/// "history"; otherwise returns null.
fn historyFile(flag: []u8) ?std.fs.File {
    if (std.mem.eql(u8, flag, "history")) {
        var history_filename: [21]u8 = undefined;
        _ = std.fmt.bufPrint(history_filename[0..], "{}.{}", .{ "history", std.time.milliTimestamp() }) catch unreachable;
        return std.fs.cwd().createFile(&history_filename, .{}) catch unreachable;
    } else {
        // FIX: was `return undefined`, which leaves the optional
        // uninitialized — reading it later in `if (history) |h|` is
        // undefined behavior. `null` is the intended "no history" value.
        return null;
    }
}

/// Opens the program image: `challenge.bin` by default, or the file named
/// by the first CLI argument (which may be a previously written dump).
fn sourceFile(flag: []u8) !std.fs.File {
    if (std.mem.eql(u8, flag, "")) {
        return std.fs.cwd().openFile("challenge.bin", .{});
    } else {
        return std.fs.cwd().openFile(flag, .{});
    }
}

/// Writes a full machine snapshot (`memory.<millis>`): memory, registers,
/// stack values, stack head, instruction pointer — all little-endian, in
/// the layout `load` expects back.
fn dump(memory: [32768]u16, registers: [8]u16, stack: Stack, pointer: usize) !void {
    var memory_filename: [20]u8 = undefined;
    _ = try std.fmt.bufPrint(memory_filename[0..], "{}.{}", .{ "memory", std.time.milliTimestamp() });
    const file = try std.fs.cwd().createFile(&memory_filename, .{});
    defer file.close();
    var bytes: [2]u8 = undefined;
    var pointer_bytes: [8]u8 = undefined;
    for (memory) |item| {
        std.mem.writeIntSliceLittle(u16, bytes[0..], item);
        _ = try file.write(&bytes);
    }
    for (registers) |item| {
        std.mem.writeIntSliceLittle(u16, bytes[0..], item);
        _ = try file.write(&bytes);
    }
    for (stack.values) |item| {
        std.mem.writeIntSliceLittle(u16, bytes[0..], item);
        _ = try file.write(&bytes);
    }
    std.mem.writeIntSliceLittle(usize, pointer_bytes[0..], stack.head);
    _ = try file.write(&pointer_bytes);
    std.mem.writeIntSliceLittle(usize, pointer_bytes[0..], pointer);
    _ = try file.write(&pointer_bytes);
}

/// Loads either the initial challenge binary (<= 64 KiB: memory only,
/// returns IP 0) or a snapshot written by `dump` (memory + registers +
/// stack + head + saved IP). Returns the instruction pointer to resume at.
fn load(flag: []u8, memory: *[32768]u16, registers: *[8]u16, stack: *Stack) !usize {
    // Memory + registers + stack + stack head + pointer
    // = 65536 + 16 + 131072 + 8 + 8 = 196640 bytes.
    var raw: [196640]u8 = undefined;
    const source = try sourceFile(flag);
    defer source.close();
    const source_size = try source.getEndPos();
    _ = try source.read(raw[0..source_size]);
    var i: usize = 0;
    while (i < std.math.min(source_size, 65536)) : (i += 2) {
        memory[i / 2] = std.mem.readIntSliceLittle(u16, raw[i .. i + 2]);
    }
    if (source_size > 65536) {
        // Dump
        while (i < std.math.min(source_size, 65536 + 16)) : (i += 2) {
            registers[(i - 65536) / 2] = std.mem.readIntSliceLittle(u16, raw[i .. i + 2]);
        }
        while (i < std.math.min(source_size, (65536 * 3) + 16)) : (i += 2) {
            stack.values[(i - 65536 - 16) / 2] = std.mem.readIntSliceLittle(u16, raw[i .. i + 2]);
        }
        stack.head = std.mem.readIntSliceLittle(usize, raw[i .. i + 8]);
        return std.mem.readIntSliceLittle(usize, raw[i + 8 .. i + 16]);
    } else {
        // Initial challenge
        return 0;
    }
}

/// Fetch/decode/execute loop for the Synacor VM (opcodes 0..21).
pub fn main() !void {
    // 15-bit address space for memory.
    var memory: [32768]u16 = undefined;
    var registers = [_]u16{0} ** 8;
    var stack = Stack{};
    var i = try load(argv(0), &memory, &registers, &stack);
    const stdout = std.io.getStdOut().outStream();
    const stderr = std.io.getStdErr().outStream();
    const stdin = std.io.getStdIn().inStream();
    const cmd = argv(1);
    const explain = std.mem.eql(u8, cmd, "explain");
    const history = historyFile(cmd);
    defer if (history) |h| {
        h.close();
    };
    var opcode: u16 = 0;
    var a_: u16 = 0; // destination register index (when operand 1 is a register)
    var a: u16 = 0;
    var b: u16 = 0;
    var c: u16 = 0;
    while (true) {
        opcode = memory[i];
        // Pre-resolve up to three operands; unused ones are harmless reads.
        a = arg(memory, registers, i + 1);
        b = arg(memory, registers, i + 2);
        c = arg(memory, registers, i + 3);
        if (memory[i + 1] >= 32768) {
            a_ = memory[i + 1] - 32768;
        }
        switch (opcode) {
            // halt
            0 => {
                try printExplain("halt", explain, stderr, memory, i, 0);
                break;
            },
            // set
            1 => {
                try printExplain("set", explain, stderr, memory, i, 2);
                registers[a_] = b;
                i += 3;
            },
            // push
            2 => {
                try printExplain("push", explain, stderr, memory, i, 1);
                stack.push(a);
                i += 2;
            },
            // pop
            3 => {
                try printExplain("pop", explain, stderr, memory, i, 1);
                registers[a_] = stack.pop();
                i += 2;
            },
            // eq
            4 => {
                try printExplain("eq", explain, stderr, memory, i, 3);
                registers[a_] = @boolToInt(b == c);
                i += 4;
            },
            // gt
            5 => {
                try printExplain("gt", explain, stderr, memory, i, 3);
                registers[a_] = @boolToInt(b > c);
                i += 4;
            },
            // jmp
            6 => {
                try printExplain("jmp", explain, stderr, memory, i, 1);
                i = a;
            },
            // jt
            7 => {
                try printExplain("jt", explain, stderr, memory, i, 2);
                if (a != 0) {
                    i = b;
                } else {
                    i += 3;
                }
            },
            // jf
            8 => {
                try printExplain("jf", explain, stderr, memory, i, 2);
                if (a == 0) {
                    i = b;
                } else {
                    i += 3;
                }
            },
            // add
            9 => {
                try printExplain("add", explain, stderr, memory, i, 3);
                registers[a_] = mod(b + c);
                i += 4;
            },
            // mult (widen to u32 so the product can't overflow before mod)
            10 => {
                try printExplain("mult", explain, stderr, memory, i, 3);
                registers[a_] = @intCast(u16, (@intCast(u32, b) * @intCast(u32, c)) % 32768);
                i += 4;
            },
            // mod
            11 => {
                try printExplain("mod", explain, stderr, memory, i, 3);
                registers[a_] = mod(b % c);
                i += 4;
            },
            // and
            12 => {
                try printExplain("and", explain, stderr, memory, i, 3);
                registers[a_] = b & c;
                i += 4;
            },
            // or
            13 => {
                try printExplain("or", explain, stderr, memory, i, 3);
                registers[a_] = b | c;
                i += 4;
            },
            // not (15-bit complement)
            14 => {
                try printExplain("not", explain, stderr, memory, i, 2);
                registers[a_] = mod(~b);
                i += 3;
            },
            // rmem
            15 => {
                try printExplain("rmem", explain, stderr, memory, i, 2);
                registers[a_] = memory[b];
                i += 3;
            },
            // wmem
            16 => {
                try printExplain("wmem", explain, stderr, memory, i, 2);
                memory[a] = b;
                i += 3;
            },
            // call
            17 => {
                try printExplain("call", explain, stderr, memory, i, 1);
                // Challenge-specific patch: skips the routine at address 6027
                // (presumably the teleporter confirmation) and plants its
                // expected result instead — preserved as-is.
                if (a == 6027) {
                    registers[0] = 6;
                    registers[7] = 25734;
                    i = i + 2;
                } else {
                    stack.push(@intCast(u16, i + 2));
                    i = a;
                }
            },
            // ret
            18 => {
                try printExplain("ret", explain, stderr, memory, i, 0);
                i = stack.pop();
            },
            // out
            19 => {
                try printExplain("out", explain, stderr, memory, i, 1);
                _ = try std.fmt.formatAsciiChar(@intCast(u8, a), .{}, stdout);
                i += 2;
            },
            // in: on EOF, snapshot the machine and exit.
            20 => {
                try printExplain("in", explain, stderr, memory, i, 1);
                if (stdin.readByte()) |input| {
                    registers[a_] = input;
                    if (history) |h| {
                        _ = try h.write(&[1]u8{input});
                    }
                } else |err| {
                    try dump(memory, registers, stack, i);
                    return err;
                }
                i += 2;
            },
            // noop
            21 => {
                try printExplain("noop", explain, stderr, memory, i, 0);
                i += 1;
            },
            else => {
                try stdout.print("unhandled opcode: {}\n", .{opcode});
                break;
            },
        }
    }
}
synacor.zig
//! Dependency bootstrap for the build script (zigmod-style layout; note the
//! `.zigmod/deps` cache path). Wires packages, C include dirs, C sources and
//! system libs onto a LibExeObjStep. Generated-looking file: kept verbatim,
//! comments only.
const std = @import("std");
const builtin = @import("builtin");
const Pkg = std.build.Pkg;
const string = []const u8;

pub const cache = ".zigmod/deps";

/// Attaches every known package and C library to `exe` and links libc.
/// Returns `exe` for chaining.
pub fn addAllTo(
    exe: *std.build.LibExeObjStep,
    b: *std.build.Builder,
    target: std.zig.CrossTarget,
    mode: std.builtin.Mode,
) *std.build.LibExeObjStep {
    @setEvalBranchQuota(1_000_000);
    exe.setTarget(target);
    exe.setBuildMode(mode);
    // lazy
    if (c_libs[0] == null) resolveCLibs(b, target, mode);
    for (c_libs) |c_lib| exe.linkLibrary(c_lib.?);
    for (packages) |pkg| {
        exe.addPackage(pkg.pkg.?);
    }
    // Walk every declared package and forward its native build inputs.
    inline for (std.meta.declarations(package_data)) |decl| {
        const pkg = @as(Package, @field(package_data, decl.name));
        inline for (pkg.system_libs) |item| {
            exe.linkSystemLibrary(item);
        }
        inline for (pkg.c_include_dirs) |item| {
            exe.addIncludeDir(@field(dirs, decl.name) ++ "/" ++ item);
        }
        inline for (pkg.c_source_files) |item| {
            exe.addCSourceFile(@field(dirs, decl.name) ++ "/" ++ item, pkg.c_source_flags);
        }
    }
    exe.linkLibC();
    return exe;
}

/// Handle to a lazily created static-library step (see `c_libs`).
pub const CLib = struct {
    name: string,
    idx: usize,
    pub fn getStep(self: *CLib) ?*std.build.LibExeObjStep {
        return c_libs[self.idx];
    }
};

/// Metadata for one dependency: Zig package plus any native build inputs.
pub const Package = struct {
    directory: string,
    pkg: ?Pkg = null,
    c_include_dirs: []const string = &.{},
    c_libs: []const CLib = &.{},
    c_source_files: []const string = &.{},
    c_source_flags: []const string = &.{},
    system_libs: []const string = &.{},
    vcpkg: bool = false,
};

pub const dirs = struct {
    pub const _root = "";
    pub const _ju1a3i1krntg = ".";
};

// Placeholder map for packages with no dependencies of their own.
const zero_deps_map = std.ComptimeStringMap(string, .{ .{ "", "" } });

pub const dep_dirs = struct {
    pub const _root = std.ComptimeStringMap(string, .{
        .{ "zpp-crc32c", dirs._ju1a3i1krntg },
    });
    pub const _ju1a3i1krntg = zero_deps_map;
};

pub const package_data = struct {
    pub const _ju1a3i1krntg = Package{
        .directory = dirs._ju1a3i1krntg,
        .pkg = Pkg{ .name = "zpp-crc32c", .path = .{ .path = dirs._ju1a3i1krntg ++ "/src/lib.zig" }, .dependencies = null },
        .c_include_dirs = &.{"crc32c/include"},
        .c_libs = &.{
            .{ .name = "crc32c", .idx = 0 },
        },
    };
    pub const _root = Package{
        .directory = dirs._root,
    };
};

pub const packages = &[_]Package{
    package_data._ju1a3i1krntg,
};

pub const pkgs = struct {
    pub const zpp_crc32c = package_data._ju1a3i1krntg;
};

// lazy
var c_libs = std.mem.zeroes([1]?*std.build.LibExeObjStep);

/// Builds the crc32c static-library step on first use (called once from
/// addAllTo when c_libs[0] is still null).
fn resolveCLibs(
    b: *std.build.Builder,
    target: std.zig.CrossTarget,
    mode: std.builtin.Mode,
) void {
    c_libs[0] = @import("crc32c_lib.zig").configure(
        dirs._ju1a3i1krntg,
        dep_dirs._ju1a3i1krntg,
        dep_dirs._root,
        b.allocator,
        b.addStaticLibrary("crc32c", null),
        target,
        mode,
    );
}
deps.zig
const std = @import("std"); // dross-zig const Vector2 = @import("../core/vector2.zig").Vector2; const TextureRegion = @import("../renderer/texture_region.zig").TextureRegion; const Texture = @import("../renderer/texture.zig").Texture; const Frame2d = @import("frame_2d.zig").Frame2d; const Math = @import("../math/math.zig").Math; // ----------------------------------------- // - Animation2D - // ----------------------------------------- /// Animation2d bundles individual Frame2d instances and /// controls the flow of them. pub const Animation2d = struct { /// The allocator required to populate the frames list. allocator: *std.mem.Allocator = undefined, /// The name of the animation animation_name: []const u8 = undefined, /// Cache for the frames used in the animation frames: std.ArrayList(*Frame2d) = undefined, /// The current frame current_frame: u16 = 0, /// Flag to control if the animation should be looped or not /// Default: true loop_animation: bool = true, /// Is the animation currently playing animation_playing: bool = false, /// Determines if the animation will sit on the last frame or /// the first frame if the animation does NOT loop. sit_on_final_frame: bool = true, const Self = @This(); /// Allocates and builds a new Animation2d instance. /// Comments: The caller will own the allocated memory. pub fn new(allocator: *std.mem.Allocator, animation_name: []const u8) !*Self { var self = try allocator.create(Animation2d); self.allocator = allocator; self.animation_name = animation_name; self.frames = std.ArrayList(*Frame2d).init(allocator); self.current_frame = 0; self.loop_animation = true; self.animation_playing = false; self.sit_on_final_frame = true; return self; } /// Cleans up and de-allocates the Animation2d. 
pub fn free(allocator: *std.mem.Allocator, self: *Self) void { for (self.frames.items) |animation_frame| { Frame2d.free(allocator, animation_frame); } self.frames.deinit(); allocator.destroy(self); } /// Creates and adds the Animation2d's frames /// Comments: The Animation2d will owe the allocated memory. pub fn createFromTexture( self: *Self, texture_atlas: *Texture, start_coordinate: Vector2, region_size: Vector2, // i.e: 16x16 grid comptime region_count: u16, // The number of frames/cells/regions to extract comptime regions_sprite_occupies: []const Vector2, // e.x.: a 16x32 sprite on a 16x16 grid will have need Vector2(1.0, 2.0). comptime frame_durations: []const f32, ) !void { // Check to see if the length of frame_durations slice and regions_sprite_occupies // slice matches the region_count if (region_count != regions_sprite_occupies.len or region_count != frame_durations.len) { std.debug.print("{} | {} | {}\n", .{ region_count, regions_sprite_occupies.len, frame_durations.len }); @compileError("[Animation2D]: Region count, length of the slice the regions the sprite occupies, and length of the slice of frame durations MUST be uniform!"); } // Loop the number of regions var index: usize = 0; while (index < region_count) : (index += 1) { // Create the TextureRegion const frame_coordinate = start_coordinate.add( Vector2.new(@intToFloat(f32, index), 0.0), ); var frame_region = try TextureRegion.new( self.allocator, texture_atlas, frame_coordinate, region_size, regions_sprite_occupies[index], ); // Add Frame2d to animation try self.addFrame(frame_region, frame_durations[index]); } } /// Appends a new frame to the Animation2d's frame list. pub fn addFrame(self: *Self, region: *TextureRegion, frame_duration: f32) !void { var new_frame = try Frame2d.new(self.allocator, region, frame_duration); try self.frames.append(new_frame); } /// Begins the animation. If `force_restart` is true, then /// the animation will restart from the beginning if it is /// already playing. 
pub fn play(self: *Self, force_restart: bool) void { if (self.animation_playing and force_restart) { self.reset(); } if (self.animation_playing) return; self.animation_playing = true; } /// Stops the animation from playing, maintaining the current /// frame. pub fn stop(self: *Self) void { self.animation_playing = false; } /// Resets the animation from the beginnning pub fn reset(self: *Self) void { self.current_frame = 0; } /// Moves to the Animation2d's next frame /// Returns true if the animation is over pub fn next(self: *Self) bool { // Check to see if we're at the end of the animation if (self.current_frame + 1 >= self.frames.items.len) { //self.onAnimationEnd(); return true; } // Increment the frame index const target_value: u16 = self.current_frame + 1; const min_value: u16 = 0; const max_value: u16 = @intCast(u16, self.frames.items.len) - 1; self.current_frame = Math.clamp(target_value, min_value, max_value); return false; } /// Moves to the Animation2d's previous frame /// Returns true if the animation is over pub fn previous(self: *Self) bool { // Check to see if we're at the start of the animation if (self.current_frame <= 0) { //self.onAnimationEnd(); return true; } // Decrement the frame index self.current_frame = Math.clamp(self.current_frame - 1, 0, self.current_frame); return false; } /// Called when the animation has completed. pub fn onAnimationEnd(self: *Self) void { // TODO(devon): Send messsage to observers when events are implemented // Loops the animation if (self.loop_animation) { self.reset(); return; } self.animation_playing = false; // Reset to the first frame if (!self.sit_on_final_frame) { self.reset(); } } /// Sets the loop flag that tells the Animation2d to loop the animation when it completes. pub fn setLoop(self: *Self, loop_animation: bool) void { self.loop_animation = loop_animation; } /// Sets the flag that tells the Animation2d what frame to remain on, /// final frame (if the animation does NOT loop), or the /// first frame. 
pub fn setSitOnFinalFrame(self: *Self, bool: sit_on_final_frame) void { self.sit_on_final_frame = sit_on_final_frame; } /// Returns if the Animation2d is currently playing pub fn playing(self: *Self) bool { return self.animation_playing; } /// Returns the current Frame2d pub fn frame(self: *Self) *Frame2d { return self.frames.items[self.current_frame]; } /// Returns the current frame index pub fn frameIndex(self: *Self) u16 { return self.current_frame; } /// Returns the current frame's TextureRegion pub fn textureRegion(self: *Self) ?*TextureRegion { return self.frames.items[self.current_frame].textureRegion(); } /// Returns whether the Animation2d is set to loop. pub fn loop(self: *Self) bool { return self.loop_animation; } /// Returns whether the Animation2d will remain the the /// final frame (if the animation does NOT loop), or the /// first frame. pub fn sitOnFinalFrame(self: *Self) bool { return self.sit_on_final_frame; } /// Returns the given name of the animation pub fn name(self: *Self) []const u8 { return self.animation_name; } };
src/animation/animation_2d.zig
const std = @import("std"); pub fn RaxStack(comptime NodeT: type, comptime StaticSize: usize) type { return struct { maxItems: usize, staticItems: [StaticSize]*NodeT, stack: []*NodeT, const Self = @This(); pub fn init(self: *Self) void { self.* = Self{ .maxItems = StaticSize, .staticItems = undefined, .stack = &self.staticItems, }; self.stack.len = 0; } pub fn deinit(self: *Self, allocator: *std.mem.Allocator) void { if (self.stack.ptr != &self.staticItems) { allocator.free(self.stack); } } // The self.stack slice has a lenght always corresponding to the // number of pointers stored in the backing array. Since we allocate // space for those in bulk (by doubling the current maxItems value), // when adding new items we also manipulate the slice's `.len` field. // This means that we use the slice unsafely in .push and .pop, but in // exchange external uses can just use the slice normally and be sure // that they won't read undefined values (aka garbage). pub fn push(self: *Self, allocator: *std.mem.Allocator, item: *NodeT) !void { if (self.stack.len == self.maxItems) { if (self.stack.ptr == &self.staticItems) { self.stack = try allocator.alloc(*NodeT, self.maxItems * 2); std.mem.copy(*NodeT, self.stack, &self.staticItems); self.stack.len = self.maxItems; } else { self.stack = try allocator.realloc(self.stack, self.maxItems * 2); self.stack.len = self.maxItems; } self.maxItems *= 2; } self.stack.ptr[self.stack.len] = item; self.stack.len += 1; } pub fn pop(self: *Self) *NodeT { if (self.stack.len == 0) @panic("tried to pop from an empty stack"); self.stack.len -= 1; return self.stack.ptr[self.stack.len]; } pub fn peek(self: *Self) ?*NodeT { if (self.stack.len == 0) return null; return self.stack.ptr[self.stack.len - 1]; } }; }
src/simple/stack.zig
//! Stage-1 runner for `zig fmt`: parses arguments, then formats stdin or a
//! list of files/directories. Written against a very old (pre-0.4) std API
//! (os.File, OutStream, std.Buffer, iterator-based ast errors).
const std = @import("std");
const builtin = @import("builtin");
const os = std.os;
const io = std.io;
const mem = std.mem;
const Allocator = mem.Allocator;
const ArrayList = std.ArrayList;
const Buffer = std.Buffer;
const ast = std.zig.ast;
const arg = @import("fmt/arg.zig");
const self_hosted_main = @import("fmt/main.zig");
const Args = arg.Args;
const Flag = arg.Flag;
const errmsg = @import("fmt/errmsg.zig");

// Process-global streams, set up once in main() and used by fmtPath /
// printErrMsgToFile.
var stderr_file: os.File = undefined;
var stderr: *io.OutStream(os.File.WriteError) = undefined;
var stdout: *io.OutStream(os.File.WriteError) = undefined;

// This brings `zig fmt` to stage 1.
pub fn main() !void {
    // Here we use an ArenaAllocator backed by a DirectAllocator because `zig fmt` is a short-lived,
    // one shot program. We don't need to waste time freeing memory and finding places to squish
    // bytes into. So we free everything all at once at the very end.
    var direct_allocator = std.heap.DirectAllocator.init();
    var arena = std.heap.ArenaAllocator.init(&direct_allocator.allocator);
    const allocator = &arena.allocator;

    var stdout_file = try std.io.getStdOut();
    var stdout_out_stream = stdout_file.outStream();
    stdout = &stdout_out_stream.stream;

    stderr_file = try std.io.getStdErr();
    var stderr_out_stream = stderr_file.outStream();
    stderr = &stderr_out_stream.stream;

    const args = try std.os.argsAlloc(allocator);
    var flags = try Args.parse(allocator, self_hosted_main.args_fmt_spec, args);
    defer flags.deinit();

    if (flags.present("help")) {
        try stdout.write(self_hosted_main.usage_fmt);
        os.exit(0);
    }

    // Resolve --color into the errmsg enum; bad values were already
    // rejected by the flag parser, hence the `unreachable`.
    const color = blk: {
        if (flags.single("color")) |color_flag| {
            if (mem.eql(u8, color_flag, "auto")) {
                break :blk errmsg.Color.Auto;
            } else if (mem.eql(u8, color_flag, "on")) {
                break :blk errmsg.Color.On;
            } else if (mem.eql(u8, color_flag, "off")) {
                break :blk errmsg.Color.Off;
            } else unreachable;
        } else {
            break :blk errmsg.Color.Auto;
        }
    };

    // --stdin mode: read one source from stdin, render to stdout (or just
    // report the would-change status with --check), then exit.
    if (flags.present("stdin")) {
        if (flags.positionals.len != 0) {
            try stderr.write("cannot use --stdin with positional arguments\n");
            os.exit(1);
        }
        var stdin_file = try io.getStdIn();
        var stdin = stdin_file.inStream();
        const source_code = try stdin.stream.readAllAlloc(allocator, self_hosted_main.max_src_size);
        defer allocator.free(source_code);

        var tree = std.zig.parse(allocator, source_code) catch |err| {
            try stderr.print("error parsing stdin: {}\n", err);
            os.exit(1);
        };
        defer tree.deinit();

        var error_it = tree.errors.iterator(0);
        while (error_it.next()) |parse_error| {
            try printErrMsgToFile(allocator, parse_error, &tree, "<stdin>", stderr_file, color);
        }
        if (tree.errors.len != 0) {
            os.exit(1);
        }
        if (flags.present("check")) {
            // Render into a null stream just to learn whether anything
            // would change; exit code 1 signals "not formatted".
            const anything_changed = try std.zig.render(allocator, io.null_out_stream, &tree);
            const code = if (anything_changed) u8(1) else u8(0);
            os.exit(code);
        }

        _ = try std.zig.render(allocator, stdout, &tree);
        return;
    }

    if (flags.positionals.len == 0) {
        try stderr.write("expected at least one source file argument\n");
        os.exit(1);
    }

    var fmt = Fmt{
        .seen = Fmt.SeenMap.init(allocator),
        .any_error = false,
        .color = color,
        .allocator = allocator,
    };

    const check_mode = flags.present("check");

    for (flags.positionals.toSliceConst()) |file_path| {
        try fmtPath(&fmt, file_path, check_mode);
    }
    if (fmt.any_error) {
        os.exit(1);
    }
}

/// Errors fmtPath can propagate (anything not listed is reported to
/// stderr and recorded in `fmt.any_error` instead).
const FmtError = error{
    SystemResources,
    OperationAborted,
    IoPending,
    BrokenPipe,
    Unexpected,
    WouldBlock,
    FileClosed,
    DestinationAddressRequired,
    DiskQuota,
    FileTooBig,
    InputOutput,
    NoSpaceLeft,
    AccessDenied,
    OutOfMemory,
    RenameAcrossMountPoints,
    ReadOnlyFileSystem,
    LinkQuotaExceeded,
    FileBusy,
} || os.File.OpenError;

/// Formats one path: files are parsed and rewritten in place (or only
/// reported with `check_mode`); directories are recursed into. Paths are
/// deduplicated through `fmt.seen`.
fn fmtPath(fmt: *Fmt, file_path_ref: []const u8, check_mode: bool) FmtError!void {
    const file_path = try std.mem.dupe(fmt.allocator, u8, file_path_ref);
    defer fmt.allocator.free(file_path);

    // Already handled this path (e.g. listed twice, or reached via a dir).
    if (try fmt.seen.put(file_path, {})) |_| return;

    const source_code = io.readFileAlloc(fmt.allocator, file_path) catch |err| switch (err) {
        error.IsDir, error.AccessDenied => {
            // Reading failed because it's a directory: recurse instead.
            // TODO make event based (and dir.next())
            var dir = try std.os.Dir.open(fmt.allocator, file_path);
            defer dir.close();

            while (try dir.next()) |entry| {
                // NOTE(review): the `or` recurses into subdirectories AND
                // picks up .zig files; non-zig regular files are skipped.
                if (entry.kind == std.os.Dir.Entry.Kind.Directory or mem.endsWith(u8, entry.name, ".zig")) {
                    const full_path = try os.path.join(fmt.allocator, [][]const u8{ file_path, entry.name });
                    try fmtPath(fmt, full_path, check_mode);
                }
            }
            return;
        },
        else => {
            // TODO lock stderr printing
            try stderr.print("unable to open '{}': {}\n", file_path, err);
            fmt.any_error = true;
            return;
        },
    };
    defer fmt.allocator.free(source_code);

    var tree = std.zig.parse(fmt.allocator, source_code) catch |err| {
        try stderr.print("error parsing file '{}': {}\n", file_path, err);
        fmt.any_error = true;
        return;
    };
    defer tree.deinit();

    var error_it = tree.errors.iterator(0);
    while (error_it.next()) |parse_error| {
        try printErrMsgToFile(fmt.allocator, parse_error, &tree, file_path, stderr_file, fmt.color);
    }
    if (tree.errors.len != 0) {
        fmt.any_error = true;
        return;
    }

    if (check_mode) {
        // Dry run: list the file and flag an error if it would change.
        const anything_changed = try std.zig.render(fmt.allocator, io.null_out_stream, &tree);
        if (anything_changed) {
            try stderr.print("{}\n", file_path);
            fmt.any_error = true;
        }
    } else {
        // TODO make this evented
        // Atomic rewrite: render to a temp file, commit only on change.
        const baf = try io.BufferedAtomicFile.create(fmt.allocator, file_path);
        defer baf.destroy();

        const anything_changed = try std.zig.render(fmt.allocator, baf.stream(), &tree);
        if (anything_changed) {
            try stderr.print("{}\n", file_path);
            try baf.finish();
        }
    }
}

/// Shared state threaded through fmtPath recursion.
const Fmt = struct {
    seen: SeenMap,
    any_error: bool,
    color: errmsg.Color,
    allocator: *mem.Allocator,

    // Set of already-visited paths (value type `void`).
    const SeenMap = std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8);
};

/// Renders one parse error as "path:line:col: error: msg", optionally with
/// the offending source line and a `~` underline when color is enabled.
fn printErrMsgToFile(allocator: *mem.Allocator, parse_error: *const ast.Error, tree: *ast.Tree, path: []const u8, file: os.File, color: errmsg.Color,) !void {
    const color_on = switch (color) {
        errmsg.Color.Auto => file.isTty(),
        errmsg.Color.On => true,
        errmsg.Color.Off => false,
    };
    const lok_token = parse_error.loc();
    const span = errmsg.Span{
        .first = lok_token,
        .last = lok_token,
    };

    const first_token = tree.tokens.at(span.first);
    const last_token = tree.tokens.at(span.last);
    const start_loc = tree.tokenLocationPtr(0, first_token);
    // NOTE(review): end_loc appears unused below — confirm before removing.
    const end_loc = tree.tokenLocationPtr(first_token.end, last_token);

    // Render the error message into a growable buffer first.
    var text_buf = try std.Buffer.initSize(allocator, 0);
    var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
    try parse_error.render(&tree.tokens, out_stream);
    const text = text_buf.toOwnedSlice();

    const stream = &file.outStream().stream;
    if (!color_on) {
        try stream.print(
            "{}:{}:{}: error: {}\n",
            path,
            start_loc.line + 1,
            start_loc.column + 1,
            text,
        );
        return;
    }

    // Colored/tty path: also echo the source line and underline the span.
    try stream.print(
        "{}:{}:{}: error: {}\n{}\n",
        path,
        start_loc.line + 1,
        start_loc.column + 1,
        text,
        tree.source[start_loc.line_start..start_loc.line_end],
    );
    try stream.writeByteNTimes(' ', start_loc.column);
    try stream.writeByteNTimes('~', last_token.end - first_token.start);
    try stream.write("\n");
}
std/special/fmt_runner.zig
const std = @import("std");
const blo = @import("blo.zig");

/// A colored span of the source: `src[start..end]` rendered in `color`.
pub const Token = struct {
    color: blo.Color,
    start: usize,
    end: usize,
};

pub const Language = enum {
    json,
};

/// Color assignment for each syntactic class.
pub const Theme = struct {
    string: blo.Color,
    keyword: blo.Color,
    number: blo.Color,
    operator: blo.Color,
    bracket: blo.Color,
    comment: blo.Color,
    variable: blo.Color,
    declaration: blo.Color,
    meaning: blo.Color,
};

/// Iterator that tokenizes `src` into contiguous colored Tokens.
pub const SyntaxIterator = struct {
    const default_theme = Theme{
        .string = .Yellow,
        .keyword = .Red,
        .number = .BrightBlue,
        .operator = .BrightRed,
        .bracket = .BrightMagenta,
        .comment = .Gray,
        .variable = .White,
        .declaration = .BrightGreen,
        .meaning = .Gray,
    };

    lang: Language,
    theme: Theme,
    src: []const u8,
    index: usize,
    token: Token,

    /// `theme == null` selects `default_theme`.
    pub fn init(lang: Language, theme: ?Theme, src: []const u8) SyntaxIterator {
        return SyntaxIterator{
            .lang = lang,
            .theme = theme orelse default_theme,
            .src = src,
            .index = 0,
            .token = undefined,
        };
    }

    /// Current byte under the cursor.
    fn c(self: *SyntaxIterator) u8 {
        return self.src[self.index];
    }

    /// Advances `index` past one token, setting `token.color`.
    /// Fix 1: colors now come from `self.theme` — the theme passed to `init`
    /// was previously ignored (hardcoded colors) except for comments.
    /// Fix 2: a backslash escape inside a double-quoted string no longer
    /// drops into the single-quote state (which made `"a\"b"` unterminated);
    /// each quote kind has its own backslash state.
    fn lexJSON(self: *SyntaxIterator) void {
        var state: enum {
            start,
            string_literal,
            string_double_literal,
            string_literal_backslash,
            string_double_literal_backslash,
            number,
            hexadecimal,
            zero,
            slash,
            comment_start,
            comment_end,
            whitespace,
        } = .start;
        while (self.index < self.src.len) : (self.index += 1) {
            switch (state) {
                .start => switch (self.c()) {
                    ' ', '\t', '\r' => {
                        state = .whitespace;
                    },
                    '\n' => {
                        self.index += 1;
                        break;
                    },
                    '"' => {
                        self.token.color = self.theme.string;
                        state = .string_double_literal;
                    },
                    '\'' => {
                        self.token.color = self.theme.string;
                        state = .string_literal;
                    },
                    '{', '}', '[', ']' => {
                        self.token.color = self.theme.bracket;
                        self.index += 1;
                        break;
                    },
                    '-' => {
                        self.token.color = self.theme.operator;
                        self.index += 1;
                        break;
                    },
                    '/' => {
                        self.token.color = self.theme.comment;
                        state = .slash;
                    },
                    ',', ':' => {
                        self.token.color = self.theme.meaning;
                        self.index += 1;
                        break;
                    },
                    '0' => {
                        self.token.color = self.theme.number;
                        state = .zero;
                    },
                    '1'...'9' => {
                        self.token.color = self.theme.number;
                        state = .number;
                    },
                    else => {
                        self.index += 1;
                        break;
                    },
                },
                .whitespace => switch (self.c()) {
                    ' ', '\t', '\r' => {},
                    else => {
                        break;
                    },
                },
                .string_literal, .string_double_literal => switch (self.c()) {
                    '\\' => {
                        // Remember which quote kind we are inside.
                        state = if (state == .string_double_literal)
                            .string_double_literal_backslash
                        else
                            .string_literal_backslash;
                    },
                    '"' => {
                        if (state == .string_double_literal) {
                            self.index += 1;
                            break;
                        }
                    },
                    '\'' => {
                        if (state == .string_literal) {
                            self.index += 1;
                            break;
                        }
                    },
                    else => {},
                },
                // Skip the escaped character, then resume the matching string state.
                .string_literal_backslash => state = .string_literal,
                .string_double_literal_backslash => state = .string_double_literal,
                .zero => switch (self.c()) {
                    'x' => {
                        state = .hexadecimal;
                    },
                    else => {
                        break;
                    },
                },
                .number => switch (self.c()) {
                    '0'...'9' => {},
                    else => {
                        break;
                    },
                },
                .hexadecimal => switch (self.c()) {
                    'a'...'f', 'A'...'F', '0'...'9' => {},
                    else => {
                        break;
                    },
                },
                .slash => switch (self.c()) {
                    '*' => state = .comment_start,
                    else => {
                        break;
                    },
                },
                .comment_start => switch (self.c()) {
                    '*' => state = .comment_end,
                    else => {},
                },
                .comment_end => switch (self.c()) {
                    '/' => {
                        self.index += 1;
                        break;
                    },
                    else => state = .comment_start,
                },
            }
        }
    }

    /// Returns the next token, or null at end of source. Tokens tile the
    /// source exactly: each starts where the previous one ended.
    pub fn next(self: *SyntaxIterator) ?Token {
        if (self.index == self.src.len) return null;
        self.token = Token{
            .color = .Reset,
            .start = self.index,
            .end = undefined,
        };
        switch (self.lang) {
            .json => self.lexJSON(),
        }
        self.token.end = self.index;
        return self.token;
    }
};

test "json" {
    const value = @embedFile("../test/json.json");
    var syntax = SyntaxIterator.init(.json, null, value);

    std.debug.print("\n\n-----JSON-----\n", .{});
    var prev_token_end: usize = 0;
    while (syntax.next()) |v| {
        std.debug.assert(prev_token_end == v.start);
        std.debug.print("{s}{s}", .{
            v.color.getColor(),
            value[v.start..v.end],
        });
        prev_token_end = v.end;
    }
    std.debug.print("\x1b[0m\n--------------\n\n", .{});
}
src/syntax.zig
const std = @import("std"); usingnamespace (@import("../machine.zig")); usingnamespace (@import("../util.zig")); const imm = Operand.immediate; const mem = Operand.memory; const memRm = Operand.memoryRm; const reg = Operand.register; test "string and XLAT instructions" { const m16 = Machine.init(.x86_16); const m32 = Machine.init(.x86_32); const m64 = Machine.init(.x64); debugPrint(false); // INS BYTE ES:[(E/R)DI], DX // INS WORD ES:[(E/R)DI], DX // INS DWORD ES:[(E/R)DI], DX // { { testOp2(m16, .INS, memRm(.ES, .BYTE, .DI, 0), reg(.DX), "6c"); testOp2(m32, .INS, memRm(.ES, .BYTE, .DI, 0), reg(.DX), "67 6c"); testOp2(m64, .INS, memRm(.ES, .BYTE, .DI, 0), reg(.DX), AsmError.InvalidOperand); // testOp2(m16, .INS, memRm(.ES, .BYTE, .EDI, 0), reg(.DX), "67 6c"); testOp2(m32, .INS, memRm(.ES, .BYTE, .EDI, 0), reg(.DX), "6c"); testOp2(m64, .INS, memRm(.ES, .BYTE, .EDI, 0), reg(.DX), "67 6c"); // testOp2(m16, .INS, memRm(.ES, .BYTE, .RDI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m32, .INS, memRm(.ES, .BYTE, .RDI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m64, .INS, memRm(.ES, .BYTE, .RDI, 0), reg(.DX), "6c"); } { testOp2(m16, .INS, memRm(.ES, .WORD, .DI, 0), reg(.DX), "6d"); testOp2(m32, .INS, memRm(.ES, .WORD, .DI, 0), reg(.DX), "66 67 6d"); testOp2(m64, .INS, memRm(.ES, .WORD, .DI, 0), reg(.DX), AsmError.InvalidOperand); // testOp2(m16, .INS, memRm(.ES, .WORD, .EDI, 0), reg(.DX), "67 6d"); testOp2(m32, .INS, memRm(.ES, .WORD, .EDI, 0), reg(.DX), "66 6d"); testOp2(m64, .INS, memRm(.ES, .WORD, .EDI, 0), reg(.DX), "66 67 6d"); // testOp2(m16, .INS, memRm(.ES, .WORD, .RDI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m32, .INS, memRm(.ES, .WORD, .RDI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m64, .INS, memRm(.ES, .WORD, .RDI, 0), reg(.DX), "66 6d"); } { testOp2(m16, .INS, memRm(.ES, .DWORD, .DI, 0), reg(.DX), "66 6d"); testOp2(m32, .INS, memRm(.ES, .DWORD, .DI, 0), reg(.DX), "67 6d"); testOp2(m64, .INS, memRm(.ES, .DWORD, .DI, 0), reg(.DX), 
AsmError.InvalidOperand); // testOp2(m16, .INS, memRm(.ES, .DWORD, .EDI, 0), reg(.DX), "66 67 6d"); testOp2(m32, .INS, memRm(.ES, .DWORD, .EDI, 0), reg(.DX), "6d"); testOp2(m64, .INS, memRm(.ES, .DWORD, .EDI, 0), reg(.DX), "67 6d"); // testOp2(m16, .INS, memRm(.ES, .DWORD, .RDI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m32, .INS, memRm(.ES, .DWORD, .RDI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m64, .INS, memRm(.ES, .DWORD, .RDI, 0), reg(.DX), "6d"); } { testOp2(m16, .INS, memRm(.ES, .QWORD, .DI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m32, .INS, memRm(.ES, .QWORD, .DI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m64, .INS, memRm(.ES, .QWORD, .DI, 0), reg(.DX), AsmError.InvalidOperand); // testOp2(m16, .INS, memRm(.ES, .QWORD, .EDI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m32, .INS, memRm(.ES, .QWORD, .EDI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m64, .INS, memRm(.ES, .QWORD, .EDI, 0), reg(.DX), AsmError.InvalidOperand); // testOp2(m16, .INS, memRm(.ES, .QWORD, .RDI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m32, .INS, memRm(.ES, .QWORD, .RDI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m64, .INS, memRm(.ES, .QWORD, .RDI, 0), reg(.DX), AsmError.InvalidOperand); } { testOp2(m16, .INS, mem(.ES, .BYTE, 0, null, .EAX, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m32, .INS, mem(.ES, .BYTE, 0, null, .EAX, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m64, .INS, mem(.ES, .BYTE, 0, null, .EAX, 0), reg(.DX), AsmError.InvalidOperand); // TODO // testOp2(m16, .INS, mem(.ES, .BYTE, 0, .EAX, null, 0), reg(.DX), AsmError.InvalidOperand); // testOp2(m32, .INS, mem(.ES, .BYTE, 0, .EAX, null, 0), reg(.DX), AsmError.InvalidOperand); // testOp2(m64, .INS, mem(.ES, .BYTE, 0, .EAX, null, 0), reg(.DX), AsmError.InvalidOperand); // testOp2(m16, .INS, mem(.ES, .BYTE, 0, null, .EDI, 0), reg(.CX), AsmError.InvalidOperand); testOp2(m32, .INS, mem(.ES, .BYTE, 0, null, .EDI, 0), reg(.CX), AsmError.InvalidOperand); testOp2(m64, .INS, 
mem(.ES, .BYTE, 0, null, .EDI, 0), reg(.CX), AsmError.InvalidOperand); // testOp2(m16, .INS, mem(.ES, .XMM_WORD, 0, null, .EDI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m32, .INS, mem(.ES, .XMM_WORD, 0, null, .EDI, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m64, .INS, mem(.ES, .XMM_WORD, 0, null, .EDI, 0), reg(.DX), AsmError.InvalidOperand); // testOp2(m16, .INS, mem(.ES, .BYTE, 1, .BL, .AL, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m32, .INS, mem(.ES, .BYTE, 1, .BL, .AL, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m64, .INS, mem(.ES, .BYTE, 1, .BL, .AL, 0), reg(.DX), AsmError.InvalidOperand); // testOp2(m16, .INS, memRm(.ES, .DWORD, .EDI, 1), reg(.DX), AsmError.InvalidOperand); testOp2(m32, .INS, memRm(.ES, .DWORD, .EDI, 1), reg(.DX), AsmError.InvalidOperand); testOp2(m64, .INS, memRm(.ES, .DWORD, .EDI, 1), reg(.DX), AsmError.InvalidOperand); // testOp2(m16, .INS, mem(.ES, .DWORD, 1, .EDI, null, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m32, .INS, mem(.ES, .DWORD, 1, .EDI, null, 0), reg(.DX), AsmError.InvalidOperand); testOp2(m64, .INS, mem(.ES, .DWORD, 1, .EDI, null, 0), reg(.DX), AsmError.InvalidOperand); } } // STOS BYTE ES:[(E/R)DI], AL // STOS WORD ES:[(E/R)DI], AX // STOS DWORD ES:[(E/R)DI], EAX // STOS QWORD ES:[(E/R)DI], RAX { testOp2(m64, .STOS, memRm(.ES, .BYTE, .DI, 0), reg(.AL), AsmError.InvalidOperand); testOp2(m64, .STOS, memRm(.ES, .BYTE, .EDI, 0), reg(.AL), "67 aa"); testOp2(m64, .STOS, memRm(.ES, .BYTE, .RDI, 0), reg(.AL), "aa"); // testOp2(m64, .STOS, memRm(.ES, .WORD, .DI, 0), reg(.AX), AsmError.InvalidOperand); testOp2(m64, .STOS, memRm(.ES, .WORD, .EDI, 0), reg(.AX), "66 67 ab"); testOp2(m64, .STOS, memRm(.ES, .WORD, .RDI, 0), reg(.AX), "66 ab"); // testOp2(m64, .STOS, memRm(.ES, .DWORD, .DI, 0), reg(.EAX), AsmError.InvalidOperand); testOp2(m64, .STOS, memRm(.ES, .DWORD, .EDI, 0), reg(.EAX), "67 ab"); testOp2(m64, .STOS, memRm(.ES, .DWORD, .RDI, 0), reg(.EAX), "ab"); // testOp2(m64, .STOS, memRm(.ES, .QWORD, .DI, 
0), reg(.RAX), AsmError.InvalidOperand); testOp2(m64, .STOS, memRm(.ES, .QWORD, .EDI, 0), reg(.RAX), "67 48 ab"); testOp2(m64, .STOS, memRm(.ES, .QWORD, .RDI, 0), reg(.RAX), "48 ab"); } // SCAS BYTE ES:[(E/R)DI], AL // SCAS WORD ES:[(E/R)DI], AX // SCAS DWORD ES:[(E/R)DI], EAX // SCAS QWORD ES:[(E/R)DI], RAX { testOp2(m64, .SCAS, memRm(.ES, .BYTE, .DI, 0), reg(.AL), AsmError.InvalidOperand); testOp2(m64, .SCAS, memRm(.ES, .BYTE, .EDI, 0), reg(.AL), "67 ae"); testOp2(m64, .SCAS, memRm(.ES, .BYTE, .RDI, 0), reg(.AL), "ae"); // testOp2(m64, .SCAS, memRm(.ES, .WORD, .DI, 0), reg(.AX), AsmError.InvalidOperand); testOp2(m64, .SCAS, memRm(.ES, .WORD, .EDI, 0), reg(.AX), "66 67 af"); testOp2(m64, .SCAS, memRm(.ES, .WORD, .RDI, 0), reg(.AX), "66 af"); // testOp2(m64, .SCAS, memRm(.ES, .DWORD, .DI, 0), reg(.EAX), AsmError.InvalidOperand); testOp2(m64, .SCAS, memRm(.ES, .DWORD, .EDI, 0), reg(.EAX), "67 af"); testOp2(m64, .SCAS, memRm(.ES, .DWORD, .RDI, 0), reg(.EAX), "af"); // testOp2(m64, .SCAS, memRm(.ES, .QWORD, .DI, 0), reg(.RAX), AsmError.InvalidOperand); testOp2(m64, .SCAS, memRm(.ES, .QWORD, .EDI, 0), reg(.RAX), "67 48 af"); testOp2(m64, .SCAS, memRm(.ES, .QWORD, .RDI, 0), reg(.RAX), "48 af"); // testOp2(m64, .SCAS, reg(.AL), memRm(.ES, .BYTE, .DI, 0), AsmError.InvalidOperand); testOp2(m64, .SCAS, reg(.AL), memRm(.ES, .BYTE, .EDI, 0), AsmError.InvalidOperand); testOp2(m64, .SCAS, reg(.AL), memRm(.ES, .BYTE, .RDI, 0), AsmError.InvalidOperand); } // LODS AL, BYTE DS:[(E/R)SI] // LODS AX, WORD DS:[(E/R)SI] // LODS EAX, DWORD DS:[(E/R)SI] // LODS RAX, QWORD DS:[(E/R)SI] { { testOp2(m16, .LODS, reg(.AL), memRm(.DS, .BYTE, .SI, 0), "ac"); testOp2(m32, .LODS, reg(.AL), memRm(.DS, .BYTE, .SI, 0), "67 ac"); testOp2(m64, .LODS, reg(.AL), memRm(.DS, .BYTE, .SI, 0), AsmError.InvalidOperand); // testOp2(m16, .LODS, reg(.AL), memRm(.DS, .BYTE, .ESI, 0), "67 ac"); testOp2(m32, .LODS, reg(.AL), memRm(.DS, .BYTE, .ESI, 0), "ac"); testOp2(m64, .LODS, reg(.AL), memRm(.DS, .BYTE, .ESI, 
0), "67 ac"); // testOp2(m16, .LODS, reg(.AL), memRm(.DS, .BYTE, .RSI, 0), AsmError.InvalidOperand); testOp2(m32, .LODS, reg(.AL), memRm(.DS, .BYTE, .RSI, 0), AsmError.InvalidOperand); testOp2(m64, .LODS, reg(.AL), memRm(.DS, .BYTE, .RSI, 0), "ac"); } { testOp2(m16, .LODS, reg(.AX), memRm(.DS, .WORD, .SI, 0), "ad"); testOp2(m32, .LODS, reg(.AX), memRm(.DS, .WORD, .SI, 0), "66 67 ad"); testOp2(m64, .LODS, reg(.AX), memRm(.DS, .WORD, .SI, 0), AsmError.InvalidOperand); // testOp2(m16, .LODS, reg(.AX), memRm(.DS, .WORD, .ESI, 0), "67 ad"); testOp2(m32, .LODS, reg(.AX), memRm(.DS, .WORD, .ESI, 0), "66 ad"); testOp2(m64, .LODS, reg(.AX), memRm(.DS, .WORD, .ESI, 0), "66 67 ad"); // testOp2(m16, .LODS, reg(.AX), memRm(.DS, .WORD, .RSI, 0), AsmError.InvalidOperand); testOp2(m32, .LODS, reg(.AX), memRm(.DS, .WORD, .RSI, 0), AsmError.InvalidOperand); testOp2(m64, .LODS, reg(.AX), memRm(.DS, .WORD, .RSI, 0), "66 ad"); } { testOp2(m16, .LODS, reg(.EAX), memRm(.DS, .DWORD, .SI, 0), "66 ad"); testOp2(m32, .LODS, reg(.EAX), memRm(.DS, .DWORD, .SI, 0), "67 ad"); testOp2(m64, .LODS, reg(.EAX), memRm(.DS, .DWORD, .SI, 0), AsmError.InvalidOperand); // testOp2(m16, .LODS, reg(.EAX), memRm(.DS, .DWORD, .ESI, 0), "66 67 ad"); testOp2(m32, .LODS, reg(.EAX), memRm(.DS, .DWORD, .ESI, 0), "ad"); testOp2(m64, .LODS, reg(.EAX), memRm(.DS, .DWORD, .ESI, 0), "67 ad"); // testOp2(m16, .LODS, reg(.EAX), memRm(.DS, .DWORD, .RSI, 0), AsmError.InvalidOperand); testOp2(m32, .LODS, reg(.EAX), memRm(.DS, .DWORD, .RSI, 0), AsmError.InvalidOperand); testOp2(m64, .LODS, reg(.EAX), memRm(.DS, .DWORD, .RSI, 0), "ad"); } { testOp2(m16, .LODS, reg(.RAX), memRm(.DS, .QWORD, .SI, 0), AsmError.InvalidOperand); testOp2(m32, .LODS, reg(.RAX), memRm(.DS, .QWORD, .SI, 0), AsmError.InvalidOperand); testOp2(m64, .LODS, reg(.RAX), memRm(.DS, .QWORD, .SI, 0), AsmError.InvalidOperand); // testOp2(m16, .LODS, reg(.RAX), memRm(.DS, .QWORD, .ESI, 0), AsmError.InvalidOperand); testOp2(m32, .LODS, reg(.RAX), memRm(.DS, .QWORD, 
.ESI, 0), AsmError.InvalidOperand); testOp2(m64, .LODS, reg(.RAX), memRm(.DS, .QWORD, .ESI, 0), "67 48 ad"); // testOp2(m16, .LODS, reg(.RAX), memRm(.DS, .QWORD, .RSI, 0), AsmError.InvalidOperand); testOp2(m32, .LODS, reg(.RAX), memRm(.DS, .QWORD, .RSI, 0), AsmError.InvalidOperand); testOp2(m64, .LODS, reg(.RAX), memRm(.DS, .QWORD, .RSI, 0), "48 ad"); } { testOp2(m16, .LODS, reg(.EAX), memRm(.DS, .BYTE, .EAX, 0), AsmError.InvalidOperand); testOp2(m32, .LODS, reg(.EAX), memRm(.DS, .BYTE, .EAX, 0), AsmError.InvalidOperand); testOp2(m64, .LODS, reg(.EAX), memRm(.DS, .BYTE, .EAX, 0), AsmError.InvalidOperand); // testOp2(m16, .LODS, reg(.EAX), memRm(.DS, .BYTE, .ESI, 0), AsmError.InvalidOperand); testOp2(m32, .LODS, reg(.EAX), memRm(.DS, .BYTE, .ESI, 0), AsmError.InvalidOperand); testOp2(m64, .LODS, reg(.EAX), memRm(.DS, .BYTE, .ESI, 0), AsmError.InvalidOperand); // testOp2(m16, .LODS, reg(.EAX), memRm(.DS, .XMM_WORD, .ESI, 0), AsmError.InvalidOperand); testOp2(m32, .LODS, reg(.EAX), memRm(.DS, .XMM_WORD, .ESI, 0), AsmError.InvalidOperand); testOp2(m64, .LODS, reg(.EAX), memRm(.DS, .XMM_WORD, .ESI, 0), AsmError.InvalidOperand); // testOp2(m16, .LODS, reg(.EAX), mem(.DS, .BYTE, 1, .BL, .AL, 0), AsmError.InvalidOperand); testOp2(m32, .LODS, reg(.EAX), mem(.DS, .BYTE, 1, .BL, .AL, 0), AsmError.InvalidOperand); testOp2(m64, .LODS, reg(.EAX), mem(.DS, .BYTE, 1, .BL, .AL, 0), AsmError.InvalidOperand); } } // OUTS DX, BYTE DS:[(E/R)SI] // OUTS DX, WORD DS:[(E/R)SI] // OUTS DX, DWORD DS:[(E/R)SI] { { testOp2(m64, .OUTS, reg(.DX), memRm(.DS, .BYTE, .SI, 0), AsmError.InvalidOperand); testOp2(m64, .OUTS, reg(.DX), memRm(.DS, .BYTE, .ESI, 0), "67 6e"); testOp2(m64, .OUTS, reg(.DX), memRm(.DS, .BYTE, .RSI, 0), "6e"); // testOp2(m64, .OUTS, reg(.DX), memRm(.DS, .WORD, .SI, 0), AsmError.InvalidOperand); testOp2(m64, .OUTS, reg(.DX), memRm(.DS, .WORD, .ESI, 0), "66 67 6f"); testOp2(m64, .OUTS, reg(.DX), memRm(.DS, .WORD, .RSI, 0), "66 6f"); // testOp2(m64, .OUTS, reg(.DX), memRm(.DS, 
.DWORD, .SI, 0), AsmError.InvalidOperand); testOp2(m64, .OUTS, reg(.DX), memRm(.DS, .DWORD, .ESI, 0), "67 6f"); testOp2(m64, .OUTS, reg(.DX), memRm(.DS, .DWORD, .RSI, 0), "6f"); // testOp2(m64, .OUTS, reg(.DX), memRm(.DS, .QWORD, .SI, 0), AsmError.InvalidOperand); testOp2(m64, .OUTS, reg(.DX), memRm(.DS, .QWORD, .ESI, 0), AsmError.InvalidOperand); testOp2(m64, .OUTS, reg(.DX), memRm(.DS, .QWORD, .RSI, 0), AsmError.InvalidOperand); } } // CMPS BYTE ES:[(R/E)DI], BYTE DS:[(R/E)SI] // CMPS WORD ES:[(R/E)DI], WORD DS:[(R/E)SI] // CMPS DWORD ES:[(R/E)DI], DWORD DS:[(R/E)SI] // CMPS QWORD ES:[(R/E)DI], QWORD DS:[(R/E)SI] { { testOp2(m16, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .BYTE, .SI, 0), "a6"); testOp2(m32, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .BYTE, .SI, 0), "67 a6"); testOp2(m64, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .BYTE, .SI, 0), AsmError.InvalidOperand); // testOp2(m16, .CMPS, memRm(.ES, .BYTE, .EDI, 0), memRm(.DS, .BYTE, .ESI, 0), "67 a6"); testOp2(m32, .CMPS, memRm(.ES, .BYTE, .EDI, 0), memRm(.DS, .BYTE, .ESI, 0), "a6"); testOp2(m64, .CMPS, memRm(.ES, .BYTE, .EDI, 0), memRm(.DS, .BYTE, .ESI, 0), "67 a6"); // testOp2(m16, .CMPS, memRm(.ES, .BYTE, .RDI, 0), memRm(.DS, .BYTE, .RSI, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .BYTE, .RDI, 0), memRm(.DS, .BYTE, .RSI, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .BYTE, .RDI, 0), memRm(.DS, .BYTE, .RSI, 0), "a6"); } { testOp2(m16, .CMPS, memRm(.ES, .WORD, .DI, 0), memRm(.DS, .WORD, .SI, 0), "a7"); testOp2(m32, .CMPS, memRm(.ES, .WORD, .DI, 0), memRm(.DS, .WORD, .SI, 0), "66 67 a7"); testOp2(m64, .CMPS, memRm(.ES, .WORD, .DI, 0), memRm(.DS, .WORD, .SI, 0), AsmError.InvalidOperand); // testOp2(m16, .CMPS, memRm(.ES, .WORD, .EDI, 0), memRm(.DS, .WORD, .ESI, 0), "67 a7"); testOp2(m32, .CMPS, memRm(.ES, .WORD, .EDI, 0), memRm(.DS, .WORD, .ESI, 0), "66 a7"); testOp2(m64, .CMPS, memRm(.ES, .WORD, .EDI, 0), memRm(.DS, .WORD, .ESI, 0), "66 67 a7"); // testOp2(m16, .CMPS, 
memRm(.ES, .WORD, .RDI, 0), memRm(.DS, .WORD, .RSI, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .WORD, .RDI, 0), memRm(.DS, .WORD, .RSI, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .WORD, .RDI, 0), memRm(.DS, .WORD, .RSI, 0), "66 a7"); } { testOp2(m16, .CMPS, memRm(.ES, .DWORD, .DI, 0), memRm(.DS, .DWORD, .SI, 0), "66 a7"); testOp2(m32, .CMPS, memRm(.ES, .DWORD, .DI, 0), memRm(.DS, .DWORD, .SI, 0), "67 a7"); testOp2(m64, .CMPS, memRm(.ES, .DWORD, .DI, 0), memRm(.DS, .DWORD, .SI, 0), AsmError.InvalidOperand); // testOp2(m16, .CMPS, memRm(.ES, .DWORD, .EDI, 0), memRm(.DS, .DWORD, .ESI, 0), "66 67 a7"); testOp2(m32, .CMPS, memRm(.ES, .DWORD, .EDI, 0), memRm(.DS, .DWORD, .ESI, 0), "a7"); testOp2(m64, .CMPS, memRm(.ES, .DWORD, .EDI, 0), memRm(.DS, .DWORD, .ESI, 0), "67 a7"); // testOp2(m16, .CMPS, memRm(.ES, .DWORD, .RDI, 0), memRm(.DS, .DWORD, .RSI, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .DWORD, .RDI, 0), memRm(.DS, .DWORD, .RSI, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .DWORD, .RDI, 0), memRm(.DS, .DWORD, .RSI, 0), "a7"); } { testOp2(m16, .CMPS, memRm(.ES, .QWORD, .DI, 0), memRm(.DS, .QWORD, .SI, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .QWORD, .DI, 0), memRm(.DS, .QWORD, .SI, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .QWORD, .DI, 0), memRm(.DS, .QWORD, .SI, 0), AsmError.InvalidOperand); // testOp2(m16, .CMPS, memRm(.ES, .QWORD, .EDI, 0), memRm(.DS, .QWORD, .ESI, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .QWORD, .EDI, 0), memRm(.DS, .QWORD, .ESI, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .QWORD, .EDI, 0), memRm(.DS, .QWORD, .ESI, 0), "67 48 a7"); // testOp2(m16, .CMPS, memRm(.ES, .QWORD, .RDI, 0), memRm(.DS, .QWORD, .RSI, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .QWORD, .RDI, 0), memRm(.DS, .QWORD, .RSI, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .QWORD, .RDI, 0), memRm(.DS, 
.QWORD, .RSI, 0), "48 a7"); } { testOp2(m16, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .BYTE, .EAX, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .BYTE, .EAX, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .BYTE, .EAX, 0), AsmError.InvalidOperand); // testOp2(m16, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .BYTE, .ESI, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .BYTE, .ESI, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .BYTE, .ESI, 0), AsmError.InvalidOperand); // testOp2(m16, .CMPS, memRm(.ES, .BYTE, .EDI, 0), memRm(.DS, .BYTE, .RSI, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .BYTE, .EDI, 0), memRm(.DS, .BYTE, .RSI, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .BYTE, .EDI, 0), memRm(.DS, .BYTE, .RSI, 0), AsmError.InvalidOperand); // testOp2(m16, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .BYTE, .RSI, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .BYTE, .RSI, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .BYTE, .RSI, 0), AsmError.InvalidOperand); // testOp2(m16, .CMPS, memRm(.ES, .BYTE, .ESI, 0), memRm(.DS, .BYTE, .EDI, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .BYTE, .ESI, 0), memRm(.DS, .BYTE, .EDI, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .BYTE, .ESI, 0), memRm(.DS, .BYTE, .EDI, 0), AsmError.InvalidOperand); // testOp2(m16, .CMPS, memRm(.ES, .WORD, .EDI, 0), memRm(.DS, .BYTE, .ESI, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .WORD, .EDI, 0), memRm(.DS, .BYTE, .ESI, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .WORD, .EDI, 0), memRm(.DS, .BYTE, .ESI, 0), AsmError.InvalidOperand); // testOp2(m16, .CMPS, memRm(.ES, .WORD, .EDI, 0), memRm(.DS, .DWORD, .ESI, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, 
memRm(.ES, .WORD, .EDI, 0), memRm(.DS, .DWORD, .ESI, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .WORD, .EDI, 0), memRm(.DS, .DWORD, .ESI, 0), AsmError.InvalidOperand); // testOp2(m16, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .XMM_WORD, .ESI, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .XMM_WORD, .ESI, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .XMM_WORD, .ESI, 0), AsmError.InvalidOperand); // testOp2(m16, .CMPS, memRm(.ES, .BYTE, .DI, 0), mem(.DS, .BYTE, 1, .BL, .AL, 0), AsmError.InvalidOperand); testOp2(m32, .CMPS, memRm(.ES, .BYTE, .DI, 0), mem(.DS, .BYTE, 1, .BL, .AL, 0), AsmError.InvalidOperand); testOp2(m64, .CMPS, memRm(.ES, .BYTE, .DI, 0), mem(.DS, .BYTE, 1, .BL, .AL, 0), AsmError.InvalidOperand); } } // MOVS BYTE ES:[(E/R)DI], BYTE DS:[(E/R)SI] // MOVS WORD ES:[(E/R)DI], WORD DS:[(E/R)SI] // MOVS DWORD ES:[(E/R)DI], DWORD DS:[(E/R)SI] // MOVS QWORD ES:[(E/R)DI], QWORD DS:[(E/R)SI] { testOp2(m64, .MOVS, memRm(.ES, .BYTE, .DI, 0), memRm(.DS, .BYTE, .SI, 0), AsmError.InvalidOperand); testOp2(m64, .MOVS, memRm(.ES, .BYTE, .EDI, 0), memRm(.DS, .BYTE, .ESI, 0), "67 a4"); testOp2(m64, .MOVS, memRm(.ES, .BYTE, .RDI, 0), memRm(.DS, .BYTE, .RSI, 0), "a4"); // testOp2(m64, .MOVS, memRm(.ES, .WORD, .DI, 0), memRm(.DS, .WORD, .SI, 0), AsmError.InvalidOperand); testOp2(m64, .MOVS, memRm(.ES, .WORD, .EDI, 0), memRm(.DS, .WORD, .ESI, 0), "66 67 a5"); testOp2(m64, .MOVS, memRm(.ES, .WORD, .RDI, 0), memRm(.DS, .WORD, .RSI, 0), "66 a5"); // testOp2(m64, .MOVS, memRm(.ES, .DWORD, .DI, 0), memRm(.DS, .DWORD, .SI, 0), AsmError.InvalidOperand); testOp2(m64, .MOVS, memRm(.ES, .DWORD, .EDI, 0), memRm(.DS, .DWORD, .ESI, 0), "67 a5"); testOp2(m64, .MOVS, memRm(.ES, .DWORD, .RDI, 0), memRm(.DS, .DWORD, .RSI, 0), "a5"); // testOp2(m64, .MOVS, memRm(.ES, .QWORD, .DI, 0), memRm(.DS, .QWORD, .SI, 0), AsmError.InvalidOperand); testOp2(m64, .MOVS, memRm(.ES, .QWORD, .EDI, 0), 
memRm(.DS, .QWORD, .ESI, 0), "67 48 a5"); testOp2(m64, .MOVS, memRm(.ES, .QWORD, .RDI, 0), memRm(.DS, .QWORD, .RSI, 0), "48 a5"); } // XLAT AL, BYTE DS:[(E/R)BX + AL] { testOp0(m32, .XLATB, "D7"); // testOp2(m16, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .BL, .AL, 0), AsmError.InvalidOperand); testOp2(m32, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .BL, .AL, 0), AsmError.InvalidOperand); testOp2(m64, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .BL, .AL, 0), AsmError.InvalidOperand); // testOp2(m16, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .BX, .AL, 0), "D7"); testOp2(m32, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .BX, .AL, 0), "67 D7"); testOp2(m64, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .BX, .AL, 0), AsmError.InvalidOperand); // testOp2(m16, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .EBX, .AL, 0), "67 D7"); testOp2(m32, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .EBX, .AL, 0), "D7"); testOp2(m64, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .EBX, .AL, 0), "67 D7"); // testOp2(m16, .XLAT, reg(.AL), mem(.DefaultSeg, .BYTE, 1, .RBX, .AL, 0), AsmError.InvalidOperand); testOp2(m32, .XLAT, reg(.AL), mem(.DefaultSeg, .BYTE, 1, .RBX, .AL, 0), AsmError.InvalidOperand); testOp2(m64, .XLAT, reg(.AL), mem(.DefaultSeg, .BYTE, 1, .RBX, .AL, 0), "D7"); // testOp2(m16, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .AL, .BL, 0), AsmError.InvalidOperand); testOp2(m32, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .AL, .BL, 0), AsmError.InvalidOperand); testOp2(m64, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .AL, .BL, 0), AsmError.InvalidOperand); // testOp2(m16, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .AL, .BX, 0), "D7"); testOp2(m32, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .AL, .BX, 0), "67 D7"); testOp2(m64, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .AL, .BX, 0), AsmError.InvalidOperand); // testOp2(m16, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .AL, .EBX, 0), "67 D7"); testOp2(m32, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .AL, .EBX, 0), "D7"); testOp2(m64, .XLAT, reg(.AL), mem(.DS, .BYTE, 1, .AL, .EBX, 0), "67 D7"); // testOp2(m16, .XLAT, reg(.AL), mem(.DefaultSeg, .BYTE, 1, .AL, .RBX, 
0), AsmError.InvalidOperand); testOp2(m32, .XLAT, reg(.AL), mem(.DefaultSeg, .BYTE, 1, .AL, .RBX, 0), AsmError.InvalidOperand); testOp2(m64, .XLAT, reg(.AL), mem(.DefaultSeg, .BYTE, 1, .AL, .RBX, 0), "D7"); } }
src/x86/tests/special_mem.zig
const std = @import("std");
const build_options = @import("build_options");
const dcommon = @import("../common/dcommon.zig");

/// MMIO base address of the UART used for pre-MMU debug output.
/// Set once by `init` from the bootloader-provided entry data.
pub var base: ?u64 = null;
/// UART register access width in bytes (1 = byte registers, 4 = word
/// registers). Set once by `init`.
pub var width: ?u3 = null;

/// Record the UART location from `entry_data`, then dump the entry-data
/// fields over the UART as early-boot diagnostics. Must be called before
/// any other function in this module (they unwrap `base`/`width`).
pub fn init(entry_data: *dcommon.EntryData) void {
    base = entry_data.uart_base;
    width = entry_data.uart_width;

    carefully(.{ "\r\n\r\ndainkrnl ", build_options.version, " pre-MMU stage on ", build_options.board, "\r\n" });
    carefully(.{ "entry_data (", @ptrToInt(entry_data), ")\r\n" });
    carefully(.{ "memory_map: ", @ptrToInt(entry_data.memory_map), "\r\n" });
    carefully(.{ "memory_map_size: ", entry_data.memory_map_size, "\r\n" });
    carefully(.{ "descriptor_size: ", entry_data.descriptor_size, "\r\n" });
    carefully(.{ "dtb_ptr: ", @ptrToInt(entry_data.dtb_ptr), "\r\n" });
    carefully(.{ "dtb_len: ", entry_data.dtb_len, "\r\n" });
    carefully(.{ "conventional_start: ", entry_data.conventional_start, "\r\n" });
    carefully(.{ "conventional_bytes: ", entry_data.conventional_bytes, "\r\n" });
    carefully(.{ "fb: ", @ptrToInt(entry_data.fb), "\r\n" });
    carefully(.{ "fb_vert: ", entry_data.fb_vert, "\r\n" });
    carefully(.{ "fb_horiz: ", entry_data.fb_horiz, "\r\n" });
    carefully(.{ "uart_base: ", entry_data.uart_base, "\r\n" });
}

/// Minimal byte sink over a memory-mapped UART data register.
const Writer = struct {
    base: u64,
    width: u3,

    /// Emit one byte with a volatile store of the configured width.
    fn w(self: Writer, c: u8) void {
        switch (self.width) {
            1 => @intToPtr(*volatile u8, self.base).* = c,
            4 => @intToPtr(*volatile u32, self.base).* = c,
            // `init` only ever stores 1 or 4 here — TODO confirm against
            // the bootloader's uart_width values.
            else => unreachable,
        }
    }
};

fn writerFor(b: u64, w: u3) Writer {
    return .{ .base = b, .width = w };
}

/// Crude delay between UART writes so output isn't lost before the MMU
/// and proper drivers are up.
fn busyLoop() void {
    var i: usize = 0;
    const loop_count: usize = 100;
    while (i < loop_count) : (i += 1) {
        asm volatile ("nop");
    }
}

/// Write a tuple of parts (see `carefullyAt`) to the configured UART.
/// Panics (unwrap of null) if `init` has not run.
pub fn carefully(parts: anytype) void {
    carefullyAt(writerFor(base.?, width.?), parts);
}

/// Write `n` as angle-bracketed hex, e.g. 0x2a -> "<2a>".
pub fn hex(n: u64) void {
    const writer = writerFor(base.?, width.?);
    writer.w('<');
    busyLoop();
    // Delegate the digit loop to the shared helper instead of duplicating
    // it here (the zero case is handled there too).
    writeCarefullyHex(writer, n);
    writer.w('>');
    busyLoop();
}

/// Markers that change how the *next* tuple element is interpreted by
/// `carefullyAt`.
pub const Escape = enum {
    /// Next element is a runtime `[]const u8` to be written verbatim.
    Runtime,
    /// Next element is a single `u8` to be written as-is.
    Char,
};

/// Comptime-dispatched formatter: each tuple element is written according
/// to its type — string-likes verbatim, unsigned ints as "0x…" hex, and
/// `Escape` values alter the interpretation of the following element.
/// Unsupported element types are a compile error.
fn carefullyAt(writer: Writer, parts: anytype) void {
    comptime var next_escape: ?Escape = null;
    inline for (std.meta.fields(@TypeOf(parts))) |info, i| {
        if (info.field_type == Escape) {
            next_escape = parts[i];
        } else if (next_escape) |escape| {
            next_escape = null;
            switch (escape) {
                .Runtime => writeRuntime(writer, parts[i]),
                .Char => {
                    writer.w(parts[i]);
                    busyLoop();
                },
            }
        } else if (comptime std.meta.trait.isPtrTo(.Array)(info.field_type) or comptime std.meta.trait.isSliceOf(.Int)(info.field_type)) {
            writeCarefully(writer, parts[i]);
        } else if (comptime std.meta.trait.isUnsignedInt(info.field_type)) {
            writeCarefully(writer, "0x");
            writeCarefullyHex(writer, parts[i]);
        } else {
            @compileError("what do I do with this? " ++ @typeName(info.field_type));
        }
    }
}

/// Write a runtime byte slice, pacing each byte with `busyLoop`.
fn writeRuntime(writer: Writer, msg: []const u8) void {
    for (msg) |c| {
        writer.w(c);
        busyLoop();
    }
}

/// Write a comptime-known byte slice, pacing each byte with `busyLoop`.
fn writeCarefully(writer: Writer, comptime msg: []const u8) void {
    inline for (msg) |c| {
        writer.w(c);
        busyLoop();
    }
}

/// Write `n` in lowercase hex with no prefix; "0" for zero.
fn writeCarefullyHex(writer: Writer, n: u64) void {
    if (n == 0) {
        writer.w('0');
        busyLoop();
        return;
    }
    // Count the hex digits of n.
    var digits: usize = 0;
    var c = n;
    while (c > 0) : (c /= 16) {
        digits += 1;
    }
    c = n;
    // Highest power of 16 not exceeding n. powi cannot overflow here
    // (digits <= 16, so the exponent is at most 15); `catch 0` keeps the
    // signature infallible regardless.
    var pow: usize = std.math.powi(u64, 16, digits - 1) catch 0;
    while (pow > 0) : (pow /= 16) {
        // digit is in [0, 15] by construction (c < 16 * pow on every
        // iteration). The original bounds check compared `>= 0` (always
        // true for an unsigned value) and `<= 16` (off by one: 16 would
        // have printed 'g').
        const digit = c / pow;
        if (digit <= 9) {
            writer.w('0' + @truncate(u8, digit));
        } else if (digit <= 15) {
            writer.w('a' + @truncate(u8, digit) - 10);
        } else {
            // Unreachable by construction; kept as a defensive fallback.
            writer.w('?');
        }
        busyLoop();
        c -= (digit * pow);
    }
}
dainkrnl/src/hw/entry_uart.zig
pub const SI_TEMPORARY = @as(u32, 2147483648); pub const SUBSINFO_ALLFLAGS = @as(u32, 61311); pub const RS_READY = @as(u32, 1); pub const RS_SUSPENDED = @as(u32, 2); pub const RS_UPDATING = @as(u32, 4); pub const RS_SUSPENDONIDLE = @as(u32, 65536); pub const RS_MAYBOTHERUSER = @as(u32, 131072); pub const RS_COMPLETED = @as(u32, 2147483648); pub const SUBSMGRUPDATE_MINIMIZE = @as(u32, 1); pub const SUBSMGRUPDATE_MASK = @as(u32, 1); pub const SUBSMGRENUM_TEMP = @as(u32, 1); pub const SUBSMGRENUM_MASK = @as(u32, 1); pub const INET_E_AGENT_MAX_SIZE_EXCEEDED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2146693248)); pub const INET_S_AGENT_PART_FAIL = @import("../zig.zig").typedConst(HRESULT, @as(i32, 790401)); pub const INET_E_AGENT_CACHE_SIZE_EXCEEDED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2146693246)); pub const INET_E_AGENT_CONNECTION_FAILED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2146693245)); pub const INET_E_SCHEDULED_UPDATES_DISABLED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2146693244)); pub const INET_E_SCHEDULED_UPDATES_RESTRICTED = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2146693243)); pub const INET_E_SCHEDULED_UPDATE_INTERVAL = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2146693242)); pub const INET_E_SCHEDULED_EXCLUDE_RANGE = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2146693241)); pub const INET_E_AGENT_EXCEEDING_CACHE_SIZE = @import("../zig.zig").typedConst(HRESULT, @as(i32, -2146693232)); pub const INET_S_AGENT_INCREASED_CACHE_SIZE = @import("../zig.zig").typedConst(HRESULT, @as(i32, 790416)); pub const OLEDBVER = @as(u32, 624); pub const DB_NULL_HACCESSOR = @as(u32, 0); pub const DB_INVALID_HACCESSOR = @as(u32, 0); pub const DB_NULL_HROW = @as(u32, 0); pub const DB_NULL_HCHAPTER = @as(u32, 0); pub const DB_INVALID_HCHAPTER = @as(u32, 0); pub const STD_BOOKMARKLENGTH = @as(u32, 1); pub const DBPROPVAL_BMK_NUMERIC = @as(i32, 1); pub const DBPROPVAL_BMK_KEY = @as(i32, 2); pub 
const DBPROPVAL_CL_START = @as(i32, 1); pub const DBPROPVAL_CL_END = @as(i32, 2); pub const DBPROPVAL_CU_DML_STATEMENTS = @as(i32, 1); pub const DBPROPVAL_CU_TABLE_DEFINITION = @as(i32, 2); pub const DBPROPVAL_CU_INDEX_DEFINITION = @as(i32, 4); pub const DBPROPVAL_CU_PRIVILEGE_DEFINITION = @as(i32, 8); pub const DBPROPVAL_CD_NOTNULL = @as(i32, 1); pub const DBPROPVAL_CB_NULL = @as(i32, 1); pub const DBPROPVAL_CB_NON_NULL = @as(i32, 2); pub const DBPROPVAL_FU_NOT_SUPPORTED = @as(i32, 1); pub const DBPROPVAL_FU_COLUMN = @as(i32, 2); pub const DBPROPVAL_FU_TABLE = @as(i32, 4); pub const DBPROPVAL_FU_CATALOG = @as(i32, 8); pub const DBPROPVAL_GB_NOT_SUPPORTED = @as(i32, 1); pub const DBPROPVAL_GB_EQUALS_SELECT = @as(i32, 2); pub const DBPROPVAL_GB_CONTAINS_SELECT = @as(i32, 4); pub const DBPROPVAL_GB_NO_RELATION = @as(i32, 8); pub const DBPROPVAL_HT_DIFFERENT_CATALOGS = @as(i32, 1); pub const DBPROPVAL_HT_DIFFERENT_PROVIDERS = @as(i32, 2); pub const DBPROPVAL_IC_UPPER = @as(i32, 1); pub const DBPROPVAL_IC_LOWER = @as(i32, 2); pub const DBPROPVAL_IC_SENSITIVE = @as(i32, 4); pub const DBPROPVAL_IC_MIXED = @as(i32, 8); pub const DBPROPVAL_LM_NONE = @as(i32, 1); pub const DBPROPVAL_LM_READ = @as(i32, 2); pub const DBPROPVAL_LM_INTENT = @as(i32, 4); pub const DBPROPVAL_LM_RITE = @as(i32, 8); pub const DBPROPVAL_NP_OKTODO = @as(i32, 1); pub const DBPROPVAL_NP_ABOUTTODO = @as(i32, 2); pub const DBPROPVAL_NP_SYNCHAFTER = @as(i32, 4); pub const DBPROPVAL_NP_FAILEDTODO = @as(i32, 8); pub const DBPROPVAL_NP_DIDEVENT = @as(i32, 16); pub const DBPROPVAL_NC_END = @as(i32, 1); pub const DBPROPVAL_NC_HIGH = @as(i32, 2); pub const DBPROPVAL_NC_LOW = @as(i32, 4); pub const DBPROPVAL_NC_START = @as(i32, 8); pub const DBPROPVAL_OO_BLOB = @as(i32, 1); pub const DBPROPVAL_OO_IPERSIST = @as(i32, 2); pub const DBPROPVAL_CB_DELETE = @as(i32, 1); pub const DBPROPVAL_CB_PRESERVE = @as(i32, 2); pub const DBPROPVAL_SU_DML_STATEMENTS = @as(i32, 1); pub const DBPROPVAL_SU_TABLE_DEFINITION = @as(i32, 
2); pub const DBPROPVAL_SU_INDEX_DEFINITION = @as(i32, 4); pub const DBPROPVAL_SU_PRIVILEGE_DEFINITION = @as(i32, 8); pub const DBPROPVAL_SQ_CORRELATEDSUBQUERIES = @as(i32, 1); pub const DBPROPVAL_SQ_COMPARISON = @as(i32, 2); pub const DBPROPVAL_SQ_EXISTS = @as(i32, 4); pub const DBPROPVAL_SQ_IN = @as(i32, 8); pub const DBPROPVAL_SQ_QUANTIFIED = @as(i32, 16); pub const DBPROPVAL_SQ_TABLE = @as(i32, 32); pub const DBPROPVAL_SS_ISEQUENTIALSTREAM = @as(i32, 1); pub const DBPROPVAL_SS_ISTREAM = @as(i32, 2); pub const DBPROPVAL_SS_ISTORAGE = @as(i32, 4); pub const DBPROPVAL_SS_ILOCKBYTES = @as(i32, 8); pub const DBPROPVAL_TI_CHAOS = @as(i32, 16); pub const DBPROPVAL_TI_READUNCOMMITTED = @as(i32, 256); pub const DBPROPVAL_TI_BROWSE = @as(i32, 256); pub const DBPROPVAL_TI_CURSORSTABILITY = @as(i32, 4096); pub const DBPROPVAL_TI_READCOMMITTED = @as(i32, 4096); pub const DBPROPVAL_TI_REPEATABLEREAD = @as(i32, 65536); pub const DBPROPVAL_TI_SERIALIZABLE = @as(i32, 1048576); pub const DBPROPVAL_TI_ISOLATED = @as(i32, 1048576); pub const DBPROPVAL_TR_COMMIT_DC = @as(i32, 1); pub const DBPROPVAL_TR_COMMIT = @as(i32, 2); pub const DBPROPVAL_TR_COMMIT_NO = @as(i32, 4); pub const DBPROPVAL_TR_ABORT_DC = @as(i32, 8); pub const DBPROPVAL_TR_ABORT = @as(i32, 16); pub const DBPROPVAL_TR_ABORT_NO = @as(i32, 32); pub const DBPROPVAL_TR_DONTCARE = @as(i32, 64); pub const DBPROPVAL_TR_BOTH = @as(i32, 128); pub const DBPROPVAL_TR_NONE = @as(i32, 256); pub const DBPROPVAL_TR_OPTIMISTIC = @as(i32, 512); pub const DBPROPVAL_RT_FREETHREAD = @as(i32, 1); pub const DBPROPVAL_RT_APTMTTHREAD = @as(i32, 2); pub const DBPROPVAL_RT_SINGLETHREAD = @as(i32, 4); pub const DBPROPVAL_UP_CHANGE = @as(i32, 1); pub const DBPROPVAL_UP_DELETE = @as(i32, 2); pub const DBPROPVAL_UP_INSERT = @as(i32, 4); pub const DBPROPVAL_SQL_NONE = @as(i32, 0); pub const DBPROPVAL_SQL_ODBC_MINIMUM = @as(i32, 1); pub const DBPROPVAL_SQL_ODBC_CORE = @as(i32, 2); pub const DBPROPVAL_SQL_ODBC_EXTENDED = @as(i32, 4); pub const 
DBPROPVAL_SQL_ANSI89_IEF = @as(i32, 8); pub const DBPROPVAL_SQL_ANSI92_ENTRY = @as(i32, 16); pub const DBPROPVAL_SQL_FIPS_TRANSITIONAL = @as(i32, 32); pub const DBPROPVAL_SQL_ANSI92_INTERMEDIATE = @as(i32, 64); pub const DBPROPVAL_SQL_ANSI92_FULL = @as(i32, 128); pub const DBPROPVAL_SQL_ESCAPECLAUSES = @as(i32, 256); pub const DBPROPVAL_IT_BTREE = @as(i32, 1); pub const DBPROPVAL_IT_HASH = @as(i32, 2); pub const DBPROPVAL_IT_CONTENT = @as(i32, 3); pub const DBPROPVAL_IT_OTHER = @as(i32, 4); pub const DBPROPVAL_IN_DISALLOWNULL = @as(i32, 1); pub const DBPROPVAL_IN_IGNORENULL = @as(i32, 2); pub const DBPROPVAL_IN_IGNOREANYNULL = @as(i32, 4); pub const DBPROPVAL_TC_NONE = @as(i32, 0); pub const DBPROPVAL_TC_DML = @as(i32, 1); pub const DBPROPVAL_TC_DDL_COMMIT = @as(i32, 2); pub const DBPROPVAL_TC_DDL_IGNORE = @as(i32, 4); pub const DBPROPVAL_TC_ALL = @as(i32, 8); pub const DBPROPVAL_OA_NOTSUPPORTED = @as(i32, 1); pub const DBPROPVAL_OA_ATEXECUTE = @as(i32, 2); pub const DBPROPVAL_OA_ATROWRELEASE = @as(i32, 4); pub const DBPROPVAL_MR_NOTSUPPORTED = @as(i32, 0); pub const DBPROPVAL_MR_SUPPORTED = @as(i32, 1); pub const DBPROPVAL_MR_CONCURRENT = @as(i32, 2); pub const DBPROPVAL_PT_GUID_NAME = @as(i32, 1); pub const DBPROPVAL_PT_GUID_PROPID = @as(i32, 2); pub const DBPROPVAL_PT_NAME = @as(i32, 4); pub const DBPROPVAL_PT_GUID = @as(i32, 8); pub const DBPROPVAL_PT_PROPID = @as(i32, 16); pub const DBPROPVAL_PT_PGUID_NAME = @as(i32, 32); pub const DBPROPVAL_PT_PGUID_PROPID = @as(i32, 64); pub const DBPROPVAL_NT_SINGLEROW = @as(i32, 1); pub const DBPROPVAL_NT_MULTIPLEROWS = @as(i32, 2); pub const DBPROPVAL_ASYNCH_INITIALIZE = @as(i32, 1); pub const DBPROPVAL_ASYNCH_SEQUENTIALPOPULATION = @as(i32, 2); pub const DBPROPVAL_ASYNCH_RANDOMPOPULATION = @as(i32, 4); pub const DBPROPVAL_OP_EQUAL = @as(i32, 1); pub const DBPROPVAL_OP_RELATIVE = @as(i32, 2); pub const DBPROPVAL_OP_STRING = @as(i32, 4); pub const DBPROPVAL_CO_EQUALITY = @as(i32, 1); pub const DBPROPVAL_CO_STRING = 
@as(i32, 2); pub const DBPROPVAL_CO_CASESENSITIVE = @as(i32, 4); pub const DBPROPVAL_CO_CASEINSENSITIVE = @as(i32, 8); pub const DBPROPVAL_CO_CONTAINS = @as(i32, 16); pub const DBPROPVAL_CO_BEGINSWITH = @as(i32, 32); pub const DBPROPVAL_ASYNCH_BACKGROUNDPOPULATION = @as(i32, 8); pub const DBPROPVAL_ASYNCH_PREPOPULATE = @as(i32, 16); pub const DBPROPVAL_ASYNCH_POPULATEONDEMAND = @as(i32, 32); pub const DBPROPVAL_LM_SINGLEROW = @as(i32, 2); pub const DBPROPVAL_SQL_SUBMINIMUM = @as(i32, 512); pub const DBPROPVAL_DST_TDP = @as(i32, 1); pub const DBPROPVAL_DST_MDP = @as(i32, 2); pub const DBPROPVAL_DST_TDPANDMDP = @as(i32, 3); pub const MDPROPVAL_AU_UNSUPPORTED = @as(i32, 0); pub const MDPROPVAL_AU_UNCHANGED = @as(i32, 1); pub const MDPROPVAL_AU_UNKNOWN = @as(i32, 2); pub const MDPROPVAL_MF_WITH_CALCMEMBERS = @as(i32, 1); pub const MDPROPVAL_MF_WITH_NAMEDSETS = @as(i32, 2); pub const MDPROPVAL_MF_CREATE_CALCMEMBERS = @as(i32, 4); pub const MDPROPVAL_MF_CREATE_NAMEDSETS = @as(i32, 8); pub const MDPROPVAL_MF_SCOPE_SESSION = @as(i32, 16); pub const MDPROPVAL_MF_SCOPE_GLOBAL = @as(i32, 32); pub const MDPROPVAL_MMF_COUSIN = @as(i32, 1); pub const MDPROPVAL_MMF_PARALLELPERIOD = @as(i32, 2); pub const MDPROPVAL_MMF_OPENINGPERIOD = @as(i32, 4); pub const MDPROPVAL_MMF_CLOSINGPERIOD = @as(i32, 8); pub const MDPROPVAL_MNF_MEDIAN = @as(i32, 1); pub const MDPROPVAL_MNF_VAR = @as(i32, 2); pub const MDPROPVAL_MNF_STDDEV = @as(i32, 4); pub const MDPROPVAL_MNF_RANK = @as(i32, 8); pub const MDPROPVAL_MNF_AGGREGATE = @as(i32, 16); pub const MDPROPVAL_MNF_COVARIANCE = @as(i32, 32); pub const MDPROPVAL_MNF_CORRELATION = @as(i32, 64); pub const MDPROPVAL_MNF_LINREGSLOPE = @as(i32, 128); pub const MDPROPVAL_MNF_LINREGVARIANCE = @as(i32, 256); pub const MDPROPVAL_MNF_LINREG2 = @as(i32, 512); pub const MDPROPVAL_MNF_LINREGPOINT = @as(i32, 1024); pub const MDPROPVAL_MNF_DRILLDOWNLEVEL = @as(i32, 2048); pub const MDPROPVAL_MNF_DRILLDOWNMEMBERTOP = @as(i32, 4096); pub const 
MDPROPVAL_MNF_DRILLDOWNMEMBERBOTTOM = @as(i32, 8192); pub const MDPROPVAL_MNF_DRILLDOWNLEVELTOP = @as(i32, 16384); pub const MDPROPVAL_MNF_DRILLDOWNLEVELBOTTOM = @as(i32, 32768); pub const MDPROPVAL_MNF_DRILLUPMEMBER = @as(i32, 65536); pub const MDPROPVAL_MNF_DRILLUPLEVEL = @as(i32, 131072); pub const MDPROPVAL_MSF_TOPPERCENT = @as(i32, 1); pub const MDPROPVAL_MSF_BOTTOMPERCENT = @as(i32, 2); pub const MDPROPVAL_MSF_TOPSUM = @as(i32, 4); pub const MDPROPVAL_MSF_BOTTOMSUM = @as(i32, 8); pub const MDPROPVAL_MSF_PERIODSTODATE = @as(i32, 16); pub const MDPROPVAL_MSF_LASTPERIODS = @as(i32, 32); pub const MDPROPVAL_MSF_YTD = @as(i32, 64); pub const MDPROPVAL_MSF_QTD = @as(i32, 128); pub const MDPROPVAL_MSF_MTD = @as(i32, 256); pub const MDPROPVAL_MSF_WTD = @as(i32, 512); pub const MDPROPVAL_MSF_DRILLDOWNMEMBBER = @as(i32, 1024); pub const MDPROPVAL_MSF_DRILLDOWNLEVEL = @as(i32, 2048); pub const MDPROPVAL_MSF_DRILLDOWNMEMBERTOP = @as(i32, 4096); pub const MDPROPVAL_MSF_DRILLDOWNMEMBERBOTTOM = @as(i32, 8192); pub const MDPROPVAL_MSF_DRILLDOWNLEVELTOP = @as(i32, 16384); pub const MDPROPVAL_MSF_DRILLDOWNLEVELBOTTOM = @as(i32, 32768); pub const MDPROPVAL_MSF_DRILLUPMEMBER = @as(i32, 65536); pub const MDPROPVAL_MSF_DRILLUPLEVEL = @as(i32, 131072); pub const MDPROPVAL_MSF_TOGGLEDRILLSTATE = @as(i32, 262144); pub const MDPROPVAL_MD_SELF = @as(i32, 1); pub const MDPROPVAL_MD_BEFORE = @as(i32, 2); pub const MDPROPVAL_MD_AFTER = @as(i32, 4); pub const MDPROPVAL_MSC_LESSTHAN = @as(i32, 1); pub const MDPROPVAL_MSC_GREATERTHAN = @as(i32, 2); pub const MDPROPVAL_MSC_LESSTHANEQUAL = @as(i32, 4); pub const MDPROPVAL_MSC_GREATERTHANEQUAL = @as(i32, 8); pub const MDPROPVAL_MC_SINGLECASE = @as(i32, 1); pub const MDPROPVAL_MC_SEARCHEDCASE = @as(i32, 2); pub const MDPROPVAL_MOQ_OUTERREFERENCE = @as(i32, 1); pub const MDPROPVAL_MOQ_DATASOURCE_CUBE = @as(i32, 1); pub const MDPROPVAL_MOQ_CATALOG_CUBE = @as(i32, 2); pub const MDPROPVAL_MOQ_SCHEMA_CUBE = @as(i32, 4); pub const 
MDPROPVAL_MOQ_CUBE_DIM = @as(i32, 8); pub const MDPROPVAL_MOQ_DIM_HIER = @as(i32, 16); pub const MDPROPVAL_MOQ_DIMHIER_LEVEL = @as(i32, 32); pub const MDPROPVAL_MOQ_LEVEL_MEMBER = @as(i32, 64); pub const MDPROPVAL_MOQ_MEMBER_MEMBER = @as(i32, 128); pub const MDPROPVAL_MOQ_DIMHIER_MEMBER = @as(i32, 256); pub const MDPROPVAL_FS_FULL_SUPPORT = @as(i32, 1); pub const MDPROPVAL_FS_GENERATED_COLUMN = @as(i32, 2); pub const MDPROPVAL_FS_GENERATED_DIMENSION = @as(i32, 3); pub const MDPROPVAL_FS_NO_SUPPORT = @as(i32, 4); pub const MDPROPVAL_NL_NAMEDLEVELS = @as(i32, 1); pub const MDPROPVAL_NL_NUMBEREDLEVELS = @as(i32, 2); pub const MDPROPVAL_MJC_SINGLECUBE = @as(i32, 1); pub const MDPROPVAL_MJC_MULTICUBES = @as(i32, 2); pub const MDPROPVAL_MJC_IMPLICITCUBE = @as(i32, 4); pub const MDPROPVAL_RR_NORANGEROWSET = @as(i32, 1); pub const MDPROPVAL_RR_READONLY = @as(i32, 2); pub const MDPROPVAL_RR_UPDATE = @as(i32, 4); pub const MDPROPVAL_MS_MULTIPLETUPLES = @as(i32, 1); pub const MDPROPVAL_MS_SINGLETUPLE = @as(i32, 2); pub const MDPROPVAL_NME_ALLDIMENSIONS = @as(i32, 0); pub const MDPROPVAL_NME_MEASURESONLY = @as(i32, 1); pub const DBPROPVAL_AO_SEQUENTIAL = @as(i32, 0); pub const DBPROPVAL_AO_SEQUENTIALSTORAGEOBJECTS = @as(i32, 1); pub const DBPROPVAL_AO_RANDOM = @as(i32, 2); pub const DBPROPVAL_BD_ROWSET = @as(i32, 0); pub const DBPROPVAL_BD_INTRANSACTION = @as(i32, 1); pub const DBPROPVAL_BD_XTRANSACTION = @as(i32, 2); pub const DBPROPVAL_BD_REORGANIZATION = @as(i32, 3); pub const DBPROPVAL_BO_NOLOG = @as(i32, 0); pub const DBPROPVAL_BO_NOINDEXUPDATE = @as(i32, 1); pub const DBPROPVAL_BO_REFINTEGRITY = @as(i32, 2); pub const DBPROPVAL_STGM_DIRECT = @as(u32, 65536); pub const DBPROPVAL_STGM_TRANSACTED = @as(u32, 131072); pub const DBPROPVAL_STGM_CONVERT = @as(u32, 262144); pub const DBPROPVAL_STGM_FAILIFTHERE = @as(u32, 524288); pub const DBPROPVAL_STGM_PRIORITY = @as(u32, 1048576); pub const DBPROPVAL_STGM_DELETEONRELEASE = @as(u32, 2097152); pub const DBPROPVAL_GB_COLLATE = 
@as(i32, 16); pub const DBPROPVAL_CS_UNINITIALIZED = @as(i32, 0); pub const DBPROPVAL_CS_INITIALIZED = @as(i32, 1); pub const DBPROPVAL_CS_COMMUNICATIONFAILURE = @as(i32, 2); pub const DBPROPVAL_RD_RESETALL = @as(i32, -1); pub const DBPROPVAL_OS_RESOURCEPOOLING = @as(i32, 1); pub const DBPROPVAL_OS_TXNENLISTMENT = @as(i32, 2); pub const DBPROPVAL_OS_CLIENTCURSOR = @as(i32, 4); pub const DBPROPVAL_OS_ENABLEALL = @as(i32, -1); pub const DBPROPVAL_BI_CROSSROWSET = @as(i32, 1); pub const MDPROPVAL_NL_SCHEMAONLY = @as(i32, 4); pub const DBPROPVAL_OS_DISABLEALL = @as(i32, 0); pub const DBPROPVAL_OO_ROWOBJECT = @as(i32, 4); pub const DBPROPVAL_OO_SCOPED = @as(i32, 8); pub const DBPROPVAL_OO_DIRECTBIND = @as(i32, 16); pub const DBPROPVAL_DST_DOCSOURCE = @as(i32, 4); pub const DBPROPVAL_GU_NOTSUPPORTED = @as(i32, 1); pub const DBPROPVAL_GU_SUFFIX = @as(i32, 2); pub const DB_BINDFLAGS_DELAYFETCHCOLUMNS = @as(i32, 1); pub const DB_BINDFLAGS_DELAYFETCHSTREAM = @as(i32, 2); pub const DB_BINDFLAGS_RECURSIVE = @as(i32, 4); pub const DB_BINDFLAGS_OUTPUT = @as(i32, 8); pub const DB_BINDFLAGS_COLLECTION = @as(i32, 16); pub const DB_BINDFLAGS_OPENIFEXISTS = @as(i32, 32); pub const DB_BINDFLAGS_OVERWRITE = @as(i32, 64); pub const DB_BINDFLAGS_ISSTRUCTUREDDOCUMENT = @as(i32, 128); pub const DBPROPVAL_ORS_TABLE = @as(i32, 0); pub const DBPROPVAL_ORS_INDEX = @as(i32, 1); pub const DBPROPVAL_ORS_INTEGRATEDINDEX = @as(i32, 2); pub const DBPROPVAL_TC_DDL_LOCK = @as(i32, 16); pub const DBPROPVAL_ORS_STOREDPROC = @as(i32, 4); pub const DBPROPVAL_IN_ALLOWNULL = @as(i32, 0); pub const DBPROPVAL_OO_SINGLETON = @as(i32, 32); pub const DBPROPVAL_OS_AGR_AFTERSESSION = @as(i32, 8); pub const DBPROPVAL_CM_TRANSACTIONS = @as(i32, 1); pub const DBPROPVAL_TS_CARDINALITY = @as(i32, 1); pub const DBPROPVAL_TS_HISTOGRAM = @as(i32, 2); pub const DBPROPVAL_ORS_HISTOGRAM = @as(i32, 8); pub const MDPROPVAL_VISUAL_MODE_DEFAULT = @as(i32, 0); pub const MDPROPVAL_VISUAL_MODE_VISUAL = @as(i32, 1); pub const 
MDPROPVAL_VISUAL_MODE_VISUAL_OFF = @as(i32, 2);
// DB_IMP_LEVEL_*: impersonation levels (value increases with delegated authority).
pub const DB_IMP_LEVEL_ANONYMOUS = @as(u32, 0);
pub const DB_IMP_LEVEL_IDENTIFY = @as(u32, 1);
pub const DB_IMP_LEVEL_IMPERSONATE = @as(u32, 2);
pub const DB_IMP_LEVEL_DELEGATE = @as(u32, 3);
// DBPROMPT_*: prompting behavior values.
pub const DBPROMPT_PROMPT = @as(u32, 1);
pub const DBPROMPT_COMPLETE = @as(u32, 2);
pub const DBPROMPT_COMPLETEREQUIRED = @as(u32, 3);
pub const DBPROMPT_NOPROMPT = @as(u32, 4);
// DB_PROT_LEVEL_*: connection protection levels.
pub const DB_PROT_LEVEL_NONE = @as(u32, 0);
pub const DB_PROT_LEVEL_CONNECT = @as(u32, 1);
pub const DB_PROT_LEVEL_CALL = @as(u32, 2);
pub const DB_PROT_LEVEL_PKT = @as(u32, 3);
pub const DB_PROT_LEVEL_PKT_INTEGRITY = @as(u32, 4);
pub const DB_PROT_LEVEL_PKT_PRIVACY = @as(u32, 5);
// DB_MODE_*: access/share-mode bit flags.
// Note: READWRITE == READ | WRITE, SHARE_EXCLUSIVE == SHARE_DENY_READ | SHARE_DENY_WRITE.
pub const DB_MODE_READ = @as(u32, 1);
pub const DB_MODE_WRITE = @as(u32, 2);
pub const DB_MODE_READWRITE = @as(u32, 3);
pub const DB_MODE_SHARE_DENY_READ = @as(u32, 4);
pub const DB_MODE_SHARE_DENY_WRITE = @as(u32, 8);
pub const DB_MODE_SHARE_EXCLUSIVE = @as(u32, 12);
pub const DB_MODE_SHARE_DENY_NONE = @as(u32, 16);
pub const DBCOMPUTEMODE_COMPUTED = @as(u32, 1);
pub const DBCOMPUTEMODE_DYNAMIC = @as(u32, 2);
pub const DBCOMPUTEMODE_NOTCOMPUTED = @as(u32, 3);
pub const DBPROPVAL_DF_INITIALLY_DEFERRED = @as(u32, 1);
pub const DBPROPVAL_DF_INITIALLY_IMMEDIATE = @as(u32, 2);
pub const DBPROPVAL_DF_NOT_DEFERRABLE = @as(u32, 3);
// DBPARAMTYPE_*: procedure-parameter directions.
pub const DBPARAMTYPE_INPUT = @as(u32, 1);
pub const DBPARAMTYPE_INPUTOUTPUT = @as(u32, 2);
pub const DBPARAMTYPE_OUTPUT = @as(u32, 3);
pub const DBPARAMTYPE_RETURNVALUE = @as(u32, 4);
pub const DB_PT_UNKNOWN = @as(u32, 1);
pub const DB_PT_PROCEDURE = @as(u32, 2);
pub const DB_PT_FUNCTION = @as(u32, 3);
pub const DB_REMOTE = @as(u32, 1);
pub const DB_LOCAL_SHARED = @as(u32, 2);
pub const DB_LOCAL_EXCLUSIVE = @as(u32, 3);
pub const DB_COLLATION_ASC = @as(u32, 1);
pub const DB_COLLATION_DESC = @as(u32, 2);
// Column searchability levels.
pub const DB_UNSEARCHABLE = @as(u32, 1);
pub const DB_LIKE_ONLY = @as(u32, 2);
pub const DB_ALL_EXCEPT_LIKE = @as(u32, 3);
pub const DB_SEARCHABLE = @as(u32, 4);
// MDTREEOP_*: member-tree traversal flags (combinable bit mask: 1..32).
pub const MDTREEOP_CHILDREN = @as(u32, 1);
pub const MDTREEOP_SIBLINGS = @as(u32, 2);
pub const MDTREEOP_PARENT = @as(u32, 4);
pub const MDTREEOP_SELF = @as(u32, 8);
pub const MDTREEOP_DESCENDANTS = @as(u32, 16);
pub const MDTREEOP_ANCESTORS = @as(u32, 32);
pub const MD_DIMTYPE_UNKNOWN = @as(u32, 0);
pub const MD_DIMTYPE_TIME = @as(u32, 1);
pub const MD_DIMTYPE_MEASURE = @as(u32, 2);
pub const MD_DIMTYPE_OTHER = @as(u32, 3);
// MDLEVEL_TYPE_*: level-type flags. Note REGULAR shares value 0 with UNKNOWN,
// and the TIME_* values combine MDLEVEL_TYPE_TIME (4) with higher bits.
pub const MDLEVEL_TYPE_UNKNOWN = @as(u32, 0);
pub const MDLEVEL_TYPE_REGULAR = @as(u32, 0);
pub const MDLEVEL_TYPE_ALL = @as(u32, 1);
pub const MDLEVEL_TYPE_CALCULATED = @as(u32, 2);
pub const MDLEVEL_TYPE_TIME = @as(u32, 4);
pub const MDLEVEL_TYPE_RESERVED1 = @as(u32, 8);
pub const MDLEVEL_TYPE_TIME_YEARS = @as(u32, 20);
pub const MDLEVEL_TYPE_TIME_HALF_YEAR = @as(u32, 36);
pub const MDLEVEL_TYPE_TIME_QUARTERS = @as(u32, 68);
pub const MDLEVEL_TYPE_TIME_MONTHS = @as(u32, 132);
pub const MDLEVEL_TYPE_TIME_WEEKS = @as(u32, 260);
pub const MDLEVEL_TYPE_TIME_DAYS = @as(u32, 516);
pub const MDLEVEL_TYPE_TIME_HOURS = @as(u32, 772);
pub const MDLEVEL_TYPE_TIME_MINUTES = @as(u32, 1028);
pub const MDLEVEL_TYPE_TIME_SECONDS = @as(u32, 2052);
pub const MDLEVEL_TYPE_TIME_UNDEFINED = @as(u32, 4100);
// MDMEASURE_AGGR_*: measure aggregation functions.
pub const MDMEASURE_AGGR_UNKNOWN = @as(u32, 0);
pub const MDMEASURE_AGGR_SUM = @as(u32, 1);
pub const MDMEASURE_AGGR_COUNT = @as(u32, 2);
pub const MDMEASURE_AGGR_MIN = @as(u32, 3);
pub const MDMEASURE_AGGR_MAX = @as(u32, 4);
pub const MDMEASURE_AGGR_AVG = @as(u32, 5);
pub const MDMEASURE_AGGR_VAR = @as(u32, 6);
pub const MDMEASURE_AGGR_STD = @as(u32, 7);
pub const MDMEASURE_AGGR_CALCULATED = @as(u32, 127);
pub const MDPROP_MEMBER = @as(u32, 1);
pub const MDPROP_CELL = @as(u32, 2);
// MDMEMBER_TYPE_*: member kinds.
pub const MDMEMBER_TYPE_UNKNOWN = @as(u32, 0);
pub const MDMEMBER_TYPE_REGULAR = @as(u32, 1);
pub const MDMEMBER_TYPE_ALL = @as(u32, 2);
pub const MDMEMBER_TYPE_MEASURE = @as(u32, 3);
pub const MDMEMBER_TYPE_FORMULA = @as(u32, 4);
pub const MDMEMBER_TYPE_RESERVE1 = @as(u32, 5);
pub const MDMEMBER_TYPE_RESERVE2 = @as(u32, 6);
pub const MDMEMBER_TYPE_RESERVE3 = @as(u32, 7);
pub const MDMEMBER_TYPE_RESERVE4 = @as(u32, 8);
pub const MDDISPINFO_DRILLED_DOWN = @as(u32, 65536);
pub const MDDISPINFO_PARENT_SAME_AS_PREV = @as(u32, 131072);
pub const DB_COUNTUNAVAILABLE = @as(i32, -1);
// MDFF_*: font-format bit flags.
pub const MDFF_BOLD = @as(u32, 1);
pub const MDFF_ITALIC = @as(u32, 2);
pub const MDFF_UNDERLINE = @as(u32, 4);
pub const MDFF_STRIKEOUT = @as(u32, 8);
// MDAXIS_*: axis identifiers (SLICERS is the all-ones sentinel).
pub const MDAXIS_COLUMNS = @as(u32, 0);
pub const MDAXIS_ROWS = @as(u32, 1);
pub const MDAXIS_PAGES = @as(u32, 2);
pub const MDAXIS_SECTIONS = @as(u32, 3);
pub const MDAXIS_CHAPTERS = @as(u32, 4);
pub const MDAXIS_SLICERS = @as(u32, 4294967295);
// CRESTRICTIONS_*: presumably the number of restriction values each schema
// rowset accepts — TODO confirm against the OLE DB schema-rowset reference.
pub const CRESTRICTIONS_DBSCHEMA_ASSERTIONS = @as(u32, 3);
pub const CRESTRICTIONS_DBSCHEMA_CATALOGS = @as(u32, 1);
pub const CRESTRICTIONS_DBSCHEMA_CHARACTER_SETS = @as(u32, 3);
pub const CRESTRICTIONS_DBSCHEMA_COLLATIONS = @as(u32, 3);
pub const CRESTRICTIONS_DBSCHEMA_COLUMNS = @as(u32, 4);
pub const CRESTRICTIONS_DBSCHEMA_CHECK_CONSTRAINTS = @as(u32, 3);
pub const CRESTRICTIONS_DBSCHEMA_CONSTRAINT_COLUMN_USAGE = @as(u32, 4);
pub const CRESTRICTIONS_DBSCHEMA_CONSTRAINT_TABLE_USAGE = @as(u32, 3);
pub const CRESTRICTIONS_DBSCHEMA_KEY_COLUMN_USAGE = @as(u32, 7);
pub const CRESTRICTIONS_DBSCHEMA_REFERENTIAL_CONSTRAINTS = @as(u32, 3);
pub const CRESTRICTIONS_DBSCHEMA_TABLE_CONSTRAINTS = @as(u32, 7);
pub const CRESTRICTIONS_DBSCHEMA_COLUMN_DOMAIN_USAGE = @as(u32, 4);
pub const CRESTRICTIONS_DBSCHEMA_INDEXES = @as(u32, 5);
pub const CRESTRICTIONS_DBSCHEMA_OBJECT_ACTIONS = @as(u32, 1);
pub const CRESTRICTIONS_DBSCHEMA_OBJECTS = @as(u32, 1);
pub const CRESTRICTIONS_DBSCHEMA_COLUMN_PRIVILEGES = @as(u32, 6);
pub const CRESTRICTIONS_DBSCHEMA_TABLE_PRIVILEGES = @as(u32, 5);
pub const CRESTRICTIONS_DBSCHEMA_USAGE_PRIVILEGES = @as(u32, 6);
pub const CRESTRICTIONS_DBSCHEMA_PROCEDURES = @as(u32, 4);
pub const CRESTRICTIONS_DBSCHEMA_SCHEMATA = @as(u32, 3);
pub const CRESTRICTIONS_DBSCHEMA_SQL_LANGUAGES = @as(u32, 0);
pub const CRESTRICTIONS_DBSCHEMA_STATISTICS = @as(u32, 3);
pub const CRESTRICTIONS_DBSCHEMA_TABLES = @as(u32, 4);
pub const CRESTRICTIONS_DBSCHEMA_TRANSLATIONS = @as(u32, 3);
pub const CRESTRICTIONS_DBSCHEMA_PROVIDER_TYPES = @as(u32, 2);
pub const CRESTRICTIONS_DBSCHEMA_VIEWS = @as(u32, 3);
pub const CRESTRICTIONS_DBSCHEMA_VIEW_COLUMN_USAGE = @as(u32, 3);
pub const CRESTRICTIONS_DBSCHEMA_VIEW_TABLE_USAGE = @as(u32, 3);
pub const CRESTRICTIONS_DBSCHEMA_PROCEDURE_PARAMETERS = @as(u32, 4);
pub const CRESTRICTIONS_DBSCHEMA_FOREIGN_KEYS = @as(u32, 6);
pub const CRESTRICTIONS_DBSCHEMA_PRIMARY_KEYS = @as(u32, 3);
pub const CRESTRICTIONS_DBSCHEMA_PROCEDURE_COLUMNS = @as(u32, 4);
pub const CRESTRICTIONS_DBSCHEMA_TABLES_INFO = @as(u32, 4);
pub const CRESTRICTIONS_MDSCHEMA_CUBES = @as(u32, 3);
pub const CRESTRICTIONS_MDSCHEMA_DIMENSIONS = @as(u32, 5);
pub const CRESTRICTIONS_MDSCHEMA_HIERARCHIES = @as(u32, 6);
pub const CRESTRICTIONS_MDSCHEMA_LEVELS = @as(u32, 7);
pub const CRESTRICTIONS_MDSCHEMA_MEASURES = @as(u32, 5);
pub const CRESTRICTIONS_MDSCHEMA_PROPERTIES = @as(u32, 9);
pub const CRESTRICTIONS_MDSCHEMA_MEMBERS = @as(u32, 12);
pub const CRESTRICTIONS_DBSCHEMA_TRUSTEE = @as(u32, 4);
pub const CRESTRICTIONS_DBSCHEMA_TABLE_STATISTICS = @as(u32, 7);
pub const CRESTRICTIONS_DBSCHEMA_CHECK_CONSTRAINTS_BY_TABLE = @as(u32, 6);
pub const CRESTRICTIONS_MDSCHEMA_FUNCTIONS = @as(u32, 4);
pub const CRESTRICTIONS_MDSCHEMA_ACTIONS = @as(u32, 8);
pub const CRESTRICTIONS_MDSCHEMA_COMMANDS = @as(u32, 5);
pub const CRESTRICTIONS_MDSCHEMA_SETS = @as(u32, 5);
pub const IDENTIFIER_SDK_MASK = @as(u32, 4026531840);
pub const IDENTIFIER_SDK_ERROR = @as(u32, 268435456);
//--------------------------------------------------------------------------------
// Section: Types (334)
//--------------------------------------------------------------------------------
pub const IRowsetExactScroll = extern struct {
    placeholder: usize, // TODO: why is this type empty?
};
// IWordSink: generated COM interface wrapper. The extern VTable mirrors the
// native C vtable (IUnknown entries first via `base`); do not reorder entries.
const IID_IWordSink_Value = @import("../zig.zig").Guid.initString("cc907054-c058-101a-b554-08002b33b0e6");
pub const IID_IWordSink = &IID_IWordSink_Value;
pub const IWordSink = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        PutWord: fn(
            self: *const IWordSink,
            cwc: u32,
            pwcInBuf: ?[*:0]const u16,
            cwcSrcLen: u32,
            cwcSrcPos: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        PutAltWord: fn(
            self: *const IWordSink,
            cwc: u32,
            pwcInBuf: ?[*:0]const u16,
            cwcSrcLen: u32,
            cwcSrcPos: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        StartAltPhrase: fn(
            self: *const IWordSink,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EndAltPhrase: fn(
            self: *const IWordSink,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        PutBreak: fn(
            self: *const IWordSink,
            breakType: WORDREP_BREAK_TYPE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWordSink_PutWord(self: *const T, cwc: u32, pwcInBuf: ?[*:0]const u16, cwcSrcLen: u32, cwcSrcPos: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWordSink.VTable, self.vtable).PutWord(@ptrCast(*const IWordSink, self), cwc, pwcInBuf, cwcSrcLen, cwcSrcPos);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWordSink_PutAltWord(self: *const T, cwc: u32, pwcInBuf: ?[*:0]const u16, cwcSrcLen: u32, cwcSrcPos: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWordSink.VTable, self.vtable).PutAltWord(@ptrCast(*const IWordSink, self), cwc, pwcInBuf, cwcSrcLen, cwcSrcPos);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWordSink_StartAltPhrase(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWordSink.VTable, self.vtable).StartAltPhrase(@ptrCast(*const IWordSink,
self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWordSink_EndAltPhrase(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWordSink.VTable, self.vtable).EndAltPhrase(@ptrCast(*const IWordSink, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWordSink_PutBreak(self: *const T, breakType: WORDREP_BREAK_TYPE) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWordSink.VTable, self.vtable).PutBreak(@ptrCast(*const IWordSink, self), breakType);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// Callback used by TEXT_SOURCE to refill its text buffer.
pub const PFNFILLTEXTBUFFER = fn(
    pTextSource: ?*TEXT_SOURCE,
) callconv(@import("std").os.windows.WINAPI) HRESULT;
// Text buffer descriptor: awcBuffer holds UTF-16 text, iCur/iEnd index into it.
pub const TEXT_SOURCE = extern struct {
    pfnFillTextBuffer: ?PFNFILLTEXTBUFFER,
    awcBuffer: ?[*:0]const u16,
    iEnd: u32,
    iCur: u32,
};
// IWordBreaker: generated COM interface wrapper; VTable mirrors the native
// C vtable layout and its entry order must be preserved.
// TODO: this type is limited to platform 'windows5.0'
const IID_IWordBreaker_Value = @import("../zig.zig").Guid.initString("d53552c8-77e3-101a-b552-08002b33b0e6");
pub const IID_IWordBreaker = &IID_IWordBreaker_Value;
pub const IWordBreaker = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Init: fn(
            self: *const IWordBreaker,
            fQuery: BOOL,
            ulMaxTokenSize: u32,
            pfLicense: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        BreakText: fn(
            self: *const IWordBreaker,
            pTextSource: ?*TEXT_SOURCE,
            pWordSink: ?*IWordSink,
            pPhraseSink: ?*IPhraseSink,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ComposePhrase: fn(
            self: *const IWordBreaker,
            pwcNoun: ?[*:0]const u16,
            cwcNoun: u32,
            pwcModifier: ?[*:0]const u16,
            cwcModifier: u32,
            ulAttachmentType: u32,
            pwcPhrase: ?PWSTR,
            pcwcPhrase: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLicenseToUse: fn(
            self: *const IWordBreaker,
            ppwcsLicense: ?*const ?*u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWordBreaker_Init(self: *const T, fQuery: BOOL, ulMaxTokenSize: u32, pfLicense: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWordBreaker.VTable, self.vtable).Init(@ptrCast(*const IWordBreaker, self), fQuery, ulMaxTokenSize, pfLicense);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWordBreaker_BreakText(self: *const T, pTextSource: ?*TEXT_SOURCE, pWordSink: ?*IWordSink, pPhraseSink: ?*IPhraseSink) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWordBreaker.VTable, self.vtable).BreakText(@ptrCast(*const IWordBreaker, self), pTextSource, pWordSink, pPhraseSink);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWordBreaker_ComposePhrase(self: *const T, pwcNoun: ?[*:0]const u16, cwcNoun: u32, pwcModifier: ?[*:0]const u16, cwcModifier: u32, ulAttachmentType: u32, pwcPhrase: ?PWSTR, pcwcPhrase: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWordBreaker.VTable, self.vtable).ComposePhrase(@ptrCast(*const IWordBreaker, self), pwcNoun, cwcNoun, pwcModifier, cwcModifier, ulAttachmentType, pwcPhrase, pcwcPhrase);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWordBreaker_GetLicenseToUse(self: *const T, ppwcsLicense: ?*const ?*u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWordBreaker.VTable, self.vtable).GetLicenseToUse(@ptrCast(*const IWordBreaker, self), ppwcsLicense);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// IWordFormSink: generated COM interface wrapper (continues in next chunk).
// TODO: this type is limited to platform 'windows5.0'
const IID_IWordFormSink_Value = @import("../zig.zig").Guid.initString("fe77c330-7f42-11ce-be57-00aa0051fe20");
pub const IID_IWordFormSink = &IID_IWordFormSink_Value;
pub const IWordFormSink = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        PutAltWord: fn(
            self: *const IWordFormSink,
            pwcInBuf: ?[*:0]const u16,
            cwc: u32,
        )
callconv(@import("std").os.windows.WINAPI) HRESULT,
        PutWord: fn(
            self: *const IWordFormSink,
            pwcInBuf: ?[*:0]const u16,
            cwc: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWordFormSink_PutAltWord(self: *const T, pwcInBuf: ?[*:0]const u16, cwc: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWordFormSink.VTable, self.vtable).PutAltWord(@ptrCast(*const IWordFormSink, self), pwcInBuf, cwc);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IWordFormSink_PutWord(self: *const T, pwcInBuf: ?[*:0]const u16, cwc: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IWordFormSink.VTable, self.vtable).PutWord(@ptrCast(*const IWordFormSink, self), pwcInBuf, cwc);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// IStemmer: generated COM interface wrapper; VTable order defines the C ABI.
// TODO: this type is limited to platform 'windows5.0'
const IID_IStemmer_Value = @import("../zig.zig").Guid.initString("efbaf140-7f42-11ce-be57-00aa0051fe20");
pub const IID_IStemmer = &IID_IStemmer_Value;
pub const IStemmer = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Init: fn(
            self: *const IStemmer,
            ulMaxTokenSize: u32,
            pfLicense: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GenerateWordForms: fn(
            self: *const IStemmer,
            pwcInBuf: ?[*:0]const u16,
            cwc: u32,
            pStemSink: ?*IWordFormSink,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLicenseToUse: fn(
            self: *const IStemmer,
            ppwcsLicense: ?*const ?*u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IStemmer_Init(self: *const T, ulMaxTokenSize: u32, pfLicense: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IStemmer.VTable, self.vtable).Init(@ptrCast(*const IStemmer, self), ulMaxTokenSize, pfLicense);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IStemmer_GenerateWordForms(self: *const T, pwcInBuf: ?[*:0]const u16, cwc: u32, pStemSink: ?*IWordFormSink) callconv(.Inline) HRESULT {
            return @ptrCast(*const IStemmer.VTable, self.vtable).GenerateWordForms(@ptrCast(*const IStemmer, self), pwcInBuf, cwc, pStemSink);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IStemmer_GetLicenseToUse(self: *const T, ppwcsLicense: ?*const ?*u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IStemmer.VTable, self.vtable).GetLicenseToUse(@ptrCast(*const IStemmer, self), ppwcsLicense);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// ISimpleCommandCreator: generated COM interface wrapper (continues in next chunk).
const IID_ISimpleCommandCreator_Value = @import("../zig.zig").Guid.initString("5e341ab7-02d0-11d1-900c-00a0c9063796");
pub const IID_ISimpleCommandCreator = &IID_ISimpleCommandCreator_Value;
pub const ISimpleCommandCreator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CreateICommand: fn(
            self: *const ISimpleCommandCreator,
            ppIUnknown: ?*?*IUnknown,
            pOuterUnk: ?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        VerifyCatalog: fn(
            self: *const ISimpleCommandCreator,
            pwszMachine: ?[*:0]const u16,
            pwszCatalogName: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetDefaultCatalog: fn(
            self: *const ISimpleCommandCreator,
            pwszCatalogName: ?PWSTR,
            cwcIn: u32,
            pcwcOut: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimpleCommandCreator_CreateICommand(self: *const T, ppIUnknown: ?*?*IUnknown, pOuterUnk: ?*IUnknown) callconv(.Inline) HRESULT {
            return
@ptrCast(*const ISimpleCommandCreator.VTable, self.vtable).CreateICommand(@ptrCast(*const ISimpleCommandCreator, self), ppIUnknown, pOuterUnk);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimpleCommandCreator_VerifyCatalog(self: *const T, pwszMachine: ?[*:0]const u16, pwszCatalogName: ?[*:0]const u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimpleCommandCreator.VTable, self.vtable).VerifyCatalog(@ptrCast(*const ISimpleCommandCreator, self), pwszMachine, pwszCatalogName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISimpleCommandCreator_GetDefaultCatalog(self: *const T, pwszCatalogName: ?PWSTR, cwcIn: u32, pcwcOut: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISimpleCommandCreator.VTable, self.vtable).GetDefaultCatalog(@ptrCast(*const ISimpleCommandCreator, self), pwszCatalogName, cwcIn, pcwcOut);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// IColumnMapper: generated COM interface wrapper; VTable order defines the C ABI.
const IID_IColumnMapper_Value = @import("../zig.zig").Guid.initString("0b63e37a-9ccc-11d0-bcdb-00805fccce04");
pub const IID_IColumnMapper = &IID_IColumnMapper_Value;
pub const IColumnMapper = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetPropInfoFromName: fn(
            self: *const IColumnMapper,
            wcsPropName: ?[*:0]const u16,
            ppPropId: ?*?*DBID,
            pPropType: ?*u16,
            puiWidth: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPropInfoFromId: fn(
            self: *const IColumnMapper,
            pPropId: ?*const DBID,
            pwcsName: ?*?*u16,
            pPropType: ?*u16,
            puiWidth: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumPropInfo: fn(
            self: *const IColumnMapper,
            iEntry: u32,
            pwcsName: ?*const ?*u16,
            ppPropId: ?*?*DBID,
            pPropType: ?*u16,
            puiWidth: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        IsMapUpToDate: fn(
            self: *const IColumnMapper,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub
        usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IColumnMapper_GetPropInfoFromName(self: *const T, wcsPropName: ?[*:0]const u16, ppPropId: ?*?*DBID, pPropType: ?*u16, puiWidth: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IColumnMapper.VTable, self.vtable).GetPropInfoFromName(@ptrCast(*const IColumnMapper, self), wcsPropName, ppPropId, pPropType, puiWidth);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IColumnMapper_GetPropInfoFromId(self: *const T, pPropId: ?*const DBID, pwcsName: ?*?*u16, pPropType: ?*u16, puiWidth: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IColumnMapper.VTable, self.vtable).GetPropInfoFromId(@ptrCast(*const IColumnMapper, self), pPropId, pwcsName, pPropType, puiWidth);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IColumnMapper_EnumPropInfo(self: *const T, iEntry: u32, pwcsName: ?*const ?*u16, ppPropId: ?*?*DBID, pPropType: ?*u16, puiWidth: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IColumnMapper.VTable, self.vtable).EnumPropInfo(@ptrCast(*const IColumnMapper, self), iEntry, pwcsName, ppPropId, pPropType, puiWidth);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IColumnMapper_IsMapUpToDate(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IColumnMapper.VTable, self.vtable).IsMapUpToDate(@ptrCast(*const IColumnMapper, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// IColumnMapperCreator: generated COM interface wrapper (continues in next chunk).
const IID_IColumnMapperCreator_Value = @import("../zig.zig").Guid.initString("0b63e37b-9ccc-11d0-bcdb-00805fccce04");
pub const IID_IColumnMapperCreator = &IID_IColumnMapperCreator_Value;
pub const IColumnMapperCreator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetColumnMapper: fn(
            self: *const IColumnMapperCreator,
            wcsMachineName: ?[*:0]const u16,
            wcsCatalogName: ?[*:0]const u16,
ppColumnMapper: ?*?*IColumnMapper,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IColumnMapperCreator_GetColumnMapper(self: *const T, wcsMachineName: ?[*:0]const u16, wcsCatalogName: ?[*:0]const u16, ppColumnMapper: ?*?*IColumnMapper) callconv(.Inline) HRESULT {
            return @ptrCast(*const IColumnMapperCreator.VTable, self.vtable).GetColumnMapper(@ptrCast(*const IColumnMapperCreator, self), wcsMachineName, wcsCatalogName, ppColumnMapper);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// CLSID_* coclass identifiers (only the public `&` aliases are exported).
const CLSID_CSearchManager_Value = @import("../zig.zig").Guid.initString("7d096c5f-ac08-4f1f-beb7-5c22c517ce39");
pub const CLSID_CSearchManager = &CLSID_CSearchManager_Value;
const CLSID_CSearchRoot_Value = @import("../zig.zig").Guid.initString("30766bd2-ea1c-4f28-bf27-0b44e2f68db7");
pub const CLSID_CSearchRoot = &CLSID_CSearchRoot_Value;
const CLSID_CSearchScopeRule_Value = @import("../zig.zig").Guid.initString("e63de750-3bd7-4be5-9c84-6b4281988c44");
pub const CLSID_CSearchScopeRule = &CLSID_CSearchScopeRule_Value;
const CLSID_FilterRegistration_Value = @import("../zig.zig").Guid.initString("9e175b8d-f52a-11d8-b9a5-505054503030");
pub const CLSID_FilterRegistration = &CLSID_FilterRegistration_Value;
pub const FILTERED_DATA_SOURCES = extern struct {
    pwcsExtension: ?[*:0]const u16,
    pwcsMime: ?[*:0]const u16,
    pClsid: ?*const Guid,
    pwcsOverride: ?[*:0]const u16,
};
// ILoadFilter: generated COM interface wrapper; VTable order defines the C ABI.
// TODO: this type is limited to platform 'windows6.1'
const IID_ILoadFilter_Value = @import("../zig.zig").Guid.initString("c7310722-ac80-11d1-8df3-00c04fb6ef4f");
pub const IID_ILoadFilter = &IID_ILoadFilter_Value;
pub const ILoadFilter = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        LoadIFilter: fn(
            self: *const ILoadFilter,
            pwcsPath: ?[*:0]const u16,
            pFilteredSources:
            ?*FILTERED_DATA_SOURCES,
            pUnkOuter: ?*IUnknown,
            fUseDefault: BOOL,
            pFilterClsid: ?*Guid,
            SearchDecSize: ?*i32,
            pwcsSearchDesc: ?*?*u16,
            ppIFilt: ?*?*IFilter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        LoadIFilterFromStorage: fn(
            self: *const ILoadFilter,
            pStg: ?*IStorage,
            pUnkOuter: ?*IUnknown,
            pwcsOverride: ?[*:0]const u16,
            fUseDefault: BOOL,
            pFilterClsid: ?*Guid,
            SearchDecSize: ?*i32,
            pwcsSearchDesc: ?*?*u16,
            ppIFilt: ?*?*IFilter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        LoadIFilterFromStream: fn(
            self: *const ILoadFilter,
            pStm: ?*IStream,
            pFilteredSources: ?*FILTERED_DATA_SOURCES,
            pUnkOuter: ?*IUnknown,
            fUseDefault: BOOL,
            pFilterClsid: ?*Guid,
            SearchDecSize: ?*i32,
            pwcsSearchDesc: ?*?*u16,
            ppIFilt: ?*?*IFilter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ILoadFilter_LoadIFilter(self: *const T, pwcsPath: ?[*:0]const u16, pFilteredSources: ?*FILTERED_DATA_SOURCES, pUnkOuter: ?*IUnknown, fUseDefault: BOOL, pFilterClsid: ?*Guid, SearchDecSize: ?*i32, pwcsSearchDesc: ?*?*u16, ppIFilt: ?*?*IFilter) callconv(.Inline) HRESULT {
            return @ptrCast(*const ILoadFilter.VTable, self.vtable).LoadIFilter(@ptrCast(*const ILoadFilter, self), pwcsPath, pFilteredSources, pUnkOuter, fUseDefault, pFilterClsid, SearchDecSize, pwcsSearchDesc, ppIFilt);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ILoadFilter_LoadIFilterFromStorage(self: *const T, pStg: ?*IStorage, pUnkOuter: ?*IUnknown, pwcsOverride: ?[*:0]const u16, fUseDefault: BOOL, pFilterClsid: ?*Guid, SearchDecSize: ?*i32, pwcsSearchDesc: ?*?*u16, ppIFilt: ?*?*IFilter) callconv(.Inline) HRESULT {
            return @ptrCast(*const ILoadFilter.VTable, self.vtable).LoadIFilterFromStorage(@ptrCast(*const ILoadFilter, self), pStg, pUnkOuter, pwcsOverride,
fUseDefault, pFilterClsid, SearchDecSize, pwcsSearchDesc, ppIFilt);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ILoadFilter_LoadIFilterFromStream(self: *const T, pStm: ?*IStream, pFilteredSources: ?*FILTERED_DATA_SOURCES, pUnkOuter: ?*IUnknown, fUseDefault: BOOL, pFilterClsid: ?*Guid, SearchDecSize: ?*i32, pwcsSearchDesc: ?*?*u16, ppIFilt: ?*?*IFilter) callconv(.Inline) HRESULT {
            return @ptrCast(*const ILoadFilter.VTable, self.vtable).LoadIFilterFromStream(@ptrCast(*const ILoadFilter, self), pStm, pFilteredSources, pUnkOuter, fUseDefault, pFilterClsid, SearchDecSize, pwcsSearchDesc, ppIFilt);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// ILoadFilterWithPrivateComActivation: derives from ILoadFilter — its VTable
// embeds ILoadFilter.VTable as `base` and the mixin reuses ILoadFilter's mixin.
const IID_ILoadFilterWithPrivateComActivation_Value = @import("../zig.zig").Guid.initString("40bdbd34-780b-48d3-9bb6-12ebd4ad2e75");
pub const IID_ILoadFilterWithPrivateComActivation = &IID_ILoadFilterWithPrivateComActivation_Value;
pub const ILoadFilterWithPrivateComActivation = extern struct {
    pub const VTable = extern struct {
        base: ILoadFilter.VTable,
        LoadIFilterWithPrivateComActivation: fn(
            self: *const ILoadFilterWithPrivateComActivation,
            filteredSources: ?*FILTERED_DATA_SOURCES,
            useDefault: BOOL,
            filterClsid: ?*Guid,
            isFilterPrivateComActivated: ?*BOOL,
            filterObj: ?*?*IFilter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ILoadFilter.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ILoadFilterWithPrivateComActivation_LoadIFilterWithPrivateComActivation(self: *const T, filteredSources: ?*FILTERED_DATA_SOURCES, useDefault: BOOL, filterClsid: ?*Guid, isFilterPrivateComActivated: ?*BOOL, filterObj: ?*?*IFilter) callconv(.Inline) HRESULT {
            return @ptrCast(*const ILoadFilterWithPrivateComActivation.VTable, self.vtable).LoadIFilterWithPrivateComActivation(@ptrCast(*const ILoadFilterWithPrivateComActivation, self),
            filteredSources, useDefault, filterClsid, isFilterPrivateComActivated, filterObj);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// Condition-tree node kinds, with the CT_* aliases exported alongside.
pub const CONDITION_TYPE = enum(i32) {
    AND_CONDITION = 0,
    OR_CONDITION = 1,
    NOT_CONDITION = 2,
    LEAF_CONDITION = 3,
};
pub const CT_AND_CONDITION = CONDITION_TYPE.AND_CONDITION;
pub const CT_OR_CONDITION = CONDITION_TYPE.OR_CONDITION;
pub const CT_NOT_CONDITION = CONDITION_TYPE.NOT_CONDITION;
pub const CT_LEAF_CONDITION = CONDITION_TYPE.LEAF_CONDITION;
// Leaf-condition comparison operations, with the COP_* aliases exported alongside.
pub const CONDITION_OPERATION = enum(i32) {
    IMPLICIT = 0,
    EQUAL = 1,
    NOTEQUAL = 2,
    LESSTHAN = 3,
    GREATERTHAN = 4,
    LESSTHANOREQUAL = 5,
    GREATERTHANOREQUAL = 6,
    VALUE_STARTSWITH = 7,
    VALUE_ENDSWITH = 8,
    VALUE_CONTAINS = 9,
    VALUE_NOTCONTAINS = 10,
    DOSWILDCARDS = 11,
    WORD_EQUAL = 12,
    WORD_STARTSWITH = 13,
    APPLICATION_SPECIFIC = 14,
};
pub const COP_IMPLICIT = CONDITION_OPERATION.IMPLICIT;
pub const COP_EQUAL = CONDITION_OPERATION.EQUAL;
pub const COP_NOTEQUAL = CONDITION_OPERATION.NOTEQUAL;
pub const COP_LESSTHAN = CONDITION_OPERATION.LESSTHAN;
pub const COP_GREATERTHAN = CONDITION_OPERATION.GREATERTHAN;
pub const COP_LESSTHANOREQUAL = CONDITION_OPERATION.LESSTHANOREQUAL;
pub const COP_GREATERTHANOREQUAL = CONDITION_OPERATION.GREATERTHANOREQUAL;
pub const COP_VALUE_STARTSWITH = CONDITION_OPERATION.VALUE_STARTSWITH;
pub const COP_VALUE_ENDSWITH = CONDITION_OPERATION.VALUE_ENDSWITH;
pub const COP_VALUE_CONTAINS = CONDITION_OPERATION.VALUE_CONTAINS;
pub const COP_VALUE_NOTCONTAINS = CONDITION_OPERATION.VALUE_NOTCONTAINS;
pub const COP_DOSWILDCARDS = CONDITION_OPERATION.DOSWILDCARDS;
pub const COP_WORD_EQUAL = CONDITION_OPERATION.WORD_EQUAL;
pub const COP_WORD_STARTSWITH = CONDITION_OPERATION.WORD_STARTSWITH;
pub const COP_APPLICATION_SPECIFIC = CONDITION_OPERATION.APPLICATION_SPECIFIC;
// IRichChunk: generated COM interface wrapper.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IRichChunk_Value = @import("../zig.zig").Guid.initString("4fdef69c-dbc9-454e-9910-b34f3c64b510");
pub const IID_IRichChunk =
&IID_IRichChunk_Value;
pub const IRichChunk = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetData: fn(
            self: *const IRichChunk,
            pFirstPos: ?*u32,
            pLength: ?*u32,
            ppsz: ?*?PWSTR,
            pValue: ?*PROPVARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRichChunk_GetData(self: *const T, pFirstPos: ?*u32, pLength: ?*u32, ppsz: ?*?PWSTR, pValue: ?*PROPVARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRichChunk.VTable, self.vtable).GetData(@ptrCast(*const IRichChunk, self), pFirstPos, pLength, ppsz, pValue);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// ICondition: derives from IPersistStream (its VTable is embedded as `base`).
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ICondition_Value = @import("../zig.zig").Guid.initString("0fc988d4-c935-4b97-a973-46282ea175c8");
pub const IID_ICondition = &IID_ICondition_Value;
pub const ICondition = extern struct {
    pub const VTable = extern struct {
        base: IPersistStream.VTable,
        GetConditionType: fn(
            self: *const ICondition,
            pNodeType: ?*CONDITION_TYPE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSubConditions: fn(
            self: *const ICondition,
            riid: ?*const Guid,
            ppv: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetComparisonInfo: fn(
            self: *const ICondition,
            ppszPropertyName: ?*?PWSTR,
            pcop: ?*CONDITION_OPERATION,
            ppropvar: ?*PROPVARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetValueType: fn(
            self: *const ICondition,
            ppszValueTypeName: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetValueNormalization: fn(
            self: *const ICondition,
            ppszNormalization: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetInputTerms: fn(
            self: *const ICondition,
            ppPropertyTerm: ?*?*IRichChunk,
            ppOperationTerm: ?*?*IRichChunk,
            ppValueTerm: ?*?*IRichChunk,
        )
callconv(@import("std").os.windows.WINAPI) HRESULT,
        Clone: fn(
            self: *const ICondition,
            ppc: ?*?*ICondition,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IPersistStream.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICondition_GetConditionType(self: *const T, pNodeType: ?*CONDITION_TYPE) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICondition.VTable, self.vtable).GetConditionType(@ptrCast(*const ICondition, self), pNodeType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICondition_GetSubConditions(self: *const T, riid: ?*const Guid, ppv: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICondition.VTable, self.vtable).GetSubConditions(@ptrCast(*const ICondition, self), riid, ppv);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICondition_GetComparisonInfo(self: *const T, ppszPropertyName: ?*?PWSTR, pcop: ?*CONDITION_OPERATION, ppropvar: ?*PROPVARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICondition.VTable, self.vtable).GetComparisonInfo(@ptrCast(*const ICondition, self), ppszPropertyName, pcop, ppropvar);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICondition_GetValueType(self: *const T, ppszValueTypeName: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICondition.VTable, self.vtable).GetValueType(@ptrCast(*const ICondition, self), ppszValueTypeName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICondition_GetValueNormalization(self: *const T, ppszNormalization: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICondition.VTable, self.vtable).GetValueNormalization(@ptrCast(*const ICondition, self), ppszNormalization);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICondition_GetInputTerms(self: *const T, ppPropertyTerm: ?*?*IRichChunk, ppOperationTerm: ?*?*IRichChunk, ppValueTerm: ?*?*IRichChunk) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICondition.VTable, self.vtable).GetInputTerms(@ptrCast(*const ICondition, self), ppPropertyTerm, ppOperationTerm, ppValueTerm);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICondition_Clone(self: *const T, ppc: ?*?*ICondition) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICondition.VTable, self.vtable).Clone(@ptrCast(*const ICondition, self), ppc);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// ICondition2: derives from ICondition (VTable embedded as `base`; mixin reused).
// TODO: this type is limited to platform 'windows6.1'
const IID_ICondition2_Value = @import("../zig.zig").Guid.initString("0db8851d-2e5b-47eb-9208-d28c325a01d7");
pub const IID_ICondition2 = &IID_ICondition2_Value;
pub const ICondition2 = extern struct {
    pub const VTable = extern struct {
        base: ICondition.VTable,
        GetLocale: fn(
            self: *const ICondition2,
            ppszLocaleName: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLeafConditionInfo: fn(
            self: *const ICondition2,
            ppropkey: ?*PROPERTYKEY,
            pcop: ?*CONDITION_OPERATION,
            ppropvar: ?*PROPVARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ICondition.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICondition2_GetLocale(self: *const T, ppszLocaleName: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICondition2.VTable, self.vtable).GetLocale(@ptrCast(*const ICondition2, self), ppszLocaleName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICondition2_GetLeafConditionInfo(self: *const T, ppropkey: ?*PROPERTYKEY, pcop: ?*CONDITION_OPERATION, ppropvar: ?*PROPVARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICondition2.VTable,
self.vtable).GetLeafConditionInfo(@ptrCast(*const ICondition2, self), ppropkey, pcop, ppropvar);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// Exact-numeric value: 16-byte little-or-big-endian magnitude — TODO confirm
// byte order against the OLE DB DB_NUMERIC reference.
pub const DB_NUMERIC = extern struct {
    precision: u8,
    scale: u8,
    sign: u8,
    val: [16]u8,
};
pub const DBDATE = extern struct {
    year: i16, // signed, unlike month/day
    month: u16,
    day: u16,
};
pub const DBTIME = extern struct {
    hour: u16,
    minute: u16,
    second: u16,
};
// Variable-length numeric; `val` is a flexible-array-style 1-element tail.
pub const DB_VARNUMERIC = extern struct {
    precision: u8,
    scale: i8,
    sign: u8,
    val: [1]u8,
};
// DBTYPEENUM: OLE DB type indicators. Values >= 4096 (VECTOR, ARRAY, BYREF,
// RESERVED) act as modifier bits on the base type codes.
pub const DBTYPEENUM = enum(i32) {
    EMPTY = 0,
    NULL = 1,
    I2 = 2,
    I4 = 3,
    R4 = 4,
    R8 = 5,
    CY = 6,
    DATE = 7,
    BSTR = 8,
    IDISPATCH = 9,
    ERROR = 10,
    BOOL = 11,
    VARIANT = 12,
    IUNKNOWN = 13,
    DECIMAL = 14,
    UI1 = 17,
    ARRAY = 8192,
    BYREF = 16384,
    I1 = 16,
    UI2 = 18,
    UI4 = 19,
    I8 = 20,
    UI8 = 21,
    GUID = 72,
    VECTOR = 4096,
    RESERVED = 32768,
    BYTES = 128,
    STR = 129,
    WSTR = 130,
    NUMERIC = 131,
    UDT = 132,
    DBDATE = 133,
    DBTIME = 134,
    DBTIMESTAMP = 135,
};
pub const DBTYPE_EMPTY = DBTYPEENUM.EMPTY;
pub const DBTYPE_NULL = DBTYPEENUM.NULL;
pub const DBTYPE_I2 = DBTYPEENUM.I2;
pub const DBTYPE_I4 = DBTYPEENUM.I4;
pub const DBTYPE_R4 = DBTYPEENUM.R4;
pub const DBTYPE_R8 = DBTYPEENUM.R8;
pub const DBTYPE_CY = DBTYPEENUM.CY;
pub const DBTYPE_DATE = DBTYPEENUM.DATE;
pub const DBTYPE_BSTR = DBTYPEENUM.BSTR;
pub const DBTYPE_IDISPATCH = DBTYPEENUM.IDISPATCH;
pub const DBTYPE_ERROR = DBTYPEENUM.ERROR;
pub const DBTYPE_BOOL = DBTYPEENUM.BOOL;
pub const DBTYPE_VARIANT = DBTYPEENUM.VARIANT;
pub const DBTYPE_IUNKNOWN = DBTYPEENUM.IUNKNOWN;
pub const DBTYPE_DECIMAL = DBTYPEENUM.DECIMAL;
pub const DBTYPE_UI1 = DBTYPEENUM.UI1;
pub const DBTYPE_ARRAY = DBTYPEENUM.ARRAY;
pub const DBTYPE_BYREF = DBTYPEENUM.BYREF;
pub const DBTYPE_I1 = DBTYPEENUM.I1;
pub const DBTYPE_UI2 = DBTYPEENUM.UI2;
pub const DBTYPE_UI4 = DBTYPEENUM.UI4;
pub const DBTYPE_I8 = DBTYPEENUM.I8;
pub const DBTYPE_UI8 = DBTYPEENUM.UI8;
pub const DBTYPE_GUID = DBTYPEENUM.GUID;
pub const DBTYPE_VECTOR = DBTYPEENUM.VECTOR;
pub const DBTYPE_RESERVED = DBTYPEENUM.RESERVED;
pub const DBTYPE_BYTES = DBTYPEENUM.BYTES;
pub const DBTYPE_STR = DBTYPEENUM.STR;
pub const DBTYPE_WSTR = DBTYPEENUM.WSTR;
pub const DBTYPE_NUMERIC = DBTYPEENUM.NUMERIC;
pub const DBTYPE_UDT = DBTYPEENUM.UDT;
pub const DBTYPE_DBDATE = DBTYPEENUM.DBDATE;
pub const DBTYPE_DBTIME = DBTYPEENUM.DBTIME;
pub const DBTYPE_DBTIMESTAMP = DBTYPEENUM.DBTIMESTAMP;
// Single-member enum; the generator truncated the member name — the usable
// name is the DBTYPE_HCHAPTER alias below.
pub const DBTYPEENUM15 = enum(i32) {
    R = 136,
};
pub const DBTYPE_HCHAPTER = DBTYPEENUM15.R;
// OLE DB 2.0 additions to the type indicators.
pub const DBTYPEENUM20 = enum(i32) {
    FILETIME = 64,
    PROPVARIANT = 138,
    VARNUMERIC = 139,
};
pub const DBTYPE_FILETIME = DBTYPEENUM20.FILETIME;
pub const DBTYPE_PROPVARIANT = DBTYPEENUM20.PROPVARIANT;
pub const DBTYPE_VARNUMERIC = DBTYPEENUM20.VARNUMERIC;
// Binding parts (bit flags: VALUE | LENGTH | STATUS).
pub const DBPARTENUM = enum(i32) {
    INVALID = 0,
    VALUE = 1,
    LENGTH = 2,
    STATUS = 4,
};
pub const DBPART_INVALID = DBPARTENUM.INVALID;
pub const DBPART_VALUE = DBPARTENUM.VALUE;
pub const DBPART_LENGTH = DBPARTENUM.LENGTH;
pub const DBPART_STATUS = DBPARTENUM.STATUS;
pub const DBPARAMIOENUM = enum(i32) {
    NOTPARAM = 0,
    INPUT = 1,
    OUTPUT = 2,
};
pub const DBPARAMIO_NOTPARAM = DBPARAMIOENUM.NOTPARAM;
pub const DBPARAMIO_INPUT = DBPARAMIOENUM.INPUT;
pub const DBPARAMIO_OUTPUT = DBPARAMIOENUM.OUTPUT;
// Single-member enum; truncated member name — use the DBBINDFLAG_HTML alias.
pub const DBBINDFLAGENUM = enum(i32) {
    L = 1,
};
pub const DBBINDFLAG_HTML = DBBINDFLAGENUM.L;
pub const DBMEMOWNERENUM = enum(i32) {
    CLIENTOWNED = 0,
    PROVIDEROWNED = 1,
};
pub const DBMEMOWNER_CLIENTOWNED = DBMEMOWNERENUM.CLIENTOWNED;
pub const DBMEMOWNER_PROVIDEROWNED = DBMEMOWNERENUM.PROVIDEROWNED;
// Per-column binding status codes (extended by DBSTATUSENUM20/21 below,
// whose values continue this numbering).
pub const DBSTATUSENUM = enum(i32) {
    S_OK = 0,
    E_BADACCESSOR = 1,
    E_CANTCONVERTVALUE = 2,
    S_ISNULL = 3,
    S_TRUNCATED = 4,
    E_SIGNMISMATCH = 5,
    E_DATAOVERFLOW = 6,
    E_CANTCREATE = 7,
    E_UNAVAILABLE = 8,
    E_PERMISSIONDENIED = 9,
    E_INTEGRITYVIOLATION = 10,
    E_SCHEMAVIOLATION = 11,
    E_BADSTATUS = 12,
    S_DEFAULT = 13,
};
pub const DBSTATUS_S_OK = DBSTATUSENUM.S_OK;
pub const DBSTATUS_E_BADACCESSOR = DBSTATUSENUM.E_BADACCESSOR;
pub const DBSTATUS_E_CANTCONVERTVALUE = DBSTATUSENUM.E_CANTCONVERTVALUE;
pub
const DBSTATUS_S_ISNULL = DBSTATUSENUM.S_ISNULL; pub const DBSTATUS_S_TRUNCATED = DBSTATUSENUM.S_TRUNCATED; pub const DBSTATUS_E_SIGNMISMATCH = DBSTATUSENUM.E_SIGNMISMATCH; pub const DBSTATUS_E_DATAOVERFLOW = DBSTATUSENUM.E_DATAOVERFLOW; pub const DBSTATUS_E_CANTCREATE = DBSTATUSENUM.E_CANTCREATE; pub const DBSTATUS_E_UNAVAILABLE = DBSTATUSENUM.E_UNAVAILABLE; pub const DBSTATUS_E_PERMISSIONDENIED = DBSTATUSENUM.E_PERMISSIONDENIED; pub const DBSTATUS_E_INTEGRITYVIOLATION = DBSTATUSENUM.E_INTEGRITYVIOLATION; pub const DBSTATUS_E_SCHEMAVIOLATION = DBSTATUSENUM.E_SCHEMAVIOLATION; pub const DBSTATUS_E_BADSTATUS = DBSTATUSENUM.E_BADSTATUS; pub const DBSTATUS_S_DEFAULT = DBSTATUSENUM.S_DEFAULT; pub const DBSTATUSENUM20 = enum(i32) { MDSTATUS_S_CELLEMPTY = 14, DBSTATUS_S_IGNORE = 15, }; pub const MDSTATUS_S_CELLEMPTY = DBSTATUSENUM20.MDSTATUS_S_CELLEMPTY; pub const DBSTATUS_S_IGNORE = DBSTATUSENUM20.DBSTATUS_S_IGNORE; pub const DBSTATUSENUM21 = enum(i32) { E_DOESNOTEXIST = 16, E_INVALIDURL = 17, E_RESOURCELOCKED = 18, E_RESOURCEEXISTS = 19, E_CANNOTCOMPLETE = 20, E_VOLUMENOTFOUND = 21, E_OUTOFSPACE = 22, S_CANNOTDELETESOURCE = 23, E_READONLY = 24, E_RESOURCEOUTOFSCOPE = 25, S_ALREADYEXISTS = 26, }; pub const DBSTATUS_E_DOESNOTEXIST = DBSTATUSENUM21.E_DOESNOTEXIST; pub const DBSTATUS_E_INVALIDURL = DBSTATUSENUM21.E_INVALIDURL; pub const DBSTATUS_E_RESOURCELOCKED = DBSTATUSENUM21.E_RESOURCELOCKED; pub const DBSTATUS_E_RESOURCEEXISTS = DBSTATUSENUM21.E_RESOURCEEXISTS; pub const DBSTATUS_E_CANNOTCOMPLETE = DBSTATUSENUM21.E_CANNOTCOMPLETE; pub const DBSTATUS_E_VOLUMENOTFOUND = DBSTATUSENUM21.E_VOLUMENOTFOUND; pub const DBSTATUS_E_OUTOFSPACE = DBSTATUSENUM21.E_OUTOFSPACE; pub const DBSTATUS_S_CANNOTDELETESOURCE = DBSTATUSENUM21.S_CANNOTDELETESOURCE; pub const DBSTATUS_E_READONLY = DBSTATUSENUM21.E_READONLY; pub const DBSTATUS_E_RESOURCEOUTOFSCOPE = DBSTATUSENUM21.E_RESOURCEOUTOFSCOPE; pub const DBSTATUS_S_ALREADYEXISTS = DBSTATUSENUM21.S_ALREADYEXISTS; pub const 
DBBINDURLFLAGENUM = enum(i32) { READ = 1, WRITE = 2, READWRITE = 3, SHARE_DENY_READ = 4, SHARE_DENY_WRITE = 8, SHARE_EXCLUSIVE = 12, SHARE_DENY_NONE = 16, ASYNCHRONOUS = 4096, COLLECTION = 8192, DELAYFETCHSTREAM = 16384, DELAYFETCHCOLUMNS = 32768, RECURSIVE = 4194304, OUTPUT = 8388608, WAITFORINIT = 16777216, OPENIFEXISTS = 33554432, OVERWRITE = 67108864, ISSTRUCTUREDDOCUMENT = 134217728, }; pub const DBBINDURLFLAG_READ = DBBINDURLFLAGENUM.READ; pub const DBBINDURLFLAG_WRITE = DBBINDURLFLAGENUM.WRITE; pub const DBBINDURLFLAG_READWRITE = DBBINDURLFLAGENUM.READWRITE; pub const DBBINDURLFLAG_SHARE_DENY_READ = DBBINDURLFLAGENUM.SHARE_DENY_READ; pub const DBBINDURLFLAG_SHARE_DENY_WRITE = DBBINDURLFLAGENUM.SHARE_DENY_WRITE; pub const DBBINDURLFLAG_SHARE_EXCLUSIVE = DBBINDURLFLAGENUM.SHARE_EXCLUSIVE; pub const DBBINDURLFLAG_SHARE_DENY_NONE = DBBINDURLFLAGENUM.SHARE_DENY_NONE; pub const DBBINDURLFLAG_ASYNCHRONOUS = DBBINDURLFLAGENUM.ASYNCHRONOUS; pub const DBBINDURLFLAG_COLLECTION = DBBINDURLFLAGENUM.COLLECTION; pub const DBBINDURLFLAG_DELAYFETCHSTREAM = DBBINDURLFLAGENUM.DELAYFETCHSTREAM; pub const DBBINDURLFLAG_DELAYFETCHCOLUMNS = DBBINDURLFLAGENUM.DELAYFETCHCOLUMNS; pub const DBBINDURLFLAG_RECURSIVE = DBBINDURLFLAGENUM.RECURSIVE; pub const DBBINDURLFLAG_OUTPUT = DBBINDURLFLAGENUM.OUTPUT; pub const DBBINDURLFLAG_WAITFORINIT = DBBINDURLFLAGENUM.WAITFORINIT; pub const DBBINDURLFLAG_OPENIFEXISTS = DBBINDURLFLAGENUM.OPENIFEXISTS; pub const DBBINDURLFLAG_OVERWRITE = DBBINDURLFLAGENUM.OVERWRITE; pub const DBBINDURLFLAG_ISSTRUCTUREDDOCUMENT = DBBINDURLFLAGENUM.ISSTRUCTUREDDOCUMENT; pub const DBBINDURLSTATUSENUM = enum(i32) { OK = 0, DENYNOTSUPPORTED = 1, DENYTYPENOTSUPPORTED = 4, REDIRECTED = 8, }; pub const DBBINDURLSTATUS_S_OK = DBBINDURLSTATUSENUM.OK; pub const DBBINDURLSTATUS_S_DENYNOTSUPPORTED = DBBINDURLSTATUSENUM.DENYNOTSUPPORTED; pub const DBBINDURLSTATUS_S_DENYTYPENOTSUPPORTED = DBBINDURLSTATUSENUM.DENYTYPENOTSUPPORTED; pub const DBBINDURLSTATUS_S_REDIRECTED = 
DBBINDURLSTATUSENUM.REDIRECTED; pub const DBSTATUSENUM25 = enum(i32) { CANCELED = 27, NOTCOLLECTION = 28, }; pub const DBSTATUS_E_CANCELED = DBSTATUSENUM25.CANCELED; pub const DBSTATUS_E_NOTCOLLECTION = DBSTATUSENUM25.NOTCOLLECTION; pub const DBROWSTATUSENUM = enum(i32) { S_OK = 0, S_MULTIPLECHANGES = 2, S_PENDINGCHANGES = 3, E_CANCELED = 4, E_CANTRELEASE = 6, E_CONCURRENCYVIOLATION = 7, E_DELETED = 8, E_PENDINGINSERT = 9, E_NEWLYINSERTED = 10, E_INTEGRITYVIOLATION = 11, E_INVALID = 12, E_MAXPENDCHANGESEXCEEDED = 13, E_OBJECTOPEN = 14, E_OUTOFMEMORY = 15, E_PERMISSIONDENIED = 16, E_LIMITREACHED = 17, E_SCHEMAVIOLATION = 18, E_FAIL = 19, }; pub const DBROWSTATUS_S_OK = DBROWSTATUSENUM.S_OK; pub const DBROWSTATUS_S_MULTIPLECHANGES = DBROWSTATUSENUM.S_MULTIPLECHANGES; pub const DBROWSTATUS_S_PENDINGCHANGES = DBROWSTATUSENUM.S_PENDINGCHANGES; pub const DBROWSTATUS_E_CANCELED = DBROWSTATUSENUM.E_CANCELED; pub const DBROWSTATUS_E_CANTRELEASE = DBROWSTATUSENUM.E_CANTRELEASE; pub const DBROWSTATUS_E_CONCURRENCYVIOLATION = DBROWSTATUSENUM.E_CONCURRENCYVIOLATION; pub const DBROWSTATUS_E_DELETED = DBROWSTATUSENUM.E_DELETED; pub const DBROWSTATUS_E_PENDINGINSERT = DBROWSTATUSENUM.E_PENDINGINSERT; pub const DBROWSTATUS_E_NEWLYINSERTED = DBROWSTATUSENUM.E_NEWLYINSERTED; pub const DBROWSTATUS_E_INTEGRITYVIOLATION = DBROWSTATUSENUM.E_INTEGRITYVIOLATION; pub const DBROWSTATUS_E_INVALID = DBROWSTATUSENUM.E_INVALID; pub const DBROWSTATUS_E_MAXPENDCHANGESEXCEEDED = DBROWSTATUSENUM.E_MAXPENDCHANGESEXCEEDED; pub const DBROWSTATUS_E_OBJECTOPEN = DBROWSTATUSENUM.E_OBJECTOPEN; pub const DBROWSTATUS_E_OUTOFMEMORY = DBROWSTATUSENUM.E_OUTOFMEMORY; pub const DBROWSTATUS_E_PERMISSIONDENIED = DBROWSTATUSENUM.E_PERMISSIONDENIED; pub const DBROWSTATUS_E_LIMITREACHED = DBROWSTATUSENUM.E_LIMITREACHED; pub const DBROWSTATUS_E_SCHEMAVIOLATION = DBROWSTATUSENUM.E_SCHEMAVIOLATION; pub const DBROWSTATUS_E_FAIL = DBROWSTATUSENUM.E_FAIL; pub const DBROWSTATUSENUM20 = enum(i32) { E = 20, }; pub const 
DBROWSTATUS_S_NOCHANGE = DBROWSTATUSENUM20.E; pub const DBSTATUSENUM26 = enum(i32) { N = 29, }; pub const DBSTATUS_S_ROWSETCOLUMN = DBSTATUSENUM26.N; pub const DBCOLUMNFLAGSENUM = enum(i32) { ISBOOKMARK = 1, MAYDEFER = 2, WRITE = 4, WRITEUNKNOWN = 8, ISFIXEDLENGTH = 16, ISNULLABLE = 32, MAYBENULL = 64, ISLONG = 128, ISROWID = 256, ISROWVER = 512, CACHEDEFERRED = 4096, }; pub const DBCOLUMNFLAGS_ISBOOKMARK = DBCOLUMNFLAGSENUM.ISBOOKMARK; pub const DBCOLUMNFLAGS_MAYDEFER = DBCOLUMNFLAGSENUM.MAYDEFER; pub const DBCOLUMNFLAGS_WRITE = DBCOLUMNFLAGSENUM.WRITE; pub const DBCOLUMNFLAGS_WRITEUNKNOWN = DBCOLUMNFLAGSENUM.WRITEUNKNOWN; pub const DBCOLUMNFLAGS_ISFIXEDLENGTH = DBCOLUMNFLAGSENUM.ISFIXEDLENGTH; pub const DBCOLUMNFLAGS_ISNULLABLE = DBCOLUMNFLAGSENUM.ISNULLABLE; pub const DBCOLUMNFLAGS_MAYBENULL = DBCOLUMNFLAGSENUM.MAYBENULL; pub const DBCOLUMNFLAGS_ISLONG = DBCOLUMNFLAGSENUM.ISLONG; pub const DBCOLUMNFLAGS_ISROWID = DBCOLUMNFLAGSENUM.ISROWID; pub const DBCOLUMNFLAGS_ISROWVER = DBCOLUMNFLAGSENUM.ISROWVER; pub const DBCOLUMNFLAGS_CACHEDEFERRED = DBCOLUMNFLAGSENUM.CACHEDEFERRED; pub const DBCOLUMNFLAGSENUM20 = enum(i32) { SCALEISNEGATIVE = 16384, RESERVED = 32768, }; pub const DBCOLUMNFLAGS_SCALEISNEGATIVE = DBCOLUMNFLAGSENUM20.SCALEISNEGATIVE; pub const DBCOLUMNFLAGS_RESERVED = DBCOLUMNFLAGSENUM20.RESERVED; pub const DBCOLUMNFLAGS15ENUM = enum(i32) { R = 8192, }; pub const DBCOLUMNFLAGS_ISCHAPTER = DBCOLUMNFLAGS15ENUM.R; pub const DBCOLUMNFLAGSENUM21 = enum(i32) { ROWURL = 65536, DEFAULTSTREAM = 131072, COLLECTION = 262144, }; pub const DBCOLUMNFLAGS_ISROWURL = DBCOLUMNFLAGSENUM21.ROWURL; pub const DBCOLUMNFLAGS_ISDEFAULTSTREAM = DBCOLUMNFLAGSENUM21.DEFAULTSTREAM; pub const DBCOLUMNFLAGS_ISCOLLECTION = DBCOLUMNFLAGSENUM21.COLLECTION; pub const DBCOLUMNFLAGSENUM26 = enum(i32) { ISSTREAM = 524288, ISROWSET = 1048576, ISROW = 2097152, ROWSPECIFICCOLUMN = 4194304, }; pub const DBCOLUMNFLAGS_ISSTREAM = DBCOLUMNFLAGSENUM26.ISSTREAM; pub const DBCOLUMNFLAGS_ISROWSET = 
DBCOLUMNFLAGSENUM26.ISROWSET; pub const DBCOLUMNFLAGS_ISROW = DBCOLUMNFLAGSENUM26.ISROW; pub const DBCOLUMNFLAGS_ROWSPECIFICCOLUMN = DBCOLUMNFLAGSENUM26.ROWSPECIFICCOLUMN; pub const DBTABLESTATISTICSTYPE26 = enum(i32) { HISTOGRAM = 1, COLUMN_CARDINALITY = 2, TUPLE_CARDINALITY = 4, }; pub const DBSTAT_HISTOGRAM = DBTABLESTATISTICSTYPE26.HISTOGRAM; pub const DBSTAT_COLUMN_CARDINALITY = DBTABLESTATISTICSTYPE26.COLUMN_CARDINALITY; pub const DBSTAT_TUPLE_CARDINALITY = DBTABLESTATISTICSTYPE26.TUPLE_CARDINALITY; pub const DBBOOKMARK = enum(i32) { INVALID = 0, FIRST = 1, LAST = 2, }; pub const DBBMK_INVALID = DBBOOKMARK.INVALID; pub const DBBMK_FIRST = DBBOOKMARK.FIRST; pub const DBBMK_LAST = DBBOOKMARK.LAST; pub const DBPROPENUM = enum(i32) { ABORTPRESERVE = 2, ACTIVESESSIONS = 3, APPENDONLY = 187, ASYNCTXNABORT = 168, ASYNCTXNCOMMIT = 4, AUTH_CACHE_AUTHINFO = 5, AUTH_ENCRYPT_PASSWORD = 6, AUTH_INTEGRATED = 7, AUTH_MASK_PASSWORD = 8, AUTH_PASSWORD = 9, AUTH_PERSIST_ENCRYPTED = 10, AUTH_PERSIST_SENSITIVE_AUTHINFO = 11, AUTH_USERID = 12, BLOCKINGSTORAGEOBJECTS = 13, BOOKMARKS = 14, BOOKMARKSKIPPED = 15, BOOKMARKTYPE = 16, BYREFACCESSORS = 120, CACHEDEFERRED = 17, CANFETCHBACKWARDS = 18, CANHOLDROWS = 19, CANSCROLLBACKWARDS = 21, CATALOGLOCATION = 22, CATALOGTERM = 23, CATALOGUSAGE = 24, CHANGEINSERTEDROWS = 188, COL_AUTOINCREMENT = 26, COL_DEFAULT = 27, COL_DESCRIPTION = 28, COL_FIXEDLENGTH = 167, COL_NULLABLE = 29, COL_PRIMARYKEY = 30, COL_UNIQUE = 31, COLUMNDEFINITION = 32, COLUMNRESTRICT = 33, COMMANDTIMEOUT = 34, COMMITPRESERVE = 35, CONCATNULLBEHAVIOR = 36, CURRENTCATALOG = 37, DATASOURCENAME = 38, DATASOURCEREADONLY = 39, DBMSNAME = 40, DBMSVER = 41, DEFERRED = 42, DELAYSTORAGEOBJECTS = 43, DSOTHREADMODEL = 169, GROUPBY = 44, HETEROGENEOUSTABLES = 45, IAccessor = 121, IColumnsInfo = 122, IColumnsRowset = 123, IConnectionPointContainer = 124, IConvertType = 194, IRowset = 126, IRowsetChange = 127, IRowsetIdentity = 128, IRowsetIndex = 159, IRowsetInfo = 129, 
IRowsetLocate = 130, IRowsetResynch = 132, IRowsetScroll = 133, IRowsetUpdate = 134, ISupportErrorInfo = 135, ILockBytes = 136, ISequentialStream = 137, IStorage = 138, IStream = 139, IDENTIFIERCASE = 46, IMMOBILEROWS = 47, INDEX_AUTOUPDATE = 48, INDEX_CLUSTERED = 49, INDEX_FILLFACTOR = 50, INDEX_INITIALSIZE = 51, INDEX_NULLCOLLATION = 52, INDEX_NULLS = 53, INDEX_PRIMARYKEY = 54, INDEX_SORTBOOKMARKS = 55, INDEX_TEMPINDEX = 163, INDEX_TYPE = 56, INDEX_UNIQUE = 57, INIT_DATASOURCE = 59, INIT_HWND = 60, INIT_IMPERSONATION_LEVEL = 61, INIT_LCID = 186, INIT_LOCATION = 62, INIT_MODE = 63, INIT_PROMPT = 64, INIT_PROTECTION_LEVEL = 65, INIT_PROVIDERSTRING = 160, INIT_TIMEOUT = 66, LITERALBOOKMARKS = 67, LITERALIDENTITY = 68, MAXINDEXSIZE = 70, MAXOPENROWS = 71, MAXPENDINGROWS = 72, MAXROWS = 73, MAXROWSIZE = 74, MAXROWSIZEINCLUDESBLOB = 75, MAXTABLESINSELECT = 76, MAYWRITECOLUMN = 77, MEMORYUSAGE = 78, MULTIPLEPARAMSETS = 191, MULTIPLERESULTS = 196, MULTIPLESTORAGEOBJECTS = 80, MULTITABLEUPDATE = 81, NOTIFICATIONGRANULARITY = 198, NOTIFICATIONPHASES = 82, NOTIFYCOLUMNSET = 171, NOTIFYROWDELETE = 173, NOTIFYROWFIRSTCHANGE = 174, NOTIFYROWINSERT = 175, NOTIFYROWRESYNCH = 177, NOTIFYROWSETCHANGED = 211, NOTIFYROWSETRELEASE = 178, NOTIFYROWSETFETCHPOSITIONCHANGE = 179, NOTIFYROWUNDOCHANGE = 180, NOTIFYROWUNDODELETE = 181, NOTIFYROWUNDOINSERT = 182, NOTIFYROWUPDATE = 183, NULLCOLLATION = 83, OLEOBJECTS = 84, ORDERBYCOLUMNSINSELECT = 85, ORDEREDBOOKMARKS = 86, OTHERINSERT = 87, OTHERUPDATEDELETE = 88, OUTPUTPARAMETERAVAILABILITY = 184, OWNINSERT = 89, OWNUPDATEDELETE = 90, PERSISTENTIDTYPE = 185, PREPAREABORTBEHAVIOR = 91, PREPARECOMMITBEHAVIOR = 92, PROCEDURETERM = 93, PROVIDERNAME = 96, PROVIDEROLEDBVER = 97, PROVIDERVER = 98, QUICKRESTART = 99, QUOTEDIDENTIFIERCASE = 100, REENTRANTEVENTS = 101, REMOVEDELETED = 102, REPORTMULTIPLECHANGES = 103, RETURNPENDINGINSERTS = 189, ROWRESTRICT = 104, ROWSETCONVERSIONSONCOMMAND = 192, ROWTHREADMODEL = 105, SCHEMATERM = 106, SCHEMAUSAGE = 
107, SERVERCURSOR = 108, SESS_AUTOCOMMITISOLEVELS = 190, SQLSUPPORT = 109, STRONGIDENTITY = 119, STRUCTUREDSTORAGE = 111, SUBQUERIES = 112, SUPPORTEDTXNDDL = 161, SUPPORTEDTXNISOLEVELS = 113, SUPPORTEDTXNISORETAIN = 114, TABLETERM = 115, TBL_TEMPTABLE = 140, TRANSACTEDOBJECT = 116, UPDATABILITY = 117, USERNAME = 118, }; pub const DBPROP_ABORTPRESERVE = DBPROPENUM.ABORTPRESERVE; pub const DBPROP_ACTIVESESSIONS = DBPROPENUM.ACTIVESESSIONS; pub const DBPROP_APPENDONLY = DBPROPENUM.APPENDONLY; pub const DBPROP_ASYNCTXNABORT = DBPROPENUM.ASYNCTXNABORT; pub const DBPROP_ASYNCTXNCOMMIT = DBPROPENUM.ASYNCTXNCOMMIT; pub const DBPROP_AUTH_CACHE_AUTHINFO = DBPROPENUM.AUTH_CACHE_AUTHINFO; pub const DBPROP_AUTH_ENCRYPT_PASSWORD = DBPROPENUM.AUTH_ENCRYPT_PASSWORD; pub const DBPROP_AUTH_INTEGRATED = DBPROPENUM.AUTH_INTEGRATED; pub const DBPROP_AUTH_MASK_PASSWORD = DBPROPENUM.AUTH_MASK_PASSWORD; pub const DBPROP_AUTH_PASSWORD = DBPROPENUM.AUTH_PASSWORD; pub const DBPROP_AUTH_PERSIST_ENCRYPTED = DBPROPENUM.AUTH_PERSIST_ENCRYPTED; pub const DBPROP_AUTH_PERSIST_SENSITIVE_AUTHINFO = DBPROPENUM.AUTH_PERSIST_SENSITIVE_AUTHINFO; pub const DBPROP_AUTH_USERID = DBPROPENUM.AUTH_USERID; pub const DBPROP_BLOCKINGSTORAGEOBJECTS = DBPROPENUM.BLOCKINGSTORAGEOBJECTS; pub const DBPROP_BOOKMARKS = DBPROPENUM.BOOKMARKS; pub const DBPROP_BOOKMARKSKIPPED = DBPROPENUM.BOOKMARKSKIPPED; pub const DBPROP_BOOKMARKTYPE = DBPROPENUM.BOOKMARKTYPE; pub const DBPROP_BYREFACCESSORS = DBPROPENUM.BYREFACCESSORS; pub const DBPROP_CACHEDEFERRED = DBPROPENUM.CACHEDEFERRED; pub const DBPROP_CANFETCHBACKWARDS = DBPROPENUM.CANFETCHBACKWARDS; pub const DBPROP_CANHOLDROWS = DBPROPENUM.CANHOLDROWS; pub const DBPROP_CANSCROLLBACKWARDS = DBPROPENUM.CANSCROLLBACKWARDS; pub const DBPROP_CATALOGLOCATION = DBPROPENUM.CATALOGLOCATION; pub const DBPROP_CATALOGTERM = DBPROPENUM.CATALOGTERM; pub const DBPROP_CATALOGUSAGE = DBPROPENUM.CATALOGUSAGE; pub const DBPROP_CHANGEINSERTEDROWS = DBPROPENUM.CHANGEINSERTEDROWS; pub const 
DBPROP_COL_AUTOINCREMENT = DBPROPENUM.COL_AUTOINCREMENT; pub const DBPROP_COL_DEFAULT = DBPROPENUM.COL_DEFAULT; pub const DBPROP_COL_DESCRIPTION = DBPROPENUM.COL_DESCRIPTION; pub const DBPROP_COL_FIXEDLENGTH = DBPROPENUM.COL_FIXEDLENGTH; pub const DBPROP_COL_NULLABLE = DBPROPENUM.COL_NULLABLE; pub const DBPROP_COL_PRIMARYKEY = DBPROPENUM.COL_PRIMARYKEY; pub const DBPROP_COL_UNIQUE = DBPROPENUM.COL_UNIQUE; pub const DBPROP_COLUMNDEFINITION = DBPROPENUM.COLUMNDEFINITION; pub const DBPROP_COLUMNRESTRICT = DBPROPENUM.COLUMNRESTRICT; pub const DBPROP_COMMANDTIMEOUT = DBPROPENUM.COMMANDTIMEOUT; pub const DBPROP_COMMITPRESERVE = DBPROPENUM.COMMITPRESERVE; pub const DBPROP_CONCATNULLBEHAVIOR = DBPROPENUM.CONCATNULLBEHAVIOR; pub const DBPROP_CURRENTCATALOG = DBPROPENUM.CURRENTCATALOG; pub const DBPROP_DATASOURCENAME = DBPROPENUM.DATASOURCENAME; pub const DBPROP_DATASOURCEREADONLY = DBPROPENUM.DATASOURCEREADONLY; pub const DBPROP_DBMSNAME = DBPROPENUM.DBMSNAME; pub const DBPROP_DBMSVER = DBPROPENUM.DBMSVER; pub const DBPROP_DEFERRED = DBPROPENUM.DEFERRED; pub const DBPROP_DELAYSTORAGEOBJECTS = DBPROPENUM.DELAYSTORAGEOBJECTS; pub const DBPROP_DSOTHREADMODEL = DBPROPENUM.DSOTHREADMODEL; pub const DBPROP_GROUPBY = DBPROPENUM.GROUPBY; pub const DBPROP_HETEROGENEOUSTABLES = DBPROPENUM.HETEROGENEOUSTABLES; pub const DBPROP_IAccessor = DBPROPENUM.IAccessor; pub const DBPROP_IColumnsInfo = DBPROPENUM.IColumnsInfo; pub const DBPROP_IColumnsRowset = DBPROPENUM.IColumnsRowset; pub const DBPROP_IConnectionPointContainer = DBPROPENUM.IConnectionPointContainer; pub const DBPROP_IConvertType = DBPROPENUM.IConvertType; pub const DBPROP_IRowset = DBPROPENUM.IRowset; pub const DBPROP_IRowsetChange = DBPROPENUM.IRowsetChange; pub const DBPROP_IRowsetIdentity = DBPROPENUM.IRowsetIdentity; pub const DBPROP_IRowsetIndex = DBPROPENUM.IRowsetIndex; pub const DBPROP_IRowsetInfo = DBPROPENUM.IRowsetInfo; pub const DBPROP_IRowsetLocate = DBPROPENUM.IRowsetLocate; pub const DBPROP_IRowsetResynch = 
DBPROPENUM.IRowsetResynch; pub const DBPROP_IRowsetScroll = DBPROPENUM.IRowsetScroll; pub const DBPROP_IRowsetUpdate = DBPROPENUM.IRowsetUpdate; pub const DBPROP_ISupportErrorInfo = DBPROPENUM.ISupportErrorInfo; pub const DBPROP_ILockBytes = DBPROPENUM.ILockBytes; pub const DBPROP_ISequentialStream = DBPROPENUM.ISequentialStream; pub const DBPROP_IStorage = DBPROPENUM.IStorage; pub const DBPROP_IStream = DBPROPENUM.IStream; pub const DBPROP_IDENTIFIERCASE = DBPROPENUM.IDENTIFIERCASE; pub const DBPROP_IMMOBILEROWS = DBPROPENUM.IMMOBILEROWS; pub const DBPROP_INDEX_AUTOUPDATE = DBPROPENUM.INDEX_AUTOUPDATE; pub const DBPROP_INDEX_CLUSTERED = DBPROPENUM.INDEX_CLUSTERED; pub const DBPROP_INDEX_FILLFACTOR = DBPROPENUM.INDEX_FILLFACTOR; pub const DBPROP_INDEX_INITIALSIZE = DBPROPENUM.INDEX_INITIALSIZE; pub const DBPROP_INDEX_NULLCOLLATION = DBPROPENUM.INDEX_NULLCOLLATION; pub const DBPROP_INDEX_NULLS = DBPROPENUM.INDEX_NULLS; pub const DBPROP_INDEX_PRIMARYKEY = DBPROPENUM.INDEX_PRIMARYKEY; pub const DBPROP_INDEX_SORTBOOKMARKS = DBPROPENUM.INDEX_SORTBOOKMARKS; pub const DBPROP_INDEX_TEMPINDEX = DBPROPENUM.INDEX_TEMPINDEX; pub const DBPROP_INDEX_TYPE = DBPROPENUM.INDEX_TYPE; pub const DBPROP_INDEX_UNIQUE = DBPROPENUM.INDEX_UNIQUE; pub const DBPROP_INIT_DATASOURCE = DBPROPENUM.INIT_DATASOURCE; pub const DBPROP_INIT_HWND = DBPROPENUM.INIT_HWND; pub const DBPROP_INIT_IMPERSONATION_LEVEL = DBPROPENUM.INIT_IMPERSONATION_LEVEL; pub const DBPROP_INIT_LCID = DBPROPENUM.INIT_LCID; pub const DBPROP_INIT_LOCATION = DBPROPENUM.INIT_LOCATION; pub const DBPROP_INIT_MODE = DBPROPENUM.INIT_MODE; pub const DBPROP_INIT_PROMPT = DBPROPENUM.INIT_PROMPT; pub const DBPROP_INIT_PROTECTION_LEVEL = DBPROPENUM.INIT_PROTECTION_LEVEL; pub const DBPROP_INIT_PROVIDERSTRING = DBPROPENUM.INIT_PROVIDERSTRING; pub const DBPROP_INIT_TIMEOUT = DBPROPENUM.INIT_TIMEOUT; pub const DBPROP_LITERALBOOKMARKS = DBPROPENUM.LITERALBOOKMARKS; pub const DBPROP_LITERALIDENTITY = DBPROPENUM.LITERALIDENTITY; pub const 
DBPROP_MAXINDEXSIZE = DBPROPENUM.MAXINDEXSIZE; pub const DBPROP_MAXOPENROWS = DBPROPENUM.MAXOPENROWS; pub const DBPROP_MAXPENDINGROWS = DBPROPENUM.MAXPENDINGROWS; pub const DBPROP_MAXROWS = DBPROPENUM.MAXROWS; pub const DBPROP_MAXROWSIZE = DBPROPENUM.MAXROWSIZE; pub const DBPROP_MAXROWSIZEINCLUDESBLOB = DBPROPENUM.MAXROWSIZEINCLUDESBLOB; pub const DBPROP_MAXTABLESINSELECT = DBPROPENUM.MAXTABLESINSELECT; pub const DBPROP_MAYWRITECOLUMN = DBPROPENUM.MAYWRITECOLUMN; pub const DBPROP_MEMORYUSAGE = DBPROPENUM.MEMORYUSAGE; pub const DBPROP_MULTIPLEPARAMSETS = DBPROPENUM.MULTIPLEPARAMSETS; pub const DBPROP_MULTIPLERESULTS = DBPROPENUM.MULTIPLERESULTS; pub const DBPROP_MULTIPLESTORAGEOBJECTS = DBPROPENUM.MULTIPLESTORAGEOBJECTS; pub const DBPROP_MULTITABLEUPDATE = DBPROPENUM.MULTITABLEUPDATE; pub const DBPROP_NOTIFICATIONGRANULARITY = DBPROPENUM.NOTIFICATIONGRANULARITY; pub const DBPROP_NOTIFICATIONPHASES = DBPROPENUM.NOTIFICATIONPHASES; pub const DBPROP_NOTIFYCOLUMNSET = DBPROPENUM.NOTIFYCOLUMNSET; pub const DBPROP_NOTIFYROWDELETE = DBPROPENUM.NOTIFYROWDELETE; pub const DBPROP_NOTIFYROWFIRSTCHANGE = DBPROPENUM.NOTIFYROWFIRSTCHANGE; pub const DBPROP_NOTIFYROWINSERT = DBPROPENUM.NOTIFYROWINSERT; pub const DBPROP_NOTIFYROWRESYNCH = DBPROPENUM.NOTIFYROWRESYNCH; pub const DBPROP_NOTIFYROWSETCHANGED = DBPROPENUM.NOTIFYROWSETCHANGED; pub const DBPROP_NOTIFYROWSETRELEASE = DBPROPENUM.NOTIFYROWSETRELEASE; pub const DBPROP_NOTIFYROWSETFETCHPOSITIONCHANGE = DBPROPENUM.NOTIFYROWSETFETCHPOSITIONCHANGE; pub const DBPROP_NOTIFYROWUNDOCHANGE = DBPROPENUM.NOTIFYROWUNDOCHANGE; pub const DBPROP_NOTIFYROWUNDODELETE = DBPROPENUM.NOTIFYROWUNDODELETE; pub const DBPROP_NOTIFYROWUNDOINSERT = DBPROPENUM.NOTIFYROWUNDOINSERT; pub const DBPROP_NOTIFYROWUPDATE = DBPROPENUM.NOTIFYROWUPDATE; pub const DBPROP_NULLCOLLATION = DBPROPENUM.NULLCOLLATION; pub const DBPROP_OLEOBJECTS = DBPROPENUM.OLEOBJECTS; pub const DBPROP_ORDERBYCOLUMNSINSELECT = DBPROPENUM.ORDERBYCOLUMNSINSELECT; pub const 
DBPROP_ORDEREDBOOKMARKS = DBPROPENUM.ORDEREDBOOKMARKS; pub const DBPROP_OTHERINSERT = DBPROPENUM.OTHERINSERT; pub const DBPROP_OTHERUPDATEDELETE = DBPROPENUM.OTHERUPDATEDELETE; pub const DBPROP_OUTPUTPARAMETERAVAILABILITY = DBPROPENUM.OUTPUTPARAMETERAVAILABILITY; pub const DBPROP_OWNINSERT = DBPROPENUM.OWNINSERT; pub const DBPROP_OWNUPDATEDELETE = DBPROPENUM.OWNUPDATEDELETE; pub const DBPROP_PERSISTENTIDTYPE = DBPROPENUM.PERSISTENTIDTYPE; pub const DBPROP_PREPAREABORTBEHAVIOR = DBPROPENUM.PREPAREABORTBEHAVIOR; pub const DBPROP_PREPARECOMMITBEHAVIOR = DBPROPENUM.PREPARECOMMITBEHAVIOR; pub const DBPROP_PROCEDURETERM = DBPROPENUM.PROCEDURETERM; pub const DBPROP_PROVIDERNAME = DBPROPENUM.PROVIDERNAME; pub const DBPROP_PROVIDEROLEDBVER = DBPROPENUM.PROVIDEROLEDBVER; pub const DBPROP_PROVIDERVER = DBPROPENUM.PROVIDERVER; pub const DBPROP_QUICKRESTART = DBPROPENUM.QUICKRESTART; pub const DBPROP_QUOTEDIDENTIFIERCASE = DBPROPENUM.QUOTEDIDENTIFIERCASE; pub const DBPROP_REENTRANTEVENTS = DBPROPENUM.REENTRANTEVENTS; pub const DBPROP_REMOVEDELETED = DBPROPENUM.REMOVEDELETED; pub const DBPROP_REPORTMULTIPLECHANGES = DBPROPENUM.REPORTMULTIPLECHANGES; pub const DBPROP_RETURNPENDINGINSERTS = DBPROPENUM.RETURNPENDINGINSERTS; pub const DBPROP_ROWRESTRICT = DBPROPENUM.ROWRESTRICT; pub const DBPROP_ROWSETCONVERSIONSONCOMMAND = DBPROPENUM.ROWSETCONVERSIONSONCOMMAND; pub const DBPROP_ROWTHREADMODEL = DBPROPENUM.ROWTHREADMODEL; pub const DBPROP_SCHEMATERM = DBPROPENUM.SCHEMATERM; pub const DBPROP_SCHEMAUSAGE = DBPROPENUM.SCHEMAUSAGE; pub const DBPROP_SERVERCURSOR = DBPROPENUM.SERVERCURSOR; pub const DBPROP_SESS_AUTOCOMMITISOLEVELS = DBPROPENUM.SESS_AUTOCOMMITISOLEVELS; pub const DBPROP_SQLSUPPORT = DBPROPENUM.SQLSUPPORT; pub const DBPROP_STRONGIDENTITY = DBPROPENUM.STRONGIDENTITY; pub const DBPROP_STRUCTUREDSTORAGE = DBPROPENUM.STRUCTUREDSTORAGE; pub const DBPROP_SUBQUERIES = DBPROPENUM.SUBQUERIES; pub const DBPROP_SUPPORTEDTXNDDL = DBPROPENUM.SUPPORTEDTXNDDL; pub const 
DBPROP_SUPPORTEDTXNISOLEVELS = DBPROPENUM.SUPPORTEDTXNISOLEVELS; pub const DBPROP_SUPPORTEDTXNISORETAIN = DBPROPENUM.SUPPORTEDTXNISORETAIN; pub const DBPROP_TABLETERM = DBPROPENUM.TABLETERM; pub const DBPROP_TBL_TEMPTABLE = DBPROPENUM.TBL_TEMPTABLE; pub const DBPROP_TRANSACTEDOBJECT = DBPROPENUM.TRANSACTEDOBJECT; pub const DBPROP_UPDATABILITY = DBPROPENUM.UPDATABILITY; pub const DBPROP_USERNAME = DBPROPENUM.USERNAME; pub const DBPROPENUM15 = enum(i32) { FILTERCOMPAREOPS = 209, FINDCOMPAREOPS = 210, IChapteredRowset = 202, IDBAsynchStatus = 203, IRowsetFind = 204, IRowsetView = 212, IViewChapter = 213, IViewFilter = 214, IViewRowset = 215, IViewSort = 216, INIT_ASYNCH = 200, MAXOPENCHAPTERS = 199, MAXORSINFILTER = 205, MAXSORTCOLUMNS = 206, ROWSET_ASYNCH = 201, SORTONINDEX = 207, }; pub const DBPROP_FILTERCOMPAREOPS = DBPROPENUM15.FILTERCOMPAREOPS; pub const DBPROP_FINDCOMPAREOPS = DBPROPENUM15.FINDCOMPAREOPS; pub const DBPROP_IChapteredRowset = DBPROPENUM15.IChapteredRowset; pub const DBPROP_IDBAsynchStatus = DBPROPENUM15.IDBAsynchStatus; pub const DBPROP_IRowsetFind = DBPROPENUM15.IRowsetFind; pub const DBPROP_IRowsetView = DBPROPENUM15.IRowsetView; pub const DBPROP_IViewChapter = DBPROPENUM15.IViewChapter; pub const DBPROP_IViewFilter = DBPROPENUM15.IViewFilter; pub const DBPROP_IViewRowset = DBPROPENUM15.IViewRowset; pub const DBPROP_IViewSort = DBPROPENUM15.IViewSort; pub const DBPROP_INIT_ASYNCH = DBPROPENUM15.INIT_ASYNCH; pub const DBPROP_MAXOPENCHAPTERS = DBPROPENUM15.MAXOPENCHAPTERS; pub const DBPROP_MAXORSINFILTER = DBPROPENUM15.MAXORSINFILTER; pub const DBPROP_MAXSORTCOLUMNS = DBPROPENUM15.MAXSORTCOLUMNS; pub const DBPROP_ROWSET_ASYNCH = DBPROPENUM15.ROWSET_ASYNCH; pub const DBPROP_SORTONINDEX = DBPROPENUM15.SORTONINDEX; pub const DBPROPENUM20 = enum(i32) { DBPROP_IMultipleResults = 217, DBPROP_DATASOURCE_TYPE = 251, MDPROP_AXES = 252, MDPROP_FLATTENING_SUPPORT = 253, MDPROP_MDX_JOINCUBES = 254, MDPROP_NAMED_LEVELS = 255, MDPROP_RANGEROWSET = 256, 
MDPROP_MDX_SLICER = 218, MDPROP_MDX_CUBEQUALIFICATION = 219, MDPROP_MDX_OUTERREFERENCE = 220, MDPROP_MDX_QUERYBYPROPERTY = 221, MDPROP_MDX_CASESUPPORT = 222, MDPROP_MDX_STRING_COMPOP = 224, MDPROP_MDX_DESCFLAGS = 225, MDPROP_MDX_SET_FUNCTIONS = 226, MDPROP_MDX_MEMBER_FUNCTIONS = 227, MDPROP_MDX_NUMERIC_FUNCTIONS = 228, MDPROP_MDX_FORMULAS = 229, MDPROP_AGGREGATECELL_UPDATE = 230, // MDPROP_MDX_AGGREGATECELL_UPDATE = 230, this enum value conflicts with MDPROP_AGGREGATECELL_UPDATE MDPROP_MDX_OBJQUALIFICATION = 261, MDPROP_MDX_NONMEASURE_EXPRESSIONS = 262, DBPROP_ACCESSORDER = 231, DBPROP_BOOKMARKINFO = 232, DBPROP_INIT_CATALOG = 233, DBPROP_ROW_BULKOPS = 234, DBPROP_PROVIDERFRIENDLYNAME = 235, DBPROP_LOCKMODE = 236, DBPROP_MULTIPLECONNECTIONS = 237, DBPROP_UNIQUEROWS = 238, DBPROP_SERVERDATAONINSERT = 239, DBPROP_STORAGEFLAGS = 240, DBPROP_CONNECTIONSTATUS = 244, DBPROP_ALTERCOLUMN = 245, DBPROP_COLUMNLCID = 246, DBPROP_RESETDATASOURCE = 247, DBPROP_INIT_OLEDBSERVICES = 248, DBPROP_IRowsetRefresh = 249, DBPROP_SERVERNAME = 250, DBPROP_IParentRowset = 257, DBPROP_HIDDENCOLUMNS = 258, DBPROP_PROVIDERMEMORY = 259, DBPROP_CLIENTCURSOR = 260, }; pub const DBPROP_IMultipleResults = DBPROPENUM20.DBPROP_IMultipleResults; pub const DBPROP_DATASOURCE_TYPE = DBPROPENUM20.DBPROP_DATASOURCE_TYPE; pub const MDPROP_AXES = DBPROPENUM20.MDPROP_AXES; pub const MDPROP_FLATTENING_SUPPORT = DBPROPENUM20.MDPROP_FLATTENING_SUPPORT; pub const MDPROP_MDX_JOINCUBES = DBPROPENUM20.MDPROP_MDX_JOINCUBES; pub const MDPROP_NAMED_LEVELS = DBPROPENUM20.MDPROP_NAMED_LEVELS; pub const MDPROP_RANGEROWSET = DBPROPENUM20.MDPROP_RANGEROWSET; pub const MDPROP_MDX_SLICER = DBPROPENUM20.MDPROP_MDX_SLICER; pub const MDPROP_MDX_CUBEQUALIFICATION = DBPROPENUM20.MDPROP_MDX_CUBEQUALIFICATION; pub const MDPROP_MDX_OUTERREFERENCE = DBPROPENUM20.MDPROP_MDX_OUTERREFERENCE; pub const MDPROP_MDX_QUERYBYPROPERTY = DBPROPENUM20.MDPROP_MDX_QUERYBYPROPERTY; pub const MDPROP_MDX_CASESUPPORT = 
DBPROPENUM20.MDPROP_MDX_CASESUPPORT; pub const MDPROP_MDX_STRING_COMPOP = DBPROPENUM20.MDPROP_MDX_STRING_COMPOP; pub const MDPROP_MDX_DESCFLAGS = DBPROPENUM20.MDPROP_MDX_DESCFLAGS; pub const MDPROP_MDX_SET_FUNCTIONS = DBPROPENUM20.MDPROP_MDX_SET_FUNCTIONS; pub const MDPROP_MDX_MEMBER_FUNCTIONS = DBPROPENUM20.MDPROP_MDX_MEMBER_FUNCTIONS; pub const MDPROP_MDX_NUMERIC_FUNCTIONS = DBPROPENUM20.MDPROP_MDX_NUMERIC_FUNCTIONS; pub const MDPROP_MDX_FORMULAS = DBPROPENUM20.MDPROP_MDX_FORMULAS; pub const MDPROP_AGGREGATECELL_UPDATE = DBPROPENUM20.MDPROP_AGGREGATECELL_UPDATE; pub const MDPROP_MDX_AGGREGATECELL_UPDATE = DBPROPENUM20.MDPROP_AGGREGATECELL_UPDATE; pub const MDPROP_MDX_OBJQUALIFICATION = DBPROPENUM20.MDPROP_MDX_OBJQUALIFICATION; pub const MDPROP_MDX_NONMEASURE_EXPRESSIONS = DBPROPENUM20.MDPROP_MDX_NONMEASURE_EXPRESSIONS; pub const DBPROP_ACCESSORDER = DBPROPENUM20.DBPROP_ACCESSORDER; pub const DBPROP_BOOKMARKINFO = DBPROPENUM20.DBPROP_BOOKMARKINFO; pub const DBPROP_INIT_CATALOG = DBPROPENUM20.DBPROP_INIT_CATALOG; pub const DBPROP_ROW_BULKOPS = DBPROPENUM20.DBPROP_ROW_BULKOPS; pub const DBPROP_PROVIDERFRIENDLYNAME = DBPROPENUM20.DBPROP_PROVIDERFRIENDLYNAME; pub const DBPROP_LOCKMODE = DBPROPENUM20.DBPROP_LOCKMODE; pub const DBPROP_MULTIPLECONNECTIONS = DBPROPENUM20.DBPROP_MULTIPLECONNECTIONS; pub const DBPROP_UNIQUEROWS = DBPROPENUM20.DBPROP_UNIQUEROWS; pub const DBPROP_SERVERDATAONINSERT = DBPROPENUM20.DBPROP_SERVERDATAONINSERT; pub const DBPROP_STORAGEFLAGS = DBPROPENUM20.DBPROP_STORAGEFLAGS; pub const DBPROP_CONNECTIONSTATUS = DBPROPENUM20.DBPROP_CONNECTIONSTATUS; pub const DBPROP_ALTERCOLUMN = DBPROPENUM20.DBPROP_ALTERCOLUMN; pub const DBPROP_COLUMNLCID = DBPROPENUM20.DBPROP_COLUMNLCID; pub const DBPROP_RESETDATASOURCE = DBPROPENUM20.DBPROP_RESETDATASOURCE; pub const DBPROP_INIT_OLEDBSERVICES = DBPROPENUM20.DBPROP_INIT_OLEDBSERVICES; pub const DBPROP_IRowsetRefresh = DBPROPENUM20.DBPROP_IRowsetRefresh; pub const DBPROP_SERVERNAME = 
DBPROPENUM20.DBPROP_SERVERNAME; pub const DBPROP_IParentRowset = DBPROPENUM20.DBPROP_IParentRowset; pub const DBPROP_HIDDENCOLUMNS = DBPROPENUM20.DBPROP_HIDDENCOLUMNS; pub const DBPROP_PROVIDERMEMORY = DBPROPENUM20.DBPROP_PROVIDERMEMORY; pub const DBPROP_CLIENTCURSOR = DBPROPENUM20.DBPROP_CLIENTCURSOR; pub const DBPROPENUM21 = enum(i32) { TRUSTEE_USERNAME = 241, TRUSTEE_AUTHENTICATION = 242, TRUSTEE_NEWAUTHENTICATION = 243, IRow = 263, IRowChange = 264, IRowSchemaChange = 265, IGetRow = 266, IScopedOperations = 267, IBindResource = 268, ICreateRow = 269, INIT_BINDFLAGS = 270, INIT_LOCKOWNER = 271, GENERATEURL = 273, IDBBinderProperties = 274, IColumnsInfo2 = 275, IRegisterProvider = 276, IGetSession = 277, IGetSourceRow = 278, IRowsetCurrentIndex = 279, OPENROWSETSUPPORT = 280, COL_ISLONG = 281, }; pub const DBPROP_TRUSTEE_USERNAME = DBPROPENUM21.TRUSTEE_USERNAME; pub const DBPROP_TRUSTEE_AUTHENTICATION = DBPROPENUM21.TRUSTEE_AUTHENTICATION; pub const DBPROP_TRUSTEE_NEWAUTHENTICATION = DBPROPENUM21.TRUSTEE_NEWAUTHENTICATION; pub const DBPROP_IRow = DBPROPENUM21.IRow; pub const DBPROP_IRowChange = DBPROPENUM21.IRowChange; pub const DBPROP_IRowSchemaChange = DBPROPENUM21.IRowSchemaChange; pub const DBPROP_IGetRow = DBPROPENUM21.IGetRow; pub const DBPROP_IScopedOperations = DBPROPENUM21.IScopedOperations; pub const DBPROP_IBindResource = DBPROPENUM21.IBindResource; pub const DBPROP_ICreateRow = DBPROPENUM21.ICreateRow; pub const DBPROP_INIT_BINDFLAGS = DBPROPENUM21.INIT_BINDFLAGS; pub const DBPROP_INIT_LOCKOWNER = DBPROPENUM21.INIT_LOCKOWNER; pub const DBPROP_GENERATEURL = DBPROPENUM21.GENERATEURL; pub const DBPROP_IDBBinderProperties = DBPROPENUM21.IDBBinderProperties; pub const DBPROP_IColumnsInfo2 = DBPROPENUM21.IColumnsInfo2; pub const DBPROP_IRegisterProvider = DBPROPENUM21.IRegisterProvider; pub const DBPROP_IGetSession = DBPROPENUM21.IGetSession; pub const DBPROP_IGetSourceRow = DBPROPENUM21.IGetSourceRow; pub const DBPROP_IRowsetCurrentIndex = 
DBPROPENUM21.IRowsetCurrentIndex;
pub const DBPROP_OPENROWSETSUPPORT = DBPROPENUM21.OPENROWSETSUPPORT;
pub const DBPROP_COL_ISLONG = DBPROPENUM21.COL_ISLONG;

/// Property IDs added in OLE DB 2.5 (continues the DBPROPENUM numbering at 282).
pub const DBPROPENUM25 = enum(i32) {
    COL_SEED = 282,
    COL_INCREMENT = 283,
    INIT_GENERALTIMEOUT = 284,
    COMSERVICES = 285,
};
// Aliases reproducing the original C constant names for the enum members above.
pub const DBPROP_COL_SEED = DBPROPENUM25.COL_SEED;
pub const DBPROP_COL_INCREMENT = DBPROPENUM25.COL_INCREMENT;
pub const DBPROP_INIT_GENERALTIMEOUT = DBPROPENUM25.INIT_GENERALTIMEOUT;
pub const DBPROP_COMSERVICES = DBPROPENUM25.COMSERVICES;

/// Property IDs added in OLE DB 2.6 (282..293 range continues; note the gap 289-290).
pub const DBPROPENUM26 = enum(i32) {
    DBPROP_OUTPUTSTREAM = 286,
    DBPROP_OUTPUTENCODING = 287,
    DBPROP_TABLESTATISTICS = 288,
    DBPROP_SKIPROWCOUNTRESULTS = 291,
    DBPROP_IRowsetBookmark = 292,
    MDPROP_VISUALMODE = 293,
};
pub const DBPROP_OUTPUTSTREAM = DBPROPENUM26.DBPROP_OUTPUTSTREAM;
pub const DBPROP_OUTPUTENCODING = DBPROPENUM26.DBPROP_OUTPUTENCODING;
pub const DBPROP_TABLESTATISTICS = DBPROPENUM26.DBPROP_TABLESTATISTICS;
pub const DBPROP_SKIPROWCOUNTRESULTS = DBPROPENUM26.DBPROP_SKIPROWCOUNTRESULTS;
pub const DBPROP_IRowsetBookmark = DBPROPENUM26.DBPROP_IRowsetBookmark;
pub const MDPROP_VISUALMODE = DBPROPENUM26.MDPROP_VISUALMODE;

/// Parameter direction/attribute flags (bit values — combinable as a mask).
pub const DBPARAMFLAGSENUM = enum(i32) {
    INPUT = 1,
    OUTPUT = 2,
    SIGNED = 16,
    NULLABLE = 64,
    LONG = 128,
};
pub const DBPARAMFLAGS_ISINPUT = DBPARAMFLAGSENUM.INPUT;
pub const DBPARAMFLAGS_ISOUTPUT = DBPARAMFLAGSENUM.OUTPUT;
pub const DBPARAMFLAGS_ISSIGNED = DBPARAMFLAGSENUM.SIGNED;
pub const DBPARAMFLAGS_ISNULLABLE = DBPARAMFLAGSENUM.NULLABLE;
pub const DBPARAMFLAGS_ISLONG = DBPARAMFLAGSENUM.LONG;

// NOTE(review): the single-letter members (E/W/M) in the small enums below look like
// generator-assigned placeholder names; the meaningful C names live in the aliases.
pub const DBPARAMFLAGSENUM20 = enum(i32) {
    E = 256,
};
pub const DBPARAMFLAGS_SCALEISNEGATIVE = DBPARAMFLAGSENUM20.E;

/// Flags describing which object class an OLE DB property applies to
/// (power-of-two bit values — combinable as a mask).
pub const DBPROPFLAGSENUM = enum(i32) {
    NOTSUPPORTED = 0,
    COLUMN = 1,
    DATASOURCE = 2,
    DATASOURCECREATE = 4,
    DATASOURCEINFO = 8,
    DBINIT = 16,
    INDEX = 32,
    ROWSET = 64,
    TABLE = 128,
    COLUMNOK = 256,
    READ = 512,
    WRITE = 1024,
    REQUIRED = 2048,
    SESSION = 4096,
};
pub const DBPROPFLAGS_NOTSUPPORTED = DBPROPFLAGSENUM.NOTSUPPORTED;
pub const DBPROPFLAGS_COLUMN = DBPROPFLAGSENUM.COLUMN;
pub const DBPROPFLAGS_DATASOURCE = DBPROPFLAGSENUM.DATASOURCE;
pub const DBPROPFLAGS_DATASOURCECREATE = DBPROPFLAGSENUM.DATASOURCECREATE;
pub const DBPROPFLAGS_DATASOURCEINFO = DBPROPFLAGSENUM.DATASOURCEINFO;
pub const DBPROPFLAGS_DBINIT = DBPROPFLAGSENUM.DBINIT;
pub const DBPROPFLAGS_INDEX = DBPROPFLAGSENUM.INDEX;
pub const DBPROPFLAGS_ROWSET = DBPROPFLAGSENUM.ROWSET;
pub const DBPROPFLAGS_TABLE = DBPROPFLAGSENUM.TABLE;
pub const DBPROPFLAGS_COLUMNOK = DBPROPFLAGSENUM.COLUMNOK;
pub const DBPROPFLAGS_READ = DBPROPFLAGSENUM.READ;
pub const DBPROPFLAGS_WRITE = DBPROPFLAGSENUM.WRITE;
pub const DBPROPFLAGS_REQUIRED = DBPROPFLAGSENUM.REQUIRED;
pub const DBPROPFLAGS_SESSION = DBPROPFLAGSENUM.SESSION;

// Later-version additions continue the DBPROPFLAGS bit series (8192/16384/32768).
pub const DBPROPFLAGSENUM21 = enum(i32) {
    E = 8192,
};
pub const DBPROPFLAGS_TRUSTEE = DBPROPFLAGSENUM21.E;
pub const DBPROPFLAGSENUM25 = enum(i32) {
    W = 16384,
};
pub const DBPROPFLAGS_VIEW = DBPROPFLAGSENUM25.W;
pub const DBPROPFLAGSENUM26 = enum(i32) {
    M = 32768,
};
pub const DBPROPFLAGS_STREAM = DBPROPFLAGSENUM26.M;

/// Options used when setting a property; OPTIONAL shares value 1 with SETIFCHEAP,
/// which Zig enums forbid, so it is expressed as an alias instead.
pub const DBPROPOPTIONSENUM = enum(i32) {
    REQUIRED = 0,
    SETIFCHEAP = 1,
    // OPTIONAL = 1, this enum value conflicts with SETIFCHEAP
};
pub const DBPROPOPTIONS_REQUIRED = DBPROPOPTIONSENUM.REQUIRED;
pub const DBPROPOPTIONS_SETIFCHEAP = DBPROPOPTIONSENUM.SETIFCHEAP;
pub const DBPROPOPTIONS_OPTIONAL = DBPROPOPTIONSENUM.SETIFCHEAP;

/// Status returned for each property after a get/set attempt.
pub const DBPROPSTATUSENUM = enum(i32) {
    OK = 0,
    NOTSUPPORTED = 1,
    BADVALUE = 2,
    BADOPTION = 3,
    BADCOLUMN = 4,
    NOTALLSETTABLE = 5,
    NOTSETTABLE = 6,
    NOTSET = 7,
    CONFLICTING = 8,
};
pub const DBPROPSTATUS_OK = DBPROPSTATUSENUM.OK;
pub const DBPROPSTATUS_NOTSUPPORTED = DBPROPSTATUSENUM.NOTSUPPORTED;
pub const DBPROPSTATUS_BADVALUE = DBPROPSTATUSENUM.BADVALUE;
pub const DBPROPSTATUS_BADOPTION = DBPROPSTATUSENUM.BADOPTION;
pub const DBPROPSTATUS_BADCOLUMN = DBPROPSTATUSENUM.BADCOLUMN;
pub const DBPROPSTATUS_NOTALLSETTABLE = DBPROPSTATUSENUM.NOTALLSETTABLE;
pub const DBPROPSTATUS_NOTSETTABLE = DBPROPSTATUSENUM.NOTSETTABLE;
pub const DBPROPSTATUS_NOTSET = DBPROPSTATUSENUM.NOTSET;
pub const DBPROPSTATUS_CONFLICTING = DBPROPSTATUSENUM.CONFLICTING;
pub const DBPROPSTATUSENUM21 = enum(i32) {
    E = 9,
};
pub const DBPROPSTATUS_NOTAVAILABLE = DBPROPSTATUSENUM21.E;

/// Index column sort order.
pub const DBINDEX_COL_ORDERENUM = enum(i32) {
    ASC = 0,
    DESC = 1,
};
pub const DBINDEX_COL_ORDER_ASC = DBINDEX_COL_ORDERENUM.ASC;
pub const DBINDEX_COL_ORDER_DESC = DBINDEX_COL_ORDERENUM.DESC;

/// Bit flags indicating which fields of a column description are valid.
pub const DBCOLUMNDESCFLAGSENUM = enum(i32) {
    TYPENAME = 1,
    ITYPEINFO = 2,
    PROPERTIES = 4,
    CLSID = 8,
    COLSIZE = 16,
    DBCID = 32,
    WTYPE = 64,
    PRECISION = 128,
    SCALE = 256,
};
pub const DBCOLUMNDESCFLAGS_TYPENAME = DBCOLUMNDESCFLAGSENUM.TYPENAME;
pub const DBCOLUMNDESCFLAGS_ITYPEINFO = DBCOLUMNDESCFLAGSENUM.ITYPEINFO;
pub const DBCOLUMNDESCFLAGS_PROPERTIES = DBCOLUMNDESCFLAGSENUM.PROPERTIES;
pub const DBCOLUMNDESCFLAGS_CLSID = DBCOLUMNDESCFLAGSENUM.CLSID;
pub const DBCOLUMNDESCFLAGS_COLSIZE = DBCOLUMNDESCFLAGSENUM.COLSIZE;
pub const DBCOLUMNDESCFLAGS_DBCID = DBCOLUMNDESCFLAGSENUM.DBCID;
pub const DBCOLUMNDESCFLAGS_WTYPE = DBCOLUMNDESCFLAGSENUM.WTYPE;
pub const DBCOLUMNDESCFLAGS_PRECISION = DBCOLUMNDESCFLAGSENUM.PRECISION;
pub const DBCOLUMNDESCFLAGS_SCALE = DBCOLUMNDESCFLAGSENUM.SCALE;

/// Phases of a notification event.
pub const DBEVENTPHASEENUM = enum(i32) {
    OKTODO = 0,
    ABOUTTODO = 1,
    SYNCHAFTER = 2,
    FAILEDTODO = 3,
    DIDEVENT = 4,
};
pub const DBEVENTPHASE_OKTODO = DBEVENTPHASEENUM.OKTODO;
pub const DBEVENTPHASE_ABOUTTODO = DBEVENTPHASEENUM.ABOUTTODO;
pub const DBEVENTPHASE_SYNCHAFTER = DBEVENTPHASEENUM.SYNCHAFTER;
pub const DBEVENTPHASE_FAILEDTODO = DBEVENTPHASEENUM.FAILEDTODO;
pub const DBEVENTPHASE_DIDEVENT = DBEVENTPHASEENUM.DIDEVENT;

/// Reasons for a rowset/row/column notification.
pub const DBREASONENUM = enum(i32) {
    ROWSET_FETCHPOSITIONCHANGE = 0,
    ROWSET_RELEASE = 1,
    COLUMN_SET = 2,
    COLUMN_RECALCULATED = 3,
    ROW_ACTIVATE = 4,
    ROW_RELEASE = 5,
    ROW_DELETE = 6,
    ROW_FIRSTCHANGE = 7,
    ROW_INSERT = 8,
    ROW_RESYNCH = 9,
    ROW_UNDOCHANGE = 10,
    ROW_UNDOINSERT = 11,
    ROW_UNDODELETE = 12,
    ROW_UPDATE = 13,
    ROWSET_CHANGED = 14,
};
pub const DBREASON_ROWSET_FETCHPOSITIONCHANGE = DBREASONENUM.ROWSET_FETCHPOSITIONCHANGE;
pub const DBREASON_ROWSET_RELEASE = DBREASONENUM.ROWSET_RELEASE;
pub const DBREASON_COLUMN_SET = DBREASONENUM.COLUMN_SET;
pub const DBREASON_COLUMN_RECALCULATED = DBREASONENUM.COLUMN_RECALCULATED;
pub const DBREASON_ROW_ACTIVATE = DBREASONENUM.ROW_ACTIVATE;
pub const DBREASON_ROW_RELEASE = DBREASONENUM.ROW_RELEASE;
pub const DBREASON_ROW_DELETE = DBREASONENUM.ROW_DELETE;
pub const DBREASON_ROW_FIRSTCHANGE = DBREASONENUM.ROW_FIRSTCHANGE;
pub const DBREASON_ROW_INSERT = DBREASONENUM.ROW_INSERT;
pub const DBREASON_ROW_RESYNCH = DBREASONENUM.ROW_RESYNCH;
pub const DBREASON_ROW_UNDOCHANGE = DBREASONENUM.ROW_UNDOCHANGE;
pub const DBREASON_ROW_UNDOINSERT = DBREASONENUM.ROW_UNDOINSERT;
pub const DBREASON_ROW_UNDODELETE = DBREASONENUM.ROW_UNDODELETE;
pub const DBREASON_ROW_UPDATE = DBREASONENUM.ROW_UPDATE;
pub const DBREASON_ROWSET_CHANGED = DBREASONENUM.ROWSET_CHANGED;

/// Notification reasons added in a later spec revision (values continue at 15).
pub const DBREASONENUM15 = enum(i32) {
    POSITION_CHANGED = 15,
    POSITION_CHAPTERCHANGED = 16,
    POSITION_CLEARED = 17,
    _ASYNCHINSERT = 18,
};
pub const DBREASON_ROWPOSITION_CHANGED = DBREASONENUM15.POSITION_CHANGED;
pub const DBREASON_ROWPOSITION_CHAPTERCHANGED = DBREASONENUM15.POSITION_CHAPTERCHANGED;
pub const DBREASON_ROWPOSITION_CLEARED = DBREASONENUM15.POSITION_CLEARED;
pub const DBREASON_ROW_ASYNCHINSERT = DBREASONENUM15._ASYNCHINSERT;

/// Comparison operators for row filtering/searching; CASESENSITIVE and
/// CASEINSENSITIVE are high-bit modifiers, not standalone operators.
pub const DBCOMPAREOPSENUM = enum(i32) {
    LT = 0,
    LE = 1,
    EQ = 2,
    GE = 3,
    GT = 4,
    BEGINSWITH = 5,
    CONTAINS = 6,
    NE = 7,
    IGNORE = 8,
    CASESENSITIVE = 4096,
    CASEINSENSITIVE = 8192,
};
pub const DBCOMPAREOPS_LT = DBCOMPAREOPSENUM.LT;
pub const DBCOMPAREOPS_LE = DBCOMPAREOPSENUM.LE;
pub const DBCOMPAREOPS_EQ = DBCOMPAREOPSENUM.EQ;
pub const DBCOMPAREOPS_GE = DBCOMPAREOPSENUM.GE;
pub const DBCOMPAREOPS_GT = DBCOMPAREOPSENUM.GT;
pub const DBCOMPAREOPS_BEGINSWITH = DBCOMPAREOPSENUM.BEGINSWITH;
pub const DBCOMPAREOPS_CONTAINS =
DBCOMPAREOPSENUM.CONTAINS;
pub const DBCOMPAREOPS_NE = DBCOMPAREOPSENUM.NE;
pub const DBCOMPAREOPS_IGNORE = DBCOMPAREOPSENUM.IGNORE;
pub const DBCOMPAREOPS_CASESENSITIVE = DBCOMPAREOPSENUM.CASESENSITIVE;
pub const DBCOMPAREOPS_CASEINSENSITIVE = DBCOMPAREOPSENUM.CASEINSENSITIVE;

/// Negated comparison operators added in OLE DB 2.0 (continue the value series at 9).
pub const DBCOMPAREOPSENUM20 = enum(i32) {
    BEGINSWITH = 9,
    CONTAINS = 10,
};
pub const DBCOMPAREOPS_NOTBEGINSWITH = DBCOMPAREOPSENUM20.BEGINSWITH;
pub const DBCOMPAREOPS_NOTCONTAINS = DBCOMPAREOPSENUM20.CONTAINS;

// NOTE(review): single-member enums with placeholder names (N/E); the real C
// constant names are the aliases beneath each one.
pub const DBASYNCHOPENUM = enum(i32) {
    N = 0,
};
pub const DBASYNCHOP_OPEN = DBASYNCHOPENUM.N;

/// Phases reported during asynchronous rowset population.
pub const DBASYNCHPHASEENUM = enum(i32) {
    INITIALIZATION = 0,
    POPULATION = 1,
    COMPLETE = 2,
    CANCELED = 3,
};
pub const DBASYNCHPHASE_INITIALIZATION = DBASYNCHPHASEENUM.INITIALIZATION;
pub const DBASYNCHPHASE_POPULATION = DBASYNCHPHASEENUM.POPULATION;
pub const DBASYNCHPHASE_COMPLETE = DBASYNCHPHASEENUM.COMPLETE;
pub const DBASYNCHPHASE_CANCELED = DBASYNCHPHASEENUM.CANCELED;

/// Sort direction (used e.g. by IViewSort.SetSortOrder).
pub const DBSORTENUM = enum(i32) {
    ASCENDING = 0,
    DESCENDING = 1,
};
pub const DBSORT_ASCENDING = DBSORTENUM.ASCENDING;
pub const DBSORT_DESCENDING = DBSORTENUM.DESCENDING;

pub const DBCOMMANDPERSISTFLAGENUM = enum(i32) {
    E = 1,
};
pub const DBCOMMANDPERSISTFLAG_NOSAVE = DBCOMMANDPERSISTFLAGENUM.E;

/// Command-persistence flags added in OLE DB 2.1.
pub const DBCOMMANDPERSISTFLAGENUM21 = enum(i32) {
    DEFAULT = 0,
    PERSISTVIEW = 2,
    PERSISTPROCEDURE = 4,
};
pub const DBCOMMANDPERSISTFLAG_DEFAULT = DBCOMMANDPERSISTFLAGENUM21.DEFAULT;
pub const DBCOMMANDPERSISTFLAG_PERSISTVIEW = DBCOMMANDPERSISTFLAGENUM21.PERSISTVIEW;
pub const DBCOMMANDPERSISTFLAG_PERSISTPROCEDURE = DBCOMMANDPERSISTFLAGENUM21.PERSISTPROCEDURE;

/// Kinds of table constraints.
pub const DBCONSTRAINTTYPEENUM = enum(i32) {
    UNIQUE = 0,
    FOREIGNKEY = 1,
    PRIMARYKEY = 2,
    CHECK = 3,
};
pub const DBCONSTRAINTTYPE_UNIQUE = DBCONSTRAINTTYPEENUM.UNIQUE;
pub const DBCONSTRAINTTYPE_FOREIGNKEY = DBCONSTRAINTTYPEENUM.FOREIGNKEY;
pub const DBCONSTRAINTTYPE_PRIMARYKEY = DBCONSTRAINTTYPEENUM.PRIMARYKEY;
pub const DBCONSTRAINTTYPE_CHECK = DBCONSTRAINTTYPEENUM.CHECK;

/// Referential action for update/delete rules on a foreign-key constraint.
pub const DBUPDELRULEENUM = enum(i32) {
    NOACTION = 0,
    CASCADE = 1,
    SETNULL = 2,
    SETDEFAULT = 3,
};
pub const DBUPDELRULE_NOACTION = DBUPDELRULEENUM.NOACTION;
pub const DBUPDELRULE_CASCADE = DBUPDELRULEENUM.CASCADE;
pub const DBUPDELRULE_SETNULL = DBUPDELRULEENUM.SETNULL;
pub const DBUPDELRULE_SETDEFAULT = DBUPDELRULEENUM.SETDEFAULT;

/// Foreign-key match type.
pub const DBMATCHTYPEENUM = enum(i32) {
    FULL = 0,
    NONE = 1,
    PARTIAL = 2,
};
pub const DBMATCHTYPE_FULL = DBMATCHTYPEENUM.FULL;
pub const DBMATCHTYPE_NONE = DBMATCHTYPEENUM.NONE;
pub const DBMATCHTYPE_PARTIAL = DBMATCHTYPEENUM.PARTIAL;

// ED/ABLE are truncated member names; see the DEFERRED/DEFERRABLE aliases.
pub const DBDEFERRABILITYENUM = enum(i32) {
    ED = 1,
    ABLE = 2,
};
pub const DBDEFERRABILITY_DEFERRED = DBDEFERRABILITYENUM.ED;
pub const DBDEFERRABILITY_DEFERRABLE = DBDEFERRABILITYENUM.ABLE;

/// Flags passed to IAccessor.CreateAccessor describing the accessor's use
/// (bit values — combinable).
pub const DBACCESSORFLAGSENUM = enum(i32) {
    INVALID = 0,
    PASSBYREF = 1,
    ROWDATA = 2,
    PARAMETERDATA = 4,
    OPTIMIZED = 8,
    INHERITED = 16,
};
pub const DBACCESSOR_INVALID = DBACCESSORFLAGSENUM.INVALID;
pub const DBACCESSOR_PASSBYREF = DBACCESSORFLAGSENUM.PASSBYREF;
pub const DBACCESSOR_ROWDATA = DBACCESSORFLAGSENUM.ROWDATA;
pub const DBACCESSOR_PARAMETERDATA = DBACCESSORFLAGSENUM.PARAMETERDATA;
pub const DBACCESSOR_OPTIMIZED = DBACCESSORFLAGSENUM.OPTIMIZED;
pub const DBACCESSOR_INHERITED = DBACCESSORFLAGSENUM.INHERITED;

/// Per-binding status reported by IAccessor.CreateAccessor.
pub const DBBINDSTATUSENUM = enum(i32) {
    OK = 0,
    BADORDINAL = 1,
    UNSUPPORTEDCONVERSION = 2,
    BADBINDINFO = 3,
    BADSTORAGEFLAGS = 4,
    NOINTERFACE = 5,
    MULTIPLESTORAGE = 6,
};
pub const DBBINDSTATUS_OK = DBBINDSTATUSENUM.OK;
pub const DBBINDSTATUS_BADORDINAL = DBBINDSTATUSENUM.BADORDINAL;
pub const DBBINDSTATUS_UNSUPPORTEDCONVERSION = DBBINDSTATUSENUM.UNSUPPORTEDCONVERSION;
pub const DBBINDSTATUS_BADBINDINFO = DBBINDSTATUSENUM.BADBINDINFO;
pub const DBBINDSTATUS_BADSTORAGEFLAGS = DBBINDSTATUSENUM.BADSTORAGEFLAGS;
pub const DBBINDSTATUS_NOINTERFACE = DBBINDSTATUSENUM.NOINTERFACE;
pub const DBBINDSTATUS_MULTIPLESTORAGE = DBBINDSTATUSENUM.MULTIPLESTORAGE;
const IID_IAccessor_Value =
@import("../zig.zig").Guid.initString("0c733a8c-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IAccessor = &IID_IAccessor_Value;

/// OLE DB IAccessor (IID 0c733a8c-2a1c-11ce-ade5-00aa0044773d).
/// COM interface for creating/ref-counting accessor handles (`hAccessor`) that
/// describe data bindings. `extern struct` so the vtable layout matches the C ABI.
pub const IAccessor = extern struct {
    pub const VTable = extern struct {
        // IUnknown methods (QueryInterface/AddRef/Release) occupy the first slots.
        base: IUnknown.VTable,
        AddRefAccessor: fn(
            self: *const IAccessor,
            hAccessor: usize,
            pcRefCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateAccessor: fn(
            self: *const IAccessor,
            dwAccessorFlags: u32,
            cBindings: usize,
            rgBindings: [*]const DBBINDING,
            cbRowSize: usize,
            phAccessor: ?*usize,
            rgStatus: ?[*]u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetBindings: fn(
            self: *const IAccessor,
            hAccessor: usize,
            pdwAccessorFlags: ?*u32,
            pcBindings: ?*usize,
            prgBindings: ?*?*DBBINDING,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ReleaseAccessor: fn(
            self: *const IAccessor,
            hAccessor: usize,
            pcRefCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Injects thin wrappers that forward through the vtable; inherited by types
    // that embed this interface.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAccessor_AddRefAccessor(self: *const T, hAccessor: usize, pcRefCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAccessor.VTable, self.vtable).AddRefAccessor(@ptrCast(*const IAccessor, self), hAccessor, pcRefCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAccessor_CreateAccessor(self: *const T, dwAccessorFlags: u32, cBindings: usize, rgBindings: [*]const DBBINDING, cbRowSize: usize, phAccessor: ?*usize, rgStatus: ?[*]u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAccessor.VTable, self.vtable).CreateAccessor(@ptrCast(*const IAccessor, self), dwAccessorFlags, cBindings, rgBindings, cbRowSize, phAccessor, rgStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAccessor_GetBindings(self: *const T, hAccessor: usize, pdwAccessorFlags: ?*u32, pcBindings: ?*usize, prgBindings: ?*?*DBBINDING) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAccessor.VTable, self.vtable).GetBindings(@ptrCast(*const IAccessor, self), hAccessor, pdwAccessorFlags, pcBindings, prgBindings);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAccessor_ReleaseAccessor(self: *const T, hAccessor: usize, pcRefCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAccessor.VTable, self.vtable).ReleaseAccessor(@ptrCast(*const IAccessor, self), hAccessor, pcRefCount);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IRowset_Value = @import("../zig.zig").Guid.initString("0c733a7c-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowset = &IID_IRowset_Value;

/// OLE DB IRowset (IID 0c733a7c-2a1c-11ce-ade5-00aa0044773d).
/// Core rowset interface: fetch row handles, read data through an accessor,
/// release rows, and restart the fetch position.
pub const IRowset = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddRefRows: fn(
            self: *const IRowset,
            cRows: usize,
            rghRows: ?*const usize,
            rgRefCounts: ?*u32,
            rgRowStatus: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetData: fn(
            self: *const IRowset,
            hRow: usize,
            hAccessor: usize,
            pData: ?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetNextRows: fn(
            self: *const IRowset,
            hReserved: usize,
            lRowsOffset: isize,
            cRows: isize,
            pcRowsObtained: ?*usize,
            prghRows: ?*?*usize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ReleaseRows: fn(
            self: *const IRowset,
            cRows: usize,
            rghRows: ?*const usize,
            rgRowOptions: ?*u32,
            rgRefCounts: ?*u32,
            rgRowStatus: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RestartPosition: fn(
            self: *const IRowset,
            hReserved: usize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowset_AddRefRows(self: *const T, cRows: usize, rghRows: ?*const usize, rgRefCounts: ?*u32, rgRowStatus: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowset.VTable, self.vtable).AddRefRows(@ptrCast(*const IRowset, self), cRows, rghRows, rgRefCounts, rgRowStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowset_GetData(self: *const T, hRow: usize, hAccessor: usize, pData: ?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowset.VTable, self.vtable).GetData(@ptrCast(*const IRowset, self), hRow, hAccessor, pData);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowset_GetNextRows(self: *const T, hReserved: usize, lRowsOffset: isize, cRows: isize, pcRowsObtained: ?*usize, prghRows: ?*?*usize) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowset.VTable, self.vtable).GetNextRows(@ptrCast(*const IRowset, self), hReserved, lRowsOffset, cRows, pcRowsObtained, prghRows);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowset_ReleaseRows(self: *const T, cRows: usize, rghRows: ?*const usize, rgRowOptions: ?*u32, rgRefCounts: ?*u32, rgRowStatus: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowset.VTable, self.vtable).ReleaseRows(@ptrCast(*const IRowset, self), cRows, rghRows, rgRowOptions, rgRefCounts, rgRowStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowset_RestartPosition(self: *const T, hReserved: usize) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowset.VTable, self.vtable).RestartPosition(@ptrCast(*const IRowset, self), hReserved);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IRowsetInfo_Value = @import("../zig.zig").Guid.initString("0c733a55-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowsetInfo = &IID_IRowsetInfo_Value;

/// OLE DB IRowsetInfo (IID 0c733a55-2a1c-11ce-ade5-00aa0044773d).
/// Queries rowset properties and the objects a rowset was created from.
pub const IRowsetInfo = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetProperties: fn(
            self: *const IRowsetInfo,
            cPropertyIDSets: u32,
            rgPropertyIDSets: ?[*]const DBPROPIDSET,
            pcPropertySets: ?*u32,
            prgPropertySets: ?*?*DBPROPSET,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetReferencedRowset: fn(
            self: *const IRowsetInfo,
            iOrdinal: usize,
            riid: ?*const Guid,
            ppReferencedRowset: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSpecification: fn(
            self: *const IRowsetInfo,
            riid: ?*const Guid,
            ppSpecification: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetInfo_GetProperties(self: *const T, cPropertyIDSets: u32, rgPropertyIDSets: ?[*]const DBPROPIDSET, pcPropertySets: ?*u32, prgPropertySets: ?*?*DBPROPSET) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetInfo.VTable, self.vtable).GetProperties(@ptrCast(*const IRowsetInfo, self), cPropertyIDSets, rgPropertyIDSets, pcPropertySets, prgPropertySets);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetInfo_GetReferencedRowset(self: *const T, iOrdinal: usize, riid: ?*const Guid, ppReferencedRowset: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetInfo.VTable, self.vtable).GetReferencedRowset(@ptrCast(*const IRowsetInfo, self), iOrdinal, riid, ppReferencedRowset);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetInfo_GetSpecification(self: *const T, riid: ?*const Guid, ppSpecification: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetInfo.VTable, self.vtable).GetSpecification(@ptrCast(*const IRowsetInfo, self), riid, ppSpecification);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

/// Result of a bookmark comparison (see IRowsetLocate.Compare).
pub const DBCOMPAREENUM = enum(i32) {
    LT = 0,
    EQ = 1,
    GT = 2,
    NE = 3,
    NOTCOMPARABLE = 4,
};
pub const DBCOMPARE_LT = DBCOMPAREENUM.LT;
pub const DBCOMPARE_EQ = DBCOMPAREENUM.EQ;
pub const DBCOMPARE_GT = DBCOMPAREENUM.GT;
pub const DBCOMPARE_NE =
DBCOMPAREENUM.NE;
pub const DBCOMPARE_NOTCOMPARABLE = DBCOMPAREENUM.NOTCOMPARABLE;

const IID_IRowsetLocate_Value = @import("../zig.zig").Guid.initString("0c733a7d-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowsetLocate = &IID_IRowsetLocate_Value;

/// OLE DB IRowsetLocate (IID 0c733a7d-2a1c-11ce-ade5-00aa0044773d).
/// Extends IRowset (vtable embeds IRowset.VTable) with bookmark-based
/// positioning: compare bookmarks, fetch rows at/by bookmark, hash bookmarks.
pub const IRowsetLocate = extern struct {
    pub const VTable = extern struct {
        base: IRowset.VTable,
        Compare: fn(
            self: *const IRowsetLocate,
            hReserved: usize,
            cbBookmark1: usize,
            pBookmark1: ?*const u8,
            cbBookmark2: usize,
            pBookmark2: ?*const u8,
            pComparison: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetRowsAt: fn(
            self: *const IRowsetLocate,
            hReserved1: usize,
            hReserved2: usize,
            cbBookmark: usize,
            pBookmark: ?*const u8,
            lRowsOffset: isize,
            cRows: isize,
            pcRowsObtained: ?*usize,
            prghRows: ?*?*usize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetRowsByBookmark: fn(
            self: *const IRowsetLocate,
            hReserved: usize,
            cRows: usize,
            rgcbBookmarks: ?*const usize,
            rgpBookmarks: ?*const ?*u8,
            rghRows: ?*usize,
            rgRowStatus: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Hash: fn(
            self: *const IRowsetLocate,
            hReserved: usize,
            cBookmarks: usize,
            rgcbBookmarks: ?*const usize,
            rgpBookmarks: ?*const ?*u8,
            rgHashedValues: ?*usize,
            rgBookmarkStatus: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    // Mixin chains through IRowset (and transitively IUnknown) wrappers.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IRowset.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetLocate_Compare(self: *const T, hReserved: usize, cbBookmark1: usize, pBookmark1: ?*const u8, cbBookmark2: usize, pBookmark2: ?*const u8, pComparison: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetLocate.VTable, self.vtable).Compare(@ptrCast(*const IRowsetLocate, self), hReserved, cbBookmark1, pBookmark1, cbBookmark2, pBookmark2, pComparison);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetLocate_GetRowsAt(self: *const T, hReserved1: usize, hReserved2: usize, cbBookmark: usize, pBookmark: ?*const u8, lRowsOffset: isize, cRows: isize, pcRowsObtained: ?*usize, prghRows: ?*?*usize) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetLocate.VTable, self.vtable).GetRowsAt(@ptrCast(*const IRowsetLocate, self), hReserved1, hReserved2, cbBookmark, pBookmark, lRowsOffset, cRows, pcRowsObtained, prghRows);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetLocate_GetRowsByBookmark(self: *const T, hReserved: usize, cRows: usize, rgcbBookmarks: ?*const usize, rgpBookmarks: ?*const ?*u8, rghRows: ?*usize, rgRowStatus: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetLocate.VTable, self.vtable).GetRowsByBookmark(@ptrCast(*const IRowsetLocate, self), hReserved, cRows, rgcbBookmarks, rgpBookmarks, rghRows, rgRowStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetLocate_Hash(self: *const T, hReserved: usize, cBookmarks: usize, rgcbBookmarks: ?*const usize, rgpBookmarks: ?*const ?*u8, rgHashedValues: ?*usize, rgBookmarkStatus: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetLocate.VTable, self.vtable).Hash(@ptrCast(*const IRowsetLocate, self), hReserved, cBookmarks, rgcbBookmarks, rgpBookmarks, rgHashedValues, rgBookmarkStatus);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IRowsetResynch_Value = @import("../zig.zig").Guid.initString("0c733a84-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowsetResynch = &IID_IRowsetResynch_Value;

/// OLE DB IRowsetResynch (IID 0c733a84-2a1c-11ce-ade5-00aa0044773d).
/// Re-fetches the currently visible data for rows in a rowset.
pub const IRowsetResynch = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetVisibleData: fn(
            self: *const IRowsetResynch,
            hRow: usize,
            hAccessor: usize,
            pData: ?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ResynchRows: fn(
            self: *const IRowsetResynch,
            cRows: usize,
            rghRows: ?*const usize,
            pcRowsResynched: ?*usize,
            prghRowsResynched: ?*?*usize,
            prgRowStatus: ?*?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetResynch_GetVisibleData(self: *const T, hRow: usize, hAccessor: usize, pData: ?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetResynch.VTable, self.vtable).GetVisibleData(@ptrCast(*const IRowsetResynch, self), hRow, hAccessor, pData);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetResynch_ResynchRows(self: *const T, cRows: usize, rghRows: ?*const usize, pcRowsResynched: ?*usize, prghRowsResynched: ?*?*usize, prgRowStatus: ?*?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetResynch.VTable, self.vtable).ResynchRows(@ptrCast(*const IRowsetResynch, self), cRows, rghRows, pcRowsResynched, prghRowsResynched, prgRowStatus);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IRowsetScroll_Value = @import("../zig.zig").Guid.initString("0c733a7e-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowsetScroll = &IID_IRowsetScroll_Value;

/// OLE DB IRowsetScroll (IID 0c733a7e-2a1c-11ce-ade5-00aa0044773d).
/// Extends IRowsetLocate with approximate positioning by row ratio.
pub const IRowsetScroll = extern struct {
    pub const VTable = extern struct {
        base: IRowsetLocate.VTable,
        GetApproximatePosition: fn(
            self: *const IRowsetScroll,
            hReserved: usize,
            cbBookmark: usize,
            pBookmark: ?*const u8,
            pulPosition: ?*usize,
            pcRows: ?*usize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetRowsAtRatio: fn(
            self: *const IRowsetScroll,
            hReserved1: usize,
            hReserved2: usize,
            ulNumerator: usize,
            ulDenominator: usize,
            cRows: isize,
            pcRowsObtained: ?*usize,
            prghRows: ?*?*usize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IRowsetLocate.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetScroll_GetApproximatePosition(self: *const T, hReserved: usize, cbBookmark: usize, pBookmark: ?*const u8, pulPosition: ?*usize, pcRows: ?*usize) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetScroll.VTable, self.vtable).GetApproximatePosition(@ptrCast(*const IRowsetScroll, self), hReserved, cbBookmark, pBookmark, pulPosition, pcRows);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetScroll_GetRowsAtRatio(self: *const T, hReserved1: usize, hReserved2: usize, ulNumerator: usize, ulDenominator: usize, cRows: isize, pcRowsObtained: ?*usize, prghRows: ?*?*usize) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetScroll.VTable, self.vtable).GetRowsAtRatio(@ptrCast(*const IRowsetScroll, self), hReserved1, hReserved2, ulNumerator, ulDenominator, cRows, pcRowsObtained, prghRows);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IChapteredRowset_Value = @import("../zig.zig").Guid.initString("0c733a93-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IChapteredRowset = &IID_IChapteredRowset_Value;

/// OLE DB IChapteredRowset (IID 0c733a93-2a1c-11ce-ade5-00aa0044773d).
/// Reference-counts chapter handles (`hChapter`) of a chaptered rowset.
pub const IChapteredRowset = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddRefChapter: fn(
            self: *const IChapteredRowset,
            hChapter: usize,
            pcRefCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ReleaseChapter: fn(
            self: *const IChapteredRowset,
            hChapter: usize,
            pcRefCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IChapteredRowset_AddRefChapter(self: *const T, hChapter: usize, pcRefCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IChapteredRowset.VTable, self.vtable).AddRefChapter(@ptrCast(*const IChapteredRowset, self), hChapter, pcRefCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IChapteredRowset_ReleaseChapter(self: *const T, hChapter: usize, pcRefCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IChapteredRowset.VTable, self.vtable).ReleaseChapter(@ptrCast(*const IChapteredRowset, self), hChapter, pcRefCount);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IRowsetFind_Value = @import("../zig.zig").Guid.initString("0c733a9d-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowsetFind = &IID_IRowsetFind_Value;

/// OLE DB IRowsetFind (IID 0c733a9d-2a1c-11ce-ade5-00aa0044773d).
/// Finds the next row matching a value under a comparison operator
/// (CompareOp takes DBCOMPAREOPS* values).
pub const IRowsetFind = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        FindNextRow: fn(
            self: *const IRowsetFind,
            hChapter: usize,
            hAccessor: usize,
            pFindValue: ?*c_void,
            CompareOp: u32,
            cbBookmark: usize,
            pBookmark: ?*const u8,
            lRowsOffset: isize,
            cRows: isize,
            pcRowsObtained: ?*usize,
            prghRows: ?*?*usize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetFind_FindNextRow(self: *const T, hChapter: usize, hAccessor: usize, pFindValue: ?*c_void, CompareOp: u32, cbBookmark: usize, pBookmark: ?*const u8, lRowsOffset: isize, cRows: isize, pcRowsObtained: ?*usize, prghRows: ?*?*usize) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetFind.VTable, self.vtable).FindNextRow(@ptrCast(*const IRowsetFind, self), hChapter, hAccessor, pFindValue, CompareOp, cbBookmark, pBookmark, lRowsOffset, cRows, pcRowsObtained, prghRows);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

/// Row-position status flags (see IRowPosition.GetRowPosition/SetRowPosition).
pub const DBPOSITIONFLAGSENUM = enum(i32) {
    OK = 0,
    NOROW = 1,
    BOF = 2,
    EOF = 3,
};
pub const DBPOSITION_OK = DBPOSITIONFLAGSENUM.OK;
pub const DBPOSITION_NOROW = DBPOSITIONFLAGSENUM.NOROW;
pub const DBPOSITION_BOF = DBPOSITIONFLAGSENUM.BOF;
pub const DBPOSITION_EOF = DBPOSITIONFLAGSENUM.EOF;
const IID_IRowPosition_Value =
@import("../zig.zig").Guid.initString("0c733a94-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IRowPosition = &IID_IRowPosition_Value; pub const IRowPosition = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, ClearRowPosition: fn( self: *const IRowPosition, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRowPosition: fn( self: *const IRowPosition, phChapter: ?*usize, phRow: ?*usize, pdwPositionFlags: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRowset: fn( self: *const IRowPosition, riid: ?*const Guid, ppRowset: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Initialize: fn( self: *const IRowPosition, pRowset: ?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetRowPosition: fn( self: *const IRowPosition, hChapter: usize, hRow: usize, dwPositionFlags: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowPosition_ClearRowPosition(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IRowPosition.VTable, self.vtable).ClearRowPosition(@ptrCast(*const IRowPosition, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowPosition_GetRowPosition(self: *const T, phChapter: ?*usize, phRow: ?*usize, pdwPositionFlags: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IRowPosition.VTable, self.vtable).GetRowPosition(@ptrCast(*const IRowPosition, self), phChapter, phRow, pdwPositionFlags); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowPosition_GetRowset(self: *const T, riid: ?*const Guid, ppRowset: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IRowPosition.VTable, self.vtable).GetRowset(@ptrCast(*const IRowPosition, self), riid, ppRowset); } // 
NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowPosition_Initialize(self: *const T, pRowset: ?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IRowPosition.VTable, self.vtable).Initialize(@ptrCast(*const IRowPosition, self), pRowset); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowPosition_SetRowPosition(self: *const T, hChapter: usize, hRow: usize, dwPositionFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IRowPosition.VTable, self.vtable).SetRowPosition(@ptrCast(*const IRowPosition, self), hChapter, hRow, dwPositionFlags); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IRowPositionChange_Value = @import("../zig.zig").Guid.initString("0997a571-126e-11d0-9f8a-00a0c9a0631e"); pub const IID_IRowPositionChange = &IID_IRowPositionChange_Value; pub const IRowPositionChange = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, OnRowPositionChange: fn( self: *const IRowPositionChange, eReason: u32, ePhase: u32, fCantDeny: BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowPositionChange_OnRowPositionChange(self: *const T, eReason: u32, ePhase: u32, fCantDeny: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IRowPositionChange.VTable, self.vtable).OnRowPositionChange(@ptrCast(*const IRowPositionChange, self), eReason, ePhase, fCantDeny); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IViewRowset_Value = @import("../zig.zig").Guid.initString("0c733a97-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IViewRowset = &IID_IViewRowset_Value; pub const IViewRowset = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetSpecification: fn( self: *const IViewRowset, riid: ?*const 
Guid, ppObject: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OpenViewRowset: fn( self: *const IViewRowset, pUnkOuter: ?*IUnknown, riid: ?*const Guid, ppRowset: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IViewRowset_GetSpecification(self: *const T, riid: ?*const Guid, ppObject: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IViewRowset.VTable, self.vtable).GetSpecification(@ptrCast(*const IViewRowset, self), riid, ppObject); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IViewRowset_OpenViewRowset(self: *const T, pUnkOuter: ?*IUnknown, riid: ?*const Guid, ppRowset: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IViewRowset.VTable, self.vtable).OpenViewRowset(@ptrCast(*const IViewRowset, self), pUnkOuter, riid, ppRowset); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IViewChapter_Value = @import("../zig.zig").Guid.initString("0c733a98-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IViewChapter = &IID_IViewChapter_Value; pub const IViewChapter = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetSpecification: fn( self: *const IViewChapter, riid: ?*const Guid, ppRowset: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OpenViewChapter: fn( self: *const IViewChapter, hSource: usize, phViewChapter: ?*usize, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IViewChapter_GetSpecification(self: *const T, riid: ?*const Guid, ppRowset: ?*?*IUnknown) callconv(.Inline) HRESULT 
{ return @ptrCast(*const IViewChapter.VTable, self.vtable).GetSpecification(@ptrCast(*const IViewChapter, self), riid, ppRowset); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IViewChapter_OpenViewChapter(self: *const T, hSource: usize, phViewChapter: ?*usize) callconv(.Inline) HRESULT { return @ptrCast(*const IViewChapter.VTable, self.vtable).OpenViewChapter(@ptrCast(*const IViewChapter, self), hSource, phViewChapter); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IViewSort_Value = @import("../zig.zig").Guid.initString("0c733a9a-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IViewSort = &IID_IViewSort_Value; pub const IViewSort = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetSortOrder: fn( self: *const IViewSort, pcValues: ?*usize, prgColumns: ?*?*usize, prgOrders: ?*?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetSortOrder: fn( self: *const IViewSort, cValues: usize, rgColumns: [*]const usize, rgOrders: [*]const u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IViewSort_GetSortOrder(self: *const T, pcValues: ?*usize, prgColumns: ?*?*usize, prgOrders: ?*?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IViewSort.VTable, self.vtable).GetSortOrder(@ptrCast(*const IViewSort, self), pcValues, prgColumns, prgOrders); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IViewSort_SetSortOrder(self: *const T, cValues: usize, rgColumns: [*]const usize, rgOrders: [*]const u32) callconv(.Inline) HRESULT { return @ptrCast(*const IViewSort.VTable, self.vtable).SetSortOrder(@ptrCast(*const IViewSort, self), cValues, rgColumns, rgOrders); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IViewFilter_Value = 
@import("../zig.zig").Guid.initString("0c733a9b-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IViewFilter = &IID_IViewFilter_Value;
// COM interface IViewFilter: reads and writes a view's row filter; criteria
// data is passed through an accessor handle plus an untyped buffer.
pub const IViewFilter = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetFilter: fn(
            self: *const IViewFilter,
            hAccessor: usize,
            pcRows: ?*usize,
            pCompareOps: [*]?*u32,
            pCriteriaData: ?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetFilterBindings: fn(
            self: *const IViewFilter,
            pcBindings: ?*usize,
            prgBindings: ?*?*DBBINDING,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetFilter: fn(
            self: *const IViewFilter,
            hAccessor: usize,
            cRows: usize,
            CompareOps: [*]u32,
            pCriteriaData: ?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IViewFilter_GetFilter(self: *const T, hAccessor: usize, pcRows: ?*usize, pCompareOps: [*]?*u32, pCriteriaData: ?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IViewFilter.VTable, self.vtable).GetFilter(@ptrCast(*const IViewFilter, self), hAccessor, pcRows, pCompareOps, pCriteriaData);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IViewFilter_GetFilterBindings(self: *const T, pcBindings: ?*usize, prgBindings: ?*?*DBBINDING) callconv(.Inline) HRESULT {
            return @ptrCast(*const IViewFilter.VTable, self.vtable).GetFilterBindings(@ptrCast(*const IViewFilter, self), pcBindings, prgBindings);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IViewFilter_SetFilter(self: *const T, hAccessor: usize, cRows: usize, CompareOps: [*]u32, pCriteriaData: ?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IViewFilter.VTable, self.vtable).SetFilter(@ptrCast(*const IViewFilter, self), hAccessor, cRows, CompareOps, pCriteriaData);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface IRowsetView: creates views over a rowset and retrieves the
// view associated with a chapter handle.
const IID_IRowsetView_Value = @import("../zig.zig").Guid.initString("0c733a99-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowsetView = &IID_IRowsetView_Value;
pub const IRowsetView = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CreateView: fn(
            self: *const IRowsetView,
            pUnkOuter: ?*IUnknown,
            riid: ?*const Guid,
            ppView: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetView: fn(
            self: *const IRowsetView,
            hChapter: usize,
            riid: ?*const Guid,
            phChapterSource: ?*usize,
            ppView: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetView_CreateView(self: *const T, pUnkOuter: ?*IUnknown, riid: ?*const Guid, ppView: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetView.VTable, self.vtable).CreateView(@ptrCast(*const IRowsetView, self), pUnkOuter, riid, ppView);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetView_GetView(self: *const T, hChapter: usize, riid: ?*const Guid, phChapterSource: ?*usize, ppView: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetView.VTable, self.vtable).GetView(@ptrCast(*const IRowsetView, self), hChapter, riid, phChapterSource, ppView);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface IRowsetChange: immediate-mode row mutation (delete / set / insert).
const IID_IRowsetChange_Value = @import("../zig.zig").Guid.initString("0c733a05-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowsetChange = &IID_IRowsetChange_Value;
pub const IRowsetChange = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        DeleteRows: fn(
            self: *const IRowsetChange,
            hReserved: usize,
            cRows: usize,
            rghRows: ?*const usize,
            rgRowStatus: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetData: fn(
            self: *const IRowsetChange,
            hRow: usize,
            hAccessor: usize,
            pData: ?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        InsertRow: fn(
            self: *const IRowsetChange,
            hReserved: usize,
            hAccessor: usize,
            pData: ?*c_void,
            phRow: ?*usize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetChange_DeleteRows(self: *const T, hReserved: usize, cRows: usize, rghRows: ?*const usize, rgRowStatus: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetChange.VTable, self.vtable).DeleteRows(@ptrCast(*const IRowsetChange, self), hReserved, cRows, rghRows, rgRowStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetChange_SetData(self: *const T, hRow: usize, hAccessor: usize, pData: ?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetChange.VTable, self.vtable).SetData(@ptrCast(*const IRowsetChange, self), hRow, hAccessor, pData);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetChange_InsertRow(self: *const T, hReserved: usize, hAccessor: usize, pData: ?*c_void, phRow: ?*usize) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetChange.VTable, self.vtable).InsertRow(@ptrCast(*const IRowsetChange, self), hReserved, hAccessor, pData, phRow);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Pending-change status bit flags (values are powers of two; combinable).
pub const DBPENDINGSTATUSENUM = enum(i32) {
    NEW = 1,
    CHANGED = 2,
    DELETED = 4,
    UNCHANGED = 8,
    INVALIDROW = 16,
};
pub const DBPENDINGSTATUS_NEW = DBPENDINGSTATUSENUM.NEW;
pub const DBPENDINGSTATUS_CHANGED = DBPENDINGSTATUSENUM.CHANGED;
pub const DBPENDINGSTATUS_DELETED = DBPENDINGSTATUSENUM.DELETED;
pub const DBPENDINGSTATUS_UNCHANGED = DBPENDINGSTATUSENUM.UNCHANGED;
pub const DBPENDINGSTATUS_INVALIDROW = DBPENDINGSTATUSENUM.INVALIDROW;

const
IID_IRowsetUpdate_Value = @import("../zig.zig").Guid.initString("0c733a6d-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowsetUpdate = &IID_IRowsetUpdate_Value;
// COM interface IRowsetUpdate: delayed-update row changes. Derives from
// IRowsetChange — `base` embeds IRowsetChange.VTable (which itself starts
// with IUnknown's), and the mixin pulls in IRowsetChange's methods.
pub const IRowsetUpdate = extern struct {
    pub const VTable = extern struct {
        base: IRowsetChange.VTable,
        GetOriginalData: fn(
            self: *const IRowsetUpdate,
            hRow: usize,
            hAccessor: usize,
            pData: ?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPendingRows: fn(
            self: *const IRowsetUpdate,
            hReserved: usize,
            dwRowStatus: u32,
            pcPendingRows: ?*usize,
            prgPendingRows: ?*?*usize,
            prgPendingStatus: ?*?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetRowStatus: fn(
            self: *const IRowsetUpdate,
            hReserved: usize,
            cRows: usize,
            rghRows: ?*const usize,
            rgPendingStatus: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Undo: fn(
            self: *const IRowsetUpdate,
            hReserved: usize,
            cRows: usize,
            rghRows: ?*const usize,
            pcRowsUndone: ?*usize,
            prgRowsUndone: ?*?*usize,
            prgRowStatus: ?*?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Update: fn(
            self: *const IRowsetUpdate,
            hReserved: usize,
            cRows: usize,
            rghRows: ?*const usize,
            pcRows: ?*usize,
            prgRows: ?*?*usize,
            prgRowStatus: ?*?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IRowsetChange.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetUpdate_GetOriginalData(self: *const T, hRow: usize, hAccessor: usize, pData: ?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetUpdate.VTable, self.vtable).GetOriginalData(@ptrCast(*const IRowsetUpdate, self), hRow, hAccessor, pData);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetUpdate_GetPendingRows(self: *const T, hReserved: usize, dwRowStatus: u32, pcPendingRows: ?*usize, prgPendingRows: ?*?*usize, prgPendingStatus: ?*?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetUpdate.VTable, self.vtable).GetPendingRows(@ptrCast(*const IRowsetUpdate, self), hReserved, dwRowStatus, pcPendingRows, prgPendingRows, prgPendingStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetUpdate_GetRowStatus(self: *const T, hReserved: usize, cRows: usize, rghRows: ?*const usize, rgPendingStatus: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetUpdate.VTable, self.vtable).GetRowStatus(@ptrCast(*const IRowsetUpdate, self), hReserved, cRows, rghRows, rgPendingStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetUpdate_Undo(self: *const T, hReserved: usize, cRows: usize, rghRows: ?*const usize, pcRowsUndone: ?*usize, prgRowsUndone: ?*?*usize, prgRowStatus: ?*?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetUpdate.VTable, self.vtable).Undo(@ptrCast(*const IRowsetUpdate, self), hReserved, cRows, rghRows, pcRowsUndone, prgRowsUndone, prgRowStatus);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetUpdate_Update(self: *const T, hReserved: usize, cRows: usize, rghRows: ?*const usize, pcRows: ?*usize, prgRows: ?*?*usize, prgRowStatus: ?*?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetUpdate.VTable, self.vtable).Update(@ptrCast(*const IRowsetUpdate, self), hReserved, cRows, rghRows, pcRows, prgRows, prgRowStatus);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface IRowsetIdentity: compares two row handles for identity.
const IID_IRowsetIdentity_Value = @import("../zig.zig").Guid.initString("0c733a09-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowsetIdentity = &IID_IRowsetIdentity_Value;
pub const IRowsetIdentity = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        IsSameRow: fn(
            self: *const IRowsetIdentity,
            hThisRow: usize,
            hThatRow: usize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetIdentity_IsSameRow(self: *const T, hThisRow: usize, hThatRow: usize) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetIdentity.VTable, self.vtable).IsSameRow(@ptrCast(*const IRowsetIdentity, self), hThisRow, hThatRow);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface IRowsetNotify: consumer-implemented callback sink for
// field/row/rowset change notifications raised by a rowset.
const IID_IRowsetNotify_Value = @import("../zig.zig").Guid.initString("0c733a83-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowsetNotify = &IID_IRowsetNotify_Value;
pub const IRowsetNotify = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        OnFieldChange: fn(
            self: *const IRowsetNotify,
            pRowset: ?*IRowset,
            hRow: usize,
            cColumns: usize,
            rgColumns: [*]usize,
            eReason: u32,
            ePhase: u32,
            fCantDeny: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnRowChange: fn(
            self: *const IRowsetNotify,
            pRowset: ?*IRowset,
            cRows: usize,
            rghRows: [*]const usize,
            eReason: u32,
            ePhase: u32,
            fCantDeny: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnRowsetChange: fn(
            self: *const IRowsetNotify,
            pRowset: ?*IRowset,
            eReason: u32,
            ePhase: u32,
            fCantDeny: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetNotify_OnFieldChange(self: *const T, pRowset: ?*IRowset, hRow: usize, cColumns: usize, rgColumns: [*]usize, eReason: u32, ePhase: u32, fCantDeny: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetNotify.VTable, self.vtable).OnFieldChange(@ptrCast(*const IRowsetNotify, self), pRowset, hRow, cColumns, rgColumns, eReason, ePhase, fCantDeny);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetNotify_OnRowChange(self: *const T, pRowset: ?*IRowset, cRows: usize, rghRows: [*]const usize, eReason: u32, ePhase: u32, fCantDeny: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetNotify.VTable, self.vtable).OnRowChange(@ptrCast(*const IRowsetNotify, self), pRowset, cRows, rghRows, eReason, ePhase, fCantDeny);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetNotify_OnRowsetChange(self: *const T, pRowset: ?*IRowset, eReason: u32, ePhase: u32, fCantDeny: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetNotify.VTable, self.vtable).OnRowsetChange(@ptrCast(*const IRowsetNotify, self), pRowset, eReason, ePhase, fCantDeny);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Index-seek positioning flags (bit values; combinable).
pub const DBSEEKENUM = enum(i32) {
    INVALID = 0,
    FIRSTEQ = 1,
    LASTEQ = 2,
    AFTEREQ = 4,
    AFTER = 8,
    BEFOREEQ = 16,
    BEFORE = 32,
};
pub const DBSEEK_INVALID = DBSEEKENUM.INVALID;
pub const DBSEEK_FIRSTEQ = DBSEEKENUM.FIRSTEQ;
pub const DBSEEK_LASTEQ = DBSEEKENUM.LASTEQ;
pub const DBSEEK_AFTEREQ = DBSEEKENUM.AFTEREQ;
pub const DBSEEK_AFTER = DBSEEKENUM.AFTER;
pub const DBSEEK_BEFOREEQ = DBSEEKENUM.BEFOREEQ;
pub const DBSEEK_BEFORE = DBSEEKENUM.BEFORE;

// Index range flags. INCLUSIVEEND shares the value 0 with INCLUSIVESTART in
// the Windows headers, so Zig's enum cannot hold both; the alias constant
// below maps DBRANGE_INCLUSIVEEND onto INCLUSIVESTART instead.
pub const DBRANGEENUM = enum(i32) {
    INCLUSIVESTART = 0,
    // INCLUSIVEEND = 0, this enum value conflicts with INCLUSIVESTART
    EXCLUSIVESTART = 1,
    EXCLUSIVEEND = 2,
    EXCLUDENULLS = 4,
    PREFIX = 8,
    MATCH = 16,
};
pub const DBRANGE_INCLUSIVESTART = DBRANGEENUM.INCLUSIVESTART;
pub const DBRANGE_INCLUSIVEEND = DBRANGEENUM.INCLUSIVESTART;
pub const DBRANGE_EXCLUSIVESTART = DBRANGEENUM.EXCLUSIVESTART;
pub const DBRANGE_EXCLUSIVEEND = DBRANGEENUM.EXCLUSIVEEND;
pub const DBRANGE_EXCLUDENULLS = DBRANGEENUM.EXCLUDENULLS;
pub const DBRANGE_PREFIX = DBRANGEENUM.PREFIX;
pub const DBRANGE_MATCH = DBRANGEENUM.MATCH;

// Shift/mask used to encode a column count inside DBRANGE_MATCH flags.
pub const DBRANGEENUM20 = enum(i32) {
    SHIFT = 24,
    MASK = 255,
};
pub const DBRANGE_MATCH_N_SHIFT = DBRANGEENUM20.SHIFT;
pub const DBRANGE_MATCH_N_MASK = DBRANGEENUM20.MASK;

const IID_IRowsetIndex_Value =
@import("../zig.zig").Guid.initString("0c733a82-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowsetIndex = &IID_IRowsetIndex_Value;
// COM interface IRowsetIndex: exposes index metadata plus Seek/SetRange
// positioning (see DBSEEKENUM / DBRANGEENUM flag constants above).
pub const IRowsetIndex = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetIndexInfo: fn(
            self: *const IRowsetIndex,
            pcKeyColumns: ?*usize,
            prgIndexColumnDesc: ?*?*DBINDEXCOLUMNDESC,
            pcIndexPropertySets: ?*u32,
            prgIndexPropertySets: ?*?*DBPROPSET,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Seek: fn(
            self: *const IRowsetIndex,
            hAccessor: usize,
            cKeyValues: usize,
            pData: ?*c_void,
            dwSeekOptions: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetRange: fn(
            self: *const IRowsetIndex,
            hAccessor: usize,
            cStartKeyColumns: usize,
            pStartData: ?*c_void,
            cEndKeyColumns: usize,
            pEndData: ?*c_void,
            dwRangeOptions: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetIndex_GetIndexInfo(self: *const T, pcKeyColumns: ?*usize, prgIndexColumnDesc: ?*?*DBINDEXCOLUMNDESC, pcIndexPropertySets: ?*u32, prgIndexPropertySets: ?*?*DBPROPSET) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetIndex.VTable, self.vtable).GetIndexInfo(@ptrCast(*const IRowsetIndex, self), pcKeyColumns, prgIndexColumnDesc, pcIndexPropertySets, prgIndexPropertySets);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetIndex_Seek(self: *const T, hAccessor: usize, cKeyValues: usize, pData: ?*c_void, dwSeekOptions: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetIndex.VTable, self.vtable).Seek(@ptrCast(*const IRowsetIndex, self), hAccessor, cKeyValues, pData, dwSeekOptions);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetIndex_SetRange(self: *const T, hAccessor: usize, cStartKeyColumns: usize, pStartData: ?*c_void, cEndKeyColumns: usize, pEndData: ?*c_void, dwRangeOptions: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetIndex.VTable, self.vtable).SetRange(@ptrCast(*const IRowsetIndex, self), hAccessor, cStartKeyColumns, pStartData, cEndKeyColumns, pEndData, dwRangeOptions);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface ICommand: executes a command and exposes its owning session.
const IID_ICommand_Value = @import("../zig.zig").Guid.initString("0c733a63-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ICommand = &IID_ICommand_Value;
pub const ICommand = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Cancel: fn(
            self: *const ICommand,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Execute: fn(
            self: *const ICommand,
            pUnkOuter: ?*IUnknown,
            riid: ?*const Guid,
            pParams: ?*DBPARAMS,
            pcRowsAffected: ?*isize,
            ppRowset: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetDBSession: fn(
            self: *const ICommand,
            riid: ?*const Guid,
            ppSession: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommand_Cancel(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommand.VTable, self.vtable).Cancel(@ptrCast(*const ICommand, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommand_Execute(self: *const T, pUnkOuter: ?*IUnknown, riid: ?*const Guid, pParams: ?*DBPARAMS, pcRowsAffected: ?*isize, ppRowset: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommand.VTable, self.vtable).Execute(@ptrCast(*const ICommand, self), pUnkOuter, riid, pParams, pcRowsAffected, ppRowset);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommand_GetDBSession(self: *const T, riid: ?*const Guid, ppSession: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommand.VTable, self.vtable).GetDBSession(@ptrCast(*const ICommand, self), riid, ppSession);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Result-kind selector for IMultipleResults.GetResult.
pub const DBRESULTFLAGENUM = enum(i32) {
    DEFAULT = 0,
    ROWSET = 1,
    ROW = 2,
};
pub const DBRESULTFLAG_DEFAULT = DBRESULTFLAGENUM.DEFAULT;
pub const DBRESULTFLAG_ROWSET = DBRESULTFLAGENUM.ROWSET;
pub const DBRESULTFLAG_ROW = DBRESULTFLAGENUM.ROW;

// COM interface IMultipleResults: iterates the results of a multi-result command.
const IID_IMultipleResults_Value = @import("../zig.zig").Guid.initString("0c733a90-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IMultipleResults = &IID_IMultipleResults_Value;
pub const IMultipleResults = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetResult: fn(
            self: *const IMultipleResults,
            pUnkOuter: ?*IUnknown,
            lResultFlag: isize,
            riid: ?*const Guid,
            pcRowsAffected: ?*isize,
            ppRowset: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IMultipleResults_GetResult(self: *const T, pUnkOuter: ?*IUnknown, lResultFlag: isize, riid: ?*const Guid, pcRowsAffected: ?*isize, ppRowset: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IMultipleResults.VTable, self.vtable).GetResult(@ptrCast(*const IMultipleResults, self), pUnkOuter, lResultFlag, riid, pcRowsAffected, ppRowset);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Conversion-flag constants for IConvertType.CanConvert.
pub const DBCONVERTFLAGSENUM = enum(i32) {
    COLUMN = 0,
    PARAMETER = 1,
};
pub const DBCONVERTFLAGS_COLUMN = DBCONVERTFLAGSENUM.COLUMN;
pub const DBCONVERTFLAGS_PARAMETER = DBCONVERTFLAGSENUM.PARAMETER;

pub const DBCONVERTFLAGSENUM20 = enum(i32) {
    ISLONG = 2,
    ISFIXEDLENGTH = 4,
    FROMVARIANT = 8,
};
pub const DBCONVERTFLAGS_ISLONG = DBCONVERTFLAGSENUM20.ISLONG;
pub const DBCONVERTFLAGS_ISFIXEDLENGTH = DBCONVERTFLAGSENUM20.ISFIXEDLENGTH;
pub const DBCONVERTFLAGS_FROMVARIANT =
DBCONVERTFLAGSENUM20.FROMVARIANT;

// COM interface IConvertType: queries whether a type conversion is supported.
const IID_IConvertType_Value = @import("../zig.zig").Guid.initString("0c733a88-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IConvertType = &IID_IConvertType_Value;
pub const IConvertType = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CanConvert: fn(
            self: *const IConvertType,
            wFromType: u16,
            wToType: u16,
            dwConvertFlags: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConvertType_CanConvert(self: *const T, wFromType: u16, wToType: u16, dwConvertFlags: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConvertType.VTable, self.vtable).CanConvert(@ptrCast(*const IConvertType, self), wFromType, wToType, dwConvertFlags);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface ICommandPrepare: prepares/unprepares a command for execution.
const IID_ICommandPrepare_Value = @import("../zig.zig").Guid.initString("0c733a26-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ICommandPrepare = &IID_ICommandPrepare_Value;
pub const ICommandPrepare = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Prepare: fn(
            self: *const ICommandPrepare,
            cExpectedRuns: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Unprepare: fn(
            self: *const ICommandPrepare,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommandPrepare_Prepare(self: *const T, cExpectedRuns: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommandPrepare.VTable, self.vtable).Prepare(@ptrCast(*const ICommandPrepare, self), cExpectedRuns);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommandPrepare_Unprepare(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommandPrepare.VTable, self.vtable).Unprepare(@ptrCast(*const ICommandPrepare, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface ICommandProperties: reads and writes command property sets.
const IID_ICommandProperties_Value = @import("../zig.zig").Guid.initString("0c733a79-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ICommandProperties = &IID_ICommandProperties_Value;
pub const ICommandProperties = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetProperties: fn(
            self: *const ICommandProperties,
            cPropertyIDSets: u32,
            rgPropertyIDSets: ?[*]const DBPROPIDSET,
            pcPropertySets: ?*u32,
            prgPropertySets: ?*?*DBPROPSET,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetProperties: fn(
            self: *const ICommandProperties,
            cPropertySets: u32,
            rgPropertySets: [*]DBPROPSET,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommandProperties_GetProperties(self: *const T, cPropertyIDSets: u32, rgPropertyIDSets: ?[*]const DBPROPIDSET, pcPropertySets: ?*u32, prgPropertySets: ?*?*DBPROPSET) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommandProperties.VTable, self.vtable).GetProperties(@ptrCast(*const ICommandProperties, self), cPropertyIDSets, rgPropertyIDSets, pcPropertySets, prgPropertySets);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommandProperties_SetProperties(self: *const T, cPropertySets: u32, rgPropertySets: [*]DBPROPSET) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommandProperties.VTable, self.vtable).SetProperties(@ptrCast(*const ICommandProperties, self), cPropertySets, rgPropertySets);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ICommandText_Value =
@import("../zig.zig").Guid.initString("0c733a27-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ICommandText = &IID_ICommandText_Value;
// COM interface ICommandText: gets/sets a command's text. Derives from
// ICommand — `base` embeds ICommand.VTable and the mixin re-exports its methods.
pub const ICommandText = extern struct {
    pub const VTable = extern struct {
        base: ICommand.VTable,
        GetCommandText: fn(
            self: *const ICommandText,
            pguidDialect: ?*Guid,
            ppwszCommand: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetCommandText: fn(
            self: *const ICommandText,
            rguidDialect: ?*const Guid,
            pwszCommand: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ICommand.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommandText_GetCommandText(self: *const T, pguidDialect: ?*Guid, ppwszCommand: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommandText.VTable, self.vtable).GetCommandText(@ptrCast(*const ICommandText, self), pguidDialect, ppwszCommand);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommandText_SetCommandText(self: *const T, rguidDialect: ?*const Guid, pwszCommand: ?[*:0]const u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommandText.VTable, self.vtable).SetCommandText(@ptrCast(*const ICommandText, self), rguidDialect, pwszCommand);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface ICommandWithParameters: parameter metadata for a command.
const IID_ICommandWithParameters_Value = @import("../zig.zig").Guid.initString("0c733a64-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ICommandWithParameters = &IID_ICommandWithParameters_Value;
pub const ICommandWithParameters = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetParameterInfo: fn(
            self: *const ICommandWithParameters,
            pcParams: ?*usize,
            prgParamInfo: ?*?*DBPARAMINFO,
            ppNamesBuffer: ?*?*u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MapParameterNames: fn(
            self: *const ICommandWithParameters,
            cParamNames: usize,
            rgParamNames: [*]?PWSTR,
            rgParamOrdinals: [*]isize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetParameterInfo: fn(
            self: *const ICommandWithParameters,
            cParams: usize,
            rgParamOrdinals: ?[*]const usize,
            rgParamBindInfo: ?[*]const DBPARAMBINDINFO,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommandWithParameters_GetParameterInfo(self: *const T, pcParams: ?*usize, prgParamInfo: ?*?*DBPARAMINFO, ppNamesBuffer: ?*?*u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommandWithParameters.VTable, self.vtable).GetParameterInfo(@ptrCast(*const ICommandWithParameters, self), pcParams, prgParamInfo, ppNamesBuffer);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommandWithParameters_MapParameterNames(self: *const T, cParamNames: usize, rgParamNames: [*]?PWSTR, rgParamOrdinals: [*]isize) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommandWithParameters.VTable, self.vtable).MapParameterNames(@ptrCast(*const ICommandWithParameters, self), cParamNames, rgParamNames, rgParamOrdinals);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommandWithParameters_SetParameterInfo(self: *const T, cParams: usize, rgParamOrdinals: ?[*]const usize, rgParamBindInfo: ?[*]const DBPARAMBINDINFO) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommandWithParameters.VTable, self.vtable).SetParameterInfo(@ptrCast(*const ICommandWithParameters, self), cParams, rgParamOrdinals, rgParamBindInfo);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface IColumnsRowset: exposes column metadata as a rowset.
const IID_IColumnsRowset_Value = @import("../zig.zig").Guid.initString("0c733a10-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IColumnsRowset = &IID_IColumnsRowset_Value;
pub const IColumnsRowset = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetAvailableColumns: fn(
            self: *const IColumnsRowset,
            pcOptColumns: ?*usize,
            prgOptColumns: ?*?*DBID,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetColumnsRowset: fn(
            self: *const IColumnsRowset,
            pUnkOuter: ?*IUnknown,
            cOptColumns: usize,
            rgOptColumns: [*]const DBID,
            riid: ?*const Guid,
            cPropertySets: u32,
            rgPropertySets: ?[*]DBPROPSET,
            ppColRowset: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IColumnsRowset_GetAvailableColumns(self: *const T, pcOptColumns: ?*usize, prgOptColumns: ?*?*DBID) callconv(.Inline) HRESULT {
            return @ptrCast(*const IColumnsRowset.VTable, self.vtable).GetAvailableColumns(@ptrCast(*const IColumnsRowset, self), pcOptColumns, prgOptColumns);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IColumnsRowset_GetColumnsRowset(self: *const T, pUnkOuter: ?*IUnknown, cOptColumns: usize, rgOptColumns: [*]const DBID, riid: ?*const Guid, cPropertySets: u32, rgPropertySets: ?[*]DBPROPSET, ppColRowset: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IColumnsRowset.VTable, self.vtable).GetColumnsRowset(@ptrCast(*const IColumnsRowset, self), pUnkOuter, cOptColumns, rgOptColumns, riid, cPropertySets, rgPropertySets, ppColRowset);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface IColumnsInfo: column metadata and DBID-to-ordinal mapping.
const IID_IColumnsInfo_Value = @import("../zig.zig").Guid.initString("0c733a11-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IColumnsInfo = &IID_IColumnsInfo_Value;
pub const IColumnsInfo = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetColumnInfo: fn(
            self: *const IColumnsInfo,
            pcColumns: ?*usize,
            prgInfo: ?*?*DBCOLUMNINFO,
            ppStringsBuffer: ?*?*u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MapColumnIDs: fn(
            self: *const
IColumnsInfo, cColumnIDs: usize, rgColumnIDs: ?[*]const DBID, rgColumns: ?[*]usize, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IColumnsInfo_GetColumnInfo(self: *const T, pcColumns: ?*usize, prgInfo: ?*?*DBCOLUMNINFO, ppStringsBuffer: ?*?*u16) callconv(.Inline) HRESULT { return @ptrCast(*const IColumnsInfo.VTable, self.vtable).GetColumnInfo(@ptrCast(*const IColumnsInfo, self), pcColumns, prgInfo, ppStringsBuffer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IColumnsInfo_MapColumnIDs(self: *const T, cColumnIDs: usize, rgColumnIDs: ?[*]const DBID, rgColumns: ?[*]usize) callconv(.Inline) HRESULT { return @ptrCast(*const IColumnsInfo.VTable, self.vtable).MapColumnIDs(@ptrCast(*const IColumnsInfo, self), cColumnIDs, rgColumnIDs, rgColumns); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IDBCreateCommand_Value = @import("../zig.zig").Guid.initString("0c733a1d-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IDBCreateCommand = &IID_IDBCreateCommand_Value; pub const IDBCreateCommand = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, CreateCommand: fn( self: *const IDBCreateCommand, pUnkOuter: ?*IUnknown, riid: ?*const Guid, ppCommand: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDBCreateCommand_CreateCommand(self: *const T, pUnkOuter: ?*IUnknown, riid: ?*const Guid, ppCommand: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IDBCreateCommand.VTable, self.vtable).CreateCommand(@ptrCast(*const IDBCreateCommand, self), pUnkOuter, 
riid, ppCommand); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IDBCreateSession_Value = @import("../zig.zig").Guid.initString("0c733a5d-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IDBCreateSession = &IID_IDBCreateSession_Value; pub const IDBCreateSession = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, CreateSession: fn( self: *const IDBCreateSession, pUnkOuter: ?*IUnknown, riid: ?*const Guid, ppDBSession: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDBCreateSession_CreateSession(self: *const T, pUnkOuter: ?*IUnknown, riid: ?*const Guid, ppDBSession: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IDBCreateSession.VTable, self.vtable).CreateSession(@ptrCast(*const IDBCreateSession, self), pUnkOuter, riid, ppDBSession); } };} pub usingnamespace MethodMixin(@This()); }; pub const DBSOURCETYPEENUM = enum(i32) { DATASOURCE = 1, ENUMERATOR = 2, }; pub const DBSOURCETYPE_DATASOURCE = DBSOURCETYPEENUM.DATASOURCE; pub const DBSOURCETYPE_ENUMERATOR = DBSOURCETYPEENUM.ENUMERATOR; pub const DBSOURCETYPEENUM20 = enum(i32) { TDP = 1, MDP = 3, }; pub const DBSOURCETYPE_DATASOURCE_TDP = DBSOURCETYPEENUM20.TDP; pub const DBSOURCETYPE_DATASOURCE_MDP = DBSOURCETYPEENUM20.MDP; pub const DBSOURCETYPEENUM25 = enum(i32) { R = 4, }; pub const DBSOURCETYPE_BINDER = DBSOURCETYPEENUM25.R; const IID_ISourcesRowset_Value = @import("../zig.zig").Guid.initString("0c733a1e-2a1c-11ce-ade5-00aa0044773d"); pub const IID_ISourcesRowset = &IID_ISourcesRowset_Value; pub const ISourcesRowset = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetSourcesRowset: fn( self: *const ISourcesRowset, pUnkOuter: ?*IUnknown, riid: ?*const Guid, cPropertySets: u32, rgProperties: ?[*]DBPROPSET, 
ppSourcesRowset: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISourcesRowset_GetSourcesRowset(self: *const T, pUnkOuter: ?*IUnknown, riid: ?*const Guid, cPropertySets: u32, rgProperties: ?[*]DBPROPSET, ppSourcesRowset: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISourcesRowset.VTable, self.vtable).GetSourcesRowset(@ptrCast(*const ISourcesRowset, self), pUnkOuter, riid, cPropertySets, rgProperties, ppSourcesRowset);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Generated COM binding: vtable field order is the binary interface contract.
const IID_IDBProperties_Value = @import("../zig.zig").Guid.initString("0c733a8a-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IDBProperties = &IID_IDBProperties_Value;
pub const IDBProperties = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetProperties: fn(
            self: *const IDBProperties,
            cPropertyIDSets: u32,
            rgPropertyIDSets: ?[*]const DBPROPIDSET,
            pcPropertySets: ?*u32,
            prgPropertySets: ?*?*DBPROPSET,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPropertyInfo: fn(
            self: *const IDBProperties,
            cPropertyIDSets: u32,
            rgPropertyIDSets: ?[*]const DBPROPIDSET,
            pcPropertyInfoSets: ?*u32,
            prgPropertyInfoSets: ?*?*DBPROPINFOSET,
            ppDescBuffer: ?*?*u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetProperties: fn(
            self: *const IDBProperties,
            cPropertySets: u32,
            rgPropertySets: ?[*]DBPROPSET,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBProperties_GetProperties(self: *const T, cPropertyIDSets: u32, rgPropertyIDSets: ?[*]const DBPROPIDSET, pcPropertySets: ?*u32, prgPropertySets: ?*?*DBPROPSET) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBProperties.VTable, self.vtable).GetProperties(@ptrCast(*const IDBProperties, self), cPropertyIDSets, rgPropertyIDSets, pcPropertySets, prgPropertySets);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBProperties_GetPropertyInfo(self: *const T, cPropertyIDSets: u32, rgPropertyIDSets: ?[*]const DBPROPIDSET, pcPropertyInfoSets: ?*u32, prgPropertyInfoSets: ?*?*DBPROPINFOSET, ppDescBuffer: ?*?*u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBProperties.VTable, self.vtable).GetPropertyInfo(@ptrCast(*const IDBProperties, self), cPropertyIDSets, rgPropertyIDSets, pcPropertyInfoSets, prgPropertyInfoSets, ppDescBuffer);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBProperties_SetProperties(self: *const T, cPropertySets: u32, rgPropertySets: ?[*]DBPROPSET) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBProperties.VTable, self.vtable).SetProperties(@ptrCast(*const IDBProperties, self), cPropertySets, rgPropertySets);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IDBInitialize_Value = @import("../zig.zig").Guid.initString("0c733a8b-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IDBInitialize = &IID_IDBInitialize_Value;
pub const IDBInitialize = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Initialize: fn(
            self: *const IDBInitialize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Uninitialize: fn(
            self: *const IDBInitialize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBInitialize_Initialize(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBInitialize.VTable, self.vtable).Initialize(@ptrCast(*const IDBInitialize, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBInitialize_Uninitialize(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBInitialize.VTable, self.vtable).Uninitialize(@ptrCast(*const IDBInitialize, self));
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// DBLITERAL enum values, mirrored as top-level constants (generator convention).
pub const DBLITERALENUM = enum(i32) {
    INVALID = 0,
    BINARY_LITERAL = 1,
    CATALOG_NAME = 2,
    CATALOG_SEPARATOR = 3,
    CHAR_LITERAL = 4,
    COLUMN_ALIAS = 5,
    COLUMN_NAME = 6,
    CORRELATION_NAME = 7,
    CURSOR_NAME = 8,
    ESCAPE_PERCENT = 9,
    ESCAPE_UNDERSCORE = 10,
    INDEX_NAME = 11,
    LIKE_PERCENT = 12,
    LIKE_UNDERSCORE = 13,
    PROCEDURE_NAME = 14,
    QUOTE = 15,
    SCHEMA_NAME = 16,
    TABLE_NAME = 17,
    TEXT_COMMAND = 18,
    USER_NAME = 19,
    VIEW_NAME = 20,
};
pub const DBLITERAL_INVALID = DBLITERALENUM.INVALID;
pub const DBLITERAL_BINARY_LITERAL = DBLITERALENUM.BINARY_LITERAL;
pub const DBLITERAL_CATALOG_NAME = DBLITERALENUM.CATALOG_NAME;
pub const DBLITERAL_CATALOG_SEPARATOR = DBLITERALENUM.CATALOG_SEPARATOR;
pub const DBLITERAL_CHAR_LITERAL = DBLITERALENUM.CHAR_LITERAL;
pub const DBLITERAL_COLUMN_ALIAS = DBLITERALENUM.COLUMN_ALIAS;
pub const DBLITERAL_COLUMN_NAME = DBLITERALENUM.COLUMN_NAME;
pub const DBLITERAL_CORRELATION_NAME = DBLITERALENUM.CORRELATION_NAME;
pub const DBLITERAL_CURSOR_NAME = DBLITERALENUM.CURSOR_NAME;
pub const DBLITERAL_ESCAPE_PERCENT = DBLITERALENUM.ESCAPE_PERCENT;
pub const DBLITERAL_ESCAPE_UNDERSCORE = DBLITERALENUM.ESCAPE_UNDERSCORE;
pub const DBLITERAL_INDEX_NAME = DBLITERALENUM.INDEX_NAME;
pub const DBLITERAL_LIKE_PERCENT = DBLITERALENUM.LIKE_PERCENT;
pub const DBLITERAL_LIKE_UNDERSCORE = DBLITERALENUM.LIKE_UNDERSCORE;
pub const DBLITERAL_PROCEDURE_NAME = DBLITERALENUM.PROCEDURE_NAME;
pub const DBLITERAL_QUOTE = DBLITERALENUM.QUOTE;
pub const DBLITERAL_SCHEMA_NAME = DBLITERALENUM.SCHEMA_NAME;
pub const DBLITERAL_TABLE_NAME = DBLITERALENUM.TABLE_NAME;
pub const DBLITERAL_TEXT_COMMAND = DBLITERALENUM.TEXT_COMMAND;
pub const DBLITERAL_USER_NAME = DBLITERALENUM.USER_NAME;
pub const
DBLITERAL_VIEW_NAME = DBLITERALENUM.VIEW_NAME;

pub const DBLITERALENUM20 = enum(i32) {
    CUBE_NAME = 21,
    DIMENSION_NAME = 22,
    HIERARCHY_NAME = 23,
    LEVEL_NAME = 24,
    MEMBER_NAME = 25,
    PROPERTY_NAME = 26,
    SCHEMA_SEPARATOR = 27,
    QUOTE_SUFFIX = 28,
};
pub const DBLITERAL_CUBE_NAME = DBLITERALENUM20.CUBE_NAME;
pub const DBLITERAL_DIMENSION_NAME = DBLITERALENUM20.DIMENSION_NAME;
pub const DBLITERAL_HIERARCHY_NAME = DBLITERALENUM20.HIERARCHY_NAME;
pub const DBLITERAL_LEVEL_NAME = DBLITERALENUM20.LEVEL_NAME;
pub const DBLITERAL_MEMBER_NAME = DBLITERALENUM20.MEMBER_NAME;
pub const DBLITERAL_PROPERTY_NAME = DBLITERALENUM20.PROPERTY_NAME;
pub const DBLITERAL_SCHEMA_SEPARATOR = DBLITERALENUM20.SCHEMA_SEPARATOR;
pub const DBLITERAL_QUOTE_SUFFIX = DBLITERALENUM20.QUOTE_SUFFIX;

pub const DBLITERALENUM21 = enum(i32) {
    PERCENT_SUFFIX = 29,
    UNDERSCORE_SUFFIX = 30,
};
pub const DBLITERAL_ESCAPE_PERCENT_SUFFIX = DBLITERALENUM21.PERCENT_SUFFIX;
pub const DBLITERAL_ESCAPE_UNDERSCORE_SUFFIX = DBLITERALENUM21.UNDERSCORE_SUFFIX;

// Generated COM binding: vtable field order is the binary interface contract.
const IID_IDBInfo_Value = @import("../zig.zig").Guid.initString("0c733a89-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IDBInfo = &IID_IDBInfo_Value;
pub const IDBInfo = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetKeywords: fn(
            self: *const IDBInfo,
            ppwszKeywords: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLiteralInfo: fn(
            self: *const IDBInfo,
            cLiterals: u32,
            rgLiterals: ?[*]const u32,
            pcLiteralInfo: ?*u32,
            prgLiteralInfo: ?*?*DBLITERALINFO,
            ppCharBuffer: ?*?*u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBInfo_GetKeywords(self: *const T, ppwszKeywords: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBInfo.VTable, self.vtable).GetKeywords(@ptrCast(*const IDBInfo, self), ppwszKeywords);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBInfo_GetLiteralInfo(self: *const T, cLiterals: u32, rgLiterals: ?[*]const u32, pcLiteralInfo: ?*u32, prgLiteralInfo: ?*?*DBLITERALINFO, ppCharBuffer: ?*?*u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBInfo.VTable, self.vtable).GetLiteralInfo(@ptrCast(*const IDBInfo, self), cLiterals, rgLiterals, pcLiteralInfo, prgLiteralInfo, ppCharBuffer);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IDBDataSourceAdmin_Value = @import("../zig.zig").Guid.initString("0c733a7a-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IDBDataSourceAdmin = &IID_IDBDataSourceAdmin_Value;
pub const IDBDataSourceAdmin = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CreateDataSource: fn(
            self: *const IDBDataSourceAdmin,
            cPropertySets: u32,
            rgPropertySets: ?[*]DBPROPSET,
            pUnkOuter: ?*IUnknown,
            riid: ?*const Guid,
            ppDBSession: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DestroyDataSource: fn(
            self: *const IDBDataSourceAdmin,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetCreationProperties: fn(
            self: *const IDBDataSourceAdmin,
            cPropertyIDSets: u32,
            rgPropertyIDSets: ?[*]const DBPROPIDSET,
            pcPropertyInfoSets: ?*u32,
            prgPropertyInfoSets: ?*?*DBPROPINFOSET,
            ppDescBuffer: ?*?*u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ModifyDataSource: fn(
            self: *const IDBDataSourceAdmin,
            cPropertySets: u32,
            rgPropertySets: ?[*]DBPROPSET,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBDataSourceAdmin_CreateDataSource(self: *const T, cPropertySets: u32, rgPropertySets: ?[*]DBPROPSET, pUnkOuter: ?*IUnknown, riid: ?*const Guid, ppDBSession: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return
@ptrCast(*const IDBDataSourceAdmin.VTable, self.vtable).CreateDataSource(@ptrCast(*const IDBDataSourceAdmin, self), cPropertySets, rgPropertySets, pUnkOuter, riid, ppDBSession);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBDataSourceAdmin_DestroyDataSource(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBDataSourceAdmin.VTable, self.vtable).DestroyDataSource(@ptrCast(*const IDBDataSourceAdmin, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBDataSourceAdmin_GetCreationProperties(self: *const T, cPropertyIDSets: u32, rgPropertyIDSets: ?[*]const DBPROPIDSET, pcPropertyInfoSets: ?*u32, prgPropertyInfoSets: ?*?*DBPROPINFOSET, ppDescBuffer: ?*?*u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBDataSourceAdmin.VTable, self.vtable).GetCreationProperties(@ptrCast(*const IDBDataSourceAdmin, self), cPropertyIDSets, rgPropertyIDSets, pcPropertyInfoSets, prgPropertyInfoSets, ppDescBuffer);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBDataSourceAdmin_ModifyDataSource(self: *const T, cPropertySets: u32, rgPropertySets: ?[*]DBPROPSET) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBDataSourceAdmin.VTable, self.vtable).ModifyDataSource(@ptrCast(*const IDBDataSourceAdmin, self), cPropertySets, rgPropertySets);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Generated COM binding: vtable field order is the binary interface contract.
const IID_IDBAsynchNotify_Value = @import("../zig.zig").Guid.initString("0c733a96-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IDBAsynchNotify = &IID_IDBAsynchNotify_Value;
pub const IDBAsynchNotify = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        OnLowResource: fn(
            self: *const IDBAsynchNotify,
            dwReserved: usize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnProgress: fn(
            self: *const IDBAsynchNotify,
            hChapter: usize,
            eOperation: u32,
            ulProgress: usize,
            ulProgressMax: usize,
            eAsynchPhase: u32,
            pwszStatusText: ?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnStop: fn(
            self: *const IDBAsynchNotify,
            hChapter: usize,
            eOperation: u32,
            hrStatus: HRESULT,
            pwszStatusText: ?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBAsynchNotify_OnLowResource(self: *const T, dwReserved: usize) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBAsynchNotify.VTable, self.vtable).OnLowResource(@ptrCast(*const IDBAsynchNotify, self), dwReserved);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBAsynchNotify_OnProgress(self: *const T, hChapter: usize, eOperation: u32, ulProgress: usize, ulProgressMax: usize, eAsynchPhase: u32, pwszStatusText: ?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBAsynchNotify.VTable, self.vtable).OnProgress(@ptrCast(*const IDBAsynchNotify, self), hChapter, eOperation, ulProgress, ulProgressMax, eAsynchPhase, pwszStatusText);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBAsynchNotify_OnStop(self: *const T, hChapter: usize, eOperation: u32, hrStatus: HRESULT, pwszStatusText: ?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBAsynchNotify.VTable, self.vtable).OnStop(@ptrCast(*const IDBAsynchNotify, self), hChapter, eOperation, hrStatus, pwszStatusText);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IDBAsynchStatus_Value = @import("../zig.zig").Guid.initString("0c733a95-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IDBAsynchStatus = &IID_IDBAsynchStatus_Value;
pub const IDBAsynchStatus = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Abort: fn(
            self: *const IDBAsynchStatus,
            hChapter: usize,
            eOperation: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetStatus: fn(
            self: *const IDBAsynchStatus,
            hChapter: usize,
            eOperation: u32,
            pulProgress: ?*usize,
            pulProgressMax: ?*usize,
            peAsynchPhase: ?*u32,
            ppwszStatusText: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBAsynchStatus_Abort(self: *const T, hChapter: usize, eOperation: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBAsynchStatus.VTable, self.vtable).Abort(@ptrCast(*const IDBAsynchStatus, self), hChapter, eOperation);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBAsynchStatus_GetStatus(self: *const T, hChapter: usize, eOperation: u32, pulProgress: ?*usize, pulProgressMax: ?*usize, peAsynchPhase: ?*u32, ppwszStatusText: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBAsynchStatus.VTable, self.vtable).GetStatus(@ptrCast(*const IDBAsynchStatus, self), hChapter, eOperation, pulProgress, pulProgressMax, peAsynchPhase, ppwszStatusText);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ISessionProperties_Value = @import("../zig.zig").Guid.initString("0c733a85-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ISessionProperties = &IID_ISessionProperties_Value;
pub const ISessionProperties = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetProperties: fn(
            self: *const ISessionProperties,
            cPropertyIDSets: u32,
            rgPropertyIDSets: ?[*]const DBPROPIDSET,
            pcPropertySets: ?*u32,
            prgPropertySets: ?*?*DBPROPSET,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetProperties: fn(
            self: *const ISessionProperties,
            cPropertySets: u32,
            rgPropertySets: ?[*]DBPROPSET,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISessionProperties_GetProperties(self: *const T, cPropertyIDSets: u32, rgPropertyIDSets: ?[*]const DBPROPIDSET, pcPropertySets: ?*u32, prgPropertySets: ?*?*DBPROPSET) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISessionProperties.VTable, self.vtable).GetProperties(@ptrCast(*const ISessionProperties, self), cPropertyIDSets, rgPropertyIDSets, pcPropertySets, prgPropertySets);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISessionProperties_SetProperties(self: *const T, cPropertySets: u32, rgPropertySets: ?[*]DBPROPSET) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISessionProperties.VTable, self.vtable).SetProperties(@ptrCast(*const ISessionProperties, self), cPropertySets, rgPropertySets);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IIndexDefinition_Value = @import("../zig.zig").Guid.initString("0c733a68-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IIndexDefinition = &IID_IIndexDefinition_Value;
pub const IIndexDefinition = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CreateIndex: fn(
            self: *const IIndexDefinition,
            pTableID: ?*DBID,
            pIndexID: ?*DBID,
            cIndexColumnDescs: usize,
            rgIndexColumnDescs: [*]const DBINDEXCOLUMNDESC,
            cPropertySets: u32,
            rgPropertySets: [*]DBPROPSET,
            ppIndexID: ?*?*DBID,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DropIndex: fn(
            self: *const IIndexDefinition,
            pTableID: ?*DBID,
            pIndexID: ?*DBID,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IIndexDefinition_CreateIndex(self: *const T, pTableID: ?*DBID, pIndexID: ?*DBID, cIndexColumnDescs: usize, rgIndexColumnDescs: [*]const DBINDEXCOLUMNDESC, cPropertySets: u32, rgPropertySets: [*]DBPROPSET, ppIndexID:
?*?*DBID) callconv(.Inline) HRESULT {
            return @ptrCast(*const IIndexDefinition.VTable, self.vtable).CreateIndex(@ptrCast(*const IIndexDefinition, self), pTableID, pIndexID, cIndexColumnDescs, rgIndexColumnDescs, cPropertySets, rgPropertySets, ppIndexID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IIndexDefinition_DropIndex(self: *const T, pTableID: ?*DBID, pIndexID: ?*DBID) callconv(.Inline) HRESULT {
            return @ptrCast(*const IIndexDefinition.VTable, self.vtable).DropIndex(@ptrCast(*const IIndexDefinition, self), pTableID, pIndexID);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Generated COM binding: vtable field order is the binary interface contract.
const IID_ITableDefinition_Value = @import("../zig.zig").Guid.initString("0c733a86-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ITableDefinition = &IID_ITableDefinition_Value;
pub const ITableDefinition = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CreateTable: fn(
            self: *const ITableDefinition,
            pUnkOuter: ?*IUnknown,
            pTableID: ?*DBID,
            cColumnDescs: usize,
            rgColumnDescs: ?[*]const DBCOLUMNDESC,
            riid: ?*const Guid,
            cPropertySets: u32,
            rgPropertySets: ?[*]DBPROPSET,
            ppTableID: ?*?*DBID,
            ppRowset: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DropTable: fn(
            self: *const ITableDefinition,
            pTableID: ?*DBID,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddColumn: fn(
            self: *const ITableDefinition,
            pTableID: ?*DBID,
            pColumnDesc: ?*DBCOLUMNDESC,
            ppColumnID: ?*?*DBID,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DropColumn: fn(
            self: *const ITableDefinition,
            pTableID: ?*DBID,
            pColumnID: ?*DBID,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITableDefinition_CreateTable(self: *const T, pUnkOuter: ?*IUnknown, pTableID: ?*DBID, cColumnDescs: usize, rgColumnDescs: ?[*]const DBCOLUMNDESC, riid: ?*const Guid, cPropertySets: u32, rgPropertySets: ?[*]DBPROPSET, ppTableID: ?*?*DBID, ppRowset: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const ITableDefinition.VTable, self.vtable).CreateTable(@ptrCast(*const ITableDefinition, self), pUnkOuter, pTableID, cColumnDescs, rgColumnDescs, riid, cPropertySets, rgPropertySets, ppTableID, ppRowset);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITableDefinition_DropTable(self: *const T, pTableID: ?*DBID) callconv(.Inline) HRESULT {
            return @ptrCast(*const ITableDefinition.VTable, self.vtable).DropTable(@ptrCast(*const ITableDefinition, self), pTableID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITableDefinition_AddColumn(self: *const T, pTableID: ?*DBID, pColumnDesc: ?*DBCOLUMNDESC, ppColumnID: ?*?*DBID) callconv(.Inline) HRESULT {
            return @ptrCast(*const ITableDefinition.VTable, self.vtable).AddColumn(@ptrCast(*const ITableDefinition, self), pTableID, pColumnDesc, ppColumnID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITableDefinition_DropColumn(self: *const T, pTableID: ?*DBID, pColumnID: ?*DBID) callconv(.Inline) HRESULT {
            return @ptrCast(*const ITableDefinition.VTable, self.vtable).DropColumn(@ptrCast(*const ITableDefinition, self), pTableID, pColumnID);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IOpenRowset_Value = @import("../zig.zig").Guid.initString("0c733a69-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IOpenRowset = &IID_IOpenRowset_Value;
pub const IOpenRowset = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        OpenRowset: fn(
            self: *const IOpenRowset,
            pUnkOuter: ?*IUnknown,
            pTableID: ?*DBID,
            pIndexID: ?*DBID,
            riid: ?*const Guid,
            cPropertySets: u32,
            rgPropertySets: ?[*]DBPROPSET,
            ppRowset: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T:
type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IOpenRowset_OpenRowset(self: *const T, pUnkOuter: ?*IUnknown, pTableID: ?*DBID, pIndexID: ?*DBID, riid: ?*const Guid, cPropertySets: u32, rgPropertySets: ?[*]DBPROPSET, ppRowset: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IOpenRowset.VTable, self.vtable).OpenRowset(@ptrCast(*const IOpenRowset, self), pUnkOuter, pTableID, pIndexID, riid, cPropertySets, rgPropertySets, ppRowset);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Generated COM binding: vtable field order is the binary interface contract.
const IID_IDBSchemaRowset_Value = @import("../zig.zig").Guid.initString("0c733a7b-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IDBSchemaRowset = &IID_IDBSchemaRowset_Value;
pub const IDBSchemaRowset = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetRowset: fn(
            self: *const IDBSchemaRowset,
            pUnkOuter: ?*IUnknown,
            rguidSchema: ?*const Guid,
            cRestrictions: u32,
            rgRestrictions: ?[*]const VARIANT,
            riid: ?*const Guid,
            cPropertySets: u32,
            rgPropertySets: ?[*]DBPROPSET,
            ppRowset: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSchemas: fn(
            self: *const IDBSchemaRowset,
            pcSchemas: ?*u32,
            prgSchemas: ?*?*Guid,
            prgRestrictionSupport: ?*?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBSchemaRowset_GetRowset(self: *const T, pUnkOuter: ?*IUnknown, rguidSchema: ?*const Guid, cRestrictions: u32, rgRestrictions: ?[*]const VARIANT, riid: ?*const Guid, cPropertySets: u32, rgPropertySets: ?[*]DBPROPSET, ppRowset: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBSchemaRowset.VTable, self.vtable).GetRowset(@ptrCast(*const IDBSchemaRowset, self), pUnkOuter, rguidSchema, cRestrictions, rgRestrictions, riid, cPropertySets, rgPropertySets, ppRowset);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IDBSchemaRowset_GetSchemas(self: *const T, pcSchemas: ?*u32, prgSchemas: ?*?*Guid, prgRestrictionSupport: ?*?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IDBSchemaRowset.VTable, self.vtable).GetSchemas(@ptrCast(*const IDBSchemaRowset, self), pcSchemas, prgSchemas, prgRestrictionSupport);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IMDDataset_Value = @import("../zig.zig").Guid.initString("a07cccd1-8148-11d0-87bb-00c04fc33942");
pub const IID_IMDDataset = &IID_IMDDataset_Value;
pub const IMDDataset = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        FreeAxisInfo: fn(
            self: *const IMDDataset,
            cAxes: usize,
            rgAxisInfo: ?*MDAXISINFO,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetAxisInfo: fn(
            self: *const IMDDataset,
            pcAxes: ?*usize,
            prgAxisInfo: ?*?*MDAXISINFO,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetAxisRowset: fn(
            self: *const IMDDataset,
            pUnkOuter: ?*IUnknown,
            iAxis: usize,
            riid: ?*const Guid,
            cPropertySets: u32,
            rgPropertySets: ?*DBPROPSET,
            ppRowset: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetCellData: fn(
            self: *const IMDDataset,
            hAccessor: usize,
            ulStartCell: usize,
            ulEndCell: usize,
            pData: ?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSpecification: fn(
            self: *const IMDDataset,
            riid: ?*const Guid,
            ppSpecification: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IMDDataset_FreeAxisInfo(self: *const T, cAxes: usize, rgAxisInfo: ?*MDAXISINFO) callconv(.Inline) HRESULT {
            return @ptrCast(*const IMDDataset.VTable, self.vtable).FreeAxisInfo(@ptrCast(*const
IMDDataset, self), cAxes, rgAxisInfo);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IMDDataset_GetAxisInfo(self: *const T, pcAxes: ?*usize, prgAxisInfo: ?*?*MDAXISINFO) callconv(.Inline) HRESULT {
            return @ptrCast(*const IMDDataset.VTable, self.vtable).GetAxisInfo(@ptrCast(*const IMDDataset, self), pcAxes, prgAxisInfo);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IMDDataset_GetAxisRowset(self: *const T, pUnkOuter: ?*IUnknown, iAxis: usize, riid: ?*const Guid, cPropertySets: u32, rgPropertySets: ?*DBPROPSET, ppRowset: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IMDDataset.VTable, self.vtable).GetAxisRowset(@ptrCast(*const IMDDataset, self), pUnkOuter, iAxis, riid, cPropertySets, rgPropertySets, ppRowset);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IMDDataset_GetCellData(self: *const T, hAccessor: usize, ulStartCell: usize, ulEndCell: usize, pData: ?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IMDDataset.VTable, self.vtable).GetCellData(@ptrCast(*const IMDDataset, self), hAccessor, ulStartCell, ulEndCell, pData);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IMDDataset_GetSpecification(self: *const T, riid: ?*const Guid, ppSpecification: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IMDDataset.VTable, self.vtable).GetSpecification(@ptrCast(*const IMDDataset, self), riid, ppSpecification);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Generated COM binding: vtable field order is the binary interface contract.
const IID_IMDFind_Value = @import("../zig.zig").Guid.initString("a07cccd2-8148-11d0-87bb-00c04fc33942");
pub const IID_IMDFind = &IID_IMDFind_Value;
pub const IMDFind = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        FindCell: fn(
            self: *const IMDFind,
            ulStartingOrdinal: usize,
            cMembers: usize,
            rgpwszMember: ?*?PWSTR,
            pulCellOrdinal: ?*usize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        FindTuple: fn(
            self: *const IMDFind,
            ulAxisIdentifier: u32,
            ulStartingOrdinal: usize,
            cMembers: usize,
            rgpwszMember: ?*?PWSTR,
            pulTupleOrdinal: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IMDFind_FindCell(self: *const T, ulStartingOrdinal: usize, cMembers: usize, rgpwszMember: ?*?PWSTR, pulCellOrdinal: ?*usize) callconv(.Inline) HRESULT {
            return @ptrCast(*const IMDFind.VTable, self.vtable).FindCell(@ptrCast(*const IMDFind, self), ulStartingOrdinal, cMembers, rgpwszMember, pulCellOrdinal);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IMDFind_FindTuple(self: *const T, ulAxisIdentifier: u32, ulStartingOrdinal: usize, cMembers: usize, rgpwszMember: ?*?PWSTR, pulTupleOrdinal: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const IMDFind.VTable, self.vtable).FindTuple(@ptrCast(*const IMDFind, self), ulAxisIdentifier, ulStartingOrdinal, cMembers, rgpwszMember, pulTupleOrdinal);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IMDRangeRowset_Value = @import("../zig.zig").Guid.initString("0c733aa0-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IMDRangeRowset = &IID_IMDRangeRowset_Value;
pub const IMDRangeRowset = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetRangeRowset: fn(
            self: *const IMDRangeRowset,
            pUnkOuter: ?*IUnknown,
            ulStartCell: usize,
            ulEndCell: usize,
            riid: ?*const Guid,
            cPropertySets: u32,
            rgPropertySets: ?*DBPROPSET,
            ppRowset: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IMDRangeRowset_GetRangeRowset(self:
*const T, pUnkOuter: ?*IUnknown, ulStartCell: usize, ulEndCell: usize, riid: ?*const Guid, cPropertySets: u32, rgPropertySets: ?*DBPROPSET, ppRowset: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IMDRangeRowset.VTable, self.vtable).GetRangeRowset(@ptrCast(*const IMDRangeRowset, self), pUnkOuter, ulStartCell, ulEndCell, riid, cPropertySets, rgPropertySets, ppRowset);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Generated COM binding: vtable field order is the binary interface contract.
const IID_IAlterTable_Value = @import("../zig.zig").Guid.initString("0c733aa5-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IAlterTable = &IID_IAlterTable_Value;
pub const IAlterTable = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AlterColumn: fn(
            self: *const IAlterTable,
            pTableId: ?*DBID,
            pColumnId: ?*DBID,
            dwColumnDescFlags: u32,
            pColumnDesc: ?*DBCOLUMNDESC,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AlterTable: fn(
            self: *const IAlterTable,
            pTableId: ?*DBID,
            pNewTableId: ?*DBID,
            cPropertySets: u32,
            rgPropertySets: ?*DBPROPSET,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAlterTable_AlterColumn(self: *const T, pTableId: ?*DBID, pColumnId: ?*DBID, dwColumnDescFlags: u32, pColumnDesc: ?*DBCOLUMNDESC) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAlterTable.VTable, self.vtable).AlterColumn(@ptrCast(*const IAlterTable, self), pTableId, pColumnId, dwColumnDescFlags, pColumnDesc);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAlterTable_AlterTable(self: *const T, pTableId: ?*DBID, pNewTableId: ?*DBID, cPropertySets: u32, rgPropertySets: ?*DBPROPSET) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAlterTable.VTable, self.vtable).AlterTable(@ptrCast(*const IAlterTable, self), pTableId, pNewTableId, cPropertySets, rgPropertySets);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IAlterIndex_Value = @import("../zig.zig").Guid.initString("0c733aa6-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IAlterIndex = &IID_IAlterIndex_Value;
pub const IAlterIndex = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AlterIndex: fn(
            self: *const IAlterIndex,
            pTableId: ?*DBID,
            pIndexId: ?*DBID,
            pNewIndexId: ?*DBID,
            cPropertySets: u32,
            rgPropertySets: ?*DBPROPSET,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IAlterIndex_AlterIndex(self: *const T, pTableId: ?*DBID, pIndexId: ?*DBID, pNewIndexId: ?*DBID, cPropertySets: u32, rgPropertySets: ?*DBPROPSET) callconv(.Inline) HRESULT {
            return @ptrCast(*const IAlterIndex.VTable, self.vtable).AlterIndex(@ptrCast(*const IAlterIndex, self), pTableId, pIndexId, pNewIndexId, cPropertySets, rgPropertySets);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IRowsetChapterMember_Value = @import("../zig.zig").Guid.initString("0c733aa8-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowsetChapterMember = &IID_IRowsetChapterMember_Value;
pub const IRowsetChapterMember = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        IsRowInChapter: fn(
            self: *const IRowsetChapterMember,
            hChapter: usize,
            hRow: usize,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetChapterMember_IsRowInChapter(self: *const T, hChapter: usize, hRow: usize) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRowsetChapterMember.VTable, self.vtable).IsRowInChapter(@ptrCast(*const IRowsetChapterMember, self), hChapter, hRow);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ICommandPersist_Value = @import("../zig.zig").Guid.initString("0c733aa7-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ICommandPersist = &IID_ICommandPersist_Value;
pub const ICommandPersist = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        DeleteCommand: fn(
            self: *const ICommandPersist,
            pCommandID: ?*DBID,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetCurrentCommand: fn(
            self: *const ICommandPersist,
            ppCommandID: ?*?*DBID,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        LoadCommand: fn(
            self: *const ICommandPersist,
            pCommandID: ?*DBID,
            dwFlags: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SaveCommand: fn(
            self: *const ICommandPersist,
            pCommandID: ?*DBID,
            dwFlags: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommandPersist_DeleteCommand(self: *const T, pCommandID: ?*DBID) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommandPersist.VTable, self.vtable).DeleteCommand(@ptrCast(*const ICommandPersist, self), pCommandID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommandPersist_GetCurrentCommand(self: *const T, ppCommandID: ?*?*DBID) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommandPersist.VTable, self.vtable).GetCurrentCommand(@ptrCast(*const ICommandPersist, self), ppCommandID);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ICommandPersist_LoadCommand(self: *const T, pCommandID: ?*DBID, dwFlags: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ICommandPersist.VTable, self.vtable).LoadCommand(@ptrCast(*const ICommandPersist, self), pCommandID, dwFlags);
        }
        // NOTE: method is namespaced with interface name to
// avoid conflicts for now
        pub fn ICommandPersist_SaveCommand(self: *const T, pCommandID: ?*DBID, dwFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ICommandPersist.VTable, self.vtable).SaveCommand(@ptrCast(*const ICommandPersist, self), pCommandID, dwFlags); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IRowsetRefresh_Value = @import("../zig.zig").Guid.initString("0c733aa9-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowsetRefresh = &IID_IRowsetRefresh_Value;

/// COM interface `IRowsetRefresh`; vtable extends IUnknown.
/// RefreshVisibleData returns counts/arrays via out-pointers
/// (pcRowsRefreshed/prghRowsRefreshed/prgRowStatus); GetLastVisibleData writes
/// into a caller-provided, untyped `pData` buffer.
pub const IRowsetRefresh = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        RefreshVisibleData: fn( self: *const IRowsetRefresh, hChapter: usize, cRows: usize, rghRows: ?*const usize, fOverWrite: BOOL, pcRowsRefreshed: ?*usize, prghRowsRefreshed: ?*?*usize, prgRowStatus: ?*?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLastVisibleData: fn( self: *const IRowsetRefresh, hRow: usize, hAccessor: usize, pData: ?*c_void, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetRefresh_RefreshVisibleData(self: *const T, hChapter: usize, cRows: usize, rghRows: ?*const usize, fOverWrite: BOOL, pcRowsRefreshed: ?*usize, prghRowsRefreshed: ?*?*usize, prgRowStatus: ?*?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IRowsetRefresh.VTable, self.vtable).RefreshVisibleData(@ptrCast(*const IRowsetRefresh, self), hChapter, cRows, rghRows, fOverWrite, pcRowsRefreshed, prghRowsRefreshed, prgRowStatus); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowsetRefresh_GetLastVisibleData(self: *const T, hRow: usize, hAccessor: usize, pData: ?*c_void) callconv(.Inline) HRESULT { return @ptrCast(*const IRowsetRefresh.VTable, self.vtable).GetLastVisibleData(@ptrCast(*const IRowsetRefresh, self), hRow, hAccessor, pData); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IParentRowset_Value = @import("../zig.zig").Guid.initString("0c733aaa-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IParentRowset = &IID_IParentRowset_Value;

/// COM interface `IParentRowset`; vtable extends IUnknown.
/// GetChildRowset returns the child object through ppRowset, selected by riid.
pub const IParentRowset = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetChildRowset: fn( self: *const IParentRowset, pUnkOuter: ?*IUnknown, iOrdinal: usize, riid: ?*const Guid, ppRowset: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IParentRowset_GetChildRowset(self: *const T, pUnkOuter: ?*IUnknown, iOrdinal: usize, riid: ?*const Guid, ppRowset: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IParentRowset.VTable, self.vtable).GetChildRowset(@ptrCast(*const IParentRowset, self), pUnkOuter, iOrdinal, riid, ppRowset); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IErrorRecords_Value = @import("../zig.zig").Guid.initString("0c733a67-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IErrorRecords = &IID_IErrorRecords_Value;

/// COM interface `IErrorRecords`; vtable extends IUnknown.
/// Error-record collection: add records, then query them by ulRecordNum.
pub const IErrorRecords = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddErrorRecord: fn( self: *const IErrorRecords, pErrorInfo: ?*ERRORINFO, dwLookupID: u32, pdispparams: ?*DISPPARAMS, punkCustomError: ?*IUnknown, dwDynamicErrorID: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetBasicErrorInfo: fn( self: *const IErrorRecords, ulRecordNum: u32, pErrorInfo: ?*ERRORINFO, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetCustomErrorObject: fn( self: *const IErrorRecords, ulRecordNum: u32, riid: ?*const Guid, ppObject: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetErrorInfo: fn( self: *const IErrorRecords, ulRecordNum: u32, lcid: u32, ppErrorInfo:
?*?*IErrorInfo, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetErrorParameters: fn( self: *const IErrorRecords, ulRecordNum: u32, pdispparams: ?*DISPPARAMS, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetRecordCount: fn( self: *const IErrorRecords, pcRecords: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IErrorRecords_AddErrorRecord(self: *const T, pErrorInfo: ?*ERRORINFO, dwLookupID: u32, pdispparams: ?*DISPPARAMS, punkCustomError: ?*IUnknown, dwDynamicErrorID: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IErrorRecords.VTable, self.vtable).AddErrorRecord(@ptrCast(*const IErrorRecords, self), pErrorInfo, dwLookupID, pdispparams, punkCustomError, dwDynamicErrorID); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IErrorRecords_GetBasicErrorInfo(self: *const T, ulRecordNum: u32, pErrorInfo: ?*ERRORINFO) callconv(.Inline) HRESULT { return @ptrCast(*const IErrorRecords.VTable, self.vtable).GetBasicErrorInfo(@ptrCast(*const IErrorRecords, self), ulRecordNum, pErrorInfo); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IErrorRecords_GetCustomErrorObject(self: *const T, ulRecordNum: u32, riid: ?*const Guid, ppObject: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IErrorRecords.VTable, self.vtable).GetCustomErrorObject(@ptrCast(*const IErrorRecords, self), ulRecordNum, riid, ppObject); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IErrorRecords_GetErrorInfo(self: *const T, ulRecordNum: u32, lcid: u32, ppErrorInfo: ?*?*IErrorInfo) callconv(.Inline) HRESULT { return @ptrCast(*const IErrorRecords.VTable, self.vtable).GetErrorInfo(@ptrCast(*const IErrorRecords, self), ulRecordNum, lcid, ppErrorInfo); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IErrorRecords_GetErrorParameters(self: *const T, ulRecordNum: u32, pdispparams: ?*DISPPARAMS) callconv(.Inline) HRESULT { return @ptrCast(*const IErrorRecords.VTable, self.vtable).GetErrorParameters(@ptrCast(*const IErrorRecords, self), ulRecordNum, pdispparams); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IErrorRecords_GetRecordCount(self: *const T, pcRecords: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IErrorRecords.VTable, self.vtable).GetRecordCount(@ptrCast(*const IErrorRecords, self), pcRecords); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IErrorLookup_Value = @import("../zig.zig").Guid.initString("0c733a66-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IErrorLookup = &IID_IErrorLookup_Value;

/// COM interface `IErrorLookup`; vtable extends IUnknown.
/// Maps an HRESULT + lookup ID to description/help strings (out-params are
/// BSTRs); ReleaseErrors frees records for a dynamic error ID.
pub const IErrorLookup = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetErrorDescription: fn( self: *const IErrorLookup, hrError: HRESULT, dwLookupID: u32, pdispparams: ?*DISPPARAMS, lcid: u32, pbstrSource: ?*?BSTR, pbstrDescription: ?*?BSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetHelpInfo: fn( self: *const IErrorLookup, hrError: HRESULT, dwLookupID: u32, lcid: u32, pbstrHelpFile: ?*?BSTR, pdwHelpContext: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ReleaseErrors: fn( self: *const IErrorLookup, dwDynamicErrorID: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IErrorLookup_GetErrorDescription(self: *const T, hrError: HRESULT, dwLookupID: u32, pdispparams: ?*DISPPARAMS, lcid: u32, pbstrSource: ?*?BSTR, pbstrDescription: ?*?BSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IErrorLookup.VTable, self.vtable).GetErrorDescription(@ptrCast(*const IErrorLookup, self), hrError, dwLookupID, pdispparams, lcid, pbstrSource, pbstrDescription); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IErrorLookup_GetHelpInfo(self: *const T, hrError: HRESULT, dwLookupID: u32, lcid: u32, pbstrHelpFile: ?*?BSTR, pdwHelpContext: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IErrorLookup.VTable, self.vtable).GetHelpInfo(@ptrCast(*const IErrorLookup, self), hrError, dwLookupID, lcid, pbstrHelpFile, pdwHelpContext); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IErrorLookup_ReleaseErrors(self: *const T, dwDynamicErrorID: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IErrorLookup.VTable, self.vtable).ReleaseErrors(@ptrCast(*const IErrorLookup, self), dwDynamicErrorID); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ISQLErrorInfo_Value = @import("../zig.zig").Guid.initString("0c733a74-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ISQLErrorInfo = &IID_ISQLErrorInfo_Value;

/// COM interface `ISQLErrorInfo`; vtable extends IUnknown.
/// GetSQLInfo returns a SQLSTATE string (BSTR) and a native error code.
pub const ISQLErrorInfo = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetSQLInfo: fn( self: *const ISQLErrorInfo, pbstrSQLState: ?*?BSTR, plNativeError: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISQLErrorInfo_GetSQLInfo(self: *const T, pbstrSQLState: ?*?BSTR, plNativeError: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const ISQLErrorInfo.VTable, self.vtable).GetSQLInfo(@ptrCast(*const ISQLErrorInfo, self), pbstrSQLState, plNativeError); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IGetDataSource_Value = @import("../zig.zig").Guid.initString("0c733a75-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IGetDataSource =
&IID_IGetDataSource_Value;

/// COM interface `IGetDataSource`; vtable extends IUnknown.
/// GetDataSource returns the owning data-source object through ppDataSource,
/// selected by riid.
pub const IGetDataSource = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetDataSource: fn( self: *const IGetDataSource, riid: ?*const Guid, ppDataSource: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IGetDataSource_GetDataSource(self: *const T, riid: ?*const Guid, ppDataSource: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IGetDataSource.VTable, self.vtable).GetDataSource(@ptrCast(*const IGetDataSource, self), riid, ppDataSource); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ITransactionLocal_Value = @import("../zig.zig").Guid.initString("0c733a5f-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ITransactionLocal = &IID_ITransactionLocal_Value;

/// COM interface `ITransactionLocal`; unlike most interfaces in this file its
/// vtable extends ITransaction (not IUnknown), and its mixin pulls in
/// ITransaction.MethodMixin accordingly.
pub const ITransactionLocal = extern struct {
    pub const VTable = extern struct {
        base: ITransaction.VTable,
        GetOptionsObject: fn( self: *const ITransactionLocal, ppOptions: ?*?*ITransactionOptions, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        StartTransaction: fn( self: *const ITransactionLocal, isoLevel: i32, isoFlags: u32, pOtherOptions: ?*ITransactionOptions, pulTransactionLevel: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ITransaction.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITransactionLocal_GetOptionsObject(self: *const T, ppOptions: ?*?*ITransactionOptions) callconv(.Inline) HRESULT { return @ptrCast(*const ITransactionLocal.VTable, self.vtable).GetOptionsObject(@ptrCast(*const ITransactionLocal, self), ppOptions); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITransactionLocal_StartTransaction(self: *const T, isoLevel: i32, isoFlags: u32, pOtherOptions: ?*ITransactionOptions, pulTransactionLevel: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ITransactionLocal.VTable, self.vtable).StartTransaction(@ptrCast(*const ITransactionLocal, self), isoLevel, isoFlags, pOtherOptions, pulTransactionLevel); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ITransactionJoin_Value = @import("../zig.zig").Guid.initString("0c733a5e-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ITransactionJoin = &IID_ITransactionJoin_Value;

/// COM interface `ITransactionJoin`; vtable extends IUnknown.
/// JoinTransaction enlists in a coordinator's transaction (punkTransactionCoord).
pub const ITransactionJoin = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetOptionsObject: fn( self: *const ITransactionJoin, ppOptions: ?*?*ITransactionOptions, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        JoinTransaction: fn( self: *const ITransactionJoin, punkTransactionCoord: ?*IUnknown, isoLevel: i32, isoFlags: u32, pOtherOptions: ?*ITransactionOptions, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITransactionJoin_GetOptionsObject(self: *const T, ppOptions: ?*?*ITransactionOptions) callconv(.Inline) HRESULT { return @ptrCast(*const ITransactionJoin.VTable, self.vtable).GetOptionsObject(@ptrCast(*const ITransactionJoin, self), ppOptions); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITransactionJoin_JoinTransaction(self: *const T, punkTransactionCoord: ?*IUnknown, isoLevel: i32, isoFlags: u32, pOtherOptions: ?*ITransactionOptions) callconv(.Inline) HRESULT { return @ptrCast(*const ITransactionJoin.VTable, self.vtable).JoinTransaction(@ptrCast(*const ITransactionJoin, self), punkTransactionCoord, isoLevel, isoFlags, pOtherOptions); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const
IID_ITransactionObject_Value = @import("../zig.zig").Guid.initString("0c733a60-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ITransactionObject = &IID_ITransactionObject_Value;

/// COM interface `ITransactionObject`; vtable extends IUnknown.
/// GetTransactionObject returns an ITransaction for the given level.
pub const ITransactionObject = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetTransactionObject: fn( self: *const ITransactionObject, ulTransactionLevel: u32, ppTransactionObject: ?*?*ITransaction, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITransactionObject_GetTransactionObject(self: *const T, ulTransactionLevel: u32, ppTransactionObject: ?*?*ITransaction) callconv(.Inline) HRESULT { return @ptrCast(*const ITransactionObject.VTable, self.vtable).GetTransactionObject(@ptrCast(*const ITransactionObject, self), ulTransactionLevel, ppTransactionObject); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ITrusteeAdmin_Value = @import("../zig.zig").Guid.initString("0c733aa1-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ITrusteeAdmin = &IID_ITrusteeAdmin_Value;

/// COM interface `ITrusteeAdmin`; vtable extends IUnknown.
/// CRUD-style management of TRUSTEE_W entries plus property get/set via
/// DBPROPSET/DBPROPIDSET arrays.
pub const ITrusteeAdmin = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        CompareTrustees: fn( self: *const ITrusteeAdmin, pTrustee1: ?*TRUSTEE_W, pTrustee2: ?*TRUSTEE_W, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateTrustee: fn( self: *const ITrusteeAdmin, pTrustee: ?*TRUSTEE_W, cPropertySets: u32, rgPropertySets: ?*DBPROPSET, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DeleteTrustee: fn( self: *const ITrusteeAdmin, pTrustee: ?*TRUSTEE_W, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetTrusteeProperties: fn( self: *const ITrusteeAdmin, pTrustee: ?*TRUSTEE_W, cPropertySets: u32, rgPropertySets: ?*DBPROPSET, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetTrusteeProperties: fn( self: *const ITrusteeAdmin, pTrustee: ?*TRUSTEE_W, cPropertyIDSets: u32, rgPropertyIDSets: ?*const DBPROPIDSET, pcPropertySets: ?*u32, prgPropertySets: ?*?*DBPROPSET, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITrusteeAdmin_CompareTrustees(self: *const T, pTrustee1: ?*TRUSTEE_W, pTrustee2: ?*TRUSTEE_W) callconv(.Inline) HRESULT { return @ptrCast(*const ITrusteeAdmin.VTable, self.vtable).CompareTrustees(@ptrCast(*const ITrusteeAdmin, self), pTrustee1, pTrustee2); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITrusteeAdmin_CreateTrustee(self: *const T, pTrustee: ?*TRUSTEE_W, cPropertySets: u32, rgPropertySets: ?*DBPROPSET) callconv(.Inline) HRESULT { return @ptrCast(*const ITrusteeAdmin.VTable, self.vtable).CreateTrustee(@ptrCast(*const ITrusteeAdmin, self), pTrustee, cPropertySets, rgPropertySets); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITrusteeAdmin_DeleteTrustee(self: *const T, pTrustee: ?*TRUSTEE_W) callconv(.Inline) HRESULT { return @ptrCast(*const ITrusteeAdmin.VTable, self.vtable).DeleteTrustee(@ptrCast(*const ITrusteeAdmin, self), pTrustee); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITrusteeAdmin_SetTrusteeProperties(self: *const T, pTrustee: ?*TRUSTEE_W, cPropertySets: u32, rgPropertySets: ?*DBPROPSET) callconv(.Inline) HRESULT { return @ptrCast(*const ITrusteeAdmin.VTable, self.vtable).SetTrusteeProperties(@ptrCast(*const ITrusteeAdmin, self), pTrustee, cPropertySets, rgPropertySets); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITrusteeAdmin_GetTrusteeProperties(self: *const T, pTrustee: ?*TRUSTEE_W, cPropertyIDSets: u32, rgPropertyIDSets: ?*const DBPROPIDSET, pcPropertySets: ?*u32, prgPropertySets: ?*?*DBPROPSET) callconv(.Inline) HRESULT { return @ptrCast(*const ITrusteeAdmin.VTable, self.vtable).GetTrusteeProperties(@ptrCast(*const ITrusteeAdmin, self), pTrustee, cPropertyIDSets, rgPropertyIDSets, pcPropertySets, prgPropertySets); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ITrusteeGroupAdmin_Value = @import("../zig.zig").Guid.initString("0c733aa2-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ITrusteeGroupAdmin = &IID_ITrusteeGroupAdmin_Value;

/// COM interface `ITrusteeGroupAdmin`; vtable extends IUnknown.
/// Group membership management over TRUSTEE_W entries; GetMembers and
/// GetMemberships return provider-allocated arrays via out-pointers.
pub const ITrusteeGroupAdmin = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddMember: fn( self: *const ITrusteeGroupAdmin, pMembershipTrustee: ?*TRUSTEE_W, pMemberTrustee: ?*TRUSTEE_W, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DeleteMember: fn( self: *const ITrusteeGroupAdmin, pMembershipTrustee: ?*TRUSTEE_W, pMemberTrustee: ?*TRUSTEE_W, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        IsMember: fn( self: *const ITrusteeGroupAdmin, pMembershipTrustee: ?*TRUSTEE_W, pMemberTrustee: ?*TRUSTEE_W, pfStatus: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetMembers: fn( self: *const ITrusteeGroupAdmin, pMembershipTrustee: ?*TRUSTEE_W, pcMembers: ?*u32, prgMembers: ?*?*TRUSTEE_W, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetMemberships: fn( self: *const ITrusteeGroupAdmin, pTrustee: ?*TRUSTEE_W, pcMemberships: ?*u32, prgMemberships: ?*?*TRUSTEE_W, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITrusteeGroupAdmin_AddMember(self: *const T, pMembershipTrustee: ?*TRUSTEE_W, pMemberTrustee: ?*TRUSTEE_W) callconv(.Inline) HRESULT { return @ptrCast(*const ITrusteeGroupAdmin.VTable, self.vtable).AddMember(@ptrCast(*const ITrusteeGroupAdmin, self), pMembershipTrustee, pMemberTrustee); }
        // NOTE: method is namespaced with
// interface name to avoid conflicts for now
        pub fn ITrusteeGroupAdmin_DeleteMember(self: *const T, pMembershipTrustee: ?*TRUSTEE_W, pMemberTrustee: ?*TRUSTEE_W) callconv(.Inline) HRESULT { return @ptrCast(*const ITrusteeGroupAdmin.VTable, self.vtable).DeleteMember(@ptrCast(*const ITrusteeGroupAdmin, self), pMembershipTrustee, pMemberTrustee); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITrusteeGroupAdmin_IsMember(self: *const T, pMembershipTrustee: ?*TRUSTEE_W, pMemberTrustee: ?*TRUSTEE_W, pfStatus: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ITrusteeGroupAdmin.VTable, self.vtable).IsMember(@ptrCast(*const ITrusteeGroupAdmin, self), pMembershipTrustee, pMemberTrustee, pfStatus); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITrusteeGroupAdmin_GetMembers(self: *const T, pMembershipTrustee: ?*TRUSTEE_W, pcMembers: ?*u32, prgMembers: ?*?*TRUSTEE_W) callconv(.Inline) HRESULT { return @ptrCast(*const ITrusteeGroupAdmin.VTable, self.vtable).GetMembers(@ptrCast(*const ITrusteeGroupAdmin, self), pMembershipTrustee, pcMembers, prgMembers); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITrusteeGroupAdmin_GetMemberships(self: *const T, pTrustee: ?*TRUSTEE_W, pcMemberships: ?*u32, prgMemberships: ?*?*TRUSTEE_W) callconv(.Inline) HRESULT { return @ptrCast(*const ITrusteeGroupAdmin.VTable, self.vtable).GetMemberships(@ptrCast(*const ITrusteeGroupAdmin, self), pTrustee, pcMemberships, prgMemberships); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IObjectAccessControl_Value = @import("../zig.zig").Guid.initString("0c733aa3-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IObjectAccessControl = &IID_IObjectAccessControl_Value;

/// COM interface `IObjectAccessControl`; vtable extends IUnknown.
/// Get/set access rights (EXPLICIT_ACCESS_W) and ownership (TRUSTEE_W) for a
/// SEC_OBJECT.
pub const IObjectAccessControl = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetObjectAccessRights: fn( self: *const IObjectAccessControl, pObject: ?*SEC_OBJECT, pcAccessEntries: ?*u32, prgAccessEntries: ?*?*EXPLICIT_ACCESS_W, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetObjectOwner: fn( self: *const IObjectAccessControl, pObject: ?*SEC_OBJECT, ppOwner: ?*?*TRUSTEE_W, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        IsObjectAccessAllowed: fn( self: *const IObjectAccessControl, pObject: ?*SEC_OBJECT, pAccessEntry: ?*EXPLICIT_ACCESS_W, pfResult: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetObjectAccessRights: fn( self: *const IObjectAccessControl, pObject: ?*SEC_OBJECT, cAccessEntries: u32, prgAccessEntries: ?*EXPLICIT_ACCESS_W, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetObjectOwner: fn( self: *const IObjectAccessControl, pObject: ?*SEC_OBJECT, pOwner: ?*TRUSTEE_W, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IObjectAccessControl_GetObjectAccessRights(self: *const T, pObject: ?*SEC_OBJECT, pcAccessEntries: ?*u32, prgAccessEntries: ?*?*EXPLICIT_ACCESS_W) callconv(.Inline) HRESULT { return @ptrCast(*const IObjectAccessControl.VTable, self.vtable).GetObjectAccessRights(@ptrCast(*const IObjectAccessControl, self), pObject, pcAccessEntries, prgAccessEntries); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IObjectAccessControl_GetObjectOwner(self: *const T, pObject: ?*SEC_OBJECT, ppOwner: ?*?*TRUSTEE_W) callconv(.Inline) HRESULT { return @ptrCast(*const IObjectAccessControl.VTable, self.vtable).GetObjectOwner(@ptrCast(*const IObjectAccessControl, self), pObject, ppOwner); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IObjectAccessControl_IsObjectAccessAllowed(self: *const T, pObject: ?*SEC_OBJECT, pAccessEntry: ?*EXPLICIT_ACCESS_W, pfResult: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IObjectAccessControl.VTable, self.vtable).IsObjectAccessAllowed(@ptrCast(*const IObjectAccessControl, self), pObject, pAccessEntry, pfResult); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IObjectAccessControl_SetObjectAccessRights(self: *const T, pObject: ?*SEC_OBJECT, cAccessEntries: u32, prgAccessEntries: ?*EXPLICIT_ACCESS_W) callconv(.Inline) HRESULT { return @ptrCast(*const IObjectAccessControl.VTable, self.vtable).SetObjectAccessRights(@ptrCast(*const IObjectAccessControl, self), pObject, cAccessEntries, prgAccessEntries); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IObjectAccessControl_SetObjectOwner(self: *const T, pObject: ?*SEC_OBJECT, pOwner: ?*TRUSTEE_W) callconv(.Inline) HRESULT { return @ptrCast(*const IObjectAccessControl.VTable, self.vtable).SetObjectOwner(@ptrCast(*const IObjectAccessControl, self), pObject, pOwner); }
    };}
    pub usingnamespace MethodMixin(@This());
};

/// Permission bits, declared as an i32 enum (values are powers of two; READ is
/// -2147483648, i.e. bit 31 as a signed i32). NOTE(review): values look like a
/// bitmask yet this is a plain enum — combining members requires casts.
pub const ACCESS_MASKENUM = enum(i32) {
    EXCLUSIVE = 512,
    READDESIGN = 1024,
    WRITEDESIGN = 2048,
    WITHGRANT = 4096,
    REFERENCE = 8192,
    CREATE = 16384,
    INSERT = 32768,
    DELETE = 65536,
    READCONTROL = 131072,
    WRITEPERMISSIONS = 262144,
    WRITEOWNER = 524288,
    MAXIMUM_ALLOWED = 33554432,
    ALL = 268435456,
    EXECUTE = 536870912,
    READ = -2147483648,
    UPDATE = 1073741824,
    DROP = 256,
};
// Flat PERM_* aliases for the enum members above.
pub const PERM_EXCLUSIVE = ACCESS_MASKENUM.EXCLUSIVE;
pub const PERM_READDESIGN = ACCESS_MASKENUM.READDESIGN;
pub const PERM_WRITEDESIGN = ACCESS_MASKENUM.WRITEDESIGN;
pub const PERM_WITHGRANT = ACCESS_MASKENUM.WITHGRANT;
pub const PERM_REFERENCE = ACCESS_MASKENUM.REFERENCE;
pub const PERM_CREATE = ACCESS_MASKENUM.CREATE;
pub const PERM_INSERT = ACCESS_MASKENUM.INSERT;
pub const PERM_DELETE = ACCESS_MASKENUM.DELETE;
pub const PERM_READCONTROL = ACCESS_MASKENUM.READCONTROL;
pub const PERM_WRITEPERMISSIONS = ACCESS_MASKENUM.WRITEPERMISSIONS;
pub const PERM_WRITEOWNER = ACCESS_MASKENUM.WRITEOWNER;
pub const PERM_MAXIMUM_ALLOWED =
ACCESS_MASKENUM.MAXIMUM_ALLOWED;
pub const PERM_ALL = ACCESS_MASKENUM.ALL;
pub const PERM_EXECUTE = ACCESS_MASKENUM.EXECUTE;
pub const PERM_READ = ACCESS_MASKENUM.READ;
pub const PERM_UPDATE = ACCESS_MASKENUM.UPDATE;
pub const PERM_DROP = ACCESS_MASKENUM.DROP;

const IID_ISecurityInfo_Value = @import("../zig.zig").Guid.initString("0c733aa4-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ISecurityInfo = &IID_ISecurityInfo_Value;

/// COM interface `ISecurityInfo`; vtable extends IUnknown.
/// Queries the current trustee, supported object-type GUIDs, and the
/// permission bits for a given object type (GetPermissions takes Guid by value).
pub const ISecurityInfo = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetCurrentTrustee: fn( self: *const ISecurityInfo, ppTrustee: ?*?*TRUSTEE_W, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetObjectTypes: fn( self: *const ISecurityInfo, cObjectTypes: ?*u32, rgObjectTypes: ?*?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPermissions: fn( self: *const ISecurityInfo, ObjectType: Guid, pPermissions: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISecurityInfo_GetCurrentTrustee(self: *const T, ppTrustee: ?*?*TRUSTEE_W) callconv(.Inline) HRESULT { return @ptrCast(*const ISecurityInfo.VTable, self.vtable).GetCurrentTrustee(@ptrCast(*const ISecurityInfo, self), ppTrustee); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISecurityInfo_GetObjectTypes(self: *const T, cObjectTypes: ?*u32, rgObjectTypes: ?*?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const ISecurityInfo.VTable, self.vtable).GetObjectTypes(@ptrCast(*const ISecurityInfo, self), cObjectTypes, rgObjectTypes); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISecurityInfo_GetPermissions(self: *const T, ObjectType: Guid, pPermissions: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISecurityInfo.VTable, self.vtable).GetPermissions(@ptrCast(*const ISecurityInfo, self), ObjectType, pPermissions); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ITableCreation_Value = @import("../zig.zig").Guid.initString("0c733abc-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ITableCreation = &IID_ITableCreation_Value;

/// COM interface `ITableCreation`; vtable extends ITableDefinition (declared
/// elsewhere in this file), and its mixin pulls in ITableDefinition.MethodMixin.
/// GetTableDefinition returns column/property/constraint descriptions through
/// out-pointer arrays plus a shared string buffer (ppwszStringBuffer).
pub const ITableCreation = extern struct {
    pub const VTable = extern struct {
        base: ITableDefinition.VTable,
        GetTableDefinition: fn( self: *const ITableCreation, pTableID: ?*DBID, pcColumnDescs: ?*usize, prgColumnDescs: ?[*]?*DBCOLUMNDESC, pcPropertySets: ?*u32, prgPropertySets: ?[*]?*DBPROPSET, pcConstraintDescs: ?*u32, prgConstraintDescs: ?[*]?*DBCONSTRAINTDESC, ppwszStringBuffer: ?*?*u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ITableDefinition.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITableCreation_GetTableDefinition(self: *const T, pTableID: ?*DBID, pcColumnDescs: ?*usize, prgColumnDescs: ?[*]?*DBCOLUMNDESC, pcPropertySets: ?*u32, prgPropertySets: ?[*]?*DBPROPSET, pcConstraintDescs: ?*u32, prgConstraintDescs: ?[*]?*DBCONSTRAINTDESC, ppwszStringBuffer: ?*?*u16) callconv(.Inline) HRESULT { return @ptrCast(*const ITableCreation.VTable, self.vtable).GetTableDefinition(@ptrCast(*const ITableCreation, self), pTableID, pcColumnDescs, prgColumnDescs, pcPropertySets, prgPropertySets, pcConstraintDescs, prgConstraintDescs, ppwszStringBuffer); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_ITableDefinitionWithConstraints_Value = @import("../zig.zig").Guid.initString("0c733aab-2a1c-11ce-ade5-00aa0044773d");
pub const IID_ITableDefinitionWithConstraints = &IID_ITableDefinitionWithConstraints_Value;

/// COM interface `ITableDefinitionWithConstraints`; vtable extends ITableCreation.
pub const ITableDefinitionWithConstraints = extern struct {
    pub const VTable = extern struct {
        base: ITableCreation.VTable,
        AddConstraint: fn( self: *const
ITableDefinitionWithConstraints, pTableID: ?*DBID, pConstraintDesc: ?*DBCONSTRAINTDESC, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateTableWithConstraints: fn( self: *const ITableDefinitionWithConstraints, pUnkOuter: ?*IUnknown, pTableID: ?*DBID, cColumnDescs: usize, rgColumnDescs: ?*DBCOLUMNDESC, cConstraintDescs: u32, rgConstraintDescs: ?*DBCONSTRAINTDESC, riid: ?*const Guid, cPropertySets: u32, rgPropertySets: ?*DBPROPSET, ppTableID: ?*?*DBID, ppRowset: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DropConstraint: fn( self: *const ITableDefinitionWithConstraints, pTableID: ?*DBID, pConstraintID: ?*DBID, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ITableCreation.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITableDefinitionWithConstraints_AddConstraint(self: *const T, pTableID: ?*DBID, pConstraintDesc: ?*DBCONSTRAINTDESC) callconv(.Inline) HRESULT { return @ptrCast(*const ITableDefinitionWithConstraints.VTable, self.vtable).AddConstraint(@ptrCast(*const ITableDefinitionWithConstraints, self), pTableID, pConstraintDesc); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITableDefinitionWithConstraints_CreateTableWithConstraints(self: *const T, pUnkOuter: ?*IUnknown, pTableID: ?*DBID, cColumnDescs: usize, rgColumnDescs: ?*DBCOLUMNDESC, cConstraintDescs: u32, rgConstraintDescs: ?*DBCONSTRAINTDESC, riid: ?*const Guid, cPropertySets: u32, rgPropertySets: ?*DBPROPSET, ppTableID: ?*?*DBID, ppRowset: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const ITableDefinitionWithConstraints.VTable, self.vtable).CreateTableWithConstraints(@ptrCast(*const ITableDefinitionWithConstraints, self), pUnkOuter, pTableID, cColumnDescs, rgColumnDescs, cConstraintDescs, rgConstraintDescs, riid, cPropertySets, rgPropertySets, ppTableID, ppRowset); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITableDefinitionWithConstraints_DropConstraint(self: *const T, pTableID: ?*DBID, pConstraintID: ?*DBID) callconv(.Inline) HRESULT { return @ptrCast(*const ITableDefinitionWithConstraints.VTable, self.vtable).DropConstraint(@ptrCast(*const ITableDefinitionWithConstraints, self), pTableID, pConstraintID); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IRow_Value = @import("../zig.zig").Guid.initString("0c733ab4-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRow = &IID_IRow_Value;

/// COM interface `IRow`; vtable extends IUnknown.
/// GetColumns fills a caller-provided DBCOLUMNACCESS array; GetSourceRowset and
/// Open return COM objects through ?*?*IUnknown out-params selected by riid.
pub const IRow = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetColumns: fn( self: *const IRow, cColumns: usize, rgColumns: [*]DBCOLUMNACCESS, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSourceRowset: fn( self: *const IRow, riid: ?*const Guid, ppRowset: ?*?*IUnknown, phRow: ?*usize, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Open: fn( self: *const IRow, pUnkOuter: ?*IUnknown, pColumnID: ?*DBID, rguidColumnType: ?*const Guid, dwBindFlags: u32, riid: ?*const Guid, ppUnk: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRow_GetColumns(self: *const T, cColumns: usize, rgColumns: [*]DBCOLUMNACCESS) callconv(.Inline) HRESULT { return @ptrCast(*const IRow.VTable, self.vtable).GetColumns(@ptrCast(*const IRow, self), cColumns, rgColumns); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRow_GetSourceRowset(self: *const T, riid: ?*const Guid, ppRowset: ?*?*IUnknown, phRow: ?*usize) callconv(.Inline) HRESULT { return @ptrCast(*const IRow.VTable, self.vtable).GetSourceRowset(@ptrCast(*const IRow, self), riid, ppRowset, phRow); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRow_Open(self: *const T, pUnkOuter: ?*IUnknown, pColumnID: ?*DBID, rguidColumnType: ?*const Guid, dwBindFlags: u32, riid: ?*const Guid, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IRow.VTable, self.vtable).Open(@ptrCast(*const IRow, self), pUnkOuter, pColumnID, rguidColumnType, dwBindFlags, riid, ppUnk); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IRowChange_Value = @import("../zig.zig").Guid.initString("0c733ab5-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowChange = &IID_IRowChange_Value;

/// COM interface `IRowChange`; vtable extends IUnknown. Single method SetColumns.
pub const IRowChange = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        SetColumns: fn( self: *const IRowChange, cColumns: usize, rgColumns: [*]DBCOLUMNACCESS, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRowChange_SetColumns(self: *const T, cColumns: usize, rgColumns: [*]DBCOLUMNACCESS) callconv(.Inline) HRESULT { return @ptrCast(*const IRowChange.VTable, self.vtable).SetColumns(@ptrCast(*const IRowChange, self), cColumns, rgColumns); }
    };}
    pub usingnamespace MethodMixin(@This());
};

const IID_IRowSchemaChange_Value = @import("../zig.zig").Guid.initString("0c733aae-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IRowSchemaChange = &IID_IRowSchemaChange_Value;

/// COM interface `IRowSchemaChange`; vtable extends IRowChange.
/// (Definition continues past this chunk.)
pub const IRowSchemaChange = extern struct {
    pub const VTable = extern struct {
        base: IRowChange.VTable,
        DeleteColumns: fn( self: *const IRowSchemaChange, cColumns: usize, rgColumnIDs: ?*const DBID, rgdwStatus: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddColumns: fn( self: *const IRowSchemaChange, cColumns: usize, rgNewColumnInfo: ?*const DBCOLUMNINFO, rgColumns: ?*DBCOLUMNACCESS, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn
MethodMixin(comptime T: type) type { return struct { pub usingnamespace IRowChange.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowSchemaChange_DeleteColumns(self: *const T, cColumns: usize, rgColumnIDs: ?*const DBID, rgdwStatus: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IRowSchemaChange.VTable, self.vtable).DeleteColumns(@ptrCast(*const IRowSchemaChange, self), cColumns, rgColumnIDs, rgdwStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowSchemaChange_AddColumns(self: *const T, cColumns: usize, rgNewColumnInfo: ?*const DBCOLUMNINFO, rgColumns: ?*DBCOLUMNACCESS) callconv(.Inline) HRESULT { return @ptrCast(*const IRowSchemaChange.VTable, self.vtable).AddColumns(@ptrCast(*const IRowSchemaChange, self), cColumns, rgNewColumnInfo, rgColumns); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IGetRow_Value = @import("../zig.zig").Guid.initString("0c733aaf-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IGetRow = &IID_IGetRow_Value; pub const IGetRow = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetRowFromHROW: fn( self: *const IGetRow, pUnkOuter: ?*IUnknown, hRow: usize, riid: ?*const Guid, ppUnk: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetURLFromHROW: fn( self: *const IGetRow, hRow: usize, ppwszURL: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IGetRow_GetRowFromHROW(self: *const T, pUnkOuter: ?*IUnknown, hRow: usize, riid: ?*const Guid, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IGetRow.VTable, self.vtable).GetRowFromHROW(@ptrCast(*const IGetRow, self), pUnkOuter, hRow, riid, ppUnk); } // NOTE: method is namespaced with interface 
name to avoid conflicts for now pub fn IGetRow_GetURLFromHROW(self: *const T, hRow: usize, ppwszURL: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const IGetRow.VTable, self.vtable).GetURLFromHROW(@ptrCast(*const IGetRow, self), hRow, ppwszURL); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IBindResource_Value = @import("../zig.zig").Guid.initString("0c733ab1-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IBindResource = &IID_IBindResource_Value; pub const IBindResource = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Bind: fn( self: *const IBindResource, pUnkOuter: ?*IUnknown, pwszURL: ?[*:0]const u16, dwBindURLFlags: u32, rguid: ?*const Guid, riid: ?*const Guid, pAuthenticate: ?*IAuthenticate, pImplSession: ?*DBIMPLICITSESSION, pdwBindStatus: ?*u32, ppUnk: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IBindResource_Bind(self: *const T, pUnkOuter: ?*IUnknown, pwszURL: ?[*:0]const u16, dwBindURLFlags: u32, rguid: ?*const Guid, riid: ?*const Guid, pAuthenticate: ?*IAuthenticate, pImplSession: ?*DBIMPLICITSESSION, pdwBindStatus: ?*u32, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IBindResource.VTable, self.vtable).Bind(@ptrCast(*const IBindResource, self), pUnkOuter, pwszURL, dwBindURLFlags, rguid, riid, pAuthenticate, pImplSession, pdwBindStatus, ppUnk); } };} pub usingnamespace MethodMixin(@This()); }; pub const DBCOPYFLAGSENUM = enum(i32) { ASYNC = 256, REPLACE_EXISTING = 512, ALLOW_EMULATION = 1024, NON_RECURSIVE = 2048, ATOMIC = 4096, }; pub const DBCOPY_ASYNC = DBCOPYFLAGSENUM.ASYNC; pub const DBCOPY_REPLACE_EXISTING = DBCOPYFLAGSENUM.REPLACE_EXISTING; pub const DBCOPY_ALLOW_EMULATION = DBCOPYFLAGSENUM.ALLOW_EMULATION; pub const DBCOPY_NON_RECURSIVE = 
DBCOPYFLAGSENUM.NON_RECURSIVE;
pub const DBCOPY_ATOMIC = DBCOPYFLAGSENUM.ATOMIC;

// Flags for IScopedOperations.Move (oledb.h DBMOVEFLAGSENUM).
pub const DBMOVEFLAGSENUM = enum(i32) {
    REPLACE_EXISTING = 1,
    ASYNC = 256,
    DONT_UPDATE_LINKS = 512,
    ALLOW_EMULATION = 1024,
    ATOMIC = 4096,
};
pub const DBMOVE_REPLACE_EXISTING = DBMOVEFLAGSENUM.REPLACE_EXISTING;
pub const DBMOVE_ASYNC = DBMOVEFLAGSENUM.ASYNC;
pub const DBMOVE_DONT_UPDATE_LINKS = DBMOVEFLAGSENUM.DONT_UPDATE_LINKS;
pub const DBMOVE_ALLOW_EMULATION = DBMOVEFLAGSENUM.ALLOW_EMULATION;
pub const DBMOVE_ATOMIC = DBMOVEFLAGSENUM.ATOMIC;

// Flags for IScopedOperations.Delete (oledb.h DBDELETEFLAGSENUM).
// FIX: members were garbled as `SYNC`/`TOMIC` (leading letter dropped). The
// Windows SDK names, after stripping the `DBDELETE_` prefix exactly as done
// for DBCOPY/DBMOVE above, are ASYNC (0x100) and ATOMIC (0x1000). The numeric
// values and the public `DBDELETE_*` alias constants are unchanged.
pub const DBDELETEFLAGSENUM = enum(i32) {
    ASYNC = 256,
    ATOMIC = 4096,
};
pub const DBDELETE_ASYNC = DBDELETEFLAGSENUM.ASYNC;
pub const DBDELETE_ATOMIC = DBDELETEFLAGSENUM.ATOMIC;

const IID_IScopedOperations_Value = @import("../zig.zig").Guid.initString("0c733ab0-2a1c-11ce-ade5-00aa0044773d");
pub const IID_IScopedOperations = &IID_IScopedOperations_Value;
pub const IScopedOperations = extern struct {
    pub const VTable = extern struct {
        base: IBindResource.VTable,
        Copy: fn(
            self: *const IScopedOperations,
            cRows: usize,
            rgpwszSourceURLs: ?[*]?PWSTR,
            rgpwszDestURLs: [*]?PWSTR,
            dwCopyFlags: u32,
            pAuthenticate: ?*IAuthenticate,
            rgdwStatus: [*]u32,
            rgpwszNewURLs: ?[*]?PWSTR,
            ppStringsBuffer: ?*?*u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Move: fn(
            self: *const IScopedOperations,
            cRows: usize,
            rgpwszSourceURLs: ?[*]?PWSTR,
            rgpwszDestURLs: [*]?PWSTR,
            dwMoveFlags: u32,
            pAuthenticate: ?*IAuthenticate,
            rgdwStatus: [*]u32,
            rgpwszNewURLs: ?[*]?PWSTR,
            ppStringsBuffer: ?*?*u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Delete: fn(
            self: *const IScopedOperations,
            cRows: usize,
            rgpwszURLs: [*]?PWSTR,
            dwDeleteFlags: u32,
            rgdwStatus: [*]u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OpenRowset: fn(
            self: *const IScopedOperations,
            pUnkOuter: ?*IUnknown,
            pTableID: ?*DBID,
            pIndexID: ?*DBID,
            riid: ?*const Guid,
            cPropertySets: u32,
            rgPropertySets: [*]DBPROPSET,
            ppRowset: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI)
HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IBindResource.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IScopedOperations_Copy(self: *const T, cRows: usize, rgpwszSourceURLs: ?[*]?PWSTR, rgpwszDestURLs: [*]?PWSTR, dwCopyFlags: u32, pAuthenticate: ?*IAuthenticate, rgdwStatus: [*]u32, rgpwszNewURLs: ?[*]?PWSTR, ppStringsBuffer: ?*?*u16) callconv(.Inline) HRESULT { return @ptrCast(*const IScopedOperations.VTable, self.vtable).Copy(@ptrCast(*const IScopedOperations, self), cRows, rgpwszSourceURLs, rgpwszDestURLs, dwCopyFlags, pAuthenticate, rgdwStatus, rgpwszNewURLs, ppStringsBuffer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IScopedOperations_Move(self: *const T, cRows: usize, rgpwszSourceURLs: ?[*]?PWSTR, rgpwszDestURLs: [*]?PWSTR, dwMoveFlags: u32, pAuthenticate: ?*IAuthenticate, rgdwStatus: [*]u32, rgpwszNewURLs: ?[*]?PWSTR, ppStringsBuffer: ?*?*u16) callconv(.Inline) HRESULT { return @ptrCast(*const IScopedOperations.VTable, self.vtable).Move(@ptrCast(*const IScopedOperations, self), cRows, rgpwszSourceURLs, rgpwszDestURLs, dwMoveFlags, pAuthenticate, rgdwStatus, rgpwszNewURLs, ppStringsBuffer); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IScopedOperations_Delete(self: *const T, cRows: usize, rgpwszURLs: [*]?PWSTR, dwDeleteFlags: u32, rgdwStatus: [*]u32) callconv(.Inline) HRESULT { return @ptrCast(*const IScopedOperations.VTable, self.vtable).Delete(@ptrCast(*const IScopedOperations, self), cRows, rgpwszURLs, dwDeleteFlags, rgdwStatus); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IScopedOperations_OpenRowset(self: *const T, pUnkOuter: ?*IUnknown, pTableID: ?*DBID, pIndexID: ?*DBID, riid: ?*const Guid, cPropertySets: u32, rgPropertySets: [*]DBPROPSET, ppRowset: ?*?*IUnknown) callconv(.Inline) HRESULT { return 
@ptrCast(*const IScopedOperations.VTable, self.vtable).OpenRowset(@ptrCast(*const IScopedOperations, self), pUnkOuter, pTableID, pIndexID, riid, cPropertySets, rgPropertySets, ppRowset); } };} pub usingnamespace MethodMixin(@This()); }; const IID_ICreateRow_Value = @import("../zig.zig").Guid.initString("0c733ab2-2a1c-11ce-ade5-00aa0044773d"); pub const IID_ICreateRow = &IID_ICreateRow_Value; pub const ICreateRow = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, CreateRow: fn( self: *const ICreateRow, pUnkOuter: ?*IUnknown, pwszURL: ?[*:0]const u16, dwBindURLFlags: u32, rguid: ?*const Guid, riid: ?*const Guid, pAuthenticate: ?*IAuthenticate, pImplSession: ?*DBIMPLICITSESSION, pdwBindStatus: ?*u32, ppwszNewURL: ?*?PWSTR, ppUnk: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ICreateRow_CreateRow(self: *const T, pUnkOuter: ?*IUnknown, pwszURL: ?[*:0]const u16, dwBindURLFlags: u32, rguid: ?*const Guid, riid: ?*const Guid, pAuthenticate: ?*IAuthenticate, pImplSession: ?*DBIMPLICITSESSION, pdwBindStatus: ?*u32, ppwszNewURL: ?*?PWSTR, ppUnk: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const ICreateRow.VTable, self.vtable).CreateRow(@ptrCast(*const ICreateRow, self), pUnkOuter, pwszURL, dwBindURLFlags, rguid, riid, pAuthenticate, pImplSession, pdwBindStatus, ppwszNewURL, ppUnk); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IDBBinderProperties_Value = @import("../zig.zig").Guid.initString("0c733ab3-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IDBBinderProperties = &IID_IDBBinderProperties_Value; pub const IDBBinderProperties = extern struct { pub const VTable = extern struct { base: IDBProperties.VTable, Reset: fn( self: *const IDBBinderProperties, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IDBProperties.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IDBBinderProperties_Reset(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IDBBinderProperties.VTable, self.vtable).Reset(@ptrCast(*const IDBBinderProperties, self)); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IColumnsInfo2_Value = @import("../zig.zig").Guid.initString("0c733ab8-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IColumnsInfo2 = &IID_IColumnsInfo2_Value; pub const IColumnsInfo2 = extern struct { pub const VTable = extern struct { base: IColumnsInfo.VTable, GetRestrictedColumnInfo: fn( self: *const IColumnsInfo2, cColumnIDMasks: usize, rgColumnIDMasks: [*]const DBID, dwFlags: u32, pcColumns: ?*usize, prgColumnIDs: ?*?*DBID, prgColumnInfo: ?*?*DBCOLUMNINFO, ppStringsBuffer: ?*?*u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IColumnsInfo.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IColumnsInfo2_GetRestrictedColumnInfo(self: *const T, cColumnIDMasks: usize, rgColumnIDMasks: [*]const DBID, dwFlags: u32, pcColumns: ?*usize, prgColumnIDs: ?*?*DBID, prgColumnInfo: ?*?*DBCOLUMNINFO, ppStringsBuffer: ?*?*u16) callconv(.Inline) HRESULT { return @ptrCast(*const IColumnsInfo2.VTable, self.vtable).GetRestrictedColumnInfo(@ptrCast(*const IColumnsInfo2, self), cColumnIDMasks, rgColumnIDMasks, dwFlags, pcColumns, prgColumnIDs, prgColumnInfo, ppStringsBuffer); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IRegisterProvider_Value = @import("../zig.zig").Guid.initString("0c733ab9-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IRegisterProvider = &IID_IRegisterProvider_Value; pub const 
IRegisterProvider = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetURLMapping: fn( self: *const IRegisterProvider, pwszURL: ?[*:0]const u16, dwReserved: usize, pclsidProvider: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetURLMapping: fn( self: *const IRegisterProvider, pwszURL: ?[*:0]const u16, dwReserved: usize, rclsidProvider: ?*const Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, UnregisterProvider: fn( self: *const IRegisterProvider, pwszURL: ?[*:0]const u16, dwReserved: usize, rclsidProvider: ?*const Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRegisterProvider_GetURLMapping(self: *const T, pwszURL: ?[*:0]const u16, dwReserved: usize, pclsidProvider: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IRegisterProvider.VTable, self.vtable).GetURLMapping(@ptrCast(*const IRegisterProvider, self), pwszURL, dwReserved, pclsidProvider); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRegisterProvider_SetURLMapping(self: *const T, pwszURL: ?[*:0]const u16, dwReserved: usize, rclsidProvider: ?*const Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IRegisterProvider.VTable, self.vtable).SetURLMapping(@ptrCast(*const IRegisterProvider, self), pwszURL, dwReserved, rclsidProvider); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRegisterProvider_UnregisterProvider(self: *const T, pwszURL: ?[*:0]const u16, dwReserved: usize, rclsidProvider: ?*const Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IRegisterProvider.VTable, self.vtable).UnregisterProvider(@ptrCast(*const IRegisterProvider, self), pwszURL, dwReserved, rclsidProvider); } };} pub usingnamespace MethodMixin(@This()); }; const 
IID_IGetSession_Value = @import("../zig.zig").Guid.initString("0c733aba-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IGetSession = &IID_IGetSession_Value; pub const IGetSession = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetSession: fn( self: *const IGetSession, riid: ?*const Guid, ppSession: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IGetSession_GetSession(self: *const T, riid: ?*const Guid, ppSession: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IGetSession.VTable, self.vtable).GetSession(@ptrCast(*const IGetSession, self), riid, ppSession); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IGetSourceRow_Value = @import("../zig.zig").Guid.initString("0c733abb-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IGetSourceRow = &IID_IGetSourceRow_Value; pub const IGetSourceRow = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetSourceRow: fn( self: *const IGetSourceRow, riid: ?*const Guid, ppRow: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IGetSourceRow_GetSourceRow(self: *const T, riid: ?*const Guid, ppRow: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const IGetSourceRow.VTable, self.vtable).GetSourceRow(@ptrCast(*const IGetSourceRow, self), riid, ppRow); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IRowsetCurrentIndex_Value = @import("../zig.zig").Guid.initString("0c733abd-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IRowsetCurrentIndex = &IID_IRowsetCurrentIndex_Value; pub const 
IRowsetCurrentIndex = extern struct { pub const VTable = extern struct { base: IRowsetIndex.VTable, GetIndex: fn( self: *const IRowsetCurrentIndex, ppIndexID: ?*?*DBID, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetIndex: fn( self: *const IRowsetCurrentIndex, pIndexID: ?*DBID, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IRowsetIndex.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowsetCurrentIndex_GetIndex(self: *const T, ppIndexID: ?*?*DBID) callconv(.Inline) HRESULT { return @ptrCast(*const IRowsetCurrentIndex.VTable, self.vtable).GetIndex(@ptrCast(*const IRowsetCurrentIndex, self), ppIndexID); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowsetCurrentIndex_SetIndex(self: *const T, pIndexID: ?*DBID) callconv(.Inline) HRESULT { return @ptrCast(*const IRowsetCurrentIndex.VTable, self.vtable).SetIndex(@ptrCast(*const IRowsetCurrentIndex, self), pIndexID); } };} pub usingnamespace MethodMixin(@This()); }; const IID_ICommandStream_Value = @import("../zig.zig").Guid.initString("0c733abf-2a1c-11ce-ade5-00aa0044773d"); pub const IID_ICommandStream = &IID_ICommandStream_Value; pub const ICommandStream = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetCommandStream: fn( self: *const ICommandStream, piid: ?*Guid, pguidDialect: ?*Guid, ppCommandStream: ?*?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetCommandStream: fn( self: *const ICommandStream, riid: ?*const Guid, rguidDialect: ?*const Guid, pCommandStream: ?*IUnknown, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
ICommandStream_GetCommandStream(self: *const T, piid: ?*Guid, pguidDialect: ?*Guid, ppCommandStream: ?*?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const ICommandStream.VTable, self.vtable).GetCommandStream(@ptrCast(*const ICommandStream, self), piid, pguidDialect, ppCommandStream); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ICommandStream_SetCommandStream(self: *const T, riid: ?*const Guid, rguidDialect: ?*const Guid, pCommandStream: ?*IUnknown) callconv(.Inline) HRESULT { return @ptrCast(*const ICommandStream.VTable, self.vtable).SetCommandStream(@ptrCast(*const ICommandStream, self), riid, rguidDialect, pCommandStream); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IRowsetBookmark_Value = @import("../zig.zig").Guid.initString("0c733ac2-2a1c-11ce-ade5-00aa0044773d"); pub const IID_IRowsetBookmark = &IID_IRowsetBookmark_Value; pub const IRowsetBookmark = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, PositionOnBookmark: fn( self: *const IRowsetBookmark, hChapter: usize, cbBookmark: usize, // TODO: what to do with BytesParamIndex 1? 
pBookmark: ?*const u8, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowsetBookmark_PositionOnBookmark(self: *const T, hChapter: usize, cbBookmark: usize, pBookmark: ?*const u8) callconv(.Inline) HRESULT { return @ptrCast(*const IRowsetBookmark.VTable, self.vtable).PositionOnBookmark(@ptrCast(*const IRowsetBookmark, self), hChapter, cbBookmark, pBookmark); } };} pub usingnamespace MethodMixin(@This()); }; const CLSID_QueryParser_Value = @import("../zig.zig").Guid.initString("b72f8fd8-0fab-4dd9-bdbf-245a6ce1485b"); pub const CLSID_QueryParser = &CLSID_QueryParser_Value; const CLSID_NegationCondition_Value = @import("../zig.zig").Guid.initString("8de9c74c-605a-4acd-bee3-2b222aa2d23d"); pub const CLSID_NegationCondition = &CLSID_NegationCondition_Value; const CLSID_CompoundCondition_Value = @import("../zig.zig").Guid.initString("116f8d13-101e-4fa5-84d4-ff8279381935"); pub const CLSID_CompoundCondition = &CLSID_CompoundCondition_Value; const CLSID_LeafCondition_Value = @import("../zig.zig").Guid.initString("52f15c89-5a17-48e1-bbcd-46a3f89c7cc2"); pub const CLSID_LeafCondition = &CLSID_LeafCondition_Value; const CLSID_ConditionFactory_Value = @import("../zig.zig").Guid.initString("e03e85b0-7be3-4000-ba98-6c13de9fa486"); pub const CLSID_ConditionFactory = &CLSID_ConditionFactory_Value; const CLSID_Interval_Value = @import("../zig.zig").Guid.initString("d957171f-4bf9-4de2-bcd5-c70a7ca55836"); pub const CLSID_Interval = &CLSID_Interval_Value; const CLSID_QueryParserManager_Value = @import("../zig.zig").Guid.initString("5088b39a-29b4-4d9d-8245-4ee289222f66"); pub const CLSID_QueryParserManager = &CLSID_QueryParserManager_Value; pub const STRUCTURED_QUERY_SYNTAX = enum(i32) { NO_SYNTAX = 0, ADVANCED_QUERY_SYNTAX = 1, NATURAL_QUERY_SYNTAX = 2, }; pub const 
SQS_NO_SYNTAX = STRUCTURED_QUERY_SYNTAX.NO_SYNTAX;
pub const SQS_ADVANCED_QUERY_SYNTAX = STRUCTURED_QUERY_SYNTAX.ADVANCED_QUERY_SYNTAX;
pub const SQS_NATURAL_QUERY_SYNTAX = STRUCTURED_QUERY_SYNTAX.NATURAL_QUERY_SYNTAX;

// Option keys that take a single PROPVARIANT value; consumed by
// IQueryParser.SetOption/GetOption (declared below).
pub const STRUCTURED_QUERY_SINGLE_OPTION = enum(i32) {
    SCHEMA = 0,
    LOCALE_WORD_BREAKING = 1,
    WORD_BREAKER = 2,
    NATURAL_SYNTAX = 3,
    AUTOMATIC_WILDCARD = 4,
    TRACE_LEVEL = 5,
    LANGUAGE_KEYWORDS = 6,
    SYNTAX = 7,
    TIME_ZONE = 8,
    IMPLICIT_CONNECTOR = 9,
    CONNECTOR_CASE = 10,
};
// C-style SQSO_* aliases for the enum members above.
pub const SQSO_SCHEMA = STRUCTURED_QUERY_SINGLE_OPTION.SCHEMA;
pub const SQSO_LOCALE_WORD_BREAKING = STRUCTURED_QUERY_SINGLE_OPTION.LOCALE_WORD_BREAKING;
pub const SQSO_WORD_BREAKER = STRUCTURED_QUERY_SINGLE_OPTION.WORD_BREAKER;
pub const SQSO_NATURAL_SYNTAX = STRUCTURED_QUERY_SINGLE_OPTION.NATURAL_SYNTAX;
pub const SQSO_AUTOMATIC_WILDCARD = STRUCTURED_QUERY_SINGLE_OPTION.AUTOMATIC_WILDCARD;
pub const SQSO_TRACE_LEVEL = STRUCTURED_QUERY_SINGLE_OPTION.TRACE_LEVEL;
pub const SQSO_LANGUAGE_KEYWORDS = STRUCTURED_QUERY_SINGLE_OPTION.LANGUAGE_KEYWORDS;
pub const SQSO_SYNTAX = STRUCTURED_QUERY_SINGLE_OPTION.SYNTAX;
pub const SQSO_TIME_ZONE = STRUCTURED_QUERY_SINGLE_OPTION.TIME_ZONE;
pub const SQSO_IMPLICIT_CONNECTOR = STRUCTURED_QUERY_SINGLE_OPTION.IMPLICIT_CONNECTOR;
pub const SQSO_CONNECTOR_CASE = STRUCTURED_QUERY_SINGLE_OPTION.CONNECTOR_CASE;

// Option keys that take key/value pairs; consumed by IQueryParser.SetMultiOption.
pub const STRUCTURED_QUERY_MULTIOPTION = enum(i32) {
    VIRTUAL_PROPERTY = 0,
    DEFAULT_PROPERTY = 1,
    GENERATOR_FOR_TYPE = 2,
    MAP_PROPERTY = 3,
};
pub const SQMO_VIRTUAL_PROPERTY = STRUCTURED_QUERY_MULTIOPTION.VIRTUAL_PROPERTY;
pub const SQMO_DEFAULT_PROPERTY = STRUCTURED_QUERY_MULTIOPTION.DEFAULT_PROPERTY;
pub const SQMO_GENERATOR_FOR_TYPE = STRUCTURED_QUERY_MULTIOPTION.GENERATOR_FOR_TYPE;
pub const SQMO_MAP_PROPERTY = STRUCTURED_QUERY_MULTIOPTION.MAP_PROPERTY;

// Error categories reported while parsing a structured query string.
pub const STRUCTURED_QUERY_PARSE_ERROR = enum(i32) {
    NONE = 0,
    EXTRA_OPENING_PARENTHESIS = 1,
    EXTRA_CLOSING_PARENTHESIS = 2,
    IGNORED_MODIFIER = 3,
    IGNORED_CONNECTOR = 4,
    IGNORED_KEYWORD = 5,
    UNHANDLED = 6,
};
pub const SQPE_NONE = STRUCTURED_QUERY_PARSE_ERROR.NONE;
pub const SQPE_EXTRA_OPENING_PARENTHESIS = STRUCTURED_QUERY_PARSE_ERROR.EXTRA_OPENING_PARENTHESIS;
pub const SQPE_EXTRA_CLOSING_PARENTHESIS = STRUCTURED_QUERY_PARSE_ERROR.EXTRA_CLOSING_PARENTHESIS;
pub const SQPE_IGNORED_MODIFIER = STRUCTURED_QUERY_PARSE_ERROR.IGNORED_MODIFIER;
pub const SQPE_IGNORED_CONNECTOR = STRUCTURED_QUERY_PARSE_ERROR.IGNORED_CONNECTOR;
pub const SQPE_IGNORED_KEYWORD = STRUCTURED_QUERY_PARSE_ERROR.IGNORED_KEYWORD;
pub const SQPE_UNHANDLED = STRUCTURED_QUERY_PARSE_ERROR.UNHANDLED;

// Bit-flag options for resolving a parsed condition tree; members are
// power-of-two values and `_` marks the enum non-exhaustive so arbitrary
// OR-combinations remain representable.
pub const STRUCTURED_QUERY_RESOLVE_OPTION = enum(u32) {
    DEFAULT = 0,
    DONT_RESOLVE_DATETIME = 1,
    ALWAYS_ONE_INTERVAL = 2,
    DONT_SIMPLIFY_CONDITION_TREES = 4,
    DONT_MAP_RELATIONS = 8,
    DONT_RESOLVE_RANGES = 16,
    DONT_REMOVE_UNRESTRICTED_KEYWORDS = 32,
    DONT_SPLIT_WORDS = 64,
    IGNORE_PHRASE_ORDER = 128,
    ADD_VALUE_TYPE_FOR_PLAIN_VALUES = 256,
    ADD_ROBUST_ITEM_NAME = 512,
    _,
    // Builds an OR-combination of the selected flags from u1 switches.
    pub fn initFlags(o: struct {
        DEFAULT: u1 = 0,
        DONT_RESOLVE_DATETIME: u1 = 0,
        ALWAYS_ONE_INTERVAL: u1 = 0,
        DONT_SIMPLIFY_CONDITION_TREES: u1 = 0,
        DONT_MAP_RELATIONS: u1 = 0,
        DONT_RESOLVE_RANGES: u1 = 0,
        DONT_REMOVE_UNRESTRICTED_KEYWORDS: u1 = 0,
        DONT_SPLIT_WORDS: u1 = 0,
        IGNORE_PHRASE_ORDER: u1 = 0,
        ADD_VALUE_TYPE_FOR_PLAIN_VALUES: u1 = 0,
        ADD_ROBUST_ITEM_NAME: u1 = 0,
    }) STRUCTURED_QUERY_RESOLVE_OPTION {
        return @intToEnum(STRUCTURED_QUERY_RESOLVE_OPTION,
              (if (o.DEFAULT == 1) @enumToInt(STRUCTURED_QUERY_RESOLVE_OPTION.DEFAULT) else 0)
            | (if (o.DONT_RESOLVE_DATETIME == 1) @enumToInt(STRUCTURED_QUERY_RESOLVE_OPTION.DONT_RESOLVE_DATETIME) else 0)
            | (if (o.ALWAYS_ONE_INTERVAL == 1) @enumToInt(STRUCTURED_QUERY_RESOLVE_OPTION.ALWAYS_ONE_INTERVAL) else 0)
            | (if (o.DONT_SIMPLIFY_CONDITION_TREES == 1) @enumToInt(STRUCTURED_QUERY_RESOLVE_OPTION.DONT_SIMPLIFY_CONDITION_TREES) else 0)
            | (if (o.DONT_MAP_RELATIONS == 1) @enumToInt(STRUCTURED_QUERY_RESOLVE_OPTION.DONT_MAP_RELATIONS) else 0)
            | (if (o.DONT_RESOLVE_RANGES == 1) @enumToInt(STRUCTURED_QUERY_RESOLVE_OPTION.DONT_RESOLVE_RANGES) else 0)
            | (if (o.DONT_REMOVE_UNRESTRICTED_KEYWORDS == 1) @enumToInt(STRUCTURED_QUERY_RESOLVE_OPTION.DONT_REMOVE_UNRESTRICTED_KEYWORDS) else 0)
            | (if (o.DONT_SPLIT_WORDS == 1) @enumToInt(STRUCTURED_QUERY_RESOLVE_OPTION.DONT_SPLIT_WORDS) else 0)
            | (if (o.IGNORE_PHRASE_ORDER == 1) @enumToInt(STRUCTURED_QUERY_RESOLVE_OPTION.IGNORE_PHRASE_ORDER) else 0)
            | (if (o.ADD_VALUE_TYPE_FOR_PLAIN_VALUES == 1) @enumToInt(STRUCTURED_QUERY_RESOLVE_OPTION.ADD_VALUE_TYPE_FOR_PLAIN_VALUES) else 0)
            | (if (o.ADD_ROBUST_ITEM_NAME == 1) @enumToInt(STRUCTURED_QUERY_RESOLVE_OPTION.ADD_ROBUST_ITEM_NAME) else 0)
        );
    }
};
pub const SQRO_DEFAULT = STRUCTURED_QUERY_RESOLVE_OPTION.DEFAULT;
pub const SQRO_DONT_RESOLVE_DATETIME = STRUCTURED_QUERY_RESOLVE_OPTION.DONT_RESOLVE_DATETIME;
pub const SQRO_ALWAYS_ONE_INTERVAL = STRUCTURED_QUERY_RESOLVE_OPTION.ALWAYS_ONE_INTERVAL;
pub const SQRO_DONT_SIMPLIFY_CONDITION_TREES = STRUCTURED_QUERY_RESOLVE_OPTION.DONT_SIMPLIFY_CONDITION_TREES;
pub const SQRO_DONT_MAP_RELATIONS = STRUCTURED_QUERY_RESOLVE_OPTION.DONT_MAP_RELATIONS;
pub const SQRO_DONT_RESOLVE_RANGES = STRUCTURED_QUERY_RESOLVE_OPTION.DONT_RESOLVE_RANGES;
pub const SQRO_DONT_REMOVE_UNRESTRICTED_KEYWORDS = STRUCTURED_QUERY_RESOLVE_OPTION.DONT_REMOVE_UNRESTRICTED_KEYWORDS;
pub const SQRO_DONT_SPLIT_WORDS = STRUCTURED_QUERY_RESOLVE_OPTION.DONT_SPLIT_WORDS;
pub const SQRO_IGNORE_PHRASE_ORDER = STRUCTURED_QUERY_RESOLVE_OPTION.IGNORE_PHRASE_ORDER;
pub const SQRO_ADD_VALUE_TYPE_FOR_PLAIN_VALUES = STRUCTURED_QUERY_RESOLVE_OPTION.ADD_VALUE_TYPE_FOR_PLAIN_VALUES;
pub const SQRO_ADD_ROBUST_ITEM_NAME = STRUCTURED_QUERY_RESOLVE_OPTION.ADD_ROBUST_ITEM_NAME;

pub const CASE_REQUIREMENT = enum(i32) {
    ANY = 0,
    UPPER_IF_AQS = 1,
};
pub const CASE_REQUIREMENT_ANY = CASE_REQUIREMENT.ANY;
pub const CASE_REQUIREMENT_UPPER_IF_AQS = CASE_REQUIREMENT.UPPER_IF_AQS;

// Kind of an interval endpoint as reported by IInterval.GetLimits (below).
pub const INTERVAL_LIMIT_KIND = enum(i32) {
    EXPLICIT_INCLUDED = 0,
    EXPLICIT_EXCLUDED = 1,
    NEGATIVE_INFINITY = 2,
    POSITIVE_INFINITY = 3,
};
pub const ILK_EXPLICIT_INCLUDED = INTERVAL_LIMIT_KIND.EXPLICIT_INCLUDED;
pub const ILK_EXPLICIT_EXCLUDED = INTERVAL_LIMIT_KIND.EXPLICIT_EXCLUDED;
pub const ILK_NEGATIVE_INFINITY = INTERVAL_LIMIT_KIND.NEGATIVE_INFINITY;
pub const ILK_POSITIVE_INFINITY = INTERVAL_LIMIT_KIND.POSITIVE_INFINITY;

pub const QUERY_PARSER_MANAGER_OPTION = enum(i32) {
    SCHEMA_BINARY_NAME = 0,
    PRELOCALIZED_SCHEMA_BINARY_PATH = 1,
    UNLOCALIZED_SCHEMA_BINARY_PATH = 2,
    LOCALIZED_SCHEMA_BINARY_PATH = 3,
    APPEND_LCID_TO_LOCALIZED_PATH = 4,
    LOCALIZER_SUPPORT = 5,
};
pub const QPMO_SCHEMA_BINARY_NAME = QUERY_PARSER_MANAGER_OPTION.SCHEMA_BINARY_NAME;
pub const QPMO_PRELOCALIZED_SCHEMA_BINARY_PATH = QUERY_PARSER_MANAGER_OPTION.PRELOCALIZED_SCHEMA_BINARY_PATH;
pub const QPMO_UNLOCALIZED_SCHEMA_BINARY_PATH = QUERY_PARSER_MANAGER_OPTION.UNLOCALIZED_SCHEMA_BINARY_PATH;
pub const QPMO_LOCALIZED_SCHEMA_BINARY_PATH = QUERY_PARSER_MANAGER_OPTION.LOCALIZED_SCHEMA_BINARY_PATH;
pub const QPMO_APPEND_LCID_TO_LOCALIZED_PATH = QUERY_PARSER_MANAGER_OPTION.APPEND_LCID_TO_LOCALIZED_PATH;
pub const QPMO_LOCALIZER_SUPPORT = QUERY_PARSER_MANAGER_OPTION.LOCALIZER_SUPPORT;

// COM interface IQueryParser: the `extern struct` holds only the vtable
// pointer; field order of `VTable` is the binary ABI and must not change.
// The MethodMixin wrappers forward calls through the vtable.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IQueryParser_Value = @import("../zig.zig").Guid.initString("2ebdee67-3505-43f8-9946-ea44abc8e5b0");
pub const IID_IQueryParser = &IID_IQueryParser_Value;
pub const IQueryParser = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Parse: fn(
            self: *const IQueryParser,
            pszInputString: ?[*:0]const u16,
            pCustomProperties: ?*IEnumUnknown,
            ppSolution: ?*?*IQuerySolution,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetOption: fn(
            self: *const IQueryParser,
            option: STRUCTURED_QUERY_SINGLE_OPTION,
            pOptionValue: ?*const PROPVARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetOption: fn(
            self: *const IQueryParser,
            option: STRUCTURED_QUERY_SINGLE_OPTION,
            pOptionValue: ?*PROPVARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetMultiOption: fn(
            self: *const IQueryParser,
            option: STRUCTURED_QUERY_MULTIOPTION,
            pszOptionKey: ?[*:0]const u16,
            pOptionValue: ?*const PROPVARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetSchemaProvider: fn(
            self: *const IQueryParser,
            ppSchemaProvider: ?*?*ISchemaProvider,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RestateToString: fn(
            self: *const IQueryParser,
            pCondition: ?*ICondition,
            fUseEnglish: BOOL,
            ppszQueryString: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ParsePropertyValue: fn(
            self: *const IQueryParser,
            pszPropertyName: ?[*:0]const u16,
            pszInputString: ?[*:0]const u16,
            ppSolution: ?*?*IQuerySolution,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RestatePropertyValueToString: fn(
            self: *const IQueryParser,
            pCondition: ?*ICondition,
            fUseEnglish: BOOL,
            ppszPropertyName: ?*?PWSTR,
            ppszQueryString: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IQueryParser_Parse(self: *const T, pszInputString: ?[*:0]const u16, pCustomProperties: ?*IEnumUnknown, ppSolution: ?*?*IQuerySolution) callconv(.Inline) HRESULT {
            return @ptrCast(*const IQueryParser.VTable, self.vtable).Parse(@ptrCast(*const IQueryParser, self), pszInputString, pCustomProperties, ppSolution);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IQueryParser_SetOption(self: *const T, option: STRUCTURED_QUERY_SINGLE_OPTION, pOptionValue: ?*const PROPVARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IQueryParser.VTable, self.vtable).SetOption(@ptrCast(*const IQueryParser, self), option, pOptionValue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IQueryParser_GetOption(self: *const T, option: STRUCTURED_QUERY_SINGLE_OPTION, pOptionValue: ?*PROPVARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IQueryParser.VTable, self.vtable).GetOption(@ptrCast(*const IQueryParser, self), option, pOptionValue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IQueryParser_SetMultiOption(self: *const T, option: STRUCTURED_QUERY_MULTIOPTION, pszOptionKey: ?[*:0]const u16, pOptionValue: ?*const PROPVARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IQueryParser.VTable, self.vtable).SetMultiOption(@ptrCast(*const IQueryParser, self), option, pszOptionKey, pOptionValue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IQueryParser_GetSchemaProvider(self: *const T, ppSchemaProvider: ?*?*ISchemaProvider) callconv(.Inline) HRESULT {
            return @ptrCast(*const IQueryParser.VTable, self.vtable).GetSchemaProvider(@ptrCast(*const IQueryParser, self), ppSchemaProvider);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IQueryParser_RestateToString(self: *const T, pCondition: ?*ICondition, fUseEnglish: BOOL, ppszQueryString: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IQueryParser.VTable, self.vtable).RestateToString(@ptrCast(*const IQueryParser, self), pCondition, fUseEnglish, ppszQueryString);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IQueryParser_ParsePropertyValue(self: *const T, pszPropertyName: ?[*:0]const u16, pszInputString: ?[*:0]const u16, ppSolution: ?*?*IQuerySolution) callconv(.Inline) HRESULT {
            return @ptrCast(*const IQueryParser.VTable, self.vtable).ParsePropertyValue(@ptrCast(*const IQueryParser, self), pszPropertyName, pszInputString, ppSolution);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IQueryParser_RestatePropertyValueToString(self: *const T, pCondition: ?*ICondition, fUseEnglish: BOOL, ppszPropertyName: ?*?PWSTR, ppszQueryString: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IQueryParser.VTable, self.vtable).RestatePropertyValueToString(@ptrCast(*const IQueryParser, self), pCondition, fUseEnglish, ppszPropertyName, ppszQueryString);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface IConditionFactory: builds ICondition trees (NOT/AND/OR/leaf)
// and resolves them with STRUCTURED_QUERY_RESOLVE_OPTION flags.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IConditionFactory_Value = @import("../zig.zig").Guid.initString("a5efe073-b16f-474f-9f3e-9f8b497a3e08");
pub const IID_IConditionFactory = &IID_IConditionFactory_Value;
pub const IConditionFactory = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        MakeNot: fn(
            self: *const IConditionFactory,
            pcSub: ?*ICondition,
            fSimplify: BOOL,
            ppcResult: ?*?*ICondition,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MakeAndOr: fn(
            self: *const IConditionFactory,
            ct: CONDITION_TYPE,
            peuSubs: ?*IEnumUnknown,
            fSimplify: BOOL,
            ppcResult: ?*?*ICondition,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MakeLeaf: fn(
            self: *const IConditionFactory,
            pszPropertyName: ?[*:0]const u16,
            cop: CONDITION_OPERATION,
            pszValueType: ?[*:0]const u16,
            ppropvar: ?*const PROPVARIANT,
            pPropertyNameTerm: ?*IRichChunk,
            pOperationTerm: ?*IRichChunk,
            pValueTerm: ?*IRichChunk,
            fExpand: BOOL,
            ppcResult: ?*?*ICondition,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Resolve: fn(
            self: *const IConditionFactory,
            pc: ?*ICondition,
            sqro: STRUCTURED_QUERY_RESOLVE_OPTION,
            pstReferenceTime: ?*const SYSTEMTIME,
            ppcResolved: ?*?*ICondition,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionFactory_MakeNot(self: *const T, pcSub: ?*ICondition, fSimplify: BOOL, ppcResult: ?*?*ICondition) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionFactory.VTable, self.vtable).MakeNot(@ptrCast(*const IConditionFactory, self), pcSub, fSimplify, ppcResult);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionFactory_MakeAndOr(self: *const T, ct: CONDITION_TYPE, peuSubs: ?*IEnumUnknown, fSimplify: BOOL, ppcResult: ?*?*ICondition) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionFactory.VTable, self.vtable).MakeAndOr(@ptrCast(*const IConditionFactory, self), ct, peuSubs, fSimplify, ppcResult);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionFactory_MakeLeaf(self: *const T, pszPropertyName: ?[*:0]const u16, cop: CONDITION_OPERATION, pszValueType: ?[*:0]const u16, ppropvar: ?*const PROPVARIANT, pPropertyNameTerm: ?*IRichChunk, pOperationTerm: ?*IRichChunk, pValueTerm: ?*IRichChunk, fExpand: BOOL, ppcResult: ?*?*ICondition) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionFactory.VTable, self.vtable).MakeLeaf(@ptrCast(*const IConditionFactory, self), pszPropertyName, cop, pszValueType, ppropvar, pPropertyNameTerm, pOperationTerm, pValueTerm, fExpand, ppcResult);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionFactory_Resolve(self: *const T, pc: ?*ICondition, sqro: STRUCTURED_QUERY_RESOLVE_OPTION, pstReferenceTime: ?*const SYSTEMTIME, ppcResolved: ?*?*ICondition) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionFactory.VTable, self.vtable).Resolve(@ptrCast(*const IConditionFactory, self), pc, sqro, pstReferenceTime, ppcResolved);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface IQuerySolution: derives from IConditionFactory (see the
// `base` field and the mixin chain) and exposes the result of a parse.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IQuerySolution_Value = @import("../zig.zig").Guid.initString("d6ebc66b-8921-4193-afdd-a1789fb7ff57");
pub const IID_IQuerySolution = &IID_IQuerySolution_Value;
pub const IQuerySolution = extern struct {
    pub const VTable = extern struct {
        base: IConditionFactory.VTable,
        GetQuery: fn(
            self: *const IQuerySolution,
            ppQueryNode: ?*?*ICondition,
            ppMainType: ?*?*IEntity,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetErrors: fn(
            self: *const IQuerySolution,
            riid: ?*const Guid,
            ppParseErrors: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetLexicalData: fn(
            self: *const IQuerySolution,
            ppszInputString: ?*?PWSTR,
            ppTokens: ?*?*ITokenCollection,
            plcid: ?*u32,
            ppWordBreaker: ?*?*IUnknown,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IConditionFactory.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IQuerySolution_GetQuery(self: *const T, ppQueryNode: ?*?*ICondition, ppMainType: ?*?*IEntity) callconv(.Inline) HRESULT {
            return @ptrCast(*const IQuerySolution.VTable, self.vtable).GetQuery(@ptrCast(*const IQuerySolution, self), ppQueryNode, ppMainType);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IQuerySolution_GetErrors(self: *const T, riid: ?*const Guid, ppParseErrors: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IQuerySolution.VTable, self.vtable).GetErrors(@ptrCast(*const IQuerySolution, self), riid, ppParseErrors);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IQuerySolution_GetLexicalData(self: *const T, ppszInputString: ?*?PWSTR, ppTokens: ?*?*ITokenCollection, plcid: ?*u32, ppWordBreaker: ?*?*IUnknown) callconv(.Inline) HRESULT {
            return @ptrCast(*const IQuerySolution.VTable, self.vtable).GetLexicalData(@ptrCast(*const IQuerySolution, self), ppszInputString, ppTokens, plcid, ppWordBreaker);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Bit-flag options for IConditionFactory2 creation methods.
pub const CONDITION_CREATION_OPTIONS = enum(u32) {
    DEFAULT = 0,
    // NONE = 0, this enum value conflicts with DEFAULT
    SIMPLIFY = 1,
    VECTOR_AND = 2,
    VECTOR_OR = 4,
    VECTOR_LEAF = 8,
    USE_CONTENT_LOCALE = 16,
    _,
    // Builds an OR-combination of the selected flags from u1 switches.
    pub fn initFlags(o: struct {
        DEFAULT: u1 = 0,
        SIMPLIFY: u1 = 0,
        VECTOR_AND: u1 = 0,
        VECTOR_OR: u1 = 0,
        VECTOR_LEAF: u1 = 0,
        USE_CONTENT_LOCALE: u1 = 0,
    }) CONDITION_CREATION_OPTIONS {
        return @intToEnum(CONDITION_CREATION_OPTIONS,
              (if (o.DEFAULT == 1) @enumToInt(CONDITION_CREATION_OPTIONS.DEFAULT) else 0)
            | (if (o.SIMPLIFY == 1) @enumToInt(CONDITION_CREATION_OPTIONS.SIMPLIFY) else 0)
            | (if (o.VECTOR_AND == 1) @enumToInt(CONDITION_CREATION_OPTIONS.VECTOR_AND) else 0)
            | (if (o.VECTOR_OR == 1) @enumToInt(CONDITION_CREATION_OPTIONS.VECTOR_OR) else 0)
            | (if (o.VECTOR_LEAF == 1) @enumToInt(CONDITION_CREATION_OPTIONS.VECTOR_LEAF) else 0)
            | (if (o.USE_CONTENT_LOCALE == 1) @enumToInt(CONDITION_CREATION_OPTIONS.USE_CONTENT_LOCALE) else 0)
        );
    }
};
pub const CONDITION_CREATION_DEFAULT = CONDITION_CREATION_OPTIONS.DEFAULT;
// NONE aliases DEFAULT because both native values are 0 (see enum above).
pub const CONDITION_CREATION_NONE = CONDITION_CREATION_OPTIONS.DEFAULT;
pub const CONDITION_CREATION_SIMPLIFY = CONDITION_CREATION_OPTIONS.SIMPLIFY;
pub const CONDITION_CREATION_VECTOR_AND = CONDITION_CREATION_OPTIONS.VECTOR_AND;
pub const CONDITION_CREATION_VECTOR_OR = CONDITION_CREATION_OPTIONS.VECTOR_OR;
pub const CONDITION_CREATION_VECTOR_LEAF = CONDITION_CREATION_OPTIONS.VECTOR_LEAF;
pub const CONDITION_CREATION_USE_CONTENT_LOCALE = CONDITION_CREATION_OPTIONS.USE_CONTENT_LOCALE;

// COM interface IConditionFactory2: extends IConditionFactory with typed
// leaf constructors (string/integer/boolean/PROPVARIANT) that return via
// riid/ppv out-parameters.
// TODO: this type is limited to platform 'windows6.1'
const IID_IConditionFactory2_Value = @import("../zig.zig").Guid.initString("71d222e1-432f-429e-8c13-b6dafde5077a");
pub const IID_IConditionFactory2 = &IID_IConditionFactory2_Value;
pub const IConditionFactory2 = extern struct {
    pub const VTable = extern struct {
        base: IConditionFactory.VTable,
        CreateTrueFalse: fn(
            self: *const IConditionFactory2,
            fVal: BOOL,
            cco: CONDITION_CREATION_OPTIONS,
            riid: ?*const Guid,
            ppv: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateNegation: fn(
            self: *const IConditionFactory2,
            pcSub: ?*ICondition,
            cco: CONDITION_CREATION_OPTIONS,
            riid: ?*const Guid,
            ppv: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateCompoundFromObjectArray: fn(
            self: *const IConditionFactory2,
            ct: CONDITION_TYPE,
            poaSubs: ?*IObjectArray,
            cco: CONDITION_CREATION_OPTIONS,
            riid: ?*const Guid,
            ppv: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateCompoundFromArray: fn(
            self: *const IConditionFactory2,
            ct: CONDITION_TYPE,
            ppcondSubs: [*]?*ICondition,
            cSubs: u32,
            cco: CONDITION_CREATION_OPTIONS,
            riid: ?*const Guid,
            ppv: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateStringLeaf: fn(
            self: *const IConditionFactory2,
            propkey: ?*const PROPERTYKEY,
            cop: CONDITION_OPERATION,
            pszValue: ?[*:0]const u16,
            pszLocaleName: ?[*:0]const u16,
            cco: CONDITION_CREATION_OPTIONS,
            riid: ?*const Guid,
            ppv: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateIntegerLeaf: fn(
            self: *const IConditionFactory2,
            propkey: ?*const PROPERTYKEY,
            cop: CONDITION_OPERATION,
            lValue: i32,
            cco: CONDITION_CREATION_OPTIONS,
            riid: ?*const Guid,
            ppv: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateBooleanLeaf: fn(
            self: *const IConditionFactory2,
            propkey: ?*const PROPERTYKEY,
            cop: CONDITION_OPERATION,
            fValue: BOOL,
            cco: CONDITION_CREATION_OPTIONS,
            riid: ?*const Guid,
            ppv: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        CreateLeaf: fn(
            self: *const IConditionFactory2,
            propkey: ?*const PROPERTYKEY,
            cop: CONDITION_OPERATION,
            propvar: ?*const PROPVARIANT,
            pszSemanticType: ?[*:0]const u16,
            pszLocaleName: ?[*:0]const u16,
            pPropertyNameTerm: ?*IRichChunk,
            pOperationTerm: ?*IRichChunk,
            pValueTerm: ?*IRichChunk,
            cco: CONDITION_CREATION_OPTIONS,
            riid: ?*const Guid,
            ppv: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ResolveCondition: fn(
            self: *const IConditionFactory2,
            pc: ?*ICondition,
            sqro: STRUCTURED_QUERY_RESOLVE_OPTION,
            pstReferenceTime: ?*const SYSTEMTIME,
            riid: ?*const Guid,
            ppv: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IConditionFactory.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionFactory2_CreateTrueFalse(self: *const T, fVal: BOOL, cco: CONDITION_CREATION_OPTIONS, riid: ?*const Guid, ppv: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionFactory2.VTable, self.vtable).CreateTrueFalse(@ptrCast(*const IConditionFactory2, self), fVal, cco, riid, ppv);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionFactory2_CreateNegation(self: *const T, pcSub: ?*ICondition, cco: CONDITION_CREATION_OPTIONS, riid: ?*const Guid, ppv: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionFactory2.VTable, self.vtable).CreateNegation(@ptrCast(*const IConditionFactory2, self), pcSub, cco, riid, ppv);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionFactory2_CreateCompoundFromObjectArray(self: *const T, ct: CONDITION_TYPE, poaSubs: ?*IObjectArray, cco: CONDITION_CREATION_OPTIONS, riid: ?*const Guid, ppv: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionFactory2.VTable, self.vtable).CreateCompoundFromObjectArray(@ptrCast(*const IConditionFactory2, self), ct, poaSubs, cco, riid, ppv);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionFactory2_CreateCompoundFromArray(self: *const T, ct: CONDITION_TYPE, ppcondSubs: [*]?*ICondition, cSubs: u32, cco: CONDITION_CREATION_OPTIONS, riid: ?*const Guid, ppv: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionFactory2.VTable, self.vtable).CreateCompoundFromArray(@ptrCast(*const IConditionFactory2, self), ct, ppcondSubs, cSubs, cco, riid, ppv);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionFactory2_CreateStringLeaf(self: *const T, propkey: ?*const PROPERTYKEY, cop: CONDITION_OPERATION, pszValue: ?[*:0]const u16, pszLocaleName: ?[*:0]const u16, cco: CONDITION_CREATION_OPTIONS, riid: ?*const Guid, ppv: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionFactory2.VTable, self.vtable).CreateStringLeaf(@ptrCast(*const IConditionFactory2, self), propkey, cop, pszValue, pszLocaleName, cco, riid, ppv);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionFactory2_CreateIntegerLeaf(self: *const T, propkey: ?*const PROPERTYKEY, cop: CONDITION_OPERATION, lValue: i32, cco: CONDITION_CREATION_OPTIONS, riid: ?*const Guid, ppv: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionFactory2.VTable, self.vtable).CreateIntegerLeaf(@ptrCast(*const IConditionFactory2, self), propkey, cop, lValue, cco, riid, ppv);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionFactory2_CreateBooleanLeaf(self: *const T, propkey: ?*const PROPERTYKEY, cop: CONDITION_OPERATION, fValue: BOOL, cco: CONDITION_CREATION_OPTIONS, riid: ?*const Guid, ppv: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionFactory2.VTable, self.vtable).CreateBooleanLeaf(@ptrCast(*const IConditionFactory2, self), propkey, cop, fValue, cco, riid, ppv);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionFactory2_CreateLeaf(self: *const T, propkey: ?*const PROPERTYKEY, cop: CONDITION_OPERATION, propvar: ?*const PROPVARIANT, pszSemanticType: ?[*:0]const u16, pszLocaleName: ?[*:0]const u16, pPropertyNameTerm: ?*IRichChunk, pOperationTerm: ?*IRichChunk, pValueTerm: ?*IRichChunk, cco: CONDITION_CREATION_OPTIONS, riid: ?*const Guid, ppv: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionFactory2.VTable, self.vtable).CreateLeaf(@ptrCast(*const IConditionFactory2, self), propkey, cop, propvar, pszSemanticType, pszLocaleName, pPropertyNameTerm, pOperationTerm, pValueTerm, cco, riid, ppv);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionFactory2_ResolveCondition(self: *const T, pc: ?*ICondition, sqro: STRUCTURED_QUERY_RESOLVE_OPTION, pstReferenceTime: ?*const SYSTEMTIME, riid: ?*const Guid, ppv: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionFactory2.VTable, self.vtable).ResolveCondition(@ptrCast(*const IConditionFactory2, self), pc, sqro, pstReferenceTime, riid, ppv);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface IConditionGenerator: callback interface for recognizing
// named entities and generating leaf conditions during parsing.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IConditionGenerator_Value = @import("../zig.zig").Guid.initString("92d2cc58-4386-45a3-b98c-7e0ce64a4117");
pub const IID_IConditionGenerator = &IID_IConditionGenerator_Value;
pub const IConditionGenerator = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Initialize: fn(
            self: *const IConditionGenerator,
            pSchemaProvider: ?*ISchemaProvider,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RecognizeNamedEntities: fn(
            self: *const IConditionGenerator,
            pszInputString: ?[*:0]const u16,
            lcidUserLocale: u32,
            pTokenCollection: ?*ITokenCollection,
            pNamedEntities: ?*INamedEntityCollector,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GenerateForLeaf: fn(
            self: *const IConditionGenerator,
            pConditionFactory: ?*IConditionFactory,
            pszPropertyName: ?[*:0]const u16,
            cop: CONDITION_OPERATION,
            pszValueType: ?[*:0]const u16,
            pszValue: ?[*:0]const u16,
            pszValue2: ?[*:0]const u16,
            pPropertyNameTerm: ?*IRichChunk,
            pOperationTerm: ?*IRichChunk,
            pValueTerm: ?*IRichChunk,
            automaticWildcard: BOOL,
            pNoStringQuery: ?*BOOL,
            ppQueryExpression: ?*?*ICondition,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DefaultPhrase: fn(
            self: *const IConditionGenerator,
            pszValueType: ?[*:0]const u16,
            ppropvar: ?*const PROPVARIANT,
            fUseEnglish: BOOL,
            ppszPhrase: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionGenerator_Initialize(self: *const T, pSchemaProvider: ?*ISchemaProvider) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionGenerator.VTable, self.vtable).Initialize(@ptrCast(*const IConditionGenerator, self), pSchemaProvider);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionGenerator_RecognizeNamedEntities(self: *const T, pszInputString: ?[*:0]const u16, lcidUserLocale: u32, pTokenCollection: ?*ITokenCollection, pNamedEntities: ?*INamedEntityCollector) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionGenerator.VTable, self.vtable).RecognizeNamedEntities(@ptrCast(*const IConditionGenerator, self), pszInputString, lcidUserLocale, pTokenCollection, pNamedEntities);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionGenerator_GenerateForLeaf(self: *const T, pConditionFactory: ?*IConditionFactory, pszPropertyName: ?[*:0]const u16, cop: CONDITION_OPERATION, pszValueType: ?[*:0]const u16, pszValue: ?[*:0]const u16, pszValue2: ?[*:0]const u16, pPropertyNameTerm: ?*IRichChunk, pOperationTerm: ?*IRichChunk, pValueTerm: ?*IRichChunk, automaticWildcard: BOOL, pNoStringQuery: ?*BOOL, ppQueryExpression: ?*?*ICondition) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionGenerator.VTable, self.vtable).GenerateForLeaf(@ptrCast(*const IConditionGenerator, self), pConditionFactory, pszPropertyName, cop, pszValueType, pszValue, pszValue2, pPropertyNameTerm, pOperationTerm, pValueTerm, automaticWildcard, pNoStringQuery, ppQueryExpression);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IConditionGenerator_DefaultPhrase(self: *const T, pszValueType: ?[*:0]const u16, ppropvar: ?*const PROPVARIANT, fUseEnglish: BOOL, ppszPhrase: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IConditionGenerator.VTable, self.vtable).DefaultPhrase(@ptrCast(*const IConditionGenerator, self), pszValueType, ppropvar, fUseEnglish, ppszPhrase);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface IInterval: reports both endpoints of a resolved range
// (kinds from INTERVAL_LIMIT_KIND above, values as PROPVARIANTs).
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IInterval_Value = @import("../zig.zig").Guid.initString("6bf0a714-3c18-430b-8b5d-83b1c234d3db");
pub const IID_IInterval = &IID_IInterval_Value;
pub const IInterval = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetLimits: fn(
            self: *const IInterval,
            pilkLower: ?*INTERVAL_LIMIT_KIND,
            ppropvarLower: ?*PROPVARIANT,
            pilkUpper: ?*INTERVAL_LIMIT_KIND,
            ppropvarUpper: ?*PROPVARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IInterval_GetLimits(self: *const T, pilkLower: ?*INTERVAL_LIMIT_KIND, ppropvarLower: ?*PROPVARIANT, pilkUpper: ?*INTERVAL_LIMIT_KIND, ppropvarUpper: ?*PROPVARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const IInterval.VTable, self.vtable).GetLimits(@ptrCast(*const IInterval, self), pilkLower, ppropvarLower, pilkUpper, ppropvarUpper);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface IMetaData: a single key/value string pair.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IMetaData_Value = @import("../zig.zig").Guid.initString("780102b0-c43b-4876-bc7b-5e9ba5c88794");
pub const IID_IMetaData = &IID_IMetaData_Value;
pub const IMetaData = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetData: fn(
            self: *const IMetaData,
            ppszKey: ?*?PWSTR,
            ppszValue: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IMetaData_GetData(self: *const T, ppszKey: ?*?PWSTR, ppszValue: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IMetaData.VTable, self.vtable).GetData(@ptrCast(*const IMetaData, self), ppszKey, ppszValue);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface IEntity: a schema entity with name, base, relationships,
// metadata, and named-entity lookup.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IEntity_Value = @import("../zig.zig").Guid.initString("24264891-e80b-4fd3-b7ce-4ff2fae8931f");
pub const IID_IEntity = &IID_IEntity_Value;
pub const IEntity = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Name: fn(
            self: *const IEntity,
            ppszName: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Base: fn(
            self: *const IEntity,
            pBaseEntity: ?*?*IEntity,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Relationships: fn(
            self: *const IEntity,
            riid: ?*const Guid,
            pRelationships: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetRelationship: fn(
            self: *const IEntity,
            pszRelationName: ?[*:0]const u16,
            pRelationship: ?*?*IRelationship,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MetaData: fn(
            self: *const IEntity,
            riid: ?*const Guid,
            pMetaData: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        NamedEntities: fn(
            self: *const IEntity,
            riid: ?*const Guid,
            pNamedEntities: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetNamedEntity: fn(
            self: *const IEntity,
            pszValue: ?[*:0]const u16,
            ppNamedEntity: ?*?*INamedEntity,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DefaultPhrase: fn(
            self: *const IEntity,
            ppszPhrase: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEntity_Name(self: *const T, ppszName: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEntity.VTable, self.vtable).Name(@ptrCast(*const IEntity, self), ppszName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEntity_Base(self: *const T, pBaseEntity: ?*?*IEntity) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEntity.VTable, self.vtable).Base(@ptrCast(*const IEntity, self), pBaseEntity);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEntity_Relationships(self: *const T, riid: ?*const Guid, pRelationships: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEntity.VTable, self.vtable).Relationships(@ptrCast(*const IEntity, self), riid, pRelationships);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEntity_GetRelationship(self: *const T, pszRelationName: ?[*:0]const u16, pRelationship: ?*?*IRelationship) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEntity.VTable, self.vtable).GetRelationship(@ptrCast(*const IEntity, self), pszRelationName, pRelationship);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEntity_MetaData(self: *const T, riid: ?*const Guid, pMetaData: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEntity.VTable, self.vtable).MetaData(@ptrCast(*const IEntity, self), riid, pMetaData);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEntity_NamedEntities(self: *const T, riid: ?*const Guid, pNamedEntities: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEntity.VTable, self.vtable).NamedEntities(@ptrCast(*const IEntity, self), riid, pNamedEntities);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEntity_GetNamedEntity(self: *const T, pszValue: ?[*:0]const u16, ppNamedEntity: ?*?*INamedEntity) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEntity.VTable, self.vtable).GetNamedEntity(@ptrCast(*const IEntity, self), pszValue, ppNamedEntity);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEntity_DefaultPhrase(self: *const T, ppszPhrase: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IEntity.VTable, self.vtable).DefaultPhrase(@ptrCast(*const IEntity, self), ppszPhrase);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface IRelationship: a named, possibly virtual link between
// schema entities.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IRelationship_Value = @import("../zig.zig").Guid.initString("2769280b-5108-498c-9c7f-a51239b63147");
pub const IID_IRelationship = &IID_IRelationship_Value;
pub const IRelationship = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Name: fn(
            self: *const IRelationship,
            ppszName: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        IsReal: fn(
            self: *const IRelationship,
            pIsReal: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Destination: fn(
            self: *const IRelationship,
            pDestinationEntity: ?*?*IEntity,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MetaData: fn(
            self: *const IRelationship,
            riid: ?*const Guid,
            pMetaData: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DefaultPhrase: fn(
            self: *const IRelationship,
            ppszPhrase: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRelationship_Name(self: *const T, ppszName: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRelationship.VTable, self.vtable).Name(@ptrCast(*const IRelationship, self), ppszName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRelationship_IsReal(self: *const T, pIsReal: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRelationship.VTable, self.vtable).IsReal(@ptrCast(*const IRelationship, self), pIsReal);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRelationship_Destination(self: *const T, pDestinationEntity: ?*?*IEntity) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRelationship.VTable, self.vtable).Destination(@ptrCast(*const IRelationship, self), pDestinationEntity);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRelationship_MetaData(self: *const T, riid: ?*const Guid, pMetaData: ?*?*c_void) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRelationship.VTable, self.vtable).MetaData(@ptrCast(*const IRelationship, self), riid, pMetaData);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IRelationship_DefaultPhrase(self: *const T, ppszPhrase: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const IRelationship.VTable, self.vtable).DefaultPhrase(@ptrCast(*const IRelationship, self), ppszPhrase);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface INamedEntity: value/phrase pair for a recognized named entity.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_INamedEntity_Value = @import("../zig.zig").Guid.initString("abdbd0b1-7d54-49fb-ab5c-bff4130004cd");
pub const IID_INamedEntity = &IID_INamedEntity_Value;
pub const INamedEntity = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetValue: fn(
            self: *const INamedEntity,
            ppszValue: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        DefaultPhrase: fn(
            self: *const INamedEntity,
            ppszPhrase: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn INamedEntity_GetValue(self: *const T, ppszValue: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const INamedEntity.VTable, self.vtable).GetValue(@ptrCast(*const INamedEntity, self), ppszValue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn INamedEntity_DefaultPhrase(self: *const T, ppszPhrase: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const INamedEntity.VTable, self.vtable).DefaultPhrase(@ptrCast(*const INamedEntity, self), ppszPhrase);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// COM interface ISchemaProvider (definition continues beyond this chunk).
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ISchemaProvider_Value = @import("../zig.zig").Guid.initString("8cf89bcb-394c-49b2-ae28-a59dd4ed7f68");
pub const IID_ISchemaProvider = &IID_ISchemaProvider_Value;
pub const ISchemaProvider = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Entities: fn(
            self: *const ISchemaProvider,
            riid: ?*const Guid,
            pEntities: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RootEntity: fn(
            self: *const ISchemaProvider,
            pRootEntity: ?*?*IEntity,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetEntity: fn(
            self: *const ISchemaProvider,
            pszEntityName: ?[*:0]const u16,
            pEntity: ?*?*IEntity,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        MetaData: fn(
            self: *const ISchemaProvider,
            riid: ?*const Guid,
            pMetaData: ?*?*c_void,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Localize: fn(
            self: *const ISchemaProvider,
            lcid: u32,
            pSchemaLocalizerSupport: ?*ISchemaLocalizerSupport,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SaveBinary: fn(
            self: *const ISchemaProvider,
            pszSchemaBinaryPath: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        LookupAuthoredNamedEntity: fn(
            self: *const ISchemaProvider,
            pEntity: ?*IEntity,
            pszInputString: ?[*:0]const u16,
            pTokenCollection: ?*ITokenCollection,
            cTokensBegin: u32,
            pcTokensLength: ?*u32,
            ppszValue: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISchemaProvider_Entities(self: *const T, riid: ?*const
Guid, pEntities: ?*?*c_void) callconv(.Inline) HRESULT { return @ptrCast(*const ISchemaProvider.VTable, self.vtable).Entities(@ptrCast(*const ISchemaProvider, self), riid, pEntities); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISchemaProvider_RootEntity(self: *const T, pRootEntity: ?*?*IEntity) callconv(.Inline) HRESULT { return @ptrCast(*const ISchemaProvider.VTable, self.vtable).RootEntity(@ptrCast(*const ISchemaProvider, self), pRootEntity); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISchemaProvider_GetEntity(self: *const T, pszEntityName: ?[*:0]const u16, pEntity: ?*?*IEntity) callconv(.Inline) HRESULT { return @ptrCast(*const ISchemaProvider.VTable, self.vtable).GetEntity(@ptrCast(*const ISchemaProvider, self), pszEntityName, pEntity); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISchemaProvider_MetaData(self: *const T, riid: ?*const Guid, pMetaData: ?*?*c_void) callconv(.Inline) HRESULT { return @ptrCast(*const ISchemaProvider.VTable, self.vtable).MetaData(@ptrCast(*const ISchemaProvider, self), riid, pMetaData); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISchemaProvider_Localize(self: *const T, lcid: u32, pSchemaLocalizerSupport: ?*ISchemaLocalizerSupport) callconv(.Inline) HRESULT { return @ptrCast(*const ISchemaProvider.VTable, self.vtable).Localize(@ptrCast(*const ISchemaProvider, self), lcid, pSchemaLocalizerSupport); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISchemaProvider_SaveBinary(self: *const T, pszSchemaBinaryPath: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISchemaProvider.VTable, self.vtable).SaveBinary(@ptrCast(*const ISchemaProvider, self), pszSchemaBinaryPath); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISchemaProvider_LookupAuthoredNamedEntity(self: *const T, 
pEntity: ?*IEntity, pszInputString: ?[*:0]const u16, pTokenCollection: ?*ITokenCollection, cTokensBegin: u32, pcTokensLength: ?*u32, ppszValue: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const ISchemaProvider.VTable, self.vtable).LookupAuthoredNamedEntity(@ptrCast(*const ISchemaProvider, self), pEntity, pszInputString, pTokenCollection, cTokensBegin, pcTokensLength, ppszValue); } };} pub usingnamespace MethodMixin(@This()); };
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ITokenCollection_Value = @import("../zig.zig").Guid.initString("22d8b4f2-f577-4adb-a335-c2ae88416fab");
pub const IID_ITokenCollection = &IID_ITokenCollection_Value;
/// COM interface over a collection of query tokens; consumed by
/// ISchemaProvider.LookupAuthoredNamedEntity (which takes a ?*ITokenCollection).
/// The VTable field order mirrors the native COM vtable and is ABI — do not
/// reorder or insert fields.
pub const ITokenCollection = extern struct {
    pub const VTable = extern struct {
        // IUnknown slots (QueryInterface/AddRef/Release) come first, as in COM.
        base: IUnknown.VTable,
        // Slot: presumably writes the token count through pCount — TODO confirm against MSDN.
        NumberOfTokens: fn(
            self: *const ITokenCollection,
            pCount: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // Slot: retrieves token i — begin offset, length, and text out-params.
        GetToken: fn(
            self: *const ITokenCollection,
            i: u32,
            pBegin: ?*u32,
            pLength: ?*u32,
            ppsz: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Generated convenience wrappers that dispatch through `vtable`; mixed into
    /// any type T whose first field layout starts with this interface.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITokenCollection_NumberOfTokens(self: *const T, pCount: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ITokenCollection.VTable, self.vtable).NumberOfTokens(@ptrCast(*const ITokenCollection, self), pCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ITokenCollection_GetToken(self: *const T, i: u32, pBegin: ?*u32, pLength: ?*u32, ppsz: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const ITokenCollection.VTable, self.vtable).GetToken(@ptrCast(*const ITokenCollection, self), i, pBegin, pLength, ppsz);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
/// Certainty ranking passed to INamedEntityCollector.Add (see its `certainty`
/// parameter below). i32-backed to match the native enum's ABI.
pub const NAMED_ENTITY_CERTAINTY = enum(i32) {
    LOW = 0,
    MEDIUM = 1,
    HIGH = 2,
};
// C-style aliases for the enum members, kept for API parity with the Windows headers.
pub const NEC_LOW =
NAMED_ENTITY_CERTAINTY.LOW; pub const NEC_MEDIUM = NAMED_ENTITY_CERTAINTY.MEDIUM; pub const NEC_HIGH = NAMED_ENTITY_CERTAINTY.HIGH; // TODO: this type is limited to platform 'windows5.1.2600' const IID_INamedEntityCollector_Value = @import("../zig.zig").Guid.initString("af2440f6-8afc-47d0-9a7f-396a0acfb43d"); pub const IID_INamedEntityCollector = &IID_INamedEntityCollector_Value; pub const INamedEntityCollector = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Add: fn( self: *const INamedEntityCollector, beginSpan: u32, endSpan: u32, beginActual: u32, endActual: u32, pType: ?*IEntity, pszValue: ?[*:0]const u16, certainty: NAMED_ENTITY_CERTAINTY, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn INamedEntityCollector_Add(self: *const T, beginSpan: u32, endSpan: u32, beginActual: u32, endActual: u32, pType: ?*IEntity, pszValue: ?[*:0]const u16, certainty: NAMED_ENTITY_CERTAINTY) callconv(.Inline) HRESULT { return @ptrCast(*const INamedEntityCollector.VTable, self.vtable).Add(@ptrCast(*const INamedEntityCollector, self), beginSpan, endSpan, beginActual, endActual, pType, pszValue, certainty); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_ISchemaLocalizerSupport_Value = @import("../zig.zig").Guid.initString("ca3fdca2-bfbe-4eed-90d7-0caef0a1bda1"); pub const IID_ISchemaLocalizerSupport = &IID_ISchemaLocalizerSupport_Value; pub const ISchemaLocalizerSupport = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Localize: fn( self: *const ISchemaLocalizerSupport, pszGlobalString: ?[*:0]const u16, ppszLocalString: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) 
type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISchemaLocalizerSupport_Localize(self: *const T, pszGlobalString: ?[*:0]const u16, ppszLocalString: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const ISchemaLocalizerSupport.VTable, self.vtable).Localize(@ptrCast(*const ISchemaLocalizerSupport, self), pszGlobalString, ppszLocalString); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IQueryParserManager_Value = @import("../zig.zig").Guid.initString("a879e3c4-af77-44fb-8f37-ebd1487cf920"); pub const IID_IQueryParserManager = &IID_IQueryParserManager_Value; pub const IQueryParserManager = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, CreateLoadedParser: fn( self: *const IQueryParserManager, pszCatalog: ?[*:0]const u16, langidForKeywords: u16, riid: ?*const Guid, ppQueryParser: ?*?*c_void, ) callconv(@import("std").os.windows.WINAPI) HRESULT, InitializeOptions: fn( self: *const IQueryParserManager, fUnderstandNQS: BOOL, fAutoWildCard: BOOL, pQueryParser: ?*IQueryParser, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetOption: fn( self: *const IQueryParserManager, option: QUERY_PARSER_MANAGER_OPTION, pOptionValue: ?*const PROPVARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IQueryParserManager_CreateLoadedParser(self: *const T, pszCatalog: ?[*:0]const u16, langidForKeywords: u16, riid: ?*const Guid, ppQueryParser: ?*?*c_void) callconv(.Inline) HRESULT { return @ptrCast(*const IQueryParserManager.VTable, self.vtable).CreateLoadedParser(@ptrCast(*const IQueryParserManager, self), pszCatalog, langidForKeywords, riid, ppQueryParser); } 
// NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IQueryParserManager_InitializeOptions(self: *const T, fUnderstandNQS: BOOL, fAutoWildCard: BOOL, pQueryParser: ?*IQueryParser) callconv(.Inline) HRESULT { return @ptrCast(*const IQueryParserManager.VTable, self.vtable).InitializeOptions(@ptrCast(*const IQueryParserManager, self), fUnderstandNQS, fAutoWildCard, pQueryParser); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IQueryParserManager_SetOption(self: *const T, option: QUERY_PARSER_MANAGER_OPTION, pOptionValue: ?*const PROPVARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IQueryParserManager.VTable, self.vtable).SetOption(@ptrCast(*const IQueryParserManager, self), option, pOptionValue); } };} pub usingnamespace MethodMixin(@This()); }; pub const HITRANGE = extern struct { iPosition: u32, cLength: u32, }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IUrlAccessor_Value = @import("../zig.zig").Guid.initString("0b63e318-9ccc-11d0-bcdb-00805fccce04"); pub const IID_IUrlAccessor = &IID_IUrlAccessor_Value; pub const IUrlAccessor = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, AddRequestParameter: fn( self: *const IUrlAccessor, pSpec: ?*PROPSPEC, pVar: ?*PROPVARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDocFormat: fn( self: *const IUrlAccessor, wszDocFormat: [*:0]u16, dwSize: u32, pdwLength: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetCLSID: fn( self: *const IUrlAccessor, pClsid: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetHost: fn( self: *const IUrlAccessor, wszHost: [*:0]u16, dwSize: u32, pdwLength: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IsDirectory: fn( self: *const IUrlAccessor, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSize: fn( self: *const IUrlAccessor, pllSize: ?*u64, ) callconv(@import("std").os.windows.WINAPI) HRESULT, 
GetLastModified: fn( self: *const IUrlAccessor, pftLastModified: ?*FILETIME, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetFileName: fn( self: *const IUrlAccessor, wszFileName: [*:0]u16, dwSize: u32, pdwLength: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSecurityDescriptor: fn( self: *const IUrlAccessor, pSD: [*:0]u8, dwSize: u32, pdwLength: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetRedirectedURL: fn( self: *const IUrlAccessor, wszRedirectedURL: [*:0]u16, dwSize: u32, pdwLength: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSecurityProvider: fn( self: *const IUrlAccessor, pSPClsid: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, BindToStream: fn( self: *const IUrlAccessor, ppStream: ?*?*IStream, ) callconv(@import("std").os.windows.WINAPI) HRESULT, BindToFilter: fn( self: *const IUrlAccessor, ppFilter: ?*?*IFilter, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor_AddRequestParameter(self: *const T, pSpec: ?*PROPSPEC, pVar: ?*PROPVARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor.VTable, self.vtable).AddRequestParameter(@ptrCast(*const IUrlAccessor, self), pSpec, pVar); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor_GetDocFormat(self: *const T, wszDocFormat: [*:0]u16, dwSize: u32, pdwLength: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor.VTable, self.vtable).GetDocFormat(@ptrCast(*const IUrlAccessor, self), wszDocFormat, dwSize, pdwLength); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor_GetCLSID(self: *const T, pClsid: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor.VTable, 
self.vtable).GetCLSID(@ptrCast(*const IUrlAccessor, self), pClsid); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor_GetHost(self: *const T, wszHost: [*:0]u16, dwSize: u32, pdwLength: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor.VTable, self.vtable).GetHost(@ptrCast(*const IUrlAccessor, self), wszHost, dwSize, pdwLength); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor_IsDirectory(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor.VTable, self.vtable).IsDirectory(@ptrCast(*const IUrlAccessor, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor_GetSize(self: *const T, pllSize: ?*u64) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor.VTable, self.vtable).GetSize(@ptrCast(*const IUrlAccessor, self), pllSize); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor_GetLastModified(self: *const T, pftLastModified: ?*FILETIME) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor.VTable, self.vtable).GetLastModified(@ptrCast(*const IUrlAccessor, self), pftLastModified); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor_GetFileName(self: *const T, wszFileName: [*:0]u16, dwSize: u32, pdwLength: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor.VTable, self.vtable).GetFileName(@ptrCast(*const IUrlAccessor, self), wszFileName, dwSize, pdwLength); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor_GetSecurityDescriptor(self: *const T, pSD: [*:0]u8, dwSize: u32, pdwLength: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor.VTable, self.vtable).GetSecurityDescriptor(@ptrCast(*const IUrlAccessor, self), pSD, dwSize, pdwLength); } // NOTE: method is namespaced with 
interface name to avoid conflicts for now pub fn IUrlAccessor_GetRedirectedURL(self: *const T, wszRedirectedURL: [*:0]u16, dwSize: u32, pdwLength: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor.VTable, self.vtable).GetRedirectedURL(@ptrCast(*const IUrlAccessor, self), wszRedirectedURL, dwSize, pdwLength); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor_GetSecurityProvider(self: *const T, pSPClsid: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor.VTable, self.vtable).GetSecurityProvider(@ptrCast(*const IUrlAccessor, self), pSPClsid); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor_BindToStream(self: *const T, ppStream: ?*?*IStream) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor.VTable, self.vtable).BindToStream(@ptrCast(*const IUrlAccessor, self), ppStream); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor_BindToFilter(self: *const T, ppFilter: ?*?*IFilter) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor.VTable, self.vtable).BindToFilter(@ptrCast(*const IUrlAccessor, self), ppFilter); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IUrlAccessor2_Value = @import("../zig.zig").Guid.initString("c7310734-ac80-11d1-8df3-00c04fb6ef4f"); pub const IID_IUrlAccessor2 = &IID_IUrlAccessor2_Value; pub const IUrlAccessor2 = extern struct { pub const VTable = extern struct { base: IUrlAccessor.VTable, GetDisplayUrl: fn( self: *const IUrlAccessor2, wszDocUrl: [*:0]u16, dwSize: u32, pdwLength: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IsDocument: fn( self: *const IUrlAccessor2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetCodePage: fn( self: *const IUrlAccessor2, wszCodePage: [*:0]u16, dwSize: u32, pdwLength: ?*u32, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUrlAccessor.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor2_GetDisplayUrl(self: *const T, wszDocUrl: [*:0]u16, dwSize: u32, pdwLength: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor2.VTable, self.vtable).GetDisplayUrl(@ptrCast(*const IUrlAccessor2, self), wszDocUrl, dwSize, pdwLength); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor2_IsDocument(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor2.VTable, self.vtable).IsDocument(@ptrCast(*const IUrlAccessor2, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor2_GetCodePage(self: *const T, wszCodePage: [*:0]u16, dwSize: u32, pdwLength: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor2.VTable, self.vtable).GetCodePage(@ptrCast(*const IUrlAccessor2, self), wszCodePage, dwSize, pdwLength); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_IUrlAccessor3_Value = @import("../zig.zig").Guid.initString("6fbc7005-0455-4874-b8ff-7439450241a3"); pub const IID_IUrlAccessor3 = &IID_IUrlAccessor3_Value; pub const IUrlAccessor3 = extern struct { pub const VTable = extern struct { base: IUrlAccessor2.VTable, GetImpersonationSidBlobs: fn( self: *const IUrlAccessor3, pcwszURL: ?[*:0]const u16, pcSidCount: ?*u32, ppSidBlobs: ?*?*BLOB, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUrlAccessor2.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IUrlAccessor3_GetImpersonationSidBlobs(self: *const T, pcwszURL: 
?[*:0]const u16, pcSidCount: ?*u32, ppSidBlobs: ?*?*BLOB) callconv(.Inline) HRESULT { return @ptrCast(*const IUrlAccessor3.VTable, self.vtable).GetImpersonationSidBlobs(@ptrCast(*const IUrlAccessor3, self), pcwszURL, pcSidCount, ppSidBlobs); } };} pub usingnamespace MethodMixin(@This()); };
// TODO: this type is limited to platform 'windows6.0.6000'
const IID_IUrlAccessor4_Value = @import("../zig.zig").Guid.initString("5cc51041-c8d2-41d7-bca3-9e9e286297dc");
pub const IID_IUrlAccessor4 = &IID_IUrlAccessor4_Value;
/// COM interface extending IUrlAccessor3 (its VTable embeds IUrlAccessor3.VTable
/// as `base`, so all inherited slots precede the two added here). VTable field
/// order is ABI — do not reorder. Note the newer platform floor (windows6.0.6000)
/// versus the 5.1.2600 floor of its ancestors.
pub const IUrlAccessor4 = extern struct {
    pub const VTable = extern struct {
        base: IUrlAccessor3.VTable,
        // Slot: presumably reports via pfIndexContent whether the item's content
        // should be indexed — TODO confirm against MSDN.
        ShouldIndexItemContent: fn(
            self: *const IUrlAccessor4,
            pfIndexContent: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // Slot: per-property variant of the above, keyed by a PROPERTYKEY.
        ShouldIndexProperty: fn(
            self: *const IUrlAccessor4,
            key: ?*const PROPERTYKEY,
            pfIndexProperty: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    /// Generated wrappers; chains IUrlAccessor3's mixin so callers get the whole
    /// inherited method surface on T.
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUrlAccessor3.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IUrlAccessor4_ShouldIndexItemContent(self: *const T, pfIndexContent: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IUrlAccessor4.VTable, self.vtable).ShouldIndexItemContent(@ptrCast(*const IUrlAccessor4, self), pfIndexContent);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IUrlAccessor4_ShouldIndexProperty(self: *const T, key: ?*const PROPERTYKEY, pfIndexProperty: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const IUrlAccessor4.VTable, self.vtable).ShouldIndexProperty(@ptrCast(*const IUrlAccessor4, self), key, pfIndexProperty);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IOpLockStatus_Value = @import("../zig.zig").Guid.initString("c731065d-ac80-11d1-8df3-00c04fb6ef4f");
pub const IID_IOpLockStatus =
&IID_IOpLockStatus_Value; pub const IOpLockStatus = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, IsOplockValid: fn( self: *const IOpLockStatus, pfIsOplockValid: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IsOplockBroken: fn( self: *const IOpLockStatus, pfIsOplockBroken: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetOplockEventHandle: fn( self: *const IOpLockStatus, phOplockEv: ?*?HANDLE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IOpLockStatus_IsOplockValid(self: *const T, pfIsOplockValid: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IOpLockStatus.VTable, self.vtable).IsOplockValid(@ptrCast(*const IOpLockStatus, self), pfIsOplockValid); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IOpLockStatus_IsOplockBroken(self: *const T, pfIsOplockBroken: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const IOpLockStatus.VTable, self.vtable).IsOplockBroken(@ptrCast(*const IOpLockStatus, self), pfIsOplockBroken); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IOpLockStatus_GetOplockEventHandle(self: *const T, phOplockEv: ?*?HANDLE) callconv(.Inline) HRESULT { return @ptrCast(*const IOpLockStatus.VTable, self.vtable).GetOplockEventHandle(@ptrCast(*const IOpLockStatus, self), phOplockEv); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_ISearchProtocolThreadContext_Value = @import("../zig.zig").Guid.initString("c73106e1-ac80-11d1-8df3-00c04fb6ef4f"); pub const IID_ISearchProtocolThreadContext = &IID_ISearchProtocolThreadContext_Value; pub const ISearchProtocolThreadContext = extern struct { pub const VTable = extern struct { 
// Vtable body of ISearchProtocolThreadContext (declaration opened on the
// preceding line). Slot order is ABI — do not reorder.
base: IUnknown.VTable,
// Slot: no arguments beyond `self`; presumably a per-thread setup hook for the
// protocol host — TODO confirm against MSDN.
ThreadInit: fn(
    self: *const ISearchProtocolThreadContext,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// Slot: counterpart teardown hook, also argumentless.
ThreadShutdown: fn(
    self: *const ISearchProtocolThreadContext,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
// Slot: takes elapsed-time-in-ms since the last call. (The parameter name's
// "Elasped" typo is faithful to the Windows metadata; keep it for ABI-doc parity.)
ThreadIdle: fn(
    self: *const ISearchProtocolThreadContext,
    dwTimeElaspedSinceLastCallInMS: u32,
) callconv(@import("std").os.windows.WINAPI) HRESULT,
};
vtable: *const VTable,
/// Generated convenience wrappers dispatching through `vtable`.
pub fn MethodMixin(comptime T: type) type { return struct {
    pub usingnamespace IUnknown.MethodMixin(T);
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn ISearchProtocolThreadContext_ThreadInit(self: *const T) callconv(.Inline) HRESULT {
        return @ptrCast(*const ISearchProtocolThreadContext.VTable, self.vtable).ThreadInit(@ptrCast(*const ISearchProtocolThreadContext, self));
    }
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn ISearchProtocolThreadContext_ThreadShutdown(self: *const T) callconv(.Inline) HRESULT {
        return @ptrCast(*const ISearchProtocolThreadContext.VTable, self.vtable).ThreadShutdown(@ptrCast(*const ISearchProtocolThreadContext, self));
    }
    // NOTE: method is namespaced with interface name to avoid conflicts for now
    pub fn ISearchProtocolThreadContext_ThreadIdle(self: *const T, dwTimeElaspedSinceLastCallInMS: u32) callconv(.Inline) HRESULT {
        return @ptrCast(*const ISearchProtocolThreadContext.VTable, self.vtable).ThreadIdle(@ptrCast(*const ISearchProtocolThreadContext, self), dwTimeElaspedSinceLastCallInMS);
    }
};}
pub usingnamespace MethodMixin(@This());
};
/// Timeout configuration consumed by ISearchProtocol.Init (see below).
/// extern struct: field order/layout matches the native TIMEOUT_INFO.
/// dwSize follows the Win32 convention of a caller-filled struct-size field
/// — presumably sizeof(TIMEOUT_INFO); verify against MSDN.
pub const TIMEOUT_INFO = extern struct {
    dwSize: u32,
    dwConnectTimeout: u32,
    dwDataTimeout: u32,
};
/// Proxy-usage selector stored in PROXY_INFO.paUseProxy. i32-backed for ABI.
pub const PROXY_ACCESS = enum(i32) {
    PRECONFIG = 0,
    DIRECT = 1,
    PROXY = 2,
};
// C-style aliases for the enum members, kept for API parity with the Windows headers.
pub const PROXY_ACCESS_PRECONFIG = PROXY_ACCESS.PRECONFIG;
pub const PROXY_ACCESS_DIRECT = PROXY_ACCESS.DIRECT;
pub const PROXY_ACCESS_PROXY = PROXY_ACCESS.PROXY;
pub const PROXY_INFO = extern struct { dwSize: u32, pcwszUserAgent:
?[*:0]const u16, paUseProxy: PROXY_ACCESS, fLocalBypass: BOOL, dwPortNumber: u32, pcwszProxyName: ?[*:0]const u16, pcwszBypassList: ?[*:0]const u16, }; pub const AUTH_TYPE = enum(i32) { ANONYMOUS = 0, NTLM = 1, BASIC = 2, }; pub const eAUTH_TYPE_ANONYMOUS = AUTH_TYPE.ANONYMOUS; pub const eAUTH_TYPE_NTLM = AUTH_TYPE.NTLM; pub const eAUTH_TYPE_BASIC = AUTH_TYPE.BASIC; pub const AUTHENTICATION_INFO = extern struct { dwSize: u32, atAuthenticationType: AUTH_TYPE, pcwszUser: ?[*:0]const u16, pcwszPassword: ?[*:0]const u16, }; pub const INCREMENTAL_ACCESS_INFO = extern struct { dwSize: u32, ftLastModifiedTime: FILETIME, }; pub const ITEM_INFO = extern struct { dwSize: u32, pcwszFromEMail: ?[*:0]const u16, pcwszApplicationName: ?[*:0]const u16, pcwszCatalogName: ?[*:0]const u16, pcwszContentClass: ?[*:0]const u16, }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_ISearchProtocol_Value = @import("../zig.zig").Guid.initString("c73106ba-ac80-11d1-8df3-00c04fb6ef4f"); pub const IID_ISearchProtocol = &IID_ISearchProtocol_Value; pub const ISearchProtocol = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Init: fn( self: *const ISearchProtocol, pTimeoutInfo: ?*TIMEOUT_INFO, pProtocolHandlerSite: ?*IProtocolHandlerSite, pProxyInfo: ?*PROXY_INFO, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateAccessor: fn( self: *const ISearchProtocol, pcwszURL: ?[*:0]const u16, pAuthenticationInfo: ?*AUTHENTICATION_INFO, pIncrementalAccessInfo: ?*INCREMENTAL_ACCESS_INFO, pItemInfo: ?*ITEM_INFO, ppAccessor: ?*?*IUrlAccessor, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CloseAccessor: fn( self: *const ISearchProtocol, pAccessor: ?*IUrlAccessor, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ShutDown: fn( self: *const ISearchProtocol, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: 
method is namespaced with interface name to avoid conflicts for now pub fn ISearchProtocol_Init(self: *const T, pTimeoutInfo: ?*TIMEOUT_INFO, pProtocolHandlerSite: ?*IProtocolHandlerSite, pProxyInfo: ?*PROXY_INFO) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchProtocol.VTable, self.vtable).Init(@ptrCast(*const ISearchProtocol, self), pTimeoutInfo, pProtocolHandlerSite, pProxyInfo); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchProtocol_CreateAccessor(self: *const T, pcwszURL: ?[*:0]const u16, pAuthenticationInfo: ?*AUTHENTICATION_INFO, pIncrementalAccessInfo: ?*INCREMENTAL_ACCESS_INFO, pItemInfo: ?*ITEM_INFO, ppAccessor: ?*?*IUrlAccessor) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchProtocol.VTable, self.vtable).CreateAccessor(@ptrCast(*const ISearchProtocol, self), pcwszURL, pAuthenticationInfo, pIncrementalAccessInfo, pItemInfo, ppAccessor); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchProtocol_CloseAccessor(self: *const T, pAccessor: ?*IUrlAccessor) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchProtocol.VTable, self.vtable).CloseAccessor(@ptrCast(*const ISearchProtocol, self), pAccessor); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchProtocol_ShutDown(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchProtocol.VTable, self.vtable).ShutDown(@ptrCast(*const ISearchProtocol, self)); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_ISearchProtocol2_Value = @import("../zig.zig").Guid.initString("7789f0b2-b5b2-4722-8b65-5dbd150697a9"); pub const IID_ISearchProtocol2 = &IID_ISearchProtocol2_Value; pub const ISearchProtocol2 = extern struct { pub const VTable = extern struct { base: ISearchProtocol.VTable, CreateAccessorEx: fn( self: *const ISearchProtocol2, pcwszURL: ?[*:0]const u16, 
        pAuthenticationInfo: ?*AUTHENTICATION_INFO,
        pIncrementalAccessInfo: ?*INCREMENTAL_ACCESS_INFO,
        pItemInfo: ?*ITEM_INFO,
        pUserData: ?*const BLOB,
        ppAccessor: ?*?*IUrlAccessor,
    ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ISearchProtocol.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchProtocol2_CreateAccessorEx(self: *const T, pcwszURL: ?[*:0]const u16, pAuthenticationInfo: ?*AUTHENTICATION_INFO, pIncrementalAccessInfo: ?*INCREMENTAL_ACCESS_INFO, pItemInfo: ?*ITEM_INFO, pUserData: ?*const BLOB, ppAccessor: ?*?*IUrlAccessor) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchProtocol2.VTable, self.vtable).CreateAccessorEx(@ptrCast(*const ISearchProtocol2, self), pcwszURL, pAuthenticationInfo, pIncrementalAccessInfo, pItemInfo, pUserData, ppAccessor);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IProtocolHandlerSite: generated COM-style interface binding. The extern
// VTable mirrors the native vtable, so entry order and signatures must not
// be edited by hand. The MethodMixin wrappers just forward through `vtable`.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IProtocolHandlerSite_Value = @import("../zig.zig").Guid.initString("0b63e385-9ccc-11d0-bcdb-00805fccce04");
pub const IID_IProtocolHandlerSite = &IID_IProtocolHandlerSite_Value;
pub const IProtocolHandlerSite = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        GetFilter: fn(
            self: *const IProtocolHandlerSite,
            pclsidObj: ?*Guid,
            pcwszContentType: ?[*:0]const u16,
            pcwszExtension: ?[*:0]const u16,
            ppFilter: ?*?*IFilter,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IProtocolHandlerSite_GetFilter(self: *const T, pclsidObj: ?*Guid, pcwszContentType: ?[*:0]const u16, pcwszExtension: ?[*:0]const u16, ppFilter: ?*?*IFilter) callconv(.Inline) HRESULT {
            return @ptrCast(*const IProtocolHandlerSite.VTable, self.vtable).GetFilter(@ptrCast(*const IProtocolHandlerSite, self), pclsidObj, pcwszContentType, pcwszExtension, ppFilter);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// ISearchRoot: generated binding exposing COM property accessors as
// put_*/get_* vtable entry pairs. Vtable entry order is ABI-significant;
// do not reorder or remove entries.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ISearchRoot_Value = @import("../zig.zig").Guid.initString("04c18ccf-1f57-4cbd-88cc-3900f5195ce3");
pub const IID_ISearchRoot = &IID_ISearchRoot_Value;
pub const ISearchRoot = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Schedule: fn( self: *const ISearchRoot, pszTaskArg: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Schedule: fn( self: *const ISearchRoot, ppszTaskArg: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_RootURL: fn( self: *const ISearchRoot, pszURL: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_RootURL: fn( self: *const ISearchRoot, ppszURL: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_IsHierarchical: fn( self: *const ISearchRoot, fIsHierarchical: BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_IsHierarchical: fn( self: *const ISearchRoot, pfIsHierarchical: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_ProvidesNotifications: fn( self: *const ISearchRoot, fProvidesNotifications: BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ProvidesNotifications: fn( self: *const ISearchRoot, pfProvidesNotifications: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_UseNotificationsOnly: fn( self: *const ISearchRoot, fUseNotificationsOnly: BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_UseNotificationsOnly: fn( self: *const ISearchRoot, pfUseNotificationsOnly: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_EnumerationDepth: fn( self: *const ISearchRoot, dwDepth: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_EnumerationDepth: fn( self: *const ISearchRoot, pdwDepth: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_HostDepth: fn( self: *const ISearchRoot, dwDepth: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_HostDepth: fn( self: *const ISearchRoot, pdwDepth: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_FollowDirectories: fn( self: *const ISearchRoot, fFollowDirectories: BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_FollowDirectories: fn( self: *const ISearchRoot, pfFollowDirectories: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_AuthenticationType: fn( self: *const ISearchRoot, authType: AUTH_TYPE, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_AuthenticationType: fn( self: *const ISearchRoot, pAuthType: ?*AUTH_TYPE, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_User: fn( self: *const ISearchRoot, pszUser: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_User: fn( self: *const ISearchRoot, ppszUser: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_Password: fn( self: *const ISearchRoot, pszPassword: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Password: fn( self: *const ISearchRoot, ppszPassword: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_put_Schedule(self: *const T, pszTaskArg: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).put_Schedule(@ptrCast(*const ISearchRoot, self), pszTaskArg); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_get_Schedule(self: *const T, ppszTaskArg: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).get_Schedule(@ptrCast(*const ISearchRoot, self), ppszTaskArg); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_put_RootURL(self: *const T, pszURL: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).put_RootURL(@ptrCast(*const ISearchRoot, self), pszURL); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_get_RootURL(self: *const T, ppszURL: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).get_RootURL(@ptrCast(*const ISearchRoot, self), ppszURL); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_put_IsHierarchical(self: *const T, fIsHierarchical: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).put_IsHierarchical(@ptrCast(*const ISearchRoot, self), fIsHierarchical); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_get_IsHierarchical(self: *const T, pfIsHierarchical: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).get_IsHierarchical(@ptrCast(*const ISearchRoot, self), pfIsHierarchical); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_put_ProvidesNotifications(self: *const T, fProvidesNotifications: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).put_ProvidesNotifications(@ptrCast(*const ISearchRoot, self), fProvidesNotifications); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_get_ProvidesNotifications(self: *const T, pfProvidesNotifications: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).get_ProvidesNotifications(@ptrCast(*const ISearchRoot, self), pfProvidesNotifications); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_put_UseNotificationsOnly(self: *const T, fUseNotificationsOnly: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).put_UseNotificationsOnly(@ptrCast(*const ISearchRoot, self), fUseNotificationsOnly); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_get_UseNotificationsOnly(self: *const T, pfUseNotificationsOnly: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).get_UseNotificationsOnly(@ptrCast(*const ISearchRoot, self), pfUseNotificationsOnly); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_put_EnumerationDepth(self: *const T, dwDepth: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).put_EnumerationDepth(@ptrCast(*const ISearchRoot, self), dwDepth); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_get_EnumerationDepth(self: *const T, pdwDepth: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).get_EnumerationDepth(@ptrCast(*const ISearchRoot, self), pdwDepth); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_put_HostDepth(self: *const T, dwDepth: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).put_HostDepth(@ptrCast(*const ISearchRoot, self), dwDepth); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_get_HostDepth(self: *const T, pdwDepth: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).get_HostDepth(@ptrCast(*const ISearchRoot, self), pdwDepth); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_put_FollowDirectories(self: *const T, fFollowDirectories: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).put_FollowDirectories(@ptrCast(*const ISearchRoot, self), fFollowDirectories); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_get_FollowDirectories(self: *const T, pfFollowDirectories: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).get_FollowDirectories(@ptrCast(*const ISearchRoot, self), pfFollowDirectories); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_put_AuthenticationType(self: *const T, authType: AUTH_TYPE) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).put_AuthenticationType(@ptrCast(*const ISearchRoot, self), authType); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_get_AuthenticationType(self: *const T, pAuthType: ?*AUTH_TYPE) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).get_AuthenticationType(@ptrCast(*const ISearchRoot, self), pAuthType); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_put_User(self: *const T, pszUser: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).put_User(@ptrCast(*const ISearchRoot, self), pszUser); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_get_User(self: *const T, ppszUser: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).get_User(@ptrCast(*const ISearchRoot, self), ppszUser); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_put_Password(self: *const T, pszPassword: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).put_Password(@ptrCast(*const ISearchRoot, self), pszPassword); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchRoot_get_Password(self: *const T, ppszPassword: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchRoot.VTable, self.vtable).get_Password(@ptrCast(*const ISearchRoot, self), ppszPassword); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IEnumSearchRoots: generated COM enumerator binding over ISearchRoot
// (standard Next/Skip/Reset/Clone shape).
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IEnumSearchRoots_Value = @import("../zig.zig").Guid.initString("ab310581-ac80-11d1-8df3-00c04fb6ef52");
pub const IID_IEnumSearchRoots = &IID_IEnumSearchRoots_Value;
pub const IEnumSearchRoots = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Next: fn(
            self: *const IEnumSearchRoots,
            celt: u32,
            rgelt: [*]?*ISearchRoot,
            pceltFetched: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Skip: fn( self: *const IEnumSearchRoots, celt: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Reset: fn( self: *const IEnumSearchRoots, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Clone: fn( self: *const IEnumSearchRoots, ppenum: ?*?*IEnumSearchRoots, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const
    VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSearchRoots_Next(self: *const T, celt: u32, rgelt: [*]?*ISearchRoot, pceltFetched: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSearchRoots.VTable, self.vtable).Next(@ptrCast(*const IEnumSearchRoots, self), celt, rgelt, pceltFetched); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSearchRoots_Skip(self: *const T, celt: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSearchRoots.VTable, self.vtable).Skip(@ptrCast(*const IEnumSearchRoots, self), celt); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSearchRoots_Reset(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSearchRoots.VTable, self.vtable).Reset(@ptrCast(*const IEnumSearchRoots, self)); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSearchRoots_Clone(self: *const T, ppenum: ?*?*IEnumSearchRoots) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSearchRoots.VTable, self.vtable).Clone(@ptrCast(*const IEnumSearchRoots, self), ppenum); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// FOLLOW_FLAGS: bit values used in the fFollowFlags parameters below; the
// FF_* aliases mirror the native constant names.
pub const FOLLOW_FLAGS = enum(i32) {
    INDEXCOMPLEXURLS = 1,
    SUPPRESSINDEXING = 2,
};
pub const FF_INDEXCOMPLEXURLS = FOLLOW_FLAGS.INDEXCOMPLEXURLS;
pub const FF_SUPPRESSINDEXING = FOLLOW_FLAGS.SUPPRESSINDEXING;

// ISearchScopeRule: generated read-only property interface (get_* entries
// only). Vtable order is ABI-significant.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ISearchScopeRule_Value = @import("../zig.zig").Guid.initString("ab310581-ac80-11d1-8df3-00c04fb6ef53");
pub const IID_ISearchScopeRule = &IID_ISearchScopeRule_Value;
pub const ISearchScopeRule = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_PatternOrURL: fn( self: *const ISearchScopeRule, ppszPatternOrURL: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_IsIncluded: fn( self: *const ISearchScopeRule, pfIsIncluded: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_IsDefault: fn( self: *const ISearchScopeRule, pfIsDefault: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_FollowFlags: fn( self: *const ISearchScopeRule, pFollowFlags: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchScopeRule_get_PatternOrURL(self: *const T, ppszPatternOrURL: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchScopeRule.VTable, self.vtable).get_PatternOrURL(@ptrCast(*const ISearchScopeRule, self), ppszPatternOrURL); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchScopeRule_get_IsIncluded(self: *const T, pfIsIncluded: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchScopeRule.VTable, self.vtable).get_IsIncluded(@ptrCast(*const ISearchScopeRule, self), pfIsIncluded); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchScopeRule_get_IsDefault(self: *const T, pfIsDefault: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchScopeRule.VTable, self.vtable).get_IsDefault(@ptrCast(*const ISearchScopeRule, self), pfIsDefault); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchScopeRule_get_FollowFlags(self: *const T, pFollowFlags: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchScopeRule.VTable, self.vtable).get_FollowFlags(@ptrCast(*const ISearchScopeRule, self), pFollowFlags); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// IEnumSearchScopeRules: generated COM enumerator binding over
// ISearchScopeRule (standard Next/Skip/Reset/Clone shape).
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_IEnumSearchScopeRules_Value = @import("../zig.zig").Guid.initString("ab310581-ac80-11d1-8df3-00c04fb6ef54");
pub const IID_IEnumSearchScopeRules = &IID_IEnumSearchScopeRules_Value;
pub const IEnumSearchScopeRules = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        Next: fn(
            self: *const IEnumSearchScopeRules,
            celt: u32,
            pprgelt: [*]?*ISearchScopeRule,
            pceltFetched: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Skip: fn( self: *const IEnumSearchScopeRules, celt: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Reset: fn( self: *const IEnumSearchScopeRules, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Clone: fn( self: *const IEnumSearchScopeRules, ppenum: ?*?*IEnumSearchScopeRules, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSearchScopeRules_Next(self: *const T, celt: u32, pprgelt: [*]?*ISearchScopeRule, pceltFetched: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSearchScopeRules.VTable, self.vtable).Next(@ptrCast(*const IEnumSearchScopeRules, self), celt, pprgelt, pceltFetched); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSearchScopeRules_Skip(self: *const T, celt: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSearchScopeRules.VTable, self.vtable).Skip(@ptrCast(*const IEnumSearchScopeRules, self), celt); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSearchScopeRules_Reset(self: *const T)
        callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSearchScopeRules.VTable, self.vtable).Reset(@ptrCast(*const IEnumSearchScopeRules, self)); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn IEnumSearchScopeRules_Clone(self: *const T, ppenum: ?*?*IEnumSearchScopeRules) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSearchScopeRules.VTable, self.vtable).Clone(@ptrCast(*const IEnumSearchScopeRules, self), ppenum); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// CLUSION_REASON: reason codes reported by IncludedInCrawlScopeEx below.
pub const CLUSION_REASON = enum(i32) {
    UNKNOWNSCOPE = 0,
    DEFAULT = 1,
    USER = 2,
    GROUPPOLICY = 3,
};
pub const CLUSIONREASON_UNKNOWNSCOPE = CLUSION_REASON.UNKNOWNSCOPE;
pub const CLUSIONREASON_DEFAULT = CLUSION_REASON.DEFAULT;
pub const CLUSIONREASON_USER = CLUSION_REASON.USER;
pub const CLUSIONREASON_GROUPPOLICY = CLUSION_REASON.GROUPPOLICY;

// ISearchCrawlScopeManager: generated binding; vtable order is ABI-significant.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ISearchCrawlScopeManager_Value = @import("../zig.zig").Guid.initString("ab310581-ac80-11d1-8df3-00c04fb6ef55");
pub const IID_ISearchCrawlScopeManager = &IID_ISearchCrawlScopeManager_Value;
pub const ISearchCrawlScopeManager = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        AddDefaultScopeRule: fn(
            self: *const ISearchCrawlScopeManager,
            pszURL: ?[*:0]const u16,
            fInclude: BOOL,
            fFollowFlags: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddRoot: fn( self: *const ISearchCrawlScopeManager, pSearchRoot: ?*ISearchRoot, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RemoveRoot: fn( self: *const ISearchCrawlScopeManager, pszURL: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumerateRoots: fn( self: *const ISearchCrawlScopeManager, ppSearchRoots: ?*?*IEnumSearchRoots, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddHierarchicalScope: fn(
            self: *const ISearchCrawlScopeManager,
            pszURL: ?[*:0]const u16,
            fInclude: BOOL,
            fDefault: BOOL,
            fOverrideChildren: BOOL,
        )
        callconv(@import("std").os.windows.WINAPI) HRESULT,
        AddUserScopeRule: fn(
            self: *const ISearchCrawlScopeManager,
            pszURL: ?[*:0]const u16,
            fInclude: BOOL,
            fOverrideChildren: BOOL,
            fFollowFlags: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RemoveScopeRule: fn( self: *const ISearchCrawlScopeManager, pszRule: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumerateScopeRules: fn( self: *const ISearchCrawlScopeManager, ppSearchScopeRules: ?*?*IEnumSearchScopeRules, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        HasParentScopeRule: fn( self: *const ISearchCrawlScopeManager, pszURL: ?[*:0]const u16, pfHasParentRule: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        HasChildScopeRule: fn( self: *const ISearchCrawlScopeManager, pszURL: ?[*:0]const u16, pfHasChildRule: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        IncludedInCrawlScope: fn( self: *const ISearchCrawlScopeManager, pszURL: ?[*:0]const u16, pfIsIncluded: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        IncludedInCrawlScopeEx: fn( self: *const ISearchCrawlScopeManager, pszURL: ?[*:0]const u16, pfIsIncluded: ?*BOOL, pReason: ?*CLUSION_REASON, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RevertToDefaultScopes: fn( self: *const ISearchCrawlScopeManager, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SaveAll: fn( self: *const ISearchCrawlScopeManager, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetParentScopeVersionId: fn( self: *const ISearchCrawlScopeManager, pszURL: ?[*:0]const u16, plScopeId: ?*i32, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RemoveDefaultScopeRule: fn( self: *const ISearchCrawlScopeManager, pszURL: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_AddDefaultScopeRule(self: *const T, pszURL: ?[*:0]const u16, fInclude: BOOL, fFollowFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).AddDefaultScopeRule(@ptrCast(*const ISearchCrawlScopeManager, self), pszURL, fInclude, fFollowFlags); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_AddRoot(self: *const T, pSearchRoot: ?*ISearchRoot) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).AddRoot(@ptrCast(*const ISearchCrawlScopeManager, self), pSearchRoot); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_RemoveRoot(self: *const T, pszURL: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).RemoveRoot(@ptrCast(*const ISearchCrawlScopeManager, self), pszURL); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_EnumerateRoots(self: *const T, ppSearchRoots: ?*?*IEnumSearchRoots) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).EnumerateRoots(@ptrCast(*const ISearchCrawlScopeManager, self), ppSearchRoots); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_AddHierarchicalScope(self: *const T, pszURL: ?[*:0]const u16, fInclude: BOOL, fDefault: BOOL, fOverrideChildren: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).AddHierarchicalScope(@ptrCast(*const ISearchCrawlScopeManager, self), pszURL, fInclude, fDefault, fOverrideChildren); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_AddUserScopeRule(self: *const T, pszURL: ?[*:0]const u16, fInclude: BOOL, fOverrideChildren: BOOL, fFollowFlags: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).AddUserScopeRule(@ptrCast(*const ISearchCrawlScopeManager, self), pszURL, fInclude, fOverrideChildren, fFollowFlags); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_RemoveScopeRule(self: *const T, pszRule: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).RemoveScopeRule(@ptrCast(*const ISearchCrawlScopeManager, self), pszRule); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_EnumerateScopeRules(self: *const T, ppSearchScopeRules: ?*?*IEnumSearchScopeRules) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).EnumerateScopeRules(@ptrCast(*const ISearchCrawlScopeManager, self), ppSearchScopeRules); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_HasParentScopeRule(self: *const T, pszURL: ?[*:0]const u16, pfHasParentRule: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).HasParentScopeRule(@ptrCast(*const ISearchCrawlScopeManager, self), pszURL, pfHasParentRule); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_HasChildScopeRule(self: *const T, pszURL: ?[*:0]const u16, pfHasChildRule: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).HasChildScopeRule(@ptrCast(*const ISearchCrawlScopeManager, self), pszURL, pfHasChildRule); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_IncludedInCrawlScope(self: *const T, pszURL: ?[*:0]const u16, pfIsIncluded: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).IncludedInCrawlScope(@ptrCast(*const ISearchCrawlScopeManager, self), pszURL, pfIsIncluded); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_IncludedInCrawlScopeEx(self: *const T, pszURL: ?[*:0]const u16, pfIsIncluded: ?*BOOL, pReason: ?*CLUSION_REASON) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).IncludedInCrawlScopeEx(@ptrCast(*const ISearchCrawlScopeManager, self), pszURL, pfIsIncluded, pReason); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_RevertToDefaultScopes(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).RevertToDefaultScopes(@ptrCast(*const ISearchCrawlScopeManager, self)); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_SaveAll(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).SaveAll(@ptrCast(*const ISearchCrawlScopeManager, self)); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_GetParentScopeVersionId(self: *const T, pszURL: ?[*:0]const u16, plScopeId: ?*i32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).GetParentScopeVersionId(@ptrCast(*const ISearchCrawlScopeManager, self), pszURL, plScopeId); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager_RemoveDefaultScopeRule(self: *const T, pszURL: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager.VTable, self.vtable).RemoveDefaultScopeRule(@ptrCast(*const ISearchCrawlScopeManager, self), pszURL); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// ISearchCrawlScopeManager2: extends ISearchCrawlScopeManager (its base
// vtable comes first, per COM inheritance).
// TODO: this type is limited to platform 'windows6.1'
const IID_ISearchCrawlScopeManager2_Value = @import("../zig.zig").Guid.initString("6292f7ad-4e19-4717-a534-8fc22bcd5ccd");
pub const IID_ISearchCrawlScopeManager2 = &IID_ISearchCrawlScopeManager2_Value;
pub const ISearchCrawlScopeManager2 = extern struct {
    pub const VTable = extern struct {
        base: ISearchCrawlScopeManager.VTable,
        GetVersion: fn(
            self: *const ISearchCrawlScopeManager2,
            plVersion: ?*?*i32,
            phFileMapping: ?*?HANDLE,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ISearchCrawlScopeManager.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCrawlScopeManager2_GetVersion(self: *const T, plVersion: ?*?*i32, phFileMapping: ?*?HANDLE) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchCrawlScopeManager2.VTable, self.vtable).GetVersion(@ptrCast(*const ISearchCrawlScopeManager2, self), plVersion, phFileMapping); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// SEARCH_KIND_OF_CHANGE: change-kind values carried in SEARCH_ITEM_CHANGE /
// SEARCH_ITEM_PERSISTENT_CHANGE; the high-valued SEMANTICS_* members look
// like flag bits combined with the low values — TODO confirm against the
// native searchapi.h definition.
pub const SEARCH_KIND_OF_CHANGE = enum(i32) {
    ADD = 0,
    DELETE = 1,
    MODIFY = 2,
    MOVE_RENAME = 3,
    SEMANTICS_DIRECTORY = 262144,
    SEMANTICS_SHALLOW = 524288,
    SEMANTICS_UPDATE_SECURITY = 4194304,
};
pub const SEARCH_CHANGE_ADD = SEARCH_KIND_OF_CHANGE.ADD;
pub const SEARCH_CHANGE_DELETE = SEARCH_KIND_OF_CHANGE.DELETE;
pub const SEARCH_CHANGE_MODIFY = SEARCH_KIND_OF_CHANGE.MODIFY;
pub const SEARCH_CHANGE_MOVE_RENAME = SEARCH_KIND_OF_CHANGE.MOVE_RENAME;
pub const SEARCH_CHANGE_SEMANTICS_DIRECTORY = SEARCH_KIND_OF_CHANGE.SEMANTICS_DIRECTORY;
pub const SEARCH_CHANGE_SEMANTICS_SHALLOW = SEARCH_KIND_OF_CHANGE.SEMANTICS_SHALLOW;
pub const SEARCH_CHANGE_SEMANTICS_UPDATE_SECURITY = SEARCH_KIND_OF_CHANGE.SEMANTICS_UPDATE_SECURITY;

// SEARCH_NOTIFICATION_PRIORITY: priority carried in the change structs below.
pub const SEARCH_NOTIFICATION_PRIORITY = enum(i32) {
    NORMAL_PRIORITY = 0,
    HIGH_PRIORITY = 1,
};
pub const SEARCH_NORMAL_PRIORITY = SEARCH_NOTIFICATION_PRIORITY.NORMAL_PRIORITY;
pub const
SEARCH_HIGH_PRIORITY = SEARCH_NOTIFICATION_PRIORITY.HIGH_PRIORITY;

// SEARCH_ITEM_CHANGE: change record passed to the change-sink interfaces
// below; extern layout must match the native struct.
pub const SEARCH_ITEM_CHANGE = extern struct {
    Change: SEARCH_KIND_OF_CHANGE,
    Priority: SEARCH_NOTIFICATION_PRIORITY,
    pUserData: ?*BLOB,
    lpwszURL: ?PWSTR,
    lpwszOldURL: ?PWSTR,
};

// ISearchItemsChangedSink: generated COM-style binding; vtable entry order
// is ABI-significant.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ISearchItemsChangedSink_Value = @import("../zig.zig").Guid.initString("ab310581-ac80-11d1-8df3-00c04fb6ef58");
pub const IID_ISearchItemsChangedSink = &IID_ISearchItemsChangedSink_Value;
pub const ISearchItemsChangedSink = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        StartedMonitoringScope: fn( self: *const ISearchItemsChangedSink, pszURL: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        StoppedMonitoringScope: fn( self: *const ISearchItemsChangedSink, pszURL: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnItemsChanged: fn(
            self: *const ISearchItemsChangedSink,
            dwNumberOfChanges: u32,
            rgDataChangeEntries: [*]SEARCH_ITEM_CHANGE,
            rgdwDocIds: [*]u32,
            rghrCompletionCodes: [*]HRESULT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchItemsChangedSink_StartedMonitoringScope(self: *const T, pszURL: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchItemsChangedSink.VTable, self.vtable).StartedMonitoringScope(@ptrCast(*const ISearchItemsChangedSink, self), pszURL); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchItemsChangedSink_StoppedMonitoringScope(self: *const T, pszURL: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchItemsChangedSink.VTable, self.vtable).StoppedMonitoringScope(@ptrCast(*const ISearchItemsChangedSink, self), pszURL); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchItemsChangedSink_OnItemsChanged(self: *const T, dwNumberOfChanges: u32, rgDataChangeEntries: [*]SEARCH_ITEM_CHANGE, rgdwDocIds: [*]u32, rghrCompletionCodes: [*]HRESULT) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchItemsChangedSink.VTable, self.vtable).OnItemsChanged(@ptrCast(*const ISearchItemsChangedSink, self), dwNumberOfChanges, rgDataChangeEntries, rgdwDocIds, rghrCompletionCodes); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// SEARCH_ITEM_PERSISTENT_CHANGE: persistent variant of the change record;
// note the field order differs from SEARCH_ITEM_CHANGE (Priority is last).
pub const SEARCH_ITEM_PERSISTENT_CHANGE = extern struct {
    Change: SEARCH_KIND_OF_CHANGE,
    URL: ?PWSTR,
    OldURL: ?PWSTR,
    Priority: SEARCH_NOTIFICATION_PRIORITY,
};

// ISearchPersistentItemsChangedSink: generated COM-style binding.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ISearchPersistentItemsChangedSink_Value = @import("../zig.zig").Guid.initString("a2ffdf9b-4758-4f84-b729-df81a1a0612f");
pub const IID_ISearchPersistentItemsChangedSink = &IID_ISearchPersistentItemsChangedSink_Value;
pub const ISearchPersistentItemsChangedSink = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        StartedMonitoringScope: fn( self: *const ISearchPersistentItemsChangedSink, pszURL: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        StoppedMonitoringScope: fn( self: *const ISearchPersistentItemsChangedSink, pszURL: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnItemsChanged: fn(
            self: *const ISearchPersistentItemsChangedSink,
            dwNumberOfChanges: u32,
            DataChangeEntries: [*]SEARCH_ITEM_PERSISTENT_CHANGE,
            hrCompletionCodes: [*]HRESULT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchPersistentItemsChangedSink_StartedMonitoringScope(self: *const T, pszURL: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchPersistentItemsChangedSink.VTable, self.vtable).StartedMonitoringScope(@ptrCast(*const ISearchPersistentItemsChangedSink, self), pszURL); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchPersistentItemsChangedSink_StoppedMonitoringScope(self: *const T, pszURL: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchPersistentItemsChangedSink.VTable, self.vtable).StoppedMonitoringScope(@ptrCast(*const ISearchPersistentItemsChangedSink, self), pszURL); }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchPersistentItemsChangedSink_OnItemsChanged(self: *const T, dwNumberOfChanges: u32, DataChangeEntries: [*]SEARCH_ITEM_PERSISTENT_CHANGE, hrCompletionCodes: [*]HRESULT) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchPersistentItemsChangedSink.VTable, self.vtable).OnItemsChanged(@ptrCast(*const ISearchPersistentItemsChangedSink, self), dwNumberOfChanges, DataChangeEntries, hrCompletionCodes); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// ISearchViewChangedSink: generated COM-style binding with a single OnChange
// vtable entry.
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ISearchViewChangedSink_Value = @import("../zig.zig").Guid.initString("ab310581-ac80-11d1-8df3-00c04fb6ef65");
pub const IID_ISearchViewChangedSink = &IID_ISearchViewChangedSink_Value;
pub const ISearchViewChangedSink = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        OnChange: fn(
            self: *const ISearchViewChangedSink,
            pdwDocID: ?*i32,
            pChange: ?*SEARCH_ITEM_CHANGE,
            pfInView: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchViewChangedSink_OnChange(self: *const T, pdwDocID: ?*i32, pChange: ?*SEARCH_ITEM_CHANGE, pfInView: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchViewChangedSink.VTable, self.vtable).OnChange(@ptrCast(*const ISearchViewChangedSink, self), pdwDocID, pChange, pfInView); }
    };}
    pub usingnamespace MethodMixin(@This());
};

// SEARCH_INDEXING_PHASE: indexing-phase values reported through
// ISearchNotifyInlineSite.OnItemIndexedStatusChange.
pub const SEARCH_INDEXING_PHASE = enum(i32) {
    GATHERER = 0,
    QUERYABLE = 1,
    PERSISTED = 2,
};
pub const SEARCH_INDEXING_PHASE_GATHERER = SEARCH_INDEXING_PHASE.GATHERER;
pub const SEARCH_INDEXING_PHASE_QUERYABLE = SEARCH_INDEXING_PHASE.QUERYABLE;
pub const SEARCH_INDEXING_PHASE_PERSISTED = SEARCH_INDEXING_PHASE.PERSISTED;

// SEARCH_ITEM_INDEXING_STATUS: per-document status entry; extern layout must
// match the native struct.
pub const SEARCH_ITEM_INDEXING_STATUS = extern struct {
    dwDocID: u32,
    hrIndexingStatus: HRESULT,
};

// ISearchNotifyInlineSite: generated COM-style binding (definition continues
// past this chunk).
// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ISearchNotifyInlineSite_Value = @import("../zig.zig").Guid.initString("b5702e61-e75c-4b64-82a1-6cb4f832fccf");
pub const IID_ISearchNotifyInlineSite = &IID_ISearchNotifyInlineSite_Value;
pub const ISearchNotifyInlineSite = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        OnItemIndexedStatusChange: fn(
            self: *const ISearchNotifyInlineSite,
            sipStatus: SEARCH_INDEXING_PHASE,
            dwNumEntries: u32,
            rgItemStatusEntries: [*]SEARCH_ITEM_INDEXING_STATUS,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        OnCatalogStatusChange: fn(
            self: *const ISearchNotifyInlineSite,
            guidCatalogResetSignature: ?*const Guid,
            guidCheckPointSignature: ?*const Guid,
            dwLastCheckPointNumber: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchNotifyInlineSite_OnItemIndexedStatusChange(self: *const T, sipStatus: SEARCH_INDEXING_PHASE, dwNumEntries: u32, rgItemStatusEntries: [*]SEARCH_ITEM_INDEXING_STATUS) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchNotifyInlineSite.VTable, self.vtable).OnItemIndexedStatusChange(@ptrCast(*const ISearchNotifyInlineSite, self), sipStatus,
dwNumEntries, rgItemStatusEntries); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchNotifyInlineSite_OnCatalogStatusChange(self: *const T, guidCatalogResetSignature: ?*const Guid, guidCheckPointSignature: ?*const Guid, dwLastCheckPointNumber: u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchNotifyInlineSite.VTable, self.vtable).OnCatalogStatusChange(@ptrCast(*const ISearchNotifyInlineSite, self), guidCatalogResetSignature, guidCheckPointSignature, dwLastCheckPointNumber); } };} pub usingnamespace MethodMixin(@This()); }; pub const CatalogStatus = enum(i32) { IDLE = 0, PAUSED = 1, RECOVERING = 2, FULL_CRAWL = 3, INCREMENTAL_CRAWL = 4, PROCESSING_NOTIFICATIONS = 5, SHUTTING_DOWN = 6, }; pub const CATALOG_STATUS_IDLE = CatalogStatus.IDLE; pub const CATALOG_STATUS_PAUSED = CatalogStatus.PAUSED; pub const CATALOG_STATUS_RECOVERING = CatalogStatus.RECOVERING; pub const CATALOG_STATUS_FULL_CRAWL = CatalogStatus.FULL_CRAWL; pub const CATALOG_STATUS_INCREMENTAL_CRAWL = CatalogStatus.INCREMENTAL_CRAWL; pub const CATALOG_STATUS_PROCESSING_NOTIFICATIONS = CatalogStatus.PROCESSING_NOTIFICATIONS; pub const CATALOG_STATUS_SHUTTING_DOWN = CatalogStatus.SHUTTING_DOWN; pub const CatalogPausedReason = enum(i32) { NONE = 0, HIGH_IO = 1, HIGH_CPU = 2, HIGH_NTF_RATE = 3, LOW_BATTERY = 4, LOW_MEMORY = 5, LOW_DISK = 6, DELAYED_RECOVERY = 7, USER_ACTIVE = 8, EXTERNAL = 9, UPGRADING = 10, }; pub const CATALOG_PAUSED_REASON_NONE = CatalogPausedReason.NONE; pub const CATALOG_PAUSED_REASON_HIGH_IO = CatalogPausedReason.HIGH_IO; pub const CATALOG_PAUSED_REASON_HIGH_CPU = CatalogPausedReason.HIGH_CPU; pub const CATALOG_PAUSED_REASON_HIGH_NTF_RATE = CatalogPausedReason.HIGH_NTF_RATE; pub const CATALOG_PAUSED_REASON_LOW_BATTERY = CatalogPausedReason.LOW_BATTERY; pub const CATALOG_PAUSED_REASON_LOW_MEMORY = CatalogPausedReason.LOW_MEMORY; pub const CATALOG_PAUSED_REASON_LOW_DISK = CatalogPausedReason.LOW_DISK; pub const 
CATALOG_PAUSED_REASON_DELAYED_RECOVERY = CatalogPausedReason.DELAYED_RECOVERY;
pub const CATALOG_PAUSED_REASON_USER_ACTIVE = CatalogPausedReason.USER_ACTIVE;
pub const CATALOG_PAUSED_REASON_EXTERNAL = CatalogPausedReason.EXTERNAL;
pub const CATALOG_PAUSED_REASON_UPGRADING = CatalogPausedReason.UPGRADING;

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ISearchCatalogManager_Value = @import("../zig.zig").Guid.initString("ab310581-ac80-11d1-8df3-00c04fb6ef50");
pub const IID_ISearchCatalogManager = &IID_ISearchCatalogManager_Value;
// Catalog-management COM interface: reindexing, timeouts, item counts,
// notification sinks, extension exclusion, and accessors for the query helper
// and crawl-scope manager (method names follow the searchapi.h interface).
pub const ISearchCatalogManager = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_Name: fn(
            self: *const ISearchCatalogManager,
            pszName: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetParameter: fn(
            self: *const ISearchCatalogManager,
            pszName: ?[*:0]const u16,
            ppValue: ?*?*PROPVARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetParameter: fn(
            self: *const ISearchCatalogManager,
            pszName: ?[*:0]const u16,
            pValue: ?*PROPVARIANT,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetCatalogStatus: fn(
            self: *const ISearchCatalogManager,
            pStatus: ?*CatalogStatus,
            pPausedReason: ?*CatalogPausedReason,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Reset: fn(
            self: *const ISearchCatalogManager,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        Reindex: fn(
            self: *const ISearchCatalogManager,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ReindexMatchingURLs: fn(
            self: *const ISearchCatalogManager,
            pszPattern: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        ReindexSearchRoot: fn(
            self: *const ISearchCatalogManager,
            pszRootURL: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_ConnectTimeout: fn(
            self: *const ISearchCatalogManager,
            dwConnectTimeout: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ConnectTimeout: fn(
            self: *const ISearchCatalogManager,
            pdwConnectTimeout: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_DataTimeout: fn(
            self: *const ISearchCatalogManager,
            dwDataTimeout: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_DataTimeout: fn(
            self: *const ISearchCatalogManager,
            pdwDataTimeout: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        NumberOfItems: fn(
            self: *const ISearchCatalogManager,
            plCount: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        NumberOfItemsToIndex: fn(
            self: *const ISearchCatalogManager,
            plIncrementalCount: ?*i32,
            plNotificationQueue: ?*i32,
            plHighPriorityQueue: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        URLBeingIndexed: fn(
            self: *const ISearchCatalogManager,
            pszUrl: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetURLIndexingState: fn(
            self: *const ISearchCatalogManager,
            pszURL: ?[*:0]const u16,
            pdwState: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetPersistentItemsChangedSink: fn(
            self: *const ISearchCatalogManager,
            ppISearchPersistentItemsChangedSink: ?*?*ISearchPersistentItemsChangedSink,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        RegisterViewForNotification: fn(
            self: *const ISearchCatalogManager,
            pszView: ?[*:0]const u16,
            pViewChangedSink: ?*ISearchViewChangedSink,
            pdwCookie: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetItemsChangedSink: fn(
            self: *const ISearchCatalogManager,
            pISearchNotifyInlineSite: ?*ISearchNotifyInlineSite,
            riid: ?*const Guid,
            ppv: ?*?*c_void,
            pGUIDCatalogResetSignature: ?*Guid,
            pGUIDCheckPointSignature: ?*Guid,
            pdwLastCheckPointNumber: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        UnregisterViewForNotification: fn(
            self: *const ISearchCatalogManager,
            dwCookie: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        SetExtensionClusion: fn(
            self: *const ISearchCatalogManager,
            pszExtension: ?[*:0]const u16,
            fExclude: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        EnumerateExcludedExtensions: fn(
            self: *const ISearchCatalogManager,
            ppExtensions: ?*?*IEnumString,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetQueryHelper: fn(
            self: *const ISearchCatalogManager,
            ppSearchQueryHelper: ?*?*ISearchQueryHelper,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_DiacriticSensitivity: fn(
            self: *const ISearchCatalogManager,
            fDiacriticSensitive: BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_DiacriticSensitivity: fn(
            self: *const ISearchCatalogManager,
            pfDiacriticSensitive: ?*BOOL,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GetCrawlScopeManager: fn(
            self: *const ISearchCatalogManager,
            ppCrawlScopeManager: ?*?*ISearchCrawlScopeManager,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_get_Name(self: *const T, pszName: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).get_Name(@ptrCast(*const ISearchCatalogManager, self), pszName);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_GetParameter(self: *const T, pszName: ?[*:0]const u16, ppValue: ?*?*PROPVARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).GetParameter(@ptrCast(*const ISearchCatalogManager, self), pszName, ppValue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_SetParameter(self: *const T, pszName: ?[*:0]const u16, pValue: ?*PROPVARIANT) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).SetParameter(@ptrCast(*const ISearchCatalogManager, self), pszName, pValue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_GetCatalogStatus(self: *const T, pStatus: ?*CatalogStatus, pPausedReason: ?*CatalogPausedReason) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).GetCatalogStatus(@ptrCast(*const ISearchCatalogManager, self), pStatus, pPausedReason);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_Reset(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).Reset(@ptrCast(*const ISearchCatalogManager, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_Reindex(self: *const T) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).Reindex(@ptrCast(*const ISearchCatalogManager, self));
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_ReindexMatchingURLs(self: *const T, pszPattern: ?[*:0]const u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).ReindexMatchingURLs(@ptrCast(*const ISearchCatalogManager, self), pszPattern);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_ReindexSearchRoot(self: *const T, pszRootURL: ?[*:0]const u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).ReindexSearchRoot(@ptrCast(*const ISearchCatalogManager, self), pszRootURL);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_put_ConnectTimeout(self: *const T, dwConnectTimeout: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).put_ConnectTimeout(@ptrCast(*const ISearchCatalogManager, self), dwConnectTimeout);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_get_ConnectTimeout(self: *const T, pdwConnectTimeout: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).get_ConnectTimeout(@ptrCast(*const ISearchCatalogManager, self), pdwConnectTimeout);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_put_DataTimeout(self: *const T, dwDataTimeout: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).put_DataTimeout(@ptrCast(*const ISearchCatalogManager, self), dwDataTimeout);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_get_DataTimeout(self: *const T, pdwDataTimeout: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).get_DataTimeout(@ptrCast(*const ISearchCatalogManager, self), pdwDataTimeout);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_NumberOfItems(self: *const T, plCount: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).NumberOfItems(@ptrCast(*const ISearchCatalogManager, self), plCount);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_NumberOfItemsToIndex(self: *const T, plIncrementalCount: ?*i32, plNotificationQueue: ?*i32, plHighPriorityQueue: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).NumberOfItemsToIndex(@ptrCast(*const ISearchCatalogManager, self), plIncrementalCount, plNotificationQueue, plHighPriorityQueue);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_URLBeingIndexed(self: *const T, pszUrl: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).URLBeingIndexed(@ptrCast(*const ISearchCatalogManager, self), pszUrl);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_GetURLIndexingState(self: *const T, pszURL: ?[*:0]const u16, pdwState: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).GetURLIndexingState(@ptrCast(*const ISearchCatalogManager, self), pszURL, pdwState);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_GetPersistentItemsChangedSink(self: *const T, ppISearchPersistentItemsChangedSink: ?*?*ISearchPersistentItemsChangedSink) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).GetPersistentItemsChangedSink(@ptrCast(*const ISearchCatalogManager, self), ppISearchPersistentItemsChangedSink);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_RegisterViewForNotification(self: *const T, pszView: ?[*:0]const u16, pViewChangedSink: ?*ISearchViewChangedSink, pdwCookie: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).RegisterViewForNotification(@ptrCast(*const ISearchCatalogManager, self), pszView, pViewChangedSink, pdwCookie);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_GetItemsChangedSink(self: *const T, pISearchNotifyInlineSite: ?*ISearchNotifyInlineSite, riid: ?*const Guid, ppv: ?*?*c_void, pGUIDCatalogResetSignature: ?*Guid, pGUIDCheckPointSignature: ?*Guid, pdwLastCheckPointNumber: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).GetItemsChangedSink(@ptrCast(*const ISearchCatalogManager, self), pISearchNotifyInlineSite, riid, ppv, pGUIDCatalogResetSignature, pGUIDCheckPointSignature, pdwLastCheckPointNumber);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_UnregisterViewForNotification(self: *const T, dwCookie: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).UnregisterViewForNotification(@ptrCast(*const ISearchCatalogManager, self), dwCookie);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_SetExtensionClusion(self: *const T, pszExtension: ?[*:0]const u16, fExclude: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).SetExtensionClusion(@ptrCast(*const ISearchCatalogManager, self), pszExtension, fExclude);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_EnumerateExcludedExtensions(self: *const T, ppExtensions: ?*?*IEnumString) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).EnumerateExcludedExtensions(@ptrCast(*const ISearchCatalogManager, self), ppExtensions);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_GetQueryHelper(self: *const T, ppSearchQueryHelper: ?*?*ISearchQueryHelper) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).GetQueryHelper(@ptrCast(*const ISearchCatalogManager, self), ppSearchQueryHelper);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_put_DiacriticSensitivity(self: *const T, fDiacriticSensitive: BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).put_DiacriticSensitivity(@ptrCast(*const ISearchCatalogManager, self), fDiacriticSensitive);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_get_DiacriticSensitivity(self: *const T, pfDiacriticSensitive: ?*BOOL) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).get_DiacriticSensitivity(@ptrCast(*const ISearchCatalogManager, self), pfDiacriticSensitive);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager_GetCrawlScopeManager(self: *const T, ppCrawlScopeManager: ?*?*ISearchCrawlScopeManager) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager.VTable, self.vtable).GetCrawlScopeManager(@ptrCast(*const ISearchCatalogManager, self), ppCrawlScopeManager);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Flags for ISearchCatalogManager2.PrioritizeMatchingURLs.
pub const PRIORITIZE_FLAGS = enum(i32) {
    RETRYFAILEDITEMS = 1,
    IGNOREFAILURECOUNT = 2,
};
pub const PRIORITIZE_FLAG_RETRYFAILEDITEMS = PRIORITIZE_FLAGS.RETRYFAILEDITEMS;
pub const PRIORITIZE_FLAG_IGNOREFAILURECOUNT = PRIORITIZE_FLAGS.IGNOREFAILURECOUNT;

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ISearchCatalogManager2_Value = @import("../zig.zig").Guid.initString("7ac3286d-4d1d-4817-84fc-c1c85e3af0d9");
pub const IID_ISearchCatalogManager2 = &IID_ISearchCatalogManager2_Value;
// Extends ISearchCatalogManager (vtable embeds its base) with one extra slot.
pub const ISearchCatalogManager2 = extern struct {
    pub const VTable = extern struct {
        base: ISearchCatalogManager.VTable,
        PrioritizeMatchingURLs: fn(
            self: *const ISearchCatalogManager2,
            pszPattern: ?[*:0]const u16,
            dwPrioritizeFlags: PRIORITIZE_FLAGS,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace ISearchCatalogManager.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchCatalogManager2_PrioritizeMatchingURLs(self: *const T, pszPattern: ?[*:0]const u16, dwPrioritizeFlags: PRIORITIZE_FLAGS) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchCatalogManager2.VTable, self.vtable).PrioritizeMatchingURLs(@ptrCast(*const ISearchCatalogManager2, self), pszPattern, dwPrioritizeFlags);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// How query terms are expanded (see ISearchQueryHelper.put_QueryTermExpansion).
pub const SEARCH_TERM_EXPANSION = enum(i32) {
    NO_EXPANSION = 0,
    PREFIX_ALL = 1,
    STEM_ALL = 2,
};
pub const SEARCH_TERM_NO_EXPANSION = SEARCH_TERM_EXPANSION.NO_EXPANSION;
pub const SEARCH_TERM_PREFIX_ALL = SEARCH_TERM_EXPANSION.PREFIX_ALL;
pub const SEARCH_TERM_STEM_ALL = SEARCH_TERM_EXPANSION.STEM_ALL;

// Query syntax selector (see ISearchQueryHelper.put_QuerySyntax).
pub const SEARCH_QUERY_SYNTAX = enum(i32) {
    NO_QUERY_SYNTAX = 0,
    ADVANCED_QUERY_SYNTAX = 1,
    NATURAL_QUERY_SYNTAX = 2,
};
pub const SEARCH_NO_QUERY_SYNTAX = SEARCH_QUERY_SYNTAX.NO_QUERY_SYNTAX;
pub const
SEARCH_ADVANCED_QUERY_SYNTAX = SEARCH_QUERY_SYNTAX.ADVANCED_QUERY_SYNTAX;
pub const SEARCH_NATURAL_QUERY_SYNTAX = SEARCH_QUERY_SYNTAX.NATURAL_QUERY_SYNTAX;

// A column value plus its locale id, as consumed by ISearchQueryHelper.WriteProperties.
pub const SEARCH_COLUMN_PROPERTIES = extern struct {
    Value: PROPVARIANT,
    lcid: u32,
};

// TODO: this type is limited to platform 'windows5.1.2600'
const IID_ISearchQueryHelper_Value = @import("../zig.zig").Guid.initString("ab310581-ac80-11d1-8df3-00c04fb6ef63");
pub const IID_ISearchQueryHelper = &IID_ISearchQueryHelper_Value;
// Query-construction helper: getters/setters for locale, term expansion,
// syntax, column/restriction/sorting strings, plus SQL generation from a user
// query. Mostly put_/get_ property pairs (COM "SpecialName" accessors).
pub const ISearchQueryHelper = extern struct {
    pub const VTable = extern struct {
        base: IUnknown.VTable,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_ConnectionString: fn(
            self: *const ISearchQueryHelper,
            pszConnectionString: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_QueryContentLocale: fn(
            self: *const ISearchQueryHelper,
            lcid: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_QueryContentLocale: fn(
            self: *const ISearchQueryHelper,
            plcid: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_QueryKeywordLocale: fn(
            self: *const ISearchQueryHelper,
            lcid: u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_QueryKeywordLocale: fn(
            self: *const ISearchQueryHelper,
            plcid: ?*u32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_QueryTermExpansion: fn(
            self: *const ISearchQueryHelper,
            expandTerms: SEARCH_TERM_EXPANSION,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_QueryTermExpansion: fn(
            self: *const ISearchQueryHelper,
            pExpandTerms: ?*SEARCH_TERM_EXPANSION,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_QuerySyntax: fn(
            self: *const ISearchQueryHelper,
            querySyntax: SEARCH_QUERY_SYNTAX,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_QuerySyntax: fn(
            self: *const ISearchQueryHelper,
            pQuerySyntax: ?*SEARCH_QUERY_SYNTAX,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_QueryContentProperties: fn(
            self: *const ISearchQueryHelper,
            pszContentProperties: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_QueryContentProperties: fn(
            self: *const ISearchQueryHelper,
            ppszContentProperties: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_QuerySelectColumns: fn(
            self: *const ISearchQueryHelper,
            pszSelectColumns: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_QuerySelectColumns: fn(
            self: *const ISearchQueryHelper,
            ppszSelectColumns: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_QueryWhereRestrictions: fn(
            self: *const ISearchQueryHelper,
            pszRestrictions: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_QueryWhereRestrictions: fn(
            self: *const ISearchQueryHelper,
            ppszRestrictions: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_QuerySorting: fn(
            self: *const ISearchQueryHelper,
            pszSorting: ?[*:0]const u16,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_QuerySorting: fn(
            self: *const ISearchQueryHelper,
            ppszSorting: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        GenerateSQLFromUserQuery: fn(
            self: *const ISearchQueryHelper,
            pszQuery: ?[*:0]const u16,
            ppszSQL: ?*?PWSTR,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        WriteProperties: fn(
            self: *const ISearchQueryHelper,
            itemID: i32,
            dwNumberOfColumns: u32,
            pColumns: [*]PROPERTYKEY,
            pValues: [*]SEARCH_COLUMN_PROPERTIES,
            pftGatherModifiedTime: ?*FILETIME,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        put_QueryMaxResults: fn(
            self: *const ISearchQueryHelper,
            cMaxResults: i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
        // TODO: this function has a "SpecialName", should Zig do anything with this?
        get_QueryMaxResults: fn(
            self: *const ISearchQueryHelper,
            pcMaxResults: ?*i32,
        ) callconv(@import("std").os.windows.WINAPI) HRESULT,
    };
    vtable: *const VTable,
    pub fn MethodMixin(comptime T: type) type { return struct {
        pub usingnamespace IUnknown.MethodMixin(T);
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_get_ConnectionString(self: *const T, pszConnectionString: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).get_ConnectionString(@ptrCast(*const ISearchQueryHelper, self), pszConnectionString);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_put_QueryContentLocale(self: *const T, lcid: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).put_QueryContentLocale(@ptrCast(*const ISearchQueryHelper, self), lcid);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_get_QueryContentLocale(self: *const T, plcid: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).get_QueryContentLocale(@ptrCast(*const ISearchQueryHelper, self), plcid);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_put_QueryKeywordLocale(self: *const T, lcid: u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).put_QueryKeywordLocale(@ptrCast(*const ISearchQueryHelper, self), lcid);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_get_QueryKeywordLocale(self: *const T, plcid: ?*u32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).get_QueryKeywordLocale(@ptrCast(*const ISearchQueryHelper, self), plcid);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_put_QueryTermExpansion(self: *const T, expandTerms: SEARCH_TERM_EXPANSION) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).put_QueryTermExpansion(@ptrCast(*const ISearchQueryHelper, self), expandTerms);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_get_QueryTermExpansion(self: *const T, pExpandTerms: ?*SEARCH_TERM_EXPANSION) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).get_QueryTermExpansion(@ptrCast(*const ISearchQueryHelper, self), pExpandTerms);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_put_QuerySyntax(self: *const T, querySyntax: SEARCH_QUERY_SYNTAX) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).put_QuerySyntax(@ptrCast(*const ISearchQueryHelper, self), querySyntax);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_get_QuerySyntax(self: *const T, pQuerySyntax: ?*SEARCH_QUERY_SYNTAX) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).get_QuerySyntax(@ptrCast(*const ISearchQueryHelper, self), pQuerySyntax);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_put_QueryContentProperties(self: *const T, pszContentProperties: ?[*:0]const u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).put_QueryContentProperties(@ptrCast(*const ISearchQueryHelper, self), pszContentProperties);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_get_QueryContentProperties(self: *const T, ppszContentProperties: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).get_QueryContentProperties(@ptrCast(*const ISearchQueryHelper, self), ppszContentProperties);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_put_QuerySelectColumns(self: *const T, pszSelectColumns: ?[*:0]const u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).put_QuerySelectColumns(@ptrCast(*const ISearchQueryHelper, self), pszSelectColumns);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_get_QuerySelectColumns(self: *const T, ppszSelectColumns: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).get_QuerySelectColumns(@ptrCast(*const ISearchQueryHelper, self), ppszSelectColumns);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_put_QueryWhereRestrictions(self: *const T, pszRestrictions: ?[*:0]const u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).put_QueryWhereRestrictions(@ptrCast(*const ISearchQueryHelper, self), pszRestrictions);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_get_QueryWhereRestrictions(self: *const T, ppszRestrictions: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).get_QueryWhereRestrictions(@ptrCast(*const ISearchQueryHelper, self), ppszRestrictions);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_put_QuerySorting(self: *const T, pszSorting: ?[*:0]const u16) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).put_QuerySorting(@ptrCast(*const ISearchQueryHelper, self), pszSorting);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_get_QuerySorting(self: *const T, ppszSorting: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).get_QuerySorting(@ptrCast(*const ISearchQueryHelper, self), ppszSorting);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_GenerateSQLFromUserQuery(self: *const T, pszQuery: ?[*:0]const u16, ppszSQL: ?*?PWSTR) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).GenerateSQLFromUserQuery(@ptrCast(*const ISearchQueryHelper, self), pszQuery, ppszSQL);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_WriteProperties(self: *const T, itemID: i32, dwNumberOfColumns: u32, pColumns: [*]PROPERTYKEY, pValues: [*]SEARCH_COLUMN_PROPERTIES, pftGatherModifiedTime: ?*FILETIME) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).WriteProperties(@ptrCast(*const ISearchQueryHelper, self), itemID, dwNumberOfColumns, pColumns, pValues, pftGatherModifiedTime);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_put_QueryMaxResults(self: *const T, cMaxResults: i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).put_QueryMaxResults(@ptrCast(*const ISearchQueryHelper, self), cMaxResults);
        }
        // NOTE: method is namespaced with interface name to avoid conflicts for now
        pub fn ISearchQueryHelper_get_QueryMaxResults(self: *const T, pcMaxResults: ?*i32) callconv(.Inline) HRESULT {
            return @ptrCast(*const ISearchQueryHelper.VTable, self.vtable).get_QueryMaxResults(@ptrCast(*const ISearchQueryHelper, self), pcMaxResults);
        }
    };}
    pub usingnamespace MethodMixin(@This());
};

// Scope priority used by IRowsetPrioritization.
pub const PRIORITY_LEVEL = enum(i32) {
    FOREGROUND = 0,
    HIGH = 1,
    LOW = 2,
    DEFAULT = 3,
};
pub const PRIORITY_LEVEL_FOREGROUND = PRIORITY_LEVEL.FOREGROUND;
pub const PRIORITY_LEVEL_HIGH = PRIORITY_LEVEL.HIGH;
pub const PRIORITY_LEVEL_LOW = PRIORITY_LEVEL.LOW;
pub const PRIORITY_LEVEL_DEFAULT =
PRIORITY_LEVEL.DEFAULT; // TODO: this type is limited to platform 'windows6.1' const IID_IRowsetPrioritization_Value = @import("../zig.zig").Guid.initString("42811652-079d-481b-87a2-09a69ecc5f44"); pub const IID_IRowsetPrioritization = &IID_IRowsetPrioritization_Value; pub const IRowsetPrioritization = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, SetScopePriority: fn( self: *const IRowsetPrioritization, priority: PRIORITY_LEVEL, scopeStatisticsEventFrequency: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetScopePriority: fn( self: *const IRowsetPrioritization, priority: ?*PRIORITY_LEVEL, scopeStatisticsEventFrequency: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetScopeStatistics: fn( self: *const IRowsetPrioritization, indexedDocumentCount: ?*u32, oustandingAddCount: ?*u32, oustandingModifyCount: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowsetPrioritization_SetScopePriority(self: *const T, priority: PRIORITY_LEVEL, scopeStatisticsEventFrequency: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IRowsetPrioritization.VTable, self.vtable).SetScopePriority(@ptrCast(*const IRowsetPrioritization, self), priority, scopeStatisticsEventFrequency); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowsetPrioritization_GetScopePriority(self: *const T, priority: ?*PRIORITY_LEVEL, scopeStatisticsEventFrequency: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IRowsetPrioritization.VTable, self.vtable).GetScopePriority(@ptrCast(*const IRowsetPrioritization, self), priority, scopeStatisticsEventFrequency); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowsetPrioritization_GetScopeStatistics(self: 
*const T, indexedDocumentCount: ?*u32, oustandingAddCount: ?*u32, oustandingModifyCount: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IRowsetPrioritization.VTable, self.vtable).GetScopeStatistics(@ptrCast(*const IRowsetPrioritization, self), indexedDocumentCount, oustandingAddCount, oustandingModifyCount); } };} pub usingnamespace MethodMixin(@This()); }; pub const ROWSETEVENT_ITEMSTATE = enum(i32) { NOTINROWSET = 0, INROWSET = 1, UNKNOWN = 2, }; pub const ROWSETEVENT_ITEMSTATE_NOTINROWSET = ROWSETEVENT_ITEMSTATE.NOTINROWSET; pub const ROWSETEVENT_ITEMSTATE_INROWSET = ROWSETEVENT_ITEMSTATE.INROWSET; pub const ROWSETEVENT_ITEMSTATE_UNKNOWN = ROWSETEVENT_ITEMSTATE.UNKNOWN; pub const ROWSETEVENT_TYPE = enum(i32) { DATAEXPIRED = 0, FOREGROUNDLOST = 1, SCOPESTATISTICS = 2, }; pub const ROWSETEVENT_TYPE_DATAEXPIRED = ROWSETEVENT_TYPE.DATAEXPIRED; pub const ROWSETEVENT_TYPE_FOREGROUNDLOST = ROWSETEVENT_TYPE.FOREGROUNDLOST; pub const ROWSETEVENT_TYPE_SCOPESTATISTICS = ROWSETEVENT_TYPE.SCOPESTATISTICS; // TODO: this type is limited to platform 'windows6.1' const IID_IRowsetEvents_Value = @import("../zig.zig").Guid.initString("1551aea5-5d66-4b11-86f5-d5634cb211b9"); pub const IID_IRowsetEvents = &IID_IRowsetEvents_Value; pub const IRowsetEvents = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, OnNewItem: fn( self: *const IRowsetEvents, itemID: ?*const PROPVARIANT, newItemState: ROWSETEVENT_ITEMSTATE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnChangedItem: fn( self: *const IRowsetEvents, itemID: ?*const PROPVARIANT, rowsetItemState: ROWSETEVENT_ITEMSTATE, changedItemState: ROWSETEVENT_ITEMSTATE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnDeletedItem: fn( self: *const IRowsetEvents, itemID: ?*const PROPVARIANT, deletedItemState: ROWSETEVENT_ITEMSTATE, ) callconv(@import("std").os.windows.WINAPI) HRESULT, OnRowsetEvent: fn( self: *const IRowsetEvents, eventType: ROWSETEVENT_TYPE, eventData: ?*const PROPVARIANT, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowsetEvents_OnNewItem(self: *const T, itemID: ?*const PROPVARIANT, newItemState: ROWSETEVENT_ITEMSTATE) callconv(.Inline) HRESULT { return @ptrCast(*const IRowsetEvents.VTable, self.vtable).OnNewItem(@ptrCast(*const IRowsetEvents, self), itemID, newItemState); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowsetEvents_OnChangedItem(self: *const T, itemID: ?*const PROPVARIANT, rowsetItemState: ROWSETEVENT_ITEMSTATE, changedItemState: ROWSETEVENT_ITEMSTATE) callconv(.Inline) HRESULT { return @ptrCast(*const IRowsetEvents.VTable, self.vtable).OnChangedItem(@ptrCast(*const IRowsetEvents, self), itemID, rowsetItemState, changedItemState); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowsetEvents_OnDeletedItem(self: *const T, itemID: ?*const PROPVARIANT, deletedItemState: ROWSETEVENT_ITEMSTATE) callconv(.Inline) HRESULT { return @ptrCast(*const IRowsetEvents.VTable, self.vtable).OnDeletedItem(@ptrCast(*const IRowsetEvents, self), itemID, deletedItemState); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IRowsetEvents_OnRowsetEvent(self: *const T, eventType: ROWSETEVENT_TYPE, eventData: ?*const PROPVARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const IRowsetEvents.VTable, self.vtable).OnRowsetEvent(@ptrCast(*const IRowsetEvents, self), eventType, eventData); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows5.1.2600' const IID_ISearchManager_Value = @import("../zig.zig").Guid.initString("ab310581-ac80-11d1-8df3-00c04fb6ef69"); pub const IID_ISearchManager = &IID_ISearchManager_Value; pub const ISearchManager = extern struct { pub 
const VTable = extern struct { base: IUnknown.VTable, GetIndexerVersionStr: fn( self: *const ISearchManager, ppszVersionString: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetIndexerVersion: fn( self: *const ISearchManager, pdwMajor: ?*u32, pdwMinor: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetParameter: fn( self: *const ISearchManager, pszName: ?[*:0]const u16, ppValue: ?*?*PROPVARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetParameter: fn( self: *const ISearchManager, pszName: ?[*:0]const u16, pValue: ?*const PROPVARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_ProxyName: fn( self: *const ISearchManager, ppszProxyName: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_BypassList: fn( self: *const ISearchManager, ppszBypassList: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetProxy: fn( self: *const ISearchManager, sUseProxy: PROXY_ACCESS, fLocalByPassProxy: BOOL, dwPortNumber: u32, pszProxyName: ?[*:0]const u16, pszByPassList: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetCatalog: fn( self: *const ISearchManager, pszCatalog: ?[*:0]const u16, ppCatalogManager: ?*?*ISearchCatalogManager, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_UserAgent: fn( self: *const ISearchManager, ppszUserAgent: ?*?PWSTR, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? put_UserAgent: fn( self: *const ISearchManager, pszUserAgent: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? 
get_UseProxy: fn( self: *const ISearchManager, pUseProxy: ?*PROXY_ACCESS, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_LocalBypass: fn( self: *const ISearchManager, pfLocalBypass: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, // TODO: this function has a "SpecialName", should Zig do anything with this? get_PortNumber: fn( self: *const ISearchManager, pdwPortNumber: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager_GetIndexerVersionStr(self: *const T, ppszVersionString: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager.VTable, self.vtable).GetIndexerVersionStr(@ptrCast(*const ISearchManager, self), ppszVersionString); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager_GetIndexerVersion(self: *const T, pdwMajor: ?*u32, pdwMinor: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager.VTable, self.vtable).GetIndexerVersion(@ptrCast(*const ISearchManager, self), pdwMajor, pdwMinor); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager_GetParameter(self: *const T, pszName: ?[*:0]const u16, ppValue: ?*?*PROPVARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager.VTable, self.vtable).GetParameter(@ptrCast(*const ISearchManager, self), pszName, ppValue); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager_SetParameter(self: *const T, pszName: ?[*:0]const u16, pValue: ?*const PROPVARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager.VTable, self.vtable).SetParameter(@ptrCast(*const ISearchManager, self), 
pszName, pValue); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager_get_ProxyName(self: *const T, ppszProxyName: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager.VTable, self.vtable).get_ProxyName(@ptrCast(*const ISearchManager, self), ppszProxyName); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager_get_BypassList(self: *const T, ppszBypassList: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager.VTable, self.vtable).get_BypassList(@ptrCast(*const ISearchManager, self), ppszBypassList); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager_SetProxy(self: *const T, sUseProxy: PROXY_ACCESS, fLocalByPassProxy: BOOL, dwPortNumber: u32, pszProxyName: ?[*:0]const u16, pszByPassList: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager.VTable, self.vtable).SetProxy(@ptrCast(*const ISearchManager, self), sUseProxy, fLocalByPassProxy, dwPortNumber, pszProxyName, pszByPassList); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager_GetCatalog(self: *const T, pszCatalog: ?[*:0]const u16, ppCatalogManager: ?*?*ISearchCatalogManager) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager.VTable, self.vtable).GetCatalog(@ptrCast(*const ISearchManager, self), pszCatalog, ppCatalogManager); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager_get_UserAgent(self: *const T, ppszUserAgent: ?*?PWSTR) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager.VTable, self.vtable).get_UserAgent(@ptrCast(*const ISearchManager, self), ppszUserAgent); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager_put_UserAgent(self: *const T, pszUserAgent: ?[*:0]const u16) callconv(.Inline) HRESULT { return 
@ptrCast(*const ISearchManager.VTable, self.vtable).put_UserAgent(@ptrCast(*const ISearchManager, self), pszUserAgent); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager_get_UseProxy(self: *const T, pUseProxy: ?*PROXY_ACCESS) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager.VTable, self.vtable).get_UseProxy(@ptrCast(*const ISearchManager, self), pUseProxy); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager_get_LocalBypass(self: *const T, pfLocalBypass: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager.VTable, self.vtable).get_LocalBypass(@ptrCast(*const ISearchManager, self), pfLocalBypass); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager_get_PortNumber(self: *const T, pdwPortNumber: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager.VTable, self.vtable).get_PortNumber(@ptrCast(*const ISearchManager, self), pdwPortNumber); } };} pub usingnamespace MethodMixin(@This()); }; // TODO: this type is limited to platform 'windows8.0' const IID_ISearchManager2_Value = @import("../zig.zig").Guid.initString("dbab3f73-db19-4a79-bfc0-a61a93886ddf"); pub const IID_ISearchManager2 = &IID_ISearchManager2_Value; pub const ISearchManager2 = extern struct { pub const VTable = extern struct { base: ISearchManager.VTable, CreateCatalog: fn( self: *const ISearchManager2, pszCatalog: ?[*:0]const u16, ppCatalogManager: ?*?*ISearchCatalogManager, ) callconv(@import("std").os.windows.WINAPI) HRESULT, DeleteCatalog: fn( self: *const ISearchManager2, pszCatalog: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace ISearchManager.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn 
ISearchManager2_CreateCatalog(self: *const T, pszCatalog: ?[*:0]const u16, ppCatalogManager: ?*?*ISearchCatalogManager) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager2.VTable, self.vtable).CreateCatalog(@ptrCast(*const ISearchManager2, self), pszCatalog, ppCatalogManager); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchManager2_DeleteCatalog(self: *const T, pszCatalog: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchManager2.VTable, self.vtable).DeleteCatalog(@ptrCast(*const ISearchManager2, self), pszCatalog); } };} pub usingnamespace MethodMixin(@This()); }; const CLSID_CSearchLanguageSupport_Value = @import("../zig.zig").Guid.initString("6a68cc80-4337-4dbc-bd27-fbfb1053820b"); pub const CLSID_CSearchLanguageSupport = &CLSID_CSearchLanguageSupport_Value; // TODO: this type is limited to platform 'windows5.1.2600' const IID_ISearchLanguageSupport_Value = @import("../zig.zig").Guid.initString("24c3cbaa-ebc1-491a-9ef1-9f6d8deb1b8f"); pub const IID_ISearchLanguageSupport = &IID_ISearchLanguageSupport_Value; pub const ISearchLanguageSupport = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, SetDiacriticSensitivity: fn( self: *const ISearchLanguageSupport, fDiacriticSensitive: BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDiacriticSensitivity: fn( self: *const ISearchLanguageSupport, pfDiacriticSensitive: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, LoadWordBreaker: fn( self: *const ISearchLanguageSupport, lcid: u32, riid: ?*const Guid, ppWordBreaker: ?*?*c_void, pLcidUsed: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, LoadStemmer: fn( self: *const ISearchLanguageSupport, lcid: u32, riid: ?*const Guid, ppStemmer: ?*?*c_void, pLcidUsed: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IsPrefixNormalized: fn( self: *const ISearchLanguageSupport, pwcsQueryToken: [*:0]const u16, cwcQueryToken: u32, 
pwcsDocumentToken: [*:0]const u16, cwcDocumentToken: u32, pulPrefixLength: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchLanguageSupport_SetDiacriticSensitivity(self: *const T, fDiacriticSensitive: BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchLanguageSupport.VTable, self.vtable).SetDiacriticSensitivity(@ptrCast(*const ISearchLanguageSupport, self), fDiacriticSensitive); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchLanguageSupport_GetDiacriticSensitivity(self: *const T, pfDiacriticSensitive: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchLanguageSupport.VTable, self.vtable).GetDiacriticSensitivity(@ptrCast(*const ISearchLanguageSupport, self), pfDiacriticSensitive); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchLanguageSupport_LoadWordBreaker(self: *const T, lcid: u32, riid: ?*const Guid, ppWordBreaker: ?*?*c_void, pLcidUsed: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchLanguageSupport.VTable, self.vtable).LoadWordBreaker(@ptrCast(*const ISearchLanguageSupport, self), lcid, riid, ppWordBreaker, pLcidUsed); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchLanguageSupport_LoadStemmer(self: *const T, lcid: u32, riid: ?*const Guid, ppStemmer: ?*?*c_void, pLcidUsed: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchLanguageSupport.VTable, self.vtable).LoadStemmer(@ptrCast(*const ISearchLanguageSupport, self), lcid, riid, ppStemmer, pLcidUsed); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISearchLanguageSupport_IsPrefixNormalized(self: *const T, pwcsQueryToken: [*:0]const u16, cwcQueryToken: 
u32, pwcsDocumentToken: [*:0]const u16, cwcDocumentToken: u32, pulPrefixLength: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const ISearchLanguageSupport.VTable, self.vtable).IsPrefixNormalized(@ptrCast(*const ISearchLanguageSupport, self), pwcsQueryToken, cwcQueryToken, pwcsDocumentToken, cwcDocumentToken, pulPrefixLength); } };} pub usingnamespace MethodMixin(@This()); }; const CLSID_SubscriptionMgr_Value = @import("../zig.zig").Guid.initString("abbe31d0-6dae-11d0-beca-00c04fd940be"); pub const CLSID_SubscriptionMgr = &CLSID_SubscriptionMgr_Value; pub const ITEMPROP = extern struct { variantValue: VARIANT, pwszName: ?PWSTR, }; const IID_IEnumItemProperties_Value = @import("../zig.zig").Guid.initString("f72c8d96-6dbd-11d1-a1e8-00c04fc2fbe1"); pub const IID_IEnumItemProperties = &IID_IEnumItemProperties_Value; pub const IEnumItemProperties = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Next: fn( self: *const IEnumItemProperties, celt: u32, rgelt: [*]ITEMPROP, pceltFetched: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Skip: fn( self: *const IEnumItemProperties, celt: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Reset: fn( self: *const IEnumItemProperties, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Clone: fn( self: *const IEnumItemProperties, ppenum: ?*?*IEnumItemProperties, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetCount: fn( self: *const IEnumItemProperties, pnCount: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumItemProperties_Next(self: *const T, celt: u32, rgelt: [*]ITEMPROP, pceltFetched: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumItemProperties.VTable, self.vtable).Next(@ptrCast(*const IEnumItemProperties, self), celt, 
rgelt, pceltFetched); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumItemProperties_Skip(self: *const T, celt: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumItemProperties.VTable, self.vtable).Skip(@ptrCast(*const IEnumItemProperties, self), celt); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumItemProperties_Reset(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumItemProperties.VTable, self.vtable).Reset(@ptrCast(*const IEnumItemProperties, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumItemProperties_Clone(self: *const T, ppenum: ?*?*IEnumItemProperties) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumItemProperties.VTable, self.vtable).Clone(@ptrCast(*const IEnumItemProperties, self), ppenum); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumItemProperties_GetCount(self: *const T, pnCount: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumItemProperties.VTable, self.vtable).GetCount(@ptrCast(*const IEnumItemProperties, self), pnCount); } };} pub usingnamespace MethodMixin(@This()); }; pub const SUBSCRIPTIONITEMINFO = extern struct { cbSize: u32, dwFlags: u32, dwPriority: u32, ScheduleGroup: Guid, clsidAgent: Guid, }; const IID_ISubscriptionItem_Value = @import("../zig.zig").Guid.initString("a97559f8-6c4a-11d1-a1e8-00c04fc2fbe1"); pub const IID_ISubscriptionItem = &IID_ISubscriptionItem_Value; pub const ISubscriptionItem = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, GetCookie: fn( self: *const ISubscriptionItem, pCookie: ?*Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSubscriptionItemInfo: fn( self: *const ISubscriptionItem, pSubscriptionItemInfo: ?*SUBSCRIPTIONITEMINFO, ) callconv(@import("std").os.windows.WINAPI) HRESULT, SetSubscriptionItemInfo: fn( self: *const 
ISubscriptionItem, pSubscriptionItemInfo: ?*const SUBSCRIPTIONITEMINFO, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ReadProperties: fn( self: *const ISubscriptionItem, nCount: u32, rgwszName: [*]const ?[*:0]const u16, rgValue: [*]VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, WriteProperties: fn( self: *const ISubscriptionItem, nCount: u32, rgwszName: [*]const ?[*:0]const u16, rgValue: [*]const VARIANT, ) callconv(@import("std").os.windows.WINAPI) HRESULT, EnumProperties: fn( self: *const ISubscriptionItem, ppEnumItemProperties: ?*?*IEnumItemProperties, ) callconv(@import("std").os.windows.WINAPI) HRESULT, NotifyChanged: fn( self: *const ISubscriptionItem, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionItem_GetCookie(self: *const T, pCookie: ?*Guid) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionItem.VTable, self.vtable).GetCookie(@ptrCast(*const ISubscriptionItem, self), pCookie); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionItem_GetSubscriptionItemInfo(self: *const T, pSubscriptionItemInfo: ?*SUBSCRIPTIONITEMINFO) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionItem.VTable, self.vtable).GetSubscriptionItemInfo(@ptrCast(*const ISubscriptionItem, self), pSubscriptionItemInfo); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionItem_SetSubscriptionItemInfo(self: *const T, pSubscriptionItemInfo: ?*const SUBSCRIPTIONITEMINFO) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionItem.VTable, self.vtable).SetSubscriptionItemInfo(@ptrCast(*const ISubscriptionItem, self), pSubscriptionItemInfo); } // NOTE: method is namespaced with interface name to avoid conflicts for now 
pub fn ISubscriptionItem_ReadProperties(self: *const T, nCount: u32, rgwszName: [*]const ?[*:0]const u16, rgValue: [*]VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionItem.VTable, self.vtable).ReadProperties(@ptrCast(*const ISubscriptionItem, self), nCount, rgwszName, rgValue); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionItem_WriteProperties(self: *const T, nCount: u32, rgwszName: [*]const ?[*:0]const u16, rgValue: [*]const VARIANT) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionItem.VTable, self.vtable).WriteProperties(@ptrCast(*const ISubscriptionItem, self), nCount, rgwszName, rgValue); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionItem_EnumProperties(self: *const T, ppEnumItemProperties: ?*?*IEnumItemProperties) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionItem.VTable, self.vtable).EnumProperties(@ptrCast(*const ISubscriptionItem, self), ppEnumItemProperties); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionItem_NotifyChanged(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionItem.VTable, self.vtable).NotifyChanged(@ptrCast(*const ISubscriptionItem, self)); } };} pub usingnamespace MethodMixin(@This()); }; const IID_IEnumSubscription_Value = @import("../zig.zig").Guid.initString("f72c8d97-6dbd-11d1-a1e8-00c04fc2fbe1"); pub const IID_IEnumSubscription = &IID_IEnumSubscription_Value; pub const IEnumSubscription = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, Next: fn( self: *const IEnumSubscription, celt: u32, rgelt: [*]Guid, pceltFetched: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Skip: fn( self: *const IEnumSubscription, celt: u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, Reset: fn( self: *const IEnumSubscription, ) callconv(@import("std").os.windows.WINAPI) 
HRESULT, Clone: fn( self: *const IEnumSubscription, ppenum: ?*?*IEnumSubscription, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetCount: fn( self: *const IEnumSubscription, pnCount: ?*u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSubscription_Next(self: *const T, celt: u32, rgelt: [*]Guid, pceltFetched: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSubscription.VTable, self.vtable).Next(@ptrCast(*const IEnumSubscription, self), celt, rgelt, pceltFetched); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSubscription_Skip(self: *const T, celt: u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSubscription.VTable, self.vtable).Skip(@ptrCast(*const IEnumSubscription, self), celt); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSubscription_Reset(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSubscription.VTable, self.vtable).Reset(@ptrCast(*const IEnumSubscription, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSubscription_Clone(self: *const T, ppenum: ?*?*IEnumSubscription) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSubscription.VTable, self.vtable).Clone(@ptrCast(*const IEnumSubscription, self), ppenum); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn IEnumSubscription_GetCount(self: *const T, pnCount: ?*u32) callconv(.Inline) HRESULT { return @ptrCast(*const IEnumSubscription.VTable, self.vtable).GetCount(@ptrCast(*const IEnumSubscription, self), pnCount); } };} pub usingnamespace MethodMixin(@This()); }; pub const SUBSCRIPTIONTYPE = enum(i32) { URL = 0, CHANNEL = 1, DESKTOPURL = 2, 
EXTERNAL = 3, DESKTOPCHANNEL = 4, };
pub const SUBSTYPE_URL = SUBSCRIPTIONTYPE.URL;
pub const SUBSTYPE_CHANNEL = SUBSCRIPTIONTYPE.CHANNEL;
pub const SUBSTYPE_DESKTOPURL = SUBSCRIPTIONTYPE.DESKTOPURL;
pub const SUBSTYPE_EXTERNAL = SUBSCRIPTIONTYPE.EXTERNAL;
pub const SUBSTYPE_DESKTOPCHANNEL = SUBSCRIPTIONTYPE.DESKTOPCHANNEL;
// Bit flags (SUBSINFO_* from subsmgr.h) indicating which fields of SUBSCRIPTIONINFO
// are valid; values are powers of two so they can be OR-ed into fUpdateFlags.
pub const SUBSCRIPTIONINFOFLAGS = enum(i32) {
    SCHEDULE = 1,
    RECURSE = 2,
    WEBCRAWL = 4,
    MAILNOT = 8,
    MAXSIZEKB = 16,
    USER = 32,
    PASSWORD = 64,
    TASKFLAGS = 256,
    GLEAM = 512,
    CHANGESONLY = 1024,
    CHANNELFLAGS = 2048,
    FRIENDLYNAME = 8192,
    // FIX: source contained the invalid placeholder token `<PASSWORD>` here.
    // SUBSINFO_NEEDPASSWORD is 0x4000 = 16384 in the Windows SDK, matching the
    // power-of-two sequence between FRIENDLYNAME (8192) and TYPE (32768).
    NEEDPASSWORD = 16384,
    TYPE = 32768,
};
pub const SUBSINFO_SCHEDULE = SUBSCRIPTIONINFOFLAGS.SCHEDULE;
pub const SUBSINFO_RECURSE = SUBSCRIPTIONINFOFLAGS.RECURSE;
pub const SUBSINFO_WEBCRAWL = SUBSCRIPTIONINFOFLAGS.WEBCRAWL;
pub const SUBSINFO_MAILNOT = SUBSCRIPTIONINFOFLAGS.MAILNOT;
pub const SUBSINFO_MAXSIZEKB = SUBSCRIPTIONINFOFLAGS.MAXSIZEKB;
pub const SUBSINFO_USER = SUBSCRIPTIONINFOFLAGS.USER;
pub const SUBSINFO_PASSWORD = SUBSCRIPTIONINFOFLAGS.PASSWORD;
pub const SUBSINFO_TASKFLAGS = SUBSCRIPTIONINFOFLAGS.TASKFLAGS;
pub const SUBSINFO_GLEAM = SUBSCRIPTIONINFOFLAGS.GLEAM;
pub const SUBSINFO_CHANGESONLY = SUBSCRIPTIONINFOFLAGS.CHANGESONLY;
pub const SUBSINFO_CHANNELFLAGS = SUBSCRIPTIONINFOFLAGS.CHANNELFLAGS;
pub const SUBSINFO_FRIENDLYNAME = SUBSCRIPTIONINFOFLAGS.FRIENDLYNAME;
pub const SUBSINFO_NEEDPASSWORD = SUBSCRIPTIONINFOFLAGS.NEEDPASSWORD;
pub const SUBSINFO_TYPE = SUBSCRIPTIONINFOFLAGS.TYPE;
// Flags for ISubscriptionMgr.CreateSubscription (CREATESUBS_* from subsmgr.h).
pub const CREATESUBSCRIPTIONFLAGS = enum(i32) {
    ADDTOFAVORITES = 1,
    FROMFAVORITES = 2,
    NOUI = 4,
    NOSAVE = 8,
    SOFTWAREUPDATE = 16,
};
pub const CREATESUBS_ADDTOFAVORITES = CREATESUBSCRIPTIONFLAGS.ADDTOFAVORITES;
pub const CREATESUBS_FROMFAVORITES = CREATESUBSCRIPTIONFLAGS.FROMFAVORITES;
pub const CREATESUBS_NOUI = CREATESUBSCRIPTIONFLAGS.NOUI;
pub const CREATESUBS_NOSAVE = CREATESUBSCRIPTIONFLAGS.NOSAVE;
pub const CREATESUBS_SOFTWAREUPDATE = CREATESUBSCRIPTIONFLAGS.SOFTWAREUPDATE;
pub const SUBSCRIPTIONSCHEDULE =
enum(i32) { AUTO = 0, DAILY = 1, WEEKLY = 2, CUSTOM = 3, MANUAL = 4, }; pub const SUBSSCHED_AUTO = SUBSCRIPTIONSCHEDULE.AUTO; pub const SUBSSCHED_DAILY = SUBSCRIPTIONSCHEDULE.DAILY; pub const SUBSSCHED_WEEKLY = SUBSCRIPTIONSCHEDULE.WEEKLY; pub const SUBSSCHED_CUSTOM = SUBSCRIPTIONSCHEDULE.CUSTOM; pub const SUBSSCHED_MANUAL = SUBSCRIPTIONSCHEDULE.MANUAL; pub const SUBSCRIPTIONINFO = extern struct { cbSize: u32, fUpdateFlags: u32, schedule: SUBSCRIPTIONSCHEDULE, customGroupCookie: Guid, pTrigger: ?*c_void, dwRecurseLevels: u32, fWebcrawlerFlags: u32, bMailNotification: BOOL, bGleam: BOOL, bChangesOnly: BOOL, bNeedPassword: BOOL, fChannelFlags: u32, bstrUserName: ?BSTR, bstrPassword: ?BSTR, bstrFriendlyName: ?BSTR, dwMaxSizeKB: u32, subType: SUBSCRIPTIONTYPE, fTaskFlags: u32, dwReserved: u32, }; const IID_ISubscriptionMgr_Value = @import("../zig.zig").Guid.initString("085fb2c0-0df8-11d1-8f4b-00a0c905413f"); pub const IID_ISubscriptionMgr = &IID_ISubscriptionMgr_Value; pub const ISubscriptionMgr = extern struct { pub const VTable = extern struct { base: IUnknown.VTable, DeleteSubscription: fn( self: *const ISubscriptionMgr, pwszURL: ?[*:0]const u16, hwnd: ?HWND, ) callconv(@import("std").os.windows.WINAPI) HRESULT, UpdateSubscription: fn( self: *const ISubscriptionMgr, pwszURL: ?[*:0]const u16, ) callconv(@import("std").os.windows.WINAPI) HRESULT, UpdateAll: fn( self: *const ISubscriptionMgr, ) callconv(@import("std").os.windows.WINAPI) HRESULT, IsSubscribed: fn( self: *const ISubscriptionMgr, pwszURL: ?[*:0]const u16, pfSubscribed: ?*BOOL, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetSubscriptionInfo: fn( self: *const ISubscriptionMgr, pwszURL: ?[*:0]const u16, pInfo: ?*SUBSCRIPTIONINFO, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetDefaultInfo: fn( self: *const ISubscriptionMgr, subType: SUBSCRIPTIONTYPE, pInfo: ?*SUBSCRIPTIONINFO, ) callconv(@import("std").os.windows.WINAPI) HRESULT, ShowSubscriptionProperties: fn( self: *const 
ISubscriptionMgr, pwszURL: ?[*:0]const u16, hwnd: ?HWND, ) callconv(@import("std").os.windows.WINAPI) HRESULT, CreateSubscription: fn( self: *const ISubscriptionMgr, hwnd: ?HWND, pwszURL: ?[*:0]const u16, pwszFriendlyName: ?[*:0]const u16, dwFlags: u32, subsType: SUBSCRIPTIONTYPE, pInfo: ?*SUBSCRIPTIONINFO, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace IUnknown.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr_DeleteSubscription(self: *const T, pwszURL: ?[*:0]const u16, hwnd: ?HWND) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionMgr.VTable, self.vtable).DeleteSubscription(@ptrCast(*const ISubscriptionMgr, self), pwszURL, hwnd); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr_UpdateSubscription(self: *const T, pwszURL: ?[*:0]const u16) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionMgr.VTable, self.vtable).UpdateSubscription(@ptrCast(*const ISubscriptionMgr, self), pwszURL); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr_UpdateAll(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionMgr.VTable, self.vtable).UpdateAll(@ptrCast(*const ISubscriptionMgr, self)); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr_IsSubscribed(self: *const T, pwszURL: ?[*:0]const u16, pfSubscribed: ?*BOOL) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionMgr.VTable, self.vtable).IsSubscribed(@ptrCast(*const ISubscriptionMgr, self), pwszURL, pfSubscribed); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr_GetSubscriptionInfo(self: *const T, pwszURL: ?[*:0]const u16, pInfo: ?*SUBSCRIPTIONINFO) callconv(.Inline) HRESULT { return 
@ptrCast(*const ISubscriptionMgr.VTable, self.vtable).GetSubscriptionInfo(@ptrCast(*const ISubscriptionMgr, self), pwszURL, pInfo); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr_GetDefaultInfo(self: *const T, subType: SUBSCRIPTIONTYPE, pInfo: ?*SUBSCRIPTIONINFO) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionMgr.VTable, self.vtable).GetDefaultInfo(@ptrCast(*const ISubscriptionMgr, self), subType, pInfo); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr_ShowSubscriptionProperties(self: *const T, pwszURL: ?[*:0]const u16, hwnd: ?HWND) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionMgr.VTable, self.vtable).ShowSubscriptionProperties(@ptrCast(*const ISubscriptionMgr, self), pwszURL, hwnd); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr_CreateSubscription(self: *const T, hwnd: ?HWND, pwszURL: ?[*:0]const u16, pwszFriendlyName: ?[*:0]const u16, dwFlags: u32, subsType: SUBSCRIPTIONTYPE, pInfo: ?*SUBSCRIPTIONINFO) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionMgr.VTable, self.vtable).CreateSubscription(@ptrCast(*const ISubscriptionMgr, self), hwnd, pwszURL, pwszFriendlyName, dwFlags, subsType, pInfo); } };} pub usingnamespace MethodMixin(@This()); }; const IID_ISubscriptionMgr2_Value = @import("../zig.zig").Guid.initString("614bc270-aedf-11d1-a1f9-00c04fc2fbe1"); pub const IID_ISubscriptionMgr2 = &IID_ISubscriptionMgr2_Value; pub const ISubscriptionMgr2 = extern struct { pub const VTable = extern struct { base: ISubscriptionMgr.VTable, GetItemFromURL: fn( self: *const ISubscriptionMgr2, pwszURL: ?[*:0]const u16, ppSubscriptionItem: ?*?*ISubscriptionItem, ) callconv(@import("std").os.windows.WINAPI) HRESULT, GetItemFromCookie: fn( self: *const ISubscriptionMgr2, pSubscriptionCookie: ?*const Guid, ppSubscriptionItem: ?*?*ISubscriptionItem, ) 
callconv(@import("std").os.windows.WINAPI) HRESULT, GetSubscriptionRunState: fn( self: *const ISubscriptionMgr2, dwNumCookies: u32, pCookies: [*]const Guid, pdwRunState: [*]u32, ) callconv(@import("std").os.windows.WINAPI) HRESULT, EnumSubscriptions: fn( self: *const ISubscriptionMgr2, dwFlags: u32, ppEnumSubscriptions: ?*?*IEnumSubscription, ) callconv(@import("std").os.windows.WINAPI) HRESULT, UpdateItems: fn( self: *const ISubscriptionMgr2, dwFlags: u32, dwNumCookies: u32, pCookies: [*]const Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AbortItems: fn( self: *const ISubscriptionMgr2, dwNumCookies: u32, pCookies: [*]const Guid, ) callconv(@import("std").os.windows.WINAPI) HRESULT, AbortAll: fn( self: *const ISubscriptionMgr2, ) callconv(@import("std").os.windows.WINAPI) HRESULT, }; vtable: *const VTable, pub fn MethodMixin(comptime T: type) type { return struct { pub usingnamespace ISubscriptionMgr.MethodMixin(T); // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr2_GetItemFromURL(self: *const T, pwszURL: ?[*:0]const u16, ppSubscriptionItem: ?*?*ISubscriptionItem) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionMgr2.VTable, self.vtable).GetItemFromURL(@ptrCast(*const ISubscriptionMgr2, self), pwszURL, ppSubscriptionItem); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr2_GetItemFromCookie(self: *const T, pSubscriptionCookie: ?*const Guid, ppSubscriptionItem: ?*?*ISubscriptionItem) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionMgr2.VTable, self.vtable).GetItemFromCookie(@ptrCast(*const ISubscriptionMgr2, self), pSubscriptionCookie, ppSubscriptionItem); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr2_GetSubscriptionRunState(self: *const T, dwNumCookies: u32, pCookies: [*]const Guid, pdwRunState: [*]u32) callconv(.Inline) HRESULT { return @ptrCast(*const 
ISubscriptionMgr2.VTable, self.vtable).GetSubscriptionRunState(@ptrCast(*const ISubscriptionMgr2, self), dwNumCookies, pCookies, pdwRunState); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr2_EnumSubscriptions(self: *const T, dwFlags: u32, ppEnumSubscriptions: ?*?*IEnumSubscription) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionMgr2.VTable, self.vtable).EnumSubscriptions(@ptrCast(*const ISubscriptionMgr2, self), dwFlags, ppEnumSubscriptions); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr2_UpdateItems(self: *const T, dwFlags: u32, dwNumCookies: u32, pCookies: [*]const Guid) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionMgr2.VTable, self.vtable).UpdateItems(@ptrCast(*const ISubscriptionMgr2, self), dwFlags, dwNumCookies, pCookies); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr2_AbortItems(self: *const T, dwNumCookies: u32, pCookies: [*]const Guid) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionMgr2.VTable, self.vtable).AbortItems(@ptrCast(*const ISubscriptionMgr2, self), dwNumCookies, pCookies); } // NOTE: method is namespaced with interface name to avoid conflicts for now pub fn ISubscriptionMgr2_AbortAll(self: *const T) callconv(.Inline) HRESULT { return @ptrCast(*const ISubscriptionMgr2.VTable, self.vtable).AbortAll(@ptrCast(*const ISubscriptionMgr2, self)); } };} pub usingnamespace MethodMixin(@This()); }; pub const DELIVERY_AGENT_FLAGS = enum(i32) { NO_BROADCAST = 4, NO_RESTRICTIONS = 8, SILENT_DIAL = 16, }; pub const DELIVERY_AGENT_FLAG_NO_BROADCAST = DELIVERY_AGENT_FLAGS.NO_BROADCAST; pub const DELIVERY_AGENT_FLAG_NO_RESTRICTIONS = DELIVERY_AGENT_FLAGS.NO_RESTRICTIONS; pub const DELIVERY_AGENT_FLAG_SILENT_DIAL = DELIVERY_AGENT_FLAGS.SILENT_DIAL; pub const WEBCRAWL_RECURSEFLAGS = enum(i32) { DONT_MAKE_STICKY = 1, GET_IMAGES = 2, GET_VIDEOS = 
4, GET_BGSOUNDS = 8, GET_CONTROLS = 16, LINKS_ELSEWHERE = 32, IGNORE_ROBOTSTXT = 128, ONLY_LINKS_TO_HTML = 256, }; pub const WEBCRAWL_DONT_MAKE_STICKY = WEBCRAWL_RECURSEFLAGS.DONT_MAKE_STICKY; pub const WEBCRAWL_GET_IMAGES = WEBCRAWL_RECURSEFLAGS.GET_IMAGES; pub const WEBCRAWL_GET_VIDEOS = WEBCRAWL_RECURSEFLAGS.GET_VIDEOS; pub const WEBCRAWL_GET_BGSOUNDS = WEBCRAWL_RECURSEFLAGS.GET_BGSOUNDS; pub const WEBCRAWL_GET_CONTROLS = WEBCRAWL_RECURSEFLAGS.GET_CONTROLS; pub const WEBCRAWL_LINKS_ELSEWHERE = WEBCRAWL_RECURSEFLAGS.LINKS_ELSEWHERE; pub const WEBCRAWL_IGNORE_ROBOTSTXT = WEBCRAWL_RECURSEFLAGS.IGNORE_ROBOTSTXT; pub const WEBCRAWL_ONLY_LINKS_TO_HTML = WEBCRAWL_RECURSEFLAGS.ONLY_LINKS_TO_HTML; pub const CHANNEL_AGENT_FLAGS = enum(i32) { DYNAMIC_SCHEDULE = 1, PRECACHE_SOME = 2, PRECACHE_ALL = 4, PRECACHE_SCRNSAVER = 8, }; pub const CHANNEL_AGENT_DYNAMIC_SCHEDULE = CHANNEL_AGENT_FLAGS.DYNAMIC_SCHEDULE; pub const CHANNEL_AGENT_PRECACHE_SOME = CHANNEL_AGENT_FLAGS.PRECACHE_SOME; pub const CHANNEL_AGENT_PRECACHE_ALL = CHANNEL_AGENT_FLAGS.PRECACHE_ALL; pub const CHANNEL_AGENT_PRECACHE_SCRNSAVER = CHANNEL_AGENT_FLAGS.PRECACHE_SCRNSAVER; pub const DBVECTOR = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { size: usize, ptr: ?*c_void, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug size: usize, ptr: ?*c_void, }, }; pub const DBTIMESTAMP = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { year: i16, month: u16, day: u16, hour: u16, minute: u16, second: u16, fraction: u32, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug year: i16, month: u16, day: u16, hour: u16, minute: u16, second: u16, fraction: u32, }, }; pub const SEC_OBJECT_ELEMENT = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { guidObjectType: Guid, ObjectID: DBID, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's 
causing a compiler bug guidObjectType: Guid, ObjectID: DBID, }, }; pub const SEC_OBJECT = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { cObjects: u32, prgObjects: ?*SEC_OBJECT_ELEMENT, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug cObjects: u32, prgObjects: ?*SEC_OBJECT_ELEMENT, }, }; pub const DBIMPLICITSESSION = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { pUnkOuter: ?*IUnknown, piid: ?*Guid, pSession: ?*IUnknown, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug pUnkOuter: ?*IUnknown, piid: ?*Guid, pSession: ?*IUnknown, }, }; pub const DBOBJECT = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { dwFlags: u32, iid: Guid, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug dwFlags: u32, iid: Guid, }, }; pub const DBBINDEXT = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { pExtension: ?*u8, ulExtension: usize, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug pExtension: ?*u8, ulExtension: usize, }, }; pub const DBBINDING = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { iOrdinal: usize, obValue: usize, obLength: usize, obStatus: usize, pTypeInfo: ?*ITypeInfo, pObject: ?*DBOBJECT, pBindExt: ?*DBBINDEXT, dwPart: u32, dwMemOwner: u32, eParamIO: u32, cbMaxLen: usize, dwFlags: u32, wType: u16, bPrecision: u8, bScale: u8, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug iOrdinal: usize, obValue: usize, obLength: usize, obStatus: usize, pTypeInfo: ?*ITypeInfo, pObject: ?*DBOBJECT, pBindExt: ?*DBBINDEXT, dwPart: u32, dwMemOwner: u32, eParamIO: u32, cbMaxLen: usize, dwFlags: u32, wType: u16, bPrecision: u8, bScale: u8, }, }; pub const DBFAILUREINFO = switch(@import("../zig.zig").arch) { .X64, 
.Arm64 => extern struct { hRow: usize, iColumn: usize, failure: HRESULT, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug hRow: usize, iColumn: usize, failure: HRESULT, }, }; pub const DBCOLUMNINFO = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { pwszName: ?PWSTR, pTypeInfo: ?*ITypeInfo, iOrdinal: usize, dwFlags: u32, ulColumnSize: usize, wType: u16, bPrecision: u8, bScale: u8, columnid: DBID, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug pwszName: ?PWSTR, pTypeInfo: ?*ITypeInfo, iOrdinal: usize, dwFlags: u32, ulColumnSize: usize, wType: u16, bPrecision: u8, bScale: u8, columnid: DBID, }, }; pub const DBPARAMS = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { pData: ?*c_void, cParamSets: usize, hAccessor: usize, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug pData: ?*c_void, cParamSets: usize, hAccessor: usize, }, }; pub const DBPARAMINFO = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { dwFlags: u32, iOrdinal: usize, pwszName: ?PWSTR, pTypeInfo: ?*ITypeInfo, ulParamSize: usize, wType: u16, bPrecision: u8, bScale: u8, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug dwFlags: u32, iOrdinal: usize, pwszName: ?PWSTR, pTypeInfo: ?*ITypeInfo, ulParamSize: usize, wType: u16, bPrecision: u8, bScale: u8, }, }; pub const DBPROPIDSET = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { rgPropertyIDs: ?*u32, cPropertyIDs: u32, guidPropertySet: Guid, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug rgPropertyIDs: ?*u32, cPropertyIDs: u32, guidPropertySet: Guid, }, }; pub const DBPROPINFO = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { pwszDescription: ?PWSTR, dwPropertyID: u32, dwFlags: u32, 
vtType: u16, vValues: VARIANT, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug pwszDescription: ?PWSTR, dwPropertyID: u32, dwFlags: u32, vtType: u16, vValues: VARIANT, }, }; pub const DBPROPINFOSET = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { rgPropertyInfos: ?*DBPROPINFO, cPropertyInfos: u32, guidPropertySet: Guid, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug rgPropertyInfos: ?*DBPROPINFO, cPropertyInfos: u32, guidPropertySet: Guid, }, }; pub const DBPROP = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { dwPropertyID: u32, dwOptions: u32, dwStatus: u32, colid: DBID, vValue: VARIANT, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug dwPropertyID: u32, dwOptions: u32, dwStatus: u32, colid: DBID, vValue: VARIANT, }, }; pub const DBPROPSET = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { rgProperties: ?*DBPROP, cProperties: u32, guidPropertySet: Guid, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug rgProperties: ?*DBPROP, cProperties: u32, guidPropertySet: Guid, }, }; pub const DBINDEXCOLUMNDESC = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { pColumnID: ?*DBID, eIndexColOrder: u32, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug pColumnID: ?*DBID, eIndexColOrder: u32, }, }; pub const DBCOLUMNDESC = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { pwszTypeName: ?PWSTR, pTypeInfo: ?*ITypeInfo, rgPropertySets: ?*DBPROPSET, pclsid: ?*Guid, cPropertySets: u32, ulColumnSize: usize, dbcid: DBID, wType: u16, bPrecision: u8, bScale: u8, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug pwszTypeName: ?PWSTR, pTypeInfo: ?*ITypeInfo, 
rgPropertySets: ?*DBPROPSET, pclsid: ?*Guid, cPropertySets: u32, ulColumnSize: usize, dbcid: DBID, wType: u16, bPrecision: u8, bScale: u8, }, }; pub const DBCOLUMNACCESS = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { pData: ?*c_void, columnid: DBID, cbDataLen: usize, dwStatus: u32, cbMaxLen: usize, dwReserved: usize, wType: u16, bPrecision: u8, bScale: u8, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug pData: ?*c_void, columnid: DBID, cbDataLen: usize, dwStatus: u32, cbMaxLen: usize, dwReserved: usize, wType: u16, bPrecision: u8, bScale: u8, }, }; pub const DBCONSTRAINTDESC = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { pConstraintID: ?*DBID, ConstraintType: u32, cColumns: usize, rgColumnList: ?*DBID, pReferencedTableID: ?*DBID, cForeignKeyColumns: usize, rgForeignKeyColumnList: ?*DBID, pwszConstraintText: ?PWSTR, UpdateRule: u32, DeleteRule: u32, MatchType: u32, Deferrability: u32, cReserved: usize, rgReserved: ?*DBPROPSET, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug pConstraintID: ?*DBID, ConstraintType: u32, cColumns: usize, rgColumnList: ?*DBID, pReferencedTableID: ?*DBID, cForeignKeyColumns: usize, rgForeignKeyColumnList: ?*DBID, pwszConstraintText: ?PWSTR, UpdateRule: u32, DeleteRule: u32, MatchType: u32, Deferrability: u32, cReserved: usize, rgReserved: ?*DBPROPSET, }, }; pub const MDAXISINFO = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { cbSize: usize, iAxis: usize, cDimensions: usize, cCoordinates: usize, rgcColumns: ?*usize, rgpwszDimensionNames: ?*?PWSTR, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug cbSize: usize, iAxis: usize, cDimensions: usize, cCoordinates: usize, rgcColumns: ?*usize, rgpwszDimensionNames: ?*?PWSTR, }, }; pub const RMTPACK = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern 
struct { pISeqStream: ?*ISequentialStream, cbData: u32, cBSTR: u32, rgBSTR: ?*?BSTR, cVARIANT: u32, rgVARIANT: ?*VARIANT, cIDISPATCH: u32, rgIDISPATCH: ?*?*IDispatch, cIUNKNOWN: u32, rgIUNKNOWN: ?*?*IUnknown, cPROPVARIANT: u32, rgPROPVARIANT: ?*PROPVARIANT, cArray: u32, rgArray: ?*VARIANT, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug pISeqStream: ?*ISequentialStream, cbData: u32, cBSTR: u32, rgBSTR: ?*?BSTR, cVARIANT: u32, rgVARIANT: ?*VARIANT, cIDISPATCH: u32, rgIDISPATCH: ?*?*IDispatch, cIUNKNOWN: u32, rgIUNKNOWN: ?*?*IUnknown, cPROPVARIANT: u32, rgPROPVARIANT: ?*PROPVARIANT, cArray: u32, rgArray: ?*VARIANT, }, }; pub const DBPARAMBINDINFO = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { pwszDataSourceType: ?PWSTR, pwszName: ?PWSTR, ulParamSize: usize, dwFlags: u32, bPrecision: u8, bScale: u8, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug pwszDataSourceType: ?PWSTR, pwszName: ?PWSTR, ulParamSize: usize, dwFlags: u32, bPrecision: u8, bScale: u8, }, }; pub const DBLITERALINFO = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { pwszLiteralValue: ?PWSTR, pwszInvalidChars: ?PWSTR, pwszInvalidStartingChars: ?PWSTR, lt: u32, fSupported: BOOL, cchMaxLen: u32, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug pwszLiteralValue: ?PWSTR, pwszInvalidChars: ?PWSTR, pwszInvalidStartingChars: ?PWSTR, lt: u32, fSupported: BOOL, cchMaxLen: u32, }, }; pub const ERRORINFO = switch(@import("../zig.zig").arch) { .X64, .Arm64 => extern struct { hrError: HRESULT, dwMinor: u32, clsid: Guid, iid: Guid, dispid: i32, }, .X86 => extern struct { // WARNING: unable to add field alignment because it's causing a compiler bug hrError: HRESULT, dwMinor: u32, clsid: Guid, iid: Guid, dispid: i32, }, }; //-------------------------------------------------------------------------------- // 
Section: Functions (0) //-------------------------------------------------------------------------------- //-------------------------------------------------------------------------------- // Section: Unicode Aliases (0) //-------------------------------------------------------------------------------- const thismodule = @This(); pub usingnamespace switch (@import("../zig.zig").unicode_mode) { .ansi => struct { }, .wide => struct { }, .unspecified => if (@import("builtin").is_test) struct { } else struct { }, }; //-------------------------------------------------------------------------------- // Section: Imports (35) //-------------------------------------------------------------------------------- const Guid = @import("../zig.zig").Guid; const BLOB = @import("../system/com.zig").BLOB; const BOOL = @import("../foundation.zig").BOOL; const BSTR = @import("../foundation.zig").BSTR; const DBID = @import("../storage/index_server.zig").DBID; const DISPPARAMS = @import("../system/ole_automation.zig").DISPPARAMS; const EXPLICIT_ACCESS_W = @import("../security/authorization.zig").EXPLICIT_ACCESS_W; const FILETIME = @import("../foundation.zig").FILETIME; const HANDLE = @import("../foundation.zig").HANDLE; const HRESULT = @import("../foundation.zig").HRESULT; const HWND = @import("../foundation.zig").HWND; const IAuthenticate = @import("../system/com.zig").IAuthenticate; const IDispatch = @import("../system/ole_automation.zig").IDispatch; const IEnumString = @import("../system/com.zig").IEnumString; const IEnumUnknown = @import("../system/com.zig").IEnumUnknown; const IErrorInfo = @import("../system/ole_automation.zig").IErrorInfo; const IFilter = @import("../storage/index_server.zig").IFilter; const IObjectArray = @import("../ui/shell.zig").IObjectArray; const IPersistStream = @import("../system/com.zig").IPersistStream; const IPhraseSink = @import("../storage/index_server.zig").IPhraseSink; const ISequentialStream = 
@import("../storage/structured_storage.zig").ISequentialStream; const IStorage = @import("../storage/structured_storage.zig").IStorage; const IStream = @import("../storage/structured_storage.zig").IStream; const ITransaction = @import("../system/component_services.zig").ITransaction; const ITransactionOptions = @import("../system/component_services.zig").ITransactionOptions; const ITypeInfo = @import("../system/ole_automation.zig").ITypeInfo; const IUnknown = @import("../system/com.zig").IUnknown; const PROPERTYKEY = @import("../system/properties_system.zig").PROPERTYKEY; const PROPSPEC = @import("../storage/structured_storage.zig").PROPSPEC; const PROPVARIANT = @import("../storage/structured_storage.zig").PROPVARIANT; const PWSTR = @import("../foundation.zig").PWSTR; const SYSTEMTIME = @import("../foundation.zig").SYSTEMTIME; const TRUSTEE_W = @import("../security/authorization.zig").TRUSTEE_W; const VARIANT = @import("../system/ole_automation.zig").VARIANT; const WORDREP_BREAK_TYPE = @import("../storage/index_server.zig").WORDREP_BREAK_TYPE; test { // The following '_ = <FuncPtrType>' lines are a workaround for https://github.com/ziglang/zig/issues/4476 if (@hasDecl(@This(), "PFNFILLTEXTBUFFER")) { _ = PFNFILLTEXTBUFFER; } @setEvalBranchQuota( @import("std").meta.declarations(@This()).len * 3 ); // reference all the pub declarations if (!@import("builtin").is_test) return; inline for (@import("std").meta.declarations(@This())) |decl| { if (decl.is_pub) { _ = decl; } } }
deps/zigwin32/win32/system/search.zig
//! Secure-world entry point and runtime services for the TZmCFI example.
//! Runs on an Armv8-M core with TrustZone-M; boots the Secure side, then
//! hands control to the Non-Secure image found at `port.VTOR_NS`.
const std = @import("std");

// ----------------------------------------------------------------------------
const arm_cmse = @import("arm_cmse");

const arm_m = @import("arm_m");
const EXC_RETURN = arm_m.EXC_RETURN;
const getMspNs = arm_m.getMspNs;
const getPspNs = arm_m.getPspNs;
const getPsp = arm_m.getPsp;
const getControlNs = arm_m.getControlNs;
const control = arm_m.control;

// ----------------------------------------------------------------------------
// Board-specific Secure support code, selected at build time via the BOARD
// build option.
pub const port = @import("../ports/" ++ @import("build_options").BOARD ++ "/secure.zig");

const secure_board_vec_table = @import("../ports/" ++ @import("build_options").BOARD ++ "/excvector.zig").secure_board_vec_table;

// ----------------------------------------------------------------------------
// Import definitions in `monitor.zig`, which is compiled as a separate
// compilation unit
const monitor = @import("../monitor/exports.zig");

// ----------------------------------------------------------------------------
/// Secure reset-time initialization. Configures fault handling, stacks, SAU,
/// MPU, and the TZmCFI monitor, then jumps to the Non-Secure entry point.
/// Never returns (panics if the Non-Secure reset handler returns).
export fn main() void {
    // Enable SecureFault, UsageFault, BusFault, and MemManage for ease of
    // debugging. (Without this, they all escalate to HardFault)
    arm_m.scb.regShcsr().* =
        arm_m.Scb.SHCSR_MEMFAULTENA |
        arm_m.Scb.SHCSR_BUSFAULTENA |
        arm_m.Scb.SHCSR_USGFAULTENA |
        arm_m.Scb.SHCSR_SECUREFAULTENA;

    // Prioritize Secure exceptions.
    // Don't enable Non-Secure BusFault, HardFault, and NMI because
    // `FAULTMASK_NS` would boost the current execution priority to higher
    // than Secure SysTick and `profile-ses` wouldn't be able to get full
    // samples.
    // NOTE: AIRCR writes require the VECTKEY magic in the upper halfword;
    // the old key bits are masked out before OR-ing the new value in.
    arm_m.scb.regAircr().* =
        (arm_m.scb.regAircr().* & ~arm_m.Scb.AIRCR_VECTKEY_MASK) &
        ~arm_m.Scb.AIRCR_BFHFNMINS |
        arm_m.Scb.AIRCR_PRIS |
        (arm_m.Scb.AIRCR_VECTKEY_MAGIC << arm_m.Scb.AIRCR_VECTKEY_SHIFT);

    // Set the priority of Secure SysTick to 0
    arm_m.scb.regShpr3().* = 0;

    // Initialize secure stacks
    // -----------------------------------------------------------------------
    // On reset, MSP is used for both of Thread and Handler modes. We want to
    // set a new stack pointer only for Handler mode by updating MSP, but MSP
    // is currently in use. So, we first copy MSP To PSP and then switch to
    // PSP.
    asm volatile (
        \\ mrs r0, msp
        \\ msr psp, r0
        \\ mrs r0, control
        \\ orr r0, #2 // SPSEL = 1 (Use PSP in Thread mode)
        \\ msr control, r0
        :
        :
        : "r0"
    );

    // Now we can safely update MSP.
    arm_m.setMsp(@ptrToInt(_handler_stack_top));

    // Set stack limits.
    arm_m.setMspLimit(@ptrToInt(_handler_stack_limit));
    arm_m.setPspLimit(@ptrToInt(_main_stack_limit));

    // Board-specific initialization
    // -----------------------------------------------------------------------
    port.init();

    port.print("(Hit ^A X to quit QEMU)\r\n", .{});
    port.print("The Secure code is running!\r\n", .{});

    // Enable SAU
    // -----------------------------------------------------------------------
    arm_cmse.sau.regCtrl().* |= arm_cmse.Sau.CTRL_ENABLE;

    // Initialize Secure MPU
    // -----------------------------------------------------------------------
    // TZmCFI Shadow Stack utilizes MPU for bound checking.
    const mpu = arm_m.mpu;
    const Mpu = arm_m.Mpu;

    // - `CTRL_HFNMIENA`: Keep MPU on even if the current execution priority
    //   is less than 0 (e.g., in a HardFault handler and when `FAULTMASK` is
    //   set to 1).
    //
    // - `CTRL_PRIVDEFENA`: Allow privileged access everywhere as if MPU
    //   is not enabled. Region overlaps still cause access violation, which
    //   we utilize for the bound checking.
    //
    mpu.regCtrl().* = Mpu.CTRL_ENABLE | Mpu.CTRL_HFNMIENA | Mpu.CTRL_PRIVDEFENA;

    // The region 2
    // NOTE(review): RNR is set to 0 and the alias registers RBAR_A/RLAR_A
    // with offset 2 are used, which addresses region RNR+2 = 2 — confirm
    // against the arm_m register map.
    mpu.regRnr().* = 0;
    mpu.regRbarA(2).* = 0 | Mpu.RBAR_AP_RW_ANY;
    mpu.regRlarA(2).* = Mpu.RLAR_LIMIT_MASK | Mpu.RLAR_EN;

    // Initialize TZmCFI Monitor
    // -----------------------------------------------------------------------
    // Call `TCXInitializeMonitor` that resides in a different
    // compilation unit
    monitor.TCXInitializeMonitor(tcWarnHandler);

    // Boot the Non-Secure code
    // -----------------------------------------------------------------------
    // Configure the Non-Secure exception vector table
    arm_m.scb_ns.regVtor().* = port.VTOR_NS;

    port.print("Booting the Non-Secure code...\r\n", .{});

    // Call Non-Secure code's entry point. The initial PC lives at offset 4 of
    // the Non-Secure vector table (slot 0 is the initial SP).
    const ns_entry = @intToPtr(*volatile fn () void, port.VTOR_NS + 4).*;
    _ = arm_cmse.nonSecureCall(ns_entry, 0, 0, 0, 0);

    @panic("Non-Secure reset handler returned unexpectedly");
}

/// Warning sink passed to `TCXInitializeMonitor`; forwards monitor warning
/// text to the board's debug output.
fn tcWarnHandler(data: [*]const u8, len: usize) callconv(.C) void {
    port.print("{}", .{data[0..len]});
}

// ----------------------------------------------------------------------------
/// The Non-Secure-callable function that outputs zero or more bytes to the
/// debug output.
///
/// `ptr`/`count` describe a Non-Secure buffer; the CMSE permission check
/// rejects pointers the Non-Secure side may not legally hand us. Returns 0
/// in all cases. (`r2`/`r32` are unused ABI slots of the 4-register
/// Non-Secure call convention — `r32` is presumably a typo for `r3`.)
fn nsDebugOutput(count: usize, ptr: usize, r2: usize, r32: usize) callconv(.C) usize {
    const bytes = arm_cmse.checkSlice(u8, ptr, count, arm_cmse.CheckOptions{}) catch |err| {
        port.print("warning: pointer security check failed: {}\r\n", .{err});
        port.print(" count = {}, ptr = 0x{x}\r\n", .{ count, ptr });
        return 0;
    };

    // Even if the permission check has succeeded, it's still unsafe to treat
    // Non-Secure pointers as normal pointers (this is why `bytes` is
    // `[]volatile u8`), so we can't use `writeSlice` here.
    for (bytes) |byte| {
        port.printByte(byte);
    }

    return 0;
}

comptime {
    arm_cmse.exportNonSecureCallable("debugOutput", nsDebugOutput);
}

// ----------------------------------------------------------------------------
// Most recent program counter sampled by the Secure SysTick handler.
// 0 means "not sampled yet" (see handleSysTickInner).
var g_sampled_pc: usize = 0;

/// Start a timer to sample the program counter after the specified duration.
fn nsScheduleSamplePc(cycles: usize, _r1: usize, _r2: usize, _r3: usize) callconv(.C) usize {
    // Volatile store: the value is also written from the SysTick handler.
    @ptrCast(*volatile usize, &g_sampled_pc).* = 0;

    arm_m.sys_tick.regCsr().* = 0;
    arm_m.sys_tick.regRvr().* = cycles;
    arm_m.sys_tick.regCvr().* = cycles; // write-to-clear (value doesn't matter)
    arm_m.sys_tick.regCsr().* = arm_m.SysTick.CSR_ENABLE |
        arm_m.SysTick.CSR_TICKINT | arm_m.SysTick.CSR_CLKSOURCE;

    return 0;
}

/// Retrieve the sampled value of the program counter.
fn nsGetSampledPc(_r0: usize, _r1: usize, _r2: usize, _r3: usize) callconv(.C) usize {
    return @ptrCast(*volatile usize, &g_sampled_pc).*;
}

/// Secure SysTick entry: passes the current (Secure handler) SP to the real
/// handler. Tail-branches so LR still holds EXC_RETURN inside the callee.
fn handleSysTick() callconv(.C) void {
    asm volatile (
        \\ mov r0, sp
        \\ b handleSysTickInner
    );
}

/// Samples the interrupted program counter from the stacked exception frame.
/// `msp_s` is the Secure MSP captured by `handleSysTick`.
export fn handleSysTickInner(msp_s: [*]const usize) callconv(.C) void {
    // Disable SysTick
    arm_m.sys_tick.regCsr().* = 0;

    // Find the exception frame. `@returnAddress()` yields LR, which still
    // holds the EXC_RETURN value because `handleSysTick` branched here
    // without touching LR; its S/SPSEL/MODE bits tell us which stack the
    // frame was pushed to.
    const exc_return = @returnAddress();
    const use_psp = (exc_return & EXC_RETURN.SPSEL) != 0 and (exc_return & EXC_RETURN.MODE) != 0;
    var frame: [*]const usize = if ((exc_return & EXC_RETURN.S) != 0)
        if (use_psp) @intToPtr([*]const usize, getPsp()) else msp_s
    else if ((getControlNs() & control.SPSEL) != 0)
        @intToPtr([*]const usize, getPspNs())
    else
        @intToPtr([*]const usize, getMspNs());

    // Get and store the original PC (stacked at word offset 6 of the
    // exception frame). Don't write `0` - it would be misinterpreted as
    // "not sampled yet"
    var pc = frame[6];
    if (pc == 0) {
        pc = 1;
    }
    @ptrCast(*volatile usize, &g_sampled_pc).* = pc;
}

comptime {
    arm_cmse.exportNonSecureCallable("scheduleSamplePc", nsScheduleSamplePc);
    arm_cmse.exportNonSecureCallable("getSampledPc", nsGetSampledPc);
}

// ----------------------------------------------------------------------------
// Build the exception vector table
// zig fmt: off
const VecTable = @import("../common/vectable.zig").VecTable;
export const exception_vectors linksection(".isr_vector") =
    secure_board_vec_table
        .setInitStackPtr(_main_stack_top)
        .setExcHandler(arm_m.irqs.Reset_IRQn, handleReset)
        .setExcHandler(arm_m.irqs.SysTick_IRQn, handleSysTick);
// zig fmt: on

// Linker-script symbols; declared as functions only so their addresses can
// be taken, they are never called.
extern fn _main_stack_top() void;
extern fn _handler_stack_top() void;
extern fn _main_stack_limit() void;
extern fn _handler_stack_limit() void;
extern fn handleReset() void;
examples/secure/main.zig
//! VirtIO block device driver (MMIO transport).
//! The file itself is the driver struct (`Driver = @This()`); one instance per device.
const kernel = @import("../../kernel.zig");
const virtio = @import("virtio.zig");
const SplitQueue = virtio.SplitQueue;
const MMIO = virtio.MMIO;
const Descriptor = virtio.Descriptor;
const sector_size = kernel.arch.sector_size;
const Disk = kernel.Disk;
const GenericDriver = kernel.driver;

const Driver = @This();

// Generic disk interface exposed to the rest of the kernel.
disk: Disk,
// Virtqueue shared with the device; volatile because the device writes it.
queue: *volatile SplitQueue,
// Memory-mapped device registers; volatile for the same reason.
mmio: *volatile MMIO,
// Bytes completed by the interrupt handler for the in-flight batch read;
// read_callback spins on this to synchronize with completions.
batch_read_byte_count: u64,

const log = kernel.log.scoped(.VirtioBlock);

pub const Initialization = struct {
    pub const Context = u64;
    pub const Error = error{
        allocation_failure,
    };

    /// Allocate and initialize the driver for the device at `mmio_address`.
    /// Maps the MMIO page, negotiates features, sets up queue 0 and the
    /// interrupt handler, then marks the driver initialized on the device.
    pub fn callback(allocate: GenericDriver.AllocationCallback, mmio_address: u64) Error!*Driver {
        const driver_allocation = allocate(@sizeOf(Driver)) orelse return Error.allocation_failure;
        const driver = @intToPtr(*Driver, driver_allocation);
        kernel.arch.Virtual.map(mmio_address, 1);
        driver.mmio = @intToPtr(*volatile MMIO, mmio_address);
        driver.mmio.init(BlockFeature);
        driver.queue = driver.mmio.add_queue_to_device(0);
        driver.disk.read_callback = read_callback;
        // TODO: stop hardcoding interrupt number
        const interrupt = kernel.arch.Interrupts.Interrupt{
            .handler = handler,
            .pending_operations_handler = foo,
        };
        interrupt.register(8);
        driver.mmio.set_driver_initialized();
        log.debug("Block driver initialized", .{});
        return driver;
    }
};

// Placeholder pending-operations handler; panics so any unexpected
// invocation is caught immediately.
fn foo() void {
    @panic("reached here");
}

// Feature bits offered during negotiation (virtio-blk feature bit indices).
const BlockFeature = enum(u6) {
    size_max = 1,
    seg_max = 2,
    geometry = 4,
    read_only = 5,
    blk_size = 6,
    flush = 9,
    topology = 10,
    config_wce = 11,
    discard = 13,
    write_zeroes = 14,
};

// virtio-blk request types (values from the device request header).
const BlockType = enum(u32) {
    in = 0,
    out = 1,
    flush = 4,
    discard = 11,
    write_zeroes = 13,
};

const Request = struct {
    // Request header as laid out for the device: type, reserved, sector.
    const Header = struct {
        block_type: BlockType,
        reserved: u32,
        sector: u64,
    };
};

const Operation = enum {
    read,
    write,
};

/// The sector buffer address needs to be physical and have at least 512 (sector_size) bytes available.
/// Submits a 3-descriptor chain (header -> data -> status) and notifies the device.
/// NOTE(review): the header/status heap buffers are never freed here or in `handler` —
/// looks like a leak per request; confirm whether kernel.heap reclaims them elsewhere.
pub fn operate(driver: *Driver, comptime operation: Operation, sector_index: u64, sector_buffer_physical_address: u64) void {
    const status_size = 1;
    const header_size = @sizeOf(Request.Header);
    const status_buffer = kernel.heap.allocate(status_size, true, true) orelse @panic("status buffer unable to be allocated");
    const header_buffer = kernel.heap.allocate(header_size, true, true) orelse @panic("header buffer unable to be allocated");
    // TODO: Here we should distinguish between virtual and physical addresses
    const header = @intToPtr(*Request.Header, header_buffer.virtual);
    header.block_type = switch (operation) {
        .read => BlockType.in,
        .write => BlockType.out,
    };
    header.sector = sector_index;

    var descriptor1: u16 = 0;
    var descriptor2: u16 = 0;
    var descriptor3: u16 = 0;

    // Descriptors are pushed in reverse so each earlier descriptor can link
    // to the index of the one after it.
    driver.queue.push_descriptor(&descriptor3).* = Descriptor{
        .address = status_buffer.physical,
        .flags = @enumToInt(Descriptor.Flag.write_only),
        .length = 1,
        .next = 0,
    };

    driver.queue.push_descriptor(&descriptor2).* = Descriptor{
        .address = sector_buffer_physical_address,
        // Device writes the data buffer on reads, reads it on writes.
        .flags = @enumToInt(Descriptor.Flag.next) | if (operation == Operation.read) @enumToInt(Descriptor.Flag.write_only) else 0,
        .length = sector_size,
        .next = descriptor3,
    };

    driver.queue.push_descriptor(&descriptor1).* = Descriptor{
        .address = header_buffer.physical,
        .flags = @enumToInt(Descriptor.Flag.next),
        .length = @sizeOf(Request.Header),
        .next = descriptor2,
    };

    driver.queue.push_available(descriptor1);
    driver.mmio.notify_queue();
}

/// Interrupt handler: pops one used descriptor chain, checks the status byte,
/// and credits `batch_read_byte_count` so `read_callback` can make progress.
pub fn handler() u64 {
    kernel.assert(@src(), kernel.Disk.drivers.len > 0);
    // TODO: can use more than one driver:
    const driver = @ptrCast(*Driver, kernel.Disk.drivers[0]);
    const descriptor = driver.queue.pop_used() orelse @panic("virtio block descriptor corrupted");
    // TODO Get virtual of this physical address @Virtual @Physical
    const header = @intToPtr(*volatile Request.Header, kernel.arch.Virtual.AddressSpace.physical_to_virtual(descriptor.address));
    const operation: Operation = switch (header.block_type) {
        .in => .read,
        .out => .write,
        else => unreachable,
    };
    _ = operation;
    // Walk the chain: header -> data -> status.
    const sector_descriptor = driver.queue.get_descriptor(descriptor.next) orelse @panic("unable to get descriptor");
    const status_descriptor = driver.queue.get_descriptor(sector_descriptor.next) orelse @panic("unable to get descriptor");
    const status = @intToPtr([*]u8, kernel.arch.Virtual.AddressSpace.physical_to_virtual(status_descriptor.address))[0];
    //log.debug("Disk operation status: {}", .{status});
    if (status != 0) kernel.panic("Disk operation failed: {}", .{status});
    driver.batch_read_byte_count += sector_size;
    return 0;
}

/// Synchronous multi-sector read: issues one request per sector and spins
/// until the interrupt handler reports completion of each one.
/// Returns the number of sectors read (not bytes).
pub fn read_callback(disk_driver: *Disk, buffer: []u8, start_sector: u64, sector_count: u64) u64 {
    const driver = @ptrCast(*Driver, disk_driver);
    log.debug("Asked {} sectors from sector {}", .{ sector_count, start_sector });
    const total_size = sector_count * sector_size;
    kernel.assert(@src(), buffer.len >= total_size);
    var bytes_asked: u64 = 0;
    var sector_i: u64 = start_sector;
    while (sector_i < sector_count + start_sector) : ({
        sector_i += 1;
    }) {
        // The device needs the physical address of the destination slot.
        const sector_physical = kernel.arch.Virtual.AddressSpace.virtual_to_physical(@ptrToInt(&buffer[bytes_asked]));
        log.debug("Sending request for sector {}", .{sector_i});
        driver.operate(.read, sector_i, sector_physical);
        bytes_asked += sector_size;
        // Busy-wait for the interrupt handler to complete this sector.
        while (driver.batch_read_byte_count != bytes_asked) {
            kernel.spinloop_hint();
        }
    }
    kernel.assert(@src(), bytes_asked == driver.batch_read_byte_count);
    const read_bytes = driver.batch_read_byte_count;
    driver.batch_read_byte_count = 0;
    log.debug("Block device read {} bytes. Asked sector count: {}", .{ read_bytes, sector_count });
    kernel.assert(@src(), sector_count * sector_size == read_bytes);
    return sector_count;
}
src/kernel/arch/riscv64/virtio_block.zig
//! Advent of Code 2020 day 16: parse ticket rules/tickets, find invalid
//! tickets (part a) and deduce field positions via bitmask elimination (part b).
const std = @import("std");
const testing = std.testing;
const StringTable = @import("./strtab.zig").StringTable;

// Global allocator for the whole puzzle; nothing here is freed individually.
const allocator = std.heap.page_allocator;

pub const DB = struct {
    pub const Ticket = struct {
        values: std.ArrayList(usize),
        valid: bool,

        // Heap-allocates a Ticket so the DB can store stable pointers.
        pub fn init() *Ticket {
            var self = allocator.create(Ticket) catch unreachable;
            self.* = Ticket{
                .values = std.ArrayList(usize).init(allocator),
                .valid = true,
            };
            return self;
        }

        // NOTE(review): frees the value list but not the Ticket allocation
        // itself; acceptable here since page_allocator backs a short-lived run.
        pub fn deinit(self: *Ticket) void {
            self.values.deinit();
        }
    };

    // Inclusive [min, max] interval.
    const Range = struct {
        min: usize,
        max: usize,
    };

    // A field rule: field code plus two allowed ranges ("a-b or c-d").
    const Rule = struct {
        code: usize,
        ranges: [2]Range = undefined,

        pub fn init(code: usize) Rule {
            var self = Rule{
                .code = code,
                .ranges = undefined,
            };
            return self;
        }
    };

    // Per-position guess: bitmask of field codes still possible, and the
    // resolved code once the mask collapses to one bit (maxInt = unresolved).
    const Guess = struct {
        mask: usize,
        code: usize,

        pub fn init(mask: usize) Guess {
            var self = Guess{
                .mask = mask,
                .code = std.math.maxInt(usize),
            };
            return self;
        }
    };

    // Input section currently being parsed: 0 = rules, 1 = my ticket, 2 = nearby.
    zone: usize,
    fields: StringTable,
    rules: std.AutoHashMap(usize, Rule),
    tickets: std.ArrayList(*Ticket), // first one is mine
    guessed: std.ArrayList(Guess), // per field

    pub fn init() DB {
        var self = DB{
            .zone = 0,
            .fields = StringTable.init(allocator),
            .rules = std.AutoHashMap(usize, Rule).init(allocator),
            .tickets = std.ArrayList(*Ticket).init(allocator),
            .guessed = std.ArrayList(Guess).init(allocator),
        };
        return self;
    }

    pub fn deinit(self: *DB) void {
        self.guessed.deinit();
        self.tickets.deinit();
        self.rules.deinit();
        self.fields.deinit();
    }

    /// Feed one input line; section headers advance `zone`, rule lines are
    /// parsed as "name: a-b or c-d", ticket lines as comma-separated values.
    pub fn add_line(self: *DB, line: []const u8) void {
        if (line.len == 0) return;
        if (std.mem.eql(u8, line, "your ticket:") or std.mem.eql(u8, line, "nearby tickets:")) {
            self.zone += 1;
            // std.debug.warn("ZONE {}\n", .{self.zone});
            return;
        }
        if (self.zone == 0) {
            var it_colon = std.mem.tokenize(u8, line, ":");
            const name = it_colon.next().?;
            const code = self.fields.add(name);
            // std.debug.warn("FIELD {} {}\n", .{ code, name });
            var rule = Rule.init(code);
            const rest = it_colon.next().?;
            // Tokenizing on " -" splits "a-b or c-d" into a, b, "or", c, d.
            var it = std.mem.tokenize(u8, rest, " -");
            rule.ranges[0].min = std.fmt.parseInt(usize, it.next().?, 10) catch unreachable;
            rule.ranges[0].max = std.fmt.parseInt(usize, it.next().?, 10) catch unreachable;
            _ = it.next().?; // "or"
            rule.ranges[1].min = std.fmt.parseInt(usize, it.next().?, 10) catch unreachable;
            rule.ranges[1].max = std.fmt.parseInt(usize, it.next().?, 10) catch unreachable;
            _ = self.rules.put(code, rule) catch unreachable;
            // std.debug.warn("RULE {} {}\n", .{ code, self.fields.get_str(code) });
            return;
        }
        if (self.zone == 1 or self.zone == 2) {
            var ticket = Ticket.init();
            var it = std.mem.tokenize(u8, line, ",");
            while (it.next()) |str| {
                const value = std.fmt.parseInt(usize, str, 10) catch unreachable;
                ticket.values.append(value) catch unreachable;
            }
            self.tickets.append(ticket) catch unreachable;
            return;
        }
        @panic("ZONE");
    }

    /// Part a answer: sum of all values that match no rule at all.
    pub fn ticket_scanning_error_rate(self: *DB) usize {
        return self.mark_invalid_tickets();
    }

    /// Part b answer: product of my ticket's values for fields whose name
    /// starts with `prefix`, after deducing which position holds which field.
    pub fn multiply_fields(self: *DB, prefix: []const u8) usize {
        _ = self.mark_invalid_tickets();
        self.guess_ticket_fields();
        var product: usize = 1;
        var pg: usize = 0;
        while (pg < self.guessed.items.len) : (pg += 1) {
            const guess = self.guessed.items[pg];
            const name = self.fields.get_str(guess.code).?;
            if (!std.mem.startsWith(u8, name, prefix)) continue;
            const value = self.tickets.items[0].values.items[pg];
            // std.debug.warn("MULT {} POS {} VAL {}\n", .{ name, pg, value });
            product *= value;
        }
        return product;
    }

    // Marks every nearby ticket valid/invalid and returns the error rate
    // (sum of values valid under no rule). My ticket (index 0) is skipped.
    fn mark_invalid_tickets(self: *DB) usize {
        var rate: usize = 0;
        var pt: usize = 1; // skip mine
        while (pt < self.tickets.items.len) : (pt += 1) {
            const ticket = self.tickets.items[pt];
            ticket.valid = true;
            var pv: usize = 0;
            while (pv < ticket.values.items.len) : (pv += 1) {
                const value = ticket.values.items[pv];
                if (self.is_value_valid(value)) continue;
                rate += value;
                ticket.valid = false;
            }
        }
        return rate;
    }

    // True if `value` falls in either range of at least one rule.
    fn is_value_valid(self: *DB, value: usize) bool {
        var itr = self.rules.iterator();
        while (itr.next()) |kv| {
            const rule = kv.value_ptr.*;
            var pr: usize = 0;
            while (pr < 2) : (pr += 1) {
                if (value >= rule.ranges[pr].min and value <= rule.ranges[pr].max) {
                    return true;
                }
            }
        }
        return false;
    }

    // Constraint propagation: for each position, AND together the bitmask of
    // rules satisfied by every valid ticket, then repeatedly fix positions
    // whose mask has exactly one bit and clear that bit from the others.
    // Assumes at most 64 fields (codes are used as bit shifts into a usize).
    fn guess_ticket_fields(self: *DB) void {
        const NF = self.tickets.items[0].values.items.len;
        var pf: usize = 0;
        while (pf < NF) : (pf += 1) {
            // std.debug.warn("FIELD {}\n", .{pf});
            var field_mask: usize = std.math.maxInt(usize);
            var pt: usize = 1; // skip mine
            while (pt < self.tickets.items.len) : (pt += 1) {
                var ticket_mask: usize = 0;
                const ticket = self.tickets.items[pt];
                if (!ticket.valid) continue;
                // std.debug.warn("TICKET {}\n", .{pt});
                const value = ticket.values.items[pf];
                var itr = self.rules.iterator();
                while (itr.next()) |kv| {
                    const code = kv.key_ptr.*;
                    const rule = kv.value_ptr.*;
                    var valid = false;
                    var pr: usize = 0;
                    while (pr < 2) : (pr += 1) {
                        if (value >= rule.ranges[pr].min and value <= rule.ranges[pr].max) {
                            valid = true;
                            break;
                        }
                    }
                    if (!valid) continue;
                    const shift: u6 = @intCast(u6, code);
                    ticket_mask |= @as(u64, 1) << shift;
                }
                // std.debug.warn("TICKET {} MASK {b}\n", .{ pt, ticket_mask });
                field_mask &= ticket_mask;
            }
            // std.debug.warn("FIELD {} MASK {b}\n", .{ pf, field_mask });
            self.guessed.append(Guess.init(field_mask)) catch unreachable;
        }

        // Elimination rounds: stop when all fields are found or no progress.
        var found: usize = 0;
        while (found < NF) {
            var count: usize = 0;
            var pg: usize = 0;
            while (pg < self.guessed.items.len) : (pg += 1) {
                var guess = &self.guessed.items[pg];
                if (guess.*.code != std.math.maxInt(usize)) continue;
                const mask = guess.*.mask;
                if (mask == 0) @panic("MASK");
                if (@popCount(usize, mask) != 1) continue;
                const code = @ctz(usize, mask);
                // const name = self.fields.get_str(code);
                // std.debug.warn("FIELD {} IS {b} {} {}\n", .{ pg, mask, code, name });
                guess.*.code = code;
                count += 1;
                // Remove the resolved bit from all still-unresolved positions.
                var po: usize = 0;
                while (po < self.guessed.items.len) : (po += 1) {
                    if (po == pg) continue;
                    var other = &self.guessed.items[po];
                    if (other.*.code != std.math.maxInt(usize)) continue;
                    // const old = other.*.mask;
                    other.*.mask &= ~mask;
                    // std.debug.warn("RESET FIELD {} {b} -> {b}\n", .{ po, old, other.*.mask });
                }
            }
            if (count <= 0) break;
            found += count;
        }
    }
};

test "sample part a" {
    const data: []const u8 =
        \\class: 1-3 or 5-7
        \\row: 6-11 or 33-44
        \\seat: 13-40 or 45-50
        \\
        \\your ticket:
        \\7,1,14
        \\
        \\nearby tickets:
        \\7,3,47
        \\40,4,50
        \\55,2,20
        \\38,6,12
    ;
    var db = DB.init();
    defer db.deinit();
    var it = std.mem.split(u8, data, "\n");
    while (it.next()) |line| {
        db.add_line(line);
    }
    const tser = db.ticket_scanning_error_rate();
    try testing.expect(tser == 71);
}

test "sample part b" {
    const data: []const u8 =
        \\class: 0-1 or 4-19
        \\flight row: 0-5 or 8-19
        \\flight seat: 0-13 or 16-19
        \\
        \\your ticket:
        \\11,12,13
        \\
        \\nearby tickets:
        \\3,9,18
        \\15,1,5
        \\5,14,9
    ;
    var db = DB.init();
    defer db.deinit();
    var it = std.mem.split(u8, data, "\n");
    while (it.next()) |line| {
        db.add_line(line);
    }
    const product = db.multiply_fields("flight");
    try testing.expect(product == 143);
}
2020/p16/ticket.zig
//! Advent of Code 2018 day 21: emulate the elf-code CPU to find the R0 value
//! that halts the program soonest (part 1) and latest (part 2).
const std = @import("std");
const assert = std.debug.assert;
const tools = @import("tools");

// Greatest common divisor (Euclid). NOTE(review): unused in this file;
// presumably kept from a shared template.
fn pgcd(_a: u64, _b: u64) u64 {
    var a = _a;
    var b = _b;
    while (b != 0) {
        var t = b;
        b = a % b;
        a = t;
    }
    return a;
}

// Six registers; u48 is wide enough for the 24-bit masked arithmetic below.
const Registers = [6]u48;
const Opcode = enum { addi, addr, muli, mulr, bani, banr, bori, borr, setr, seti, gtir, gtri, gtrr, eqir, eqri, eqrr };

// Executes one instruction against register file `r` and returns the new
// register file (pure function; caller advances the instruction pointer).
fn eval(op: Opcode, par: [3]u32, r: Registers) Registers {
    var o = r;
    switch (op) {
        .addi => o[par[2]] = r[par[0]] + par[1],
        .addr => o[par[2]] = r[par[0]] + r[par[1]],
        .muli => o[par[2]] = r[par[0]] * par[1],
        .mulr => o[par[2]] = r[par[0]] * r[par[1]],
        .bani => o[par[2]] = r[par[0]] & par[1],
        .banr => o[par[2]] = r[par[0]] & r[par[1]],
        .bori => o[par[2]] = r[par[0]] | par[1],
        .borr => o[par[2]] = r[par[0]] | r[par[1]],
        .setr => o[par[2]] = r[par[0]],
        .seti => o[par[2]] = par[0],
        .gtir => o[par[2]] = if (par[0] > r[par[1]]) @as(u32, 1) else @as(u32, 0),
        .gtri => o[par[2]] = if (r[par[0]] > par[1]) @as(u32, 1) else @as(u32, 0),
        .gtrr => o[par[2]] = if (r[par[0]] > r[par[1]]) @as(u32, 1) else @as(u32, 0),
        .eqir => o[par[2]] = if (par[0] == r[par[1]]) @as(u32, 1) else @as(u32, 0),
        .eqri => o[par[2]] = if (r[par[0]] == par[1]) @as(u32, 1) else @as(u32, 0),
        .eqrr => o[par[2]] = if (r[par[0]] == r[par[1]]) @as(u32, 1) else @as(u32, 0),
    }
    return o;
}

/// Parses the program and runs both parts; returns the two answers as
/// allocated strings (caller's allocator owns them).
pub fn run(input_text: []const u8, allocator: std.mem.Allocator) ![2][]const u8 {
    var arena = std.heap.ArenaAllocator.init(allocator);
    defer arena.deinit();

    const Insn = struct { op: Opcode, par: [3]u32 };
    // Parse "#ip N" plus one "op a b c" instruction per line.
    const param: struct {
        ip: u32,
        prg: []Insn,
    } = param: {
        var prg = std.ArrayList(Insn).init(arena.allocator());
        var ip: ?u32 = null;
        var it = std.mem.tokenize(u8, input_text, "\n\r");
        while (it.next()) |line| {
            if (tools.match_pattern("{} {} {} {}", line)) |fields| {
                const op = try tools.nameToEnum(Opcode, fields[0].lit);
                const par = [3]u32{
                    @intCast(u32, fields[1].imm),
                    @intCast(u32, fields[2].imm),
                    @intCast(u32, fields[3].imm),
                };
                try prg.append(Insn{ .op = op, .par = par });
            } else if (tools.match_pattern("#ip {}", line)) |fields| {
                assert(ip == null);
                ip = @intCast(u32, fields[0].imm);
            } else unreachable;
        }
        break :param .{ .ip = ip.?, .prg = prg.items };
    };

    // Part 1: run with R0 = 0 until the program reaches instruction 28
    // (the eqrr comparing R0 against R4); R4 at that point is the answer.
    const ans1 = ans: {
        const State = struct {
            reg: Registers = .{ 0, 0, 0, 0, 0, 0 },
            //ip: u32 = 0,
        };
        var states = try allocator.alloc(State, 1); // 16 * 1024 * 1024);
        defer allocator.free(states);
        for (states) |*s, i| {
            s.* = State{ .reg = .{ @intCast(u32, i), 0, 0, 0, 0, 0 } }; // 13443200
        }
        var cycles: u32 = 0;
        while (true) {
            for (states) |*s, i| {
                const ip = s.reg[param.ip];
                s.reg = eval(param.prg[ip].op, param.prg[ip].par, s.reg);
                s.reg[param.ip] += 1;
                // NOTE(review): this disabled trace references `s.ip`, which
                // State no longer defines; the comptime-false branch is never
                // analyzed, but fix the field access before re-enabling it.
                if (false) std.debug.print("[{}] {} {},{},{} regs=<{}, {}, ({}), {}, {}, {}>\n", .{
                    s.reg[param.ip],
                    param.prg[s.ip].op,
                    param.prg[s.ip].par[0],
                    param.prg[s.ip].par[1],
                    param.prg[s.ip].par[2],
                    s.reg[0],
                    s.reg[1],
                    s.reg[2],
                    s.reg[3],
                    s.reg[4],
                    s.reg[5],
                });
                if (s.reg[param.ip] == 28) break :ans s.reg[4]; // insn 28 tests r0 vs r4 -> halt
                // if (s.reg[param.ip] >= param.prg.len) break :ans i;
            }
            cycles += 1;
            //if (cycles % 1000 == 0) {
            //    std.debug.print("### cycles = {}\n", .{cycles});
            //}
        }
    };

    // Part 2: keep running; each time instruction 28 is reached, record the
    // candidate R4. The answer is the last value seen before the sequence of
    // candidates starts repeating (detected with a seen-table).
    const ans2 = ans: {
        const State = struct { reg: Registers };
        var s = State{ .reg = .{ 0, 0, 0, 0, 0, 0 } };
        const repeats = try allocator.alloc(bool, 16 * 1024 * 1024);
        defer allocator.free(repeats);
        std.mem.set(bool, repeats, false);
        var prev: u64 = 0;
        while (true) {
            const ip = s.reg[param.ip];
            s.reg = eval(param.prg[ip].op, param.prg[ip].par, s.reg);
            s.reg[param.ip] += 1;
            if (false and (ip == 26 or ip == 13)) std.debug.print("[{}] {} {},{},{} regs=<{}, {}, ({}), {}, {}, {}>\n", .{
                ip,
                param.prg[ip].op,
                param.prg[ip].par[0],
                param.prg[ip].par[1],
                param.prg[ip].par[2],
                s.reg[0],
                s.reg[1],
                s.reg[2],
                s.reg[3],
                s.reg[4],
                s.reg[5],
            });
            if (ip == 28) {
                const val = s.reg[4];
                //std.debug.print("halting R0 = {}\n", .{val});
                if (repeats[val]) break :ans prev; // -> 7717135
                repeats[val] = true;
                prev = val;
            }
            //if (s.reg[param.ip] >= param.prg.len) break :ans s.reg[0];
        }
    };

    return [_][]const u8{
        try std.fmt.allocPrint(allocator, "{}", .{ans1}),
        try std.fmt.allocPrint(allocator, "{}", .{ans2}),
    };
}

pub const main = tools.defaultMain("2018/input_day21.txt", run);

// Hand-annotated disassembly of the puzzle input:
// xx seti 123 0 4
// xx bani 4 456 4
// xx eqri 4 72 4
// xx addr 4 2 2
// xx seti 0 0 2
// seti 0 7 4                      R4=0
// bori 4 65536 3       next:      R3=R4|0x10000
// seti 10283511 1 4               R4=10283511
// bani 3 255 1         again:  ||
// addr 4 1 4                   ||
// bani 4 16777215 4            ||
// muli 4 65899 4               || R4=((R4+(R3%255))*65899)%0xFFFFFF
// bani 4 16777215 4            ||
// gtir 256 3 1                    if (R3<256)
// addr 1 2 2                      //
// addi 2 1 2                      //
// seti 27 8 2                     // jmp LabelExitTest
// seti 0 1 1                      R1 = 0
// addi 1 1 5           loop:   ||
// muli 5 256 5                 || R5=(R5+1)*256 = ((0+1)*256+1)*256.... = 0 *256^n +256^n = 256^R1
// gtrr 5 3 5                      if (R5>R3)
// addr 5 2 2                      //
// addi 2 1 2                      //
// seti 25 3 2                     // jmp break
// addi 1 1 1                      R1++
// seti 17 0 2                     jmp loop
// setr 1 4 3           break:     R3=R1    R3=ln(R3)/ln(256)
// seti 7 6 2                      jmp again
// eqrr 4 0 1           LabelExitTest
// addr 1 2 2
// seti 5 2 2
2018/day21_nocomptime.zig
const std = @import("std");
const os = @import("root").os;

/// HandleTable is the class for table of generic handles
/// It manages a map from integers to handles (type T)
/// Free slots form an intrusive singly-linked list threaded through
/// `next_free`, so allocation and freeing are O(1) amortized.
/// TODO: Add more methods like dup2 and reserve
pub fn HandleTable(comptime T: type) type {
    return struct {
        /// Result type for a new cell allocation procedure.
        /// index - index in the table
        /// ref - reference to the memory for the handle
        pub const Location = struct { id: usize, ref: *T };

        /// We use max(usize) to indicate that there is no next member,
        /// as it is impossible for array to be that large
        const no_next = std.math.maxInt(usize);

        /// We use max(usize) - 1 to indicate that cell is in use
        const used_cell = std.math.maxInt(usize) - 1;

        /// Cell with handle and pointer to the next free slot
        const HandleCell = struct {
            /// Index of the next free cell
            next_free: usize,
            item: T,
        };

        /// Handle cells themselves
        cells: std.ArrayList(HandleCell),

        /// Index of the first free cell
        first_cell: usize,

        /// Create empty handle table
        pub fn init(allocator: *std.mem.Allocator) @This() {
            return .{
                .cells = std.ArrayList(HandleCell).init(allocator),
                .first_cell = no_next,
            };
        }

        /// Allocate a new cell for a new handle.
        /// Reuses the head of the free list, growing the array only when
        /// the free list is empty. Caller initializes `ref.*`.
        pub fn new_cell(self: *@This()) !Location {
            std.debug.assert(self.first_cell != used_cell);
            if (self.first_cell == no_next) {
                // Increase dynarray length
                const pos = self.cells.items.len;
                const last = try self.cells.addOne();
                last.next_free = no_next;
                self.first_cell = pos;
            }
            const cell = &self.cells.items[self.first_cell];
            const index = self.first_cell;
            std.debug.assert(cell.next_free != used_cell);
            self.first_cell = cell.next_free;
            cell.next_free = used_cell;
            return Location{ .id = index, .ref = &cell.item };
        }

        /// Validate ID. Returns true if index is ID that was returned by new_cell
        /// and has not been freed since.
        pub fn validate_id(self: *const @This(), index: usize) bool {
            if (self.cells.items.len <= index) {
                return false;
            }
            if (self.cells.items[index].next_free != used_cell) {
                return false;
            }
            return true;
        }

        /// Free cell. Pushes the slot back on the free list; the stored item
        /// is not disposed here — use deinit's disposer for cleanup.
        pub fn free_cell(self: *@This(), id: usize) !void {
            if (!self.validate_id(id)) {
                return error.InvalidID;
            }
            self.cells.items[id].next_free = self.first_cell;
            self.first_cell = id;
        }

        /// Get access to handle data
        pub fn get_data(self: *const @This(), id: usize) !*T {
            if (!self.validate_id(id)) {
                return error.InvalidID;
            }
            return &self.cells.items[id].item;
        }

        /// Dispose handle array. Dispose handler is the struct with .dispose(loc: Location) method
        /// that is called for every remaining alive handle
        pub fn deinit(self: *@This(), comptime disposer_type: type, disposer: *disposer_type) void {
            // Validate the disposer's shape at compile time so a bad type
            // fails the build instead of misbehaving at runtime.
            comptime {
                if (!@hasDecl(disposer_type, "dispose")) {
                    @compileError("Disposer type should define \"dispose\" function");
                }
                const expect_disposer_type = fn (*disposer_type, Location) void;
                if (@TypeOf(disposer_type.dispose) != expect_disposer_type) {
                    @compileError("Invalid type of dispose function");
                }
            }
            for (self.cells.items) |*cell, id| {
                if (cell.next_free == used_cell) {
                    disposer.dispose(Location{ .ref = &cell.item, .id = id });
                }
            }
            self.cells.deinit();
        }
    };
}

test "handle_table" {
    var buffer: [4096]u8 = undefined;
    var fixed_buffer = std.heap.FixedBufferAllocator.init(&buffer);
    const allocator = &fixed_buffer.allocator;
    var instance = HandleTable(u64).init(allocator);
    const result1 = try instance.new_cell();
    result1.ref.* = 69;
    std.testing.expect(result1.id == 0);
    const result2 = try instance.new_cell();
    result2.ref.* = 420;
    std.testing.expect(result2.id == 1);
    // Out-of-range ID must be rejected.
    if (instance.get_data(2)) {
        unreachable;
    } else |err| {
        std.testing.expect(err == error.InvalidID);
    }
    std.testing.expect((try instance.get_data(0)).* == 69);
    try instance.free_cell(0);
    // Freed ID must be rejected too.
    if (instance.get_data(0)) {
        unreachable;
    } else |err| {
        std.testing.expect(err == error.InvalidID);
    }
    std.testing.expect((try instance.get_data(1)).* == 420);
    const TestDisposer = struct {
        called: bool,

        fn init() @This() {
            return .{ .called = false };
        }

        fn dispose(self: *@This(), loc: HandleTable(u64).Location) void {
            std.debug.assert(!self.called);
            std.debug.assert(loc.id == 1);
            std.debug.assert(loc.ref.* == 420);
            self.called = true;
        }
    };
    var disposer = TestDisposer.init();
    instance.deinit(TestDisposer, &disposer);
    std.testing.expect(disposer.called);
}

/// Locked handle table is a wrapper on HandleTable that
/// allows only one reader/writer using os.thread.Mutex
/// TODO: generic Lock trait for migration from kernel code
pub fn LockedHandleTable(comptime T: type) type {
    return struct {
        /// Location type that is reused from HandleTable
        pub const Location = HandleTable(T).Location;

        /// Handle table itself
        table: HandleTable(T) = undefined,

        /// Protecting lock
        mutex: os.thread.Mutex = .{},

        /// Initialize LockedHandleTable
        pub fn init(self: *@This(), allocator: *std.mem.Allocator) void {
            self.mutex.init();
            self.table = HandleTable(T).init(allocator);
        }

        /// Allocate a new cell for a new handle and
        /// leave the table locked
        /// NOTE: Don't forget to call unlock lol :^)
        pub fn new_cell(self: *@This()) !Location {
            self.mutex.lock();
            errdefer self.mutex.unlock();
            return self.table.new_cell();
        }

        /// Free cell.
        pub fn free_cell(self: *@This(), id: usize) !void {
            self.mutex.lock();
            defer self.mutex.unlock();
            return self.table.free_cell(id);
        }

        /// Get cell data and leave the table locked
        /// (unlocks itself only on error)
        pub fn get_data(self: *@This(), id: usize) !*T {
            self.mutex.lock();
            errdefer self.mutex.unlock();
            return self.table.get_data(id);
        }

        /// Deinitialize the table.
        /// NOTE(review): intentionally never unlocks — the table is dead after
        /// this call; confirm no other thread can still be waiting on mutex.
        pub fn deinit(self: *@This(), comptime disposer_type: type, disposer: *disposer_type) void {
            self.mutex.lock();
            self.table.deinit(disposer_type, disposer);
        }

        /// Unlock the table
        pub fn unlock(self: *@This()) void {
            if (!self.mutex.held_by_me()) {
                @panic("LMAO");
            }
            self.mutex.unlock();
        }
    };
}
src/lib/handle_table.zig
//! Source-file abstraction for the compiler: owns the loaded text and
//! provides line/token views into it.
const std = @import("std");
const util = @import("util");

usingnamespace @import("context.zig");
usingnamespace @import("unit.zig");

const Allocator = std.mem.Allocator;
const ZigSrcLoc = std.builtin.SourceLocation;

pub const Source = struct {
    allocator: *Allocator,
    path: []const u8,
    // Full file contents; owned by this struct (freed in destroy).
    text: []const u8,

    pub const Ptr = *const Self;
    pub const Line = SourceLine;
    pub const Token = SourceToken;

    const Self = @This();

    /// Read `path` entirely into memory and wrap it in a Source.
    /// Caller frees with `destroy`.
    pub fn createFromFile(allocator: *Allocator, path: []const u8) !Ptr {
        var cwd = std.fs.cwd();
        var file = try cwd.openFile(path, .{});
        defer file.close();
        const file_size = try file.getEndPos();
        var text = try allocator.alloc(u8, file_size);
        errdefer allocator.free(text);
        _ = try file.readAll(text);
        return create(allocator, path, text);
    }

    /// Copy `bytes` and wrap the copy in a Source with a placeholder path.
    /// Caller frees with `destroy`.
    pub fn createFromBytes(allocator: *Allocator, bytes: []const u8) !Ptr {
        const text = try allocator.dupe(u8, bytes);
        errdefer allocator.free(text);
        return try create(allocator, "(string)", text);
    }

    // Takes ownership of `text` (already allocated with `allocator`).
    fn create(allocator: *Allocator, path: []const u8, text: []const u8) !Ptr {
        var self = try allocator.create(Self);
        self.* = Self{
            .allocator = allocator,
            .path = path,
            .text = text,
        };
        // errdefer allocator.destroy(self);
        // try self.initLines();
        return self;
    }

    /// Free the text and the Source itself.
    pub fn destroy(self: Ptr) void {
        self.allocator.free(self.text);
        self.allocator.destroy(self);
    }
};

// True when `sub` lies entirely within the memory range of `sup`.
fn isSubSlice(sup: []const u8, sub: []const u8) bool {
    const sub_start = @ptrToInt(sub.ptr);
    const sub_end = sub_start + sub.len * @sizeOf(u8);
    const sup_start = @ptrToInt(sup.ptr);
    const sup_end = sup_start + sup.len * @sizeOf(u8);
    return (sub_start >= sup_start and sub_start < sup_end and sub_end >= sup_start and sub_end <= sup_end);
}

const SourceLine = struct {
    source: Source.Ptr,
    index: usize,
    // Byte offset of the line's first character within source.text.
    start: usize,

    pub const Ptr = *const Self;

    const Self = @This();

    pub fn init(source: Source.Ptr, index: usize, start: usize) Self {
        return Self{
            .source = source,
            .index = index,
            .start = start,
        };
    }

    /// Build a token covering `len` bytes starting `start` bytes into this line.
    pub fn token(self: Self, start: usize, len: usize) SourceToken {
        return SourceToken{
            .line = self,
            .start = start,
            .len = len,
        };
    }

    /// Slice of source text for this line, excluding the terminator.
    /// Handles both "\n" and "\r\n" endings; the last line may be unterminated.
    pub fn text(self: Self) []const u8 {
        var st = self.source.text[self.start..];
        for (st) |char, i| {
            switch (char) {
                '\n' => return st[0..i],
                '\r' => {
                    // BUGFIX: was `st[i+i]`, which only matched "\r\n" by
                    // accident when i == 1; the byte AFTER the '\r' is st[i+1].
                    if ((i + 1 < st.len) and st[i + 1] == '\n') {
                        return st[0..i];
                    }
                },
                else => {},
            }
        }
        return st;
    }

    /// Convert a slice that points into this line's text into a token.
    /// Panics (in safe builds) if the slice is not within the line.
    pub fn tokenFromSlice(self: Self, slice: []const u8) SourceToken {
        if (std.debug.runtime_safety) {
            const self_text = self.text();
            if (!isSubSlice(self_text, slice)) {
                std.debug.panic("token '{s}' out of bounds of line:\n'{s}'", .{ slice, self_text });
            }
        }
        const token_start = @ptrToInt(slice.ptr) - @ptrToInt(self.source.text.ptr);
        return self.token(token_start - self.start, slice.len);
    }
};

const SourceToken = struct {
    line: Source.Line,
    // Offset within the line, and length in bytes.
    start: usize,
    len: usize,

    const Self = @This();

    pub fn source(self: Self) Source.Ptr {
        return self.line.source;
    }

    /// The token's text, sliced out of its line.
    pub fn text(self: Self) []const u8 {
        return self.line.text()[self.start..][0..self.len];
    }

    // pub fn format(self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
    //     try writer.print("{s}:{d}\n")
    // }
};
src/compile/source.zig
//! GeoJSON parser built on the (pre-0.11) std.json value tree.
//! All parsed data lives in an arena owned by the returned GeoJson;
//! call `GeoJson.deinit` to free everything at once.
const std = @import("std");

pub const ErrorSet = error{InvalidGeoJson} || std.mem.Allocator.Error || std.json.TokenStream.Error || std.fmt.ParseIntError;

const log = std.log.scoped(.zig_geojson);

pub const GeoJson = struct {
    arena: std.heap.ArenaAllocator,
    bbox: ?BBox,
    content: union(enum) {
        feature: Feature,
        feature_collection: FeatureCollection,
        geometry: Geometry,
    },

    /// Returns the feature if this document is a single Feature, else null.
    pub fn feature(self: @This()) ?Feature {
        if (self.content == .feature) {
            return self.content.feature;
        }
        return null;
    }

    /// Returns the collection if this document is a FeatureCollection, else null.
    pub fn featureCollection(self: @This()) ?FeatureCollection {
        if (self.content == .feature_collection) {
            return self.content.feature_collection;
        }
        return null;
    }

    /// Returns the geometry if this document is a bare Geometry, else null.
    pub fn geometry(self: @This()) ?Geometry {
        if (self.content == .geometry) {
            return self.content.geometry;
        }
        return null;
    }

    /// Frees all memory behind this document.
    pub fn deinit(self: *GeoJson) void {
        self.arena.deinit();
    }
};

pub const BBox = struct {
    min: Point,
    max: Point,
};

pub const Feature = struct {
    geometry: Geometry,
    properties: ?std.StringHashMap(PropertyValue),
    id: ?Identifier,
};

pub const FeatureCollection = []Feature;

pub const Geometry = union(enum) {
    @"null": void,
    point: Point,
    line_string: []Point,
    polygon: Polygon,
    multi_point: MultiPoint,
    multi_line_string: MultiLineString,
    multi_polygon: MultiPolygon,
    geometry_collection: GeometryCollection,
};

// Coordinates are stored as [lon, lat] pairs, truncated to f32.
pub const Point = [2]f32;
pub const MultiPoint = []Point;
pub const LineString = []Point;
pub const MultiLineString = []LineString;
pub const LinearRing = []Point;
pub const Polygon = []LinearRing;
pub const MultiPolygon = []Polygon;
pub const GeometryCollection = []Geometry;

pub const PropertyValue = union(enum) {
    array: []PropertyValue,
    @"null": void,
    bool: bool,
    int: i64,
    float: f64,
    string: []const u8,
    object: std.StringHashMap(PropertyValue),
};

pub const Identifier = union(enum) {
    int: i64,
    float: f64,
    string: []const u8,
};

pub const Parser = struct {
    /// Parse GeoJSON from raw text. The returned GeoJson owns its memory;
    /// free it with `deinit`.
    pub fn parse(
        json_text: []const u8,
        allocator: *std.mem.Allocator,
    ) ErrorSet!GeoJson {
        var json_parser = std.json.Parser.init(allocator, false);
        defer json_parser.deinit();
        var json = json_parser.parse(json_text) catch |err| {
            log.err("Unable to parse json\n", .{});
            return err;
        };
        defer json.deinit();
        return parseJson(json, allocator);
    }

    /// Parse GeoJSON from an already-parsed std.json tree.
    /// NOTE(review): string keys/values reference `json_text` (the parser is
    /// created with copy_strings=false) — the caller's text must outlive
    /// the returned GeoJson; confirm at call sites.
    pub fn parseJson(
        json: std.json.ValueTree,
        child_allocator: *std.mem.Allocator,
    ) ErrorSet!GeoJson {
        const root = json.root;
        var arena = std.heap.ArenaAllocator.init(child_allocator);
        // BUGFIX: without this, every error path below (malformed bbox,
        // geometry, or the final InvalidGeoJson return) leaked the arena
        // and everything already allocated in it.
        errdefer arena.deinit();
        const allocator = &arena.allocator;
        const bbox = try parseBoundingBox(root.Object.get("bbox"));

        if (root.Object.get("type")) |contentType| {
            const t = contentType.String;
            if (std.mem.eql(u8, "Feature", t)) {
                return GeoJson{
                    .content = .{ .feature = try parseFeature(root, allocator) },
                    .bbox = bbox,
                    .arena = arena,
                };
            }
            if (std.mem.eql(u8, "FeatureCollection", t)) {
                const features = root.Object.get("features").?;
                return GeoJson{
                    .content = .{ .feature_collection = try parseFeatures(features, allocator) },
                    .bbox = bbox,
                    .arena = arena,
                };
            }
            // this json is a geometry only
            return GeoJson{
                .content = .{ .geometry = try parseGeometry(root, allocator) },
                .bbox = bbox,
                .arena = arena,
            };
        }
        return ErrorSet.InvalidGeoJson;
    }

    // Reads an optional 4-element bbox array as [min, max] points.
    // NOTE(review): assumes a 2D bbox; the spec also allows 2n elements.
    fn parseBoundingBox(value: ?std.json.Value) !?BBox {
        if (value) |v| {
            const json_array = v.Array;
            return BBox{
                .min = Point{
                    try parseFloat(json_array.items[0]),
                    try parseFloat(json_array.items[1]),
                },
                .max = Point{
                    try parseFloat(json_array.items[2]),
                    try parseFloat(json_array.items[3]),
                },
            };
        }
        return null;
    }

    fn parseFeatures(
        value: std.json.Value,
        allocator: *std.mem.Allocator,
    ) ![]Feature {
        const json_array = value.Array;
        const features = try allocator.alloc(Feature, json_array.items.len);
        for (json_array.items) |item, idx| {
            features[idx] = try parseFeature(item, allocator);
        }
        return features;
    }

    // Parses one Feature object: geometry plus optional properties and id.
    fn parseFeature(
        value: std.json.Value,
        allocator: *std.mem.Allocator,
    ) !Feature {
        const geometry = try parseGeometry(value.Object.get("geometry"), allocator);
        const properties = if (value.Object.get("properties")) |p| try parseProperties(p, allocator) else null;
        const id = if (value.Object.get("id")) |id| try parseIdentifier(id, allocator) else null;
        return Feature{
            .geometry = geometry,
            .properties = properties,
            .id = id,
        };
    }

    fn parseIdentifier(
        value: std.json.Value,
        allocator: *std.mem.Allocator,
    ) !Identifier {
        return switch (value) {
            .String => |s| Identifier{ .string = try std.mem.dupe(allocator, u8, s) },
            .Integer => |i| Identifier{ .int = i },
            .Float => |f| Identifier{ .float = f },
            else => error.InvalidGeoJson,
        };
    }

    fn parseProperties(
        value: std.json.Value,
        allocator: *std.mem.Allocator,
    ) !std.StringHashMap(PropertyValue) {
        return (try parsePropertiesValue(value, allocator)).object;
    }

    // Recursively converts a json value into a PropertyValue tree.
    // String payloads are duped into the arena; hashmap KEYS are not.
    fn parsePropertiesValue(
        value: std.json.Value,
        allocator: *std.mem.Allocator,
    ) ErrorSet!PropertyValue {
        switch (value) {
            .Null => return PropertyValue.@"null",
            .Bool => |b| return PropertyValue{ .bool = b },
            .Integer => |i| return PropertyValue{ .int = i },
            .Float => |f| return PropertyValue{ .float = f },
            .String => |s| return PropertyValue{ .string = try std.mem.dupe(allocator, u8, s) },
            .NumberString => |s| return PropertyValue{ .string = try std.mem.dupe(allocator, u8, s) },
            .Array => |arr| {
                const array = try allocator.alloc(PropertyValue, arr.items.len);
                for (arr.items) |item, idx| {
                    const pValue = try parsePropertiesValue(item, allocator);
                    array[idx] = pValue;
                }
                return PropertyValue{ .array = array };
            },
            .Object => |o| {
                var hashmap = std.StringHashMap(PropertyValue).init(allocator);
                var iterator = o.iterator();
                while (iterator.next()) |kv| {
                    const pValue = try parsePropertiesValue(kv.value_ptr.*, allocator);
                    try hashmap.put(kv.key_ptr.*, pValue);
                }
                return PropertyValue{ .object = hashmap };
            },
        }
    }

    // Dispatches on the "type" member; absent/non-object geometry is .null
    // (the GeoJSON spec allows a null geometry in a Feature).
    fn parseGeometry(
        value: ?std.json.Value,
        allocator: *std.mem.Allocator,
    ) ErrorSet!Geometry {
        if (value == null) return Geometry.@"null";
        const v = value.?;
        if (v != .Object) return Geometry.@"null";

        const t = v.Object.get("type").?.String;
        if (std.mem.eql(u8, "Point", t)) {
            return Geometry{ .point = try parsePoint(v) };
        } else if (std.mem.eql(u8, "Polygon", t)) {
            return Geometry{ .polygon = try parsePolygon(v, allocator) };
        } else if (std.mem.eql(u8, "LineString", t)) {
            return Geometry{ .line_string = try parseLineString(v, allocator) };
        } else if (std.mem.eql(u8, "MultiLineString", t)) {
            return Geometry{ .multi_line_string = try parseMultiLineString(v, allocator) };
        } else if (std.mem.eql(u8, "MultiPolygon", t)) {
            return Geometry{ .multi_polygon = try parseMultiPolygon(v, allocator) };
        } else if (std.mem.eql(u8, "MultiPoint", t)) {
            return Geometry{ .multi_point = try parseMultiPoint(v, allocator) };
        } else if (std.mem.eql(u8, "GeometryCollection", t)) {
            return Geometry{ .geometry_collection = try parseGeometryCollection(v, allocator) };
        }

        log.err("Missing implementation for geometry of type '{s}'\n", .{t});
        return ErrorSet.InvalidGeoJson;
    }

    fn parsePoint(
        value: std.json.Value,
    ) !Point {
        const coordinates = value.Object.get("coordinates").?;
        return parsePointRaw(coordinates);
    }

    fn parseMultiPoint(
        value: std.json.Value,
        allocator: *std.mem.Allocator,
    ) !MultiPoint {
        const coordinates = value.Object.get("coordinates").?;
        return try parsePoints(coordinates, allocator);
    }

    fn parsePolygon(
        value: std.json.Value,
        allocator: *std.mem.Allocator,
    ) !Polygon {
        const coordinates = value.Object.get("coordinates").?.Array;
        const rings = try allocator.alloc([]Point, coordinates.items.len);
        for (coordinates.items) |item, idx| {
            rings[idx] = try parsePoints(item, allocator);
        }
        return rings;
    }

    fn parseMultiPolygon(
        value: std.json.Value,
        allocator: *std.mem.Allocator,
    ) !MultiPolygon {
        const coordinates = value.Object.get("coordinates").?.Array;
        const polygons = try allocator.alloc(Polygon, coordinates.items.len);
        for (coordinates.items) |item, idx| {
            polygons[idx] = try parsePolygonRaw(item, allocator);
        }
        return polygons;
    }

    fn parseLineString(
        value: std.json.Value,
        allocator: *std.mem.Allocator,
    ) !LineString {
        const coordinates = value.Object.get("coordinates").?;
        return try parsePoints(coordinates, allocator);
    }

    fn parseMultiLineString(
        value: std.json.Value,
        allocator: *std.mem.Allocator,
    ) !MultiLineString {
        const coordinates = value.Object.get("coordinates").?.Array;
        const lineStrings = try allocator.alloc([]Point, coordinates.items.len);
        for (coordinates.items) |item, idx| {
            lineStrings[idx] = try parsePoints(item, allocator);
        }
        return lineStrings;
    }

    fn parseGeometryCollection(
        value: std.json.Value,
        allocator: *std.mem.Allocator,
    ) !GeometryCollection {
        const array = value.Object.get("geometries").?.Array;
        const geometries = try allocator.alloc(Geometry, array.items.len);
        for (array.items) |item, idx| {
            geometries[idx] = try parseGeometry(item, allocator);
        }
        return geometries;
    }

    // Like parsePolygon but for a bare coordinates array (no wrapping object).
    fn parsePolygonRaw(
        value: std.json.Value,
        allocator: *std.mem.Allocator,
    ) !Polygon {
        const array = value.Array;
        const rings = try allocator.alloc([]Point, array.items.len);
        for (array.items) |item, idx| {
            rings[idx] = try parsePoints(item, allocator);
        }
        return rings;
    }

    fn parsePoints(
        value: std.json.Value,
        allocator: *std.mem.Allocator,
    ) ![]Point {
        const array = value.Array;
        const points = try allocator.alloc(Point, array.items.len);
        for (array.items) |json, idx| {
            points[idx] = try parsePointRaw(json);
        }
        return points;
    }

    // Reads the first two elements of a coordinate array; extra dimensions
    // (e.g. altitude) are ignored.
    fn parsePointRaw(value: std.json.Value) !Point {
        const array = value.Array;
        const first = array.items[0];
        const second = array.items[1];
        return Point{ try parseFloat(first), try parseFloat(second) };
    }

    fn parseFloat(
        value: std.json.Value,
    ) ErrorSet!f32 {
        return switch (value) {
            .Integer => @intToFloat(f32, value.Integer),
            .Float => @floatCast(f32, value.Float),
            else => {
                log.err("Invalid geojson. Expected Integer or Float, actual {}\n", .{value});
                return ErrorSet.InvalidGeoJson;
            },
        };
    }
};
src/main.zig
const std = @import("std");
const warn = std.debug.warn;
const Allocator = std.mem.Allocator;
const testing = std.testing;

const util = @import("util.zig");

/// The concrete Toot type used throughout the application.
pub const Type = Toot();

/// Returns a struct type representing a single toot (status post):
/// the raw JSON field map plus derived lists of hashtags and image data.
pub fn Toot() type {
    return struct {
        // Raw decoded JSON fields of the toot, keyed by field name.
        hashmap: Toothashmap,
        // Hashtag words ("#foo") extracted from the content.
        // NOTE(review): entries are slices into buffers produced by
        // util.htmlTagStrip — the allocator must outlive this Toot.
        tagList: TagList,
        // Raw image payloads attached via addImg.
        imgList: ImgList,

        const Self = @This();
        const TagType = []const u8;
        pub const TagList = std.ArrayList(TagType);
        const ImgType = []const u8;
        const ImgList = std.ArrayList(ImgType);
        const K = []const u8;
        const V = std.json.Value;
        const Toothashmap = std.StringHashMap(V);

        /// Wraps a decoded JSON map in a Toot and eagerly extracts hashtags
        /// from its "content" field.
        pub fn init(hash: Toothashmap, allocator: *Allocator) Self {
            var newToot = Self{
                .hashmap = hash,
                .tagList = TagList.init(allocator),
                .imgList = ImgList.init(allocator),
            };
            newToot.parseTags(allocator);
            return newToot;
        }

        /// Raw access to any JSON field, or null if absent.
        pub fn get(self: *const Self, key: K) ?V {
            return self.hashmap.get(key);
        }

        /// Returns the toot's "id" field. Asserts the field exists.
        pub fn id(self: *const Self) []const u8 {
            if (self.hashmap.get("id")) |kv| {
                return kv.String;
            } else {
                unreachable;
            }
        }

        /// Returns true if the toot's account.acct field equals `acct`.
        /// Missing account/acct fields compare as false.
        pub fn author(self: *const Self, acct: []const u8) bool {
            if (self.hashmap.get("account")) |kv| {
                if (kv.Object.get("acct")) |akv| {
                    const existing_acct = akv.String;
                    return std.mem.order(u8, acct, existing_acct) == std.math.Order.eq;
                } else {
                    return false;
                }
            } else {
                return false;
            }
        }

        /// Returns the raw HTML "content" field. Asserts the field exists.
        pub fn content(self: *const Self) []const u8 {
            return self.hashmap.get("content").?.String;
        }

        /// Decodes HTML entities, strips tags, then collects every
        /// whitespace-separated word beginning with '#' into tagList.
        /// NOTE(review): the intermediate hDecode buffer is never freed
        /// here — presumably fine with an arena/fixed allocator; verify.
        pub fn parseTags(self: *Self, allocator: *Allocator) void {
            const hDecode = util.htmlEntityDecode(self.content(), allocator) catch unreachable;
            const html_trim = util.htmlTagStrip(hDecode, allocator) catch unreachable;
            var wordParts = std.mem.tokenize(html_trim, " ");
            while (wordParts.next()) |word| {
                if (std.mem.startsWith(u8, word, "#")) {
                    self.tagList.append(word) catch unreachable;
                }
            }
        }

        /// Appends raw image bytes to the toot's image list.
        /// Caller retains ownership of `imgdata`'s backing memory.
        pub fn addImg(self: *Self, imgdata: ImgType) void {
            warn("addImg toot {*}\n", .{self});
            self.imgList.append(imgdata) catch unreachable;
        }

        /// Number of media attachments declared in the JSON
        /// (not the number of images fetched into imgList).
        pub fn imgCount(self: *Self) usize {
            var images = self.hashmap.get("media_attachments").?.Array;
            return images.items.len;
        }
    };
}

test "Toot" {
    var bytes: [8096]u8 = undefined;
    const allocator = &std.heap.FixedBufferAllocator.init(bytes[0..]).allocator;
    var tootHash = Type.Toothashmap.init(allocator);
    var jString = std.json.Value{ .String = "" };
    _ = tootHash.put("content", jString) catch unreachable;
    jString.String = "ABC";
    _ = tootHash.put("content", jString) catch unreachable;
    var toot = Type.init(tootHash, allocator);
    testing.expect(toot.tagList.count() == 0);
    warn("toot1 {*}\n", &toot);
    jString.String = "ABC #xyz";
    _ = tootHash.put("content", jString) catch unreachable;
    const toot2 = Type.init(tootHash, allocator);
    warn("toot2 {*}\n", &toot2);
    testing.expect(toot2.tagList.count() == 1);
    testing.expect(std.mem.order(u8, toot2.tagList.at(0), "#xyz") == std.math.Order.eq);
}
src/toot.zig
const expectEqual = @import("std").testing.expectEqual;
const Vector = @import("std").meta.Vector;
const math = @import("math.zig").math;

/// Returns a column-major matrix type of `rows` x `columns` elements of
/// numeric type `T`. Supports literals, identity, scalar/matrix multiply,
/// addition and subtraction (SIMD via std.meta.Vector).
pub fn Matrix(comptime T: type, rows: comptime_int, columns: comptime_int) type {
    switch (@typeInfo(T)) {
        .Int, .Float => {},
        else => @compileError("Matrix only supports integers and floats"),
    }
    return struct {
        // Storage is column-major: element (row, col) lives at
        // columnMajorValues[col * rows + row].
        columnMajorValues: [columns * rows]T,

        const T = T;
        const rows = rows;
        const columns = columns;
        const Self = @This();
        // Whole-matrix SIMD vector view used by elementwise ops.
        const SelfV = Vector(columns * rows, T);
        // Marker decl so mulReturnType can detect other Matrix instantiations.
        const isMatrixType = true;

        /// Builds a matrix from a row-major tuple literal (the natural way
        /// to write it in source), transposing into column-major storage.
        /// The literal must have exactly rows * columns fields.
        pub fn lit(v: anytype) @This() {
            const VType = @TypeOf(v);
            const vTypeInfo = @typeInfo(VType);
            if (vTypeInfo != .Struct) {
                @compileError("Expected tuple or struct argument, found " ++ @typeName(VType));
            }
            const fieldsInfo = vTypeInfo.Struct.fields;
            if (fieldsInfo.len != rows * columns) {
                @compileError("Wrong size literal for matrix");
            }
            var r: @This() = undefined;
            comptime var column: usize = 0;
            inline while (column < columns) : (column += 1) {
                comptime var row: usize = 0;
                inline while (row < rows) : (row += 1) {
                    // Read field (row, column) of the row-major literal,
                    // write it at the column-major position.
                    r.columnMajorValues[column * rows + row] = @field(v, fieldsInfo[row * columns + column].name);
                }
            }
            return r;
        }

        /// Returns the identity matrix. Panics for non-square matrices.
        pub fn ident() Self {
            if (rows != columns) {
                @panic("ident is only valid for square matrices.");
            }
            // Build the array at comptime: ones on the main diagonal
            // (stride columns + 1 in column-major order), zeros elsewhere.
            const arr = comptime init: {
                var arrInit = [_]T{0} ** (columns * rows);
                var i: usize = 0;
                while (i < arrInit.len) : (i += columns + 1) {
                    arrInit[i] = 1;
                }
                break :init arrInit;
            };
            return Self{
                .columnMajorValues = arr,
            };
        }

        /// Multiplies matrix * matrix, matrix * scalar, or scalar * matrix.
        /// The result type is computed by mulReturnType; incompatible
        /// operand combinations yield `void` (a compile error at use sites).
        pub fn mul(lhs: anytype, rhs: anytype) Self.mulReturnType(@TypeOf(lhs), @TypeOf(rhs)) {
            const Lhs = @TypeOf(lhs);
            const Rhs = @TypeOf(rhs);
            // Scalar cases delegate to mulScale (commutative).
            if (Lhs == Self and Rhs == T) {
                return lhs.mulScale(rhs);
            }
            if (Lhs == T and Rhs == Self) {
                return rhs.mulScale(lhs);
            }
            var r: Self.mulReturnType(Lhs, Rhs) = undefined;
            // Gather lhs rows into SIMD vectors so each output element is
            // one vector multiply + horizontal add.
            var selfRows: [rows]Vector(columns, T) = undefined;
            // TODO: Maybe putting into a big vector and shuffling out values
            // would be faster?
            {
                var row: usize = 0;
                while (row < rows) : (row += 1) {
                    var rowArr: [columns]T = undefined;
                    var column: usize = 0;
                    while (column < columns) : (column += 1) {
                        rowArr[column] = lhs.columnMajorValues[column * rows + row];
                    }
                    selfRows[row] = rowArr;
                }
            }
            var column: usize = 0;
            while (column < Rhs.columns) : (column += 1) {
                // Each rhs column is contiguous in column-major storage.
                var columnVec: Vector(Rhs.rows, T) = rhs.columnMajorValues[column * Rhs.rows ..][0..Rhs.rows].*;
                var row: usize = 0;
                while (row < rows) : (row += 1) {
                    r.columnMajorValues[column * rows + row] = @reduce(.Add, selfRows[row] * columnVec);
                }
            }
            return r;
        }

        /// Computes the result type of mul for the given operand types
        /// (pointers to matrices are unwrapped first). Returns `void` for
        /// incompatible combinations instead of a compile error so that
        /// generic callers can probe compatibility.
        pub fn mulReturnType(comptime LhsMaybe: type, comptime RhsMaybe: type) type {
            const Lhs = DepointerType(LhsMaybe);
            const Rhs = DepointerType(RhsMaybe);
            if ((Lhs == Self and Rhs == T) or (Lhs == T and Rhs == Self)) {
                return Self;
            }
            if (!@hasDecl(Rhs, "isMatrixType") or Lhs != Self) {
                return void;
            }
            if (T != Rhs.T) {
                return void; // @compileError("Matrix multiplcation value types must match");
            }
            if (columns != Rhs.rows) {
                return void; // @compileError("Matrix multiplcation sizes incompatible.");
            }
            return Matrix(T, rows, Rhs.columns);
        }

        /// Elementwise scale by a scalar (one SIMD multiply).
        fn mulScale(self: Self, scaler: T) Self {
            var scalerVec = @splat(columns * rows, scaler);
            return Self{
                .columnMajorValues = @as(SelfV, self.columnMajorValues) * scalerVec,
            };
        }

        /// Elementwise addition of two same-shape matrices.
        pub fn add(self: Self, other: Self) Self {
            return Self{
                .columnMajorValues = @as(SelfV, self.columnMajorValues) + @as(SelfV, other.columnMajorValues),
            };
        }

        /// Elementwise subtraction of two same-shape matrices.
        pub fn sub(self: Self, other: Self) Self {
            return Self{
                .columnMajorValues = @as(SelfV, self.columnMajorValues) - @as(SelfV, other.columnMajorValues),
            };
        }
    };
}

/// Unwraps pointer-to-struct/enum/union types to the pointee type;
/// all other types pass through unchanged.
fn DepointerType(comptime T: type) type {
    switch (@typeInfo(T)) {
        .Pointer => |ptr| {
            switch (@typeInfo(ptr.child)) {
                .Struct, .Enum, .Union => return ptr.child,
                else => {},
            }
        },
        else => {},
    }
    return T;
}

test "matrix multiplcation type" {
    const A = Matrix(f32, 5, 3);
    const B = Matrix(f32, 3, 4);
    const C = comptime A.mulReturnType(A, B);
    try expectEqual(5, C.rows);
    try expectEqual(4, C.columns);
}

test "matrix literal" {
    var a = Matrix(f32, 2, 2).lit(.{
        2, 3,
        4, 5,
    });
    // Storage is column-major, so columns appear contiguously.
    try expectEqual(@as(f32, 2), a.columnMajorValues[0]);
    try expectEqual(@as(f32, 4), a.columnMajorValues[1]);
    try expectEqual(@as(f32, 3), a.columnMajorValues[2]);
    try expectEqual(@as(f32, 5), a.columnMajorValues[3]);
}

test "matrix multiplcation" {
    var a = Matrix(f32, 2, 3).lit(.{
        2, 3, 4,
        5, 6, 7,
    });
    var b = Matrix(f32, 3, 2).lit(.{
        8,  9,
        10, 11,
        12, 13,
    });
    var c = math("a * b", .{
        .a = a,
        .b = b,
    });
    try expectEqual(Matrix(f32, 2, 2).lit(.{
        94,  103,
        184, 202,
    }), c);
    try expectEqual(Matrix(f32, 2, 2).lit(.{
        94,  103,
        184, 202,
    }), a.mul(b));
}

test "matrix addition" {
    var a = Matrix(f32, 2, 3).lit(.{
        2, 3, 4,
        5, 6, 7,
    });
    var b = Matrix(f32, 2, 3).lit(.{
        8,  9,  10,
        11, 12, 13,
    });
    var c = math("a + b", .{
        .a = a,
        .b = b,
    });
    try expectEqual(Matrix(f32, 2, 3).lit(.{
        10, 12, 14,
        16, 18, 20,
    }), c);
    c = math("a - b", .{
        .a = a,
        .b = b,
    });
    try expectEqual(Matrix(f32, 2, 3).lit(.{
        -6, -6, -6,
        -6, -6, -6,
    }), c);
}

test "matrix scale" {
    var a = Matrix(f32, 2, 2).lit(.{
        1, 2,
        3, 4,
    });
    var b: f32 = 2;
    var c = math("a * b", .{
        .a = a,
        .b = b,
    });
    try expectEqual(Matrix(f32, 2, 2).lit(.{
        2, 4,
        6, 8,
    }), c);
    var d = math("b * a", .{
        .a = a,
        .b = b,
    });
    try expectEqual(Matrix(f32, 2, 2).lit(.{
        2, 4,
        6, 8,
    }), d);
}

test "identity matrix" {
    const T = Matrix(f32, 5, 5);
    var a = T.lit(.{
        1, 0, 0, 0, 0,
        0, 1, 0, 0, 0,
        0, 0, 1, 0, 0,
        0, 0, 0, 1, 0,
        0, 0, 0, 0, 1,
    });
    var b = T.ident();
    try expectEqual(a, b);
}
src/mat.zig
const std = @import("std");
const os = std.os;
const mem = std.mem;
const assert = std.debug.assert;
const log = std.log.scoped(.io);

const config = @import("../config.zig");
const FIFO = @import("../fifo.zig").FIFO;
const Time = @import("../time.zig").Time;
const buffer_limit = @import("../io.zig").buffer_limit;

/// Darwin (macOS) event loop built on kqueue/kevent.
/// Operations are submitted as Completion records; sockets/files are
/// non-blocking and completions are driven by EV_ONESHOT kevent filters.
pub const IO = struct {
    kq: os.fd_t,
    time: Time = .{},
    // Count of events currently registered with kevent but not yet fired.
    io_inflight: usize = 0,
    // Pending timeout operations, checked on every flush.
    timeouts: FIFO(Completion) = .{},
    // Operations ready to have their callbacks invoked.
    completed: FIFO(Completion) = .{},
    // Operations waiting for readiness (to be submitted through kevent).
    io_pending: FIFO(Completion) = .{},

    /// Creates the kqueue. `entries`/`flags` exist for interface parity
    /// with other platform backends and are ignored here.
    pub fn init(entries: u12, flags: u32) !IO {
        _ = entries;
        _ = flags;
        const kq = try os.kqueue();
        assert(kq > -1);
        return IO{ .kq = kq };
    }

    /// Closes the kqueue. Does not cancel or free outstanding Completions.
    pub fn deinit(self: *IO) void {
        assert(self.kq > -1);
        os.close(self.kq);
        self.kq = -1;
    }

    /// Pass all queued submissions to the kernel and peek for completions.
    pub fn tick(self: *IO) !void {
        return self.flush(false);
    }

    /// Pass all queued submissions to the kernel and run for `nanoseconds`.
    /// The `nanoseconds` argument is a u63 to allow coercion to the i64 used
    /// in the __kernel_timespec struct.
    pub fn run_for_ns(self: *IO, nanoseconds: u63) !void {
        var timed_out = false;
        var completion: Completion = undefined;
        const on_timeout = struct {
            fn callback(
                timed_out_ptr: *bool,
                _completion: *Completion,
                result: TimeoutError!void,
            ) void {
                _ = _completion;
                _ = result catch unreachable;
                timed_out_ptr.* = true;
            }
        }.callback;

        // Submit a timeout which sets the timed_out value to true to terminate the loop below.
        self.timeout(
            *bool,
            &timed_out,
            on_timeout,
            &completion,
            nanoseconds,
        );

        // Loop until our timeout completion is processed above, which sets timed_out to true.
        // LLVM shouldn't be able to cache timed_out's value here since its address escapes above.
        while (!timed_out) {
            try self.flush(true);
        }
    }

    /// One loop iteration: expire timeouts, submit pending io through
    /// kevent (optionally blocking until the next timeout when
    /// `wait_for_completions`), then invoke callbacks of all completed ops.
    fn flush(self: *IO, wait_for_completions: bool) !void {
        var io_pending = self.io_pending.peek();
        var events: [256]os.Kevent = undefined;

        // Check timeouts and fill events with completions in io_pending
        // (they will be submitted through kevent).
        // Timeouts are expired here and possibly pushed to the completed queue.
        const next_timeout = self.flush_timeouts();
        const change_events = self.flush_io(&events, &io_pending);

        // Only call kevent() if we need to submit io events or if we need to wait for completions.
        if (change_events > 0 or self.completed.peek() == null) {
            // Zero timeouts for kevent() implies a non-blocking poll
            var ts = std.mem.zeroes(os.timespec);

            // We need to wait (not poll) on kevent if there's nothing to submit or complete.
            // We should never wait indefinitely (timeout_ptr = null for kevent) given:
            // - tick() is non-blocking (wait_for_completions = false)
            // - run_for_ns() always submits a timeout
            if (change_events == 0 and self.completed.peek() == null) {
                if (wait_for_completions) {
                    const timeout_ns = next_timeout orelse @panic("kevent() blocking forever");
                    ts.tv_nsec = @intCast(@TypeOf(ts.tv_nsec), timeout_ns % std.time.ns_per_s);
                    ts.tv_sec = @intCast(@TypeOf(ts.tv_sec), timeout_ns / std.time.ns_per_s);
                } else if (self.io_inflight == 0) {
                    return;
                }
            }

            const new_events = try os.kevent(
                self.kq,
                events[0..change_events],
                events[0..events.len],
                &ts,
            );

            // Mark the io events submitted only after kevent() successfully processed them
            self.io_pending.out = io_pending;
            if (io_pending == null) {
                self.io_pending.in = null;
            }

            self.io_inflight += change_events;
            self.io_inflight -= new_events;

            for (events[0..new_events]) |event| {
                // udata round-trips the Completion pointer through the kernel.
                const completion = @intToPtr(*Completion, event.udata);
                completion.next = null;
                self.completed.push(completion);
            }
        }

        // Swap out the completed queue before invoking callbacks so that
        // callbacks may safely re-submit new operations onto self.completed.
        var completed = self.completed;
        self.completed = .{};
        while (completed.pop()) |completion| {
            (completion.callback)(self, completion);
        }
    }

    /// Drains `io_pending_top` into `events` as one-shot kevent changelist
    /// entries. Returns how many events were filled (stops early when the
    /// pending list is exhausted or the buffer is full).
    fn flush_io(_: *IO, events: []os.Kevent, io_pending_top: *?*Completion) usize {
        for (events) |*event, flushed| {
            const completion = io_pending_top.* orelse return flushed;
            io_pending_top.* = completion.next;

            // Map each operation to (fd, kevent filter).
            const event_info = switch (completion.operation) {
                .accept => |op| [2]c_int{ op.socket, os.system.EVFILT_READ },
                .connect => |op| [2]c_int{ op.socket, os.system.EVFILT_WRITE },
                .read => |op| [2]c_int{ op.fd, os.system.EVFILT_READ },
                .write => |op| [2]c_int{ op.fd, os.system.EVFILT_WRITE },
                .recv => |op| [2]c_int{ op.socket, os.system.EVFILT_READ },
                .send => |op| [2]c_int{ op.socket, os.system.EVFILT_WRITE },
                else => @panic("invalid completion operation queued for io"),
            };

            event.* = .{
                .ident = @intCast(u32, event_info[0]),
                .filter = @intCast(i16, event_info[1]),
                .flags = os.system.EV_ADD | os.system.EV_ENABLE | os.system.EV_ONESHOT,
                .fflags = 0,
                .data = 0,
                .udata = @ptrToInt(completion),
            };
        }
        return events.len;
    }

    /// Moves expired timeouts to the completed queue and returns the
    /// nanoseconds until the earliest remaining timeout (null if none).
    fn flush_timeouts(self: *IO) ?u64 {
        var min_timeout: ?u64 = null;
        var timeouts: ?*Completion = self.timeouts.peek();
        while (timeouts) |completion| {
            // Advance before potentially removing `completion` from the list.
            timeouts = completion.next;

            // NOTE: We could cache `now` above the loop but monotonic() should be cheap to call.
            const now = self.time.monotonic();
            const expires = completion.operation.timeout.expires;

            // NOTE: remove() could be O(1) here with a doubly-linked-list
            // since we know the previous Completion.
            if (now >= expires) {
                self.timeouts.remove(completion);
                self.completed.push(completion);
                continue;
            }

            const timeout_ns = expires - now;
            if (min_timeout) |min_ns| {
                min_timeout = std.math.min(min_ns, timeout_ns);
            } else {
                min_timeout = timeout_ns;
            }
        }
        return min_timeout;
    }

    /// This struct holds the data needed for a single IO operation
    pub const Completion = struct {
        next: ?*Completion,
        context: ?*anyopaque,
        callback: fn (*IO, *Completion) void,
        operation: Operation,
    };

    // Per-operation parameters; the active tag selects the kevent filter
    // in flush_io and the syscall in each do_operation.
    const Operation = union(enum) {
        accept: struct {
            socket: os.socket_t,
        },
        close: struct {
            fd: os.fd_t,
        },
        connect: struct {
            socket: os.socket_t,
            address: std.net.Address,
            initiated: bool,
        },
        read: struct {
            fd: os.fd_t,
            buf: [*]u8,
            len: u32,
            offset: u64,
        },
        recv: struct {
            socket: os.socket_t,
            buf: [*]u8,
            len: u32,
        },
        send: struct {
            socket: os.socket_t,
            buf: [*]const u8,
            len: u32,
        },
        timeout: struct {
            expires: u64,
        },
        write: struct {
            fd: os.fd_t,
            buf: [*]const u8,
            len: u32,
            offset: u64,
        },
    };

    /// Common submission path: record the operation in `completion`,
    /// wrap the typed user callback, and queue it. Non-timeout operations
    /// are queued as completed so they are attempted immediately; a
    /// WouldBlock result re-queues them onto io_pending for kevent.
    fn submit(
        self: *IO,
        context: anytype,
        comptime callback: anytype,
        completion: *Completion,
        comptime operation_tag: std.meta.Tag(Operation),
        operation_data: anytype,
        comptime OperationImpl: type,
    ) void {
        const Context = @TypeOf(context);
        const onCompleteFn = struct {
            fn onComplete(io: *IO, _completion: *Completion) void {
                // Perform the actual operaton
                const op_data = &@field(_completion.operation, @tagName(operation_tag));
                const result = OperationImpl.do_operation(op_data);

                // Requeue onto io_pending if error.WouldBlock
                switch (operation_tag) {
                    .accept, .connect, .read, .write, .send, .recv => {
                        _ = result catch |err| switch (err) {
                            error.WouldBlock => {
                                _completion.next = null;
                                io.io_pending.push(_completion);
                                return;
                            },
                            else => {},
                        };
                    },
                    else => {},
                }

                // Complete the Completion
                return callback(
                    @intToPtr(Context, @ptrToInt(_completion.context)),
                    _completion,
                    result,
                );
            }
        }.onComplete;

        completion.* = .{
            .next = null,
            .context = context,
            .callback = onCompleteFn,
            .operation = @unionInit(Operation, @tagName(operation_tag), operation_data),
        };

        switch (operation_tag) {
            .timeout => self.timeouts.push(completion),
            else => self.completed.push(completion),
        }
    }

    pub const AcceptError = os.AcceptError || os.SetSockOptError;

    /// Asynchronously accepts a connection on `socket`; the new socket is
    /// created non-blocking/CLOEXEC with SIGPIPE suppressed.
    pub fn accept(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: AcceptError!os.socket_t,
        ) void,
        completion: *Completion,
        socket: os.socket_t,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .accept,
            .{
                .socket = socket,
            },
            struct {
                fn do_operation(op: anytype) AcceptError!os.socket_t {
                    const fd = try os.accept(
                        op.socket,
                        null,
                        null,
                        os.SOCK.NONBLOCK | os.SOCK.CLOEXEC,
                    );
                    errdefer os.close(fd);

                    // Darwin doesn't support os.MSG_NOSIGNAL to avoid getting SIGPIPE on socket send().
                    // Instead, it uses the SO_NOSIGPIPE socket option which does the same for all send()s.
                    os.setsockopt(
                        fd,
                        os.SOL.SOCKET,
                        os.SO.NOSIGPIPE,
                        &mem.toBytes(@as(c_int, 1)),
                    ) catch |err| return switch (err) {
                        error.TimeoutTooBig => unreachable,
                        error.PermissionDenied => error.NetworkSubsystemFailed,
                        error.AlreadyConnected => error.NetworkSubsystemFailed,
                        error.InvalidProtocolOption => error.ProtocolFailure,
                        else => |e| e,
                    };

                    return fd;
                }
            },
        );
    }

    pub const CloseError = error{
        FileDescriptorInvalid,
        DiskQuota,
        InputOutput,
        NoSpaceLeft,
    } || os.UnexpectedError;

    /// Asynchronously closes a file descriptor.
    pub fn close(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: CloseError!void,
        ) void,
        completion: *Completion,
        fd: os.fd_t,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .close,
            .{
                .fd = fd,
            },
            struct {
                fn do_operation(op: anytype) CloseError!void {
                    return switch (os.errno(os.system.close(op.fd))) {
                        .SUCCESS => {},
                        .BADF => error.FileDescriptorInvalid,
                        .INTR => {}, // A success, see https://github.com/ziglang/zig/issues/2425
                        .IO => error.InputOutput,
                        else => |errno| os.unexpectedErrno(errno),
                    };
                }
            },
        );
    }

    pub const ConnectError = os.ConnectError;

    /// Asynchronously connects `socket` to `address`.
    pub fn connect(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: ConnectError!void,
        ) void,
        completion: *Completion,
        socket: os.socket_t,
        address: std.net.Address,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .connect,
            .{
                .socket = socket,
                .address = address,
                .initiated = false,
            },
            struct {
                fn do_operation(op: anytype) ConnectError!void {
                    // Don't call connect after being rescheduled by io_pending as it gives EISCONN.
                    // Instead, check the socket error to see if has been connected successfully.
                    const result = switch (op.initiated) {
                        true => os.getsockoptError(op.socket),
                        else => os.connect(op.socket, &op.address.any, op.address.getOsSockLen()),
                    };

                    op.initiated = true;
                    return result;
                }
            },
        );
    }

    pub const ReadError = error{
        WouldBlock,
        NotOpenForReading,
        ConnectionResetByPeer,
        Alignment,
        InputOutput,
        IsDir,
        SystemResources,
        Unseekable,
    } || os.UnexpectedError;

    /// Asynchronously reads from `fd` at `offset` into `buffer` (pread).
    pub fn read(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: ReadError!usize,
        ) void,
        completion: *Completion,
        fd: os.fd_t,
        buffer: []u8,
        offset: u64,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .read,
            .{
                .fd = fd,
                .buf = buffer.ptr,
                .len = @intCast(u32, buffer_limit(buffer.len)),
                .offset = offset,
            },
            struct {
                fn do_operation(op: anytype) ReadError!usize {
                    // Retry on EINTR; all other errnos map to ReadError.
                    while (true) {
                        const rc = os.system.pread(
                            op.fd,
                            op.buf,
                            op.len,
                            @bitCast(isize, op.offset),
                        );
                        return switch (os.errno(rc)) {
                            .SUCCESS => @intCast(usize, rc),
                            .INTR => continue,
                            .AGAIN => error.WouldBlock,
                            .BADF => error.NotOpenForReading,
                            .CONNRESET => error.ConnectionResetByPeer,
                            .FAULT => unreachable,
                            .INVAL => error.Alignment,
                            .IO => error.InputOutput,
                            .ISDIR => error.IsDir,
                            .NOBUFS => error.SystemResources,
                            .NOMEM => error.SystemResources,
                            .NXIO => error.Unseekable,
                            .OVERFLOW => error.Unseekable,
                            .SPIPE => error.Unseekable,
                            else => |err| os.unexpectedErrno(err),
                        };
                    }
                }
            },
        );
    }

    pub const RecvError = os.RecvFromError;

    /// Asynchronously receives from `socket` into `buffer`.
    pub fn recv(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: RecvError!usize,
        ) void,
        completion: *Completion,
        socket: os.socket_t,
        buffer: []u8,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .recv,
            .{
                .socket = socket,
                .buf = buffer.ptr,
                .len = @intCast(u32, buffer_limit(buffer.len)),
            },
            struct {
                fn do_operation(op: anytype) RecvError!usize {
                    return os.recv(op.socket, op.buf[0..op.len], 0);
                }
            },
        );
    }

    pub const SendError = os.SendError;

    /// Asynchronously sends `buffer` on `socket`.
    pub fn send(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: SendError!usize,
        ) void,
        completion: *Completion,
        socket: os.socket_t,
        buffer: []const u8,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .send,
            .{
                .socket = socket,
                .buf = buffer.ptr,
                .len = @intCast(u32, buffer_limit(buffer.len)),
            },
            struct {
                fn do_operation(op: anytype) SendError!usize {
                    return os.send(op.socket, op.buf[0..op.len], 0);
                }
            },
        );
    }

    pub const TimeoutError = error{Canceled} || os.UnexpectedError;

    /// Invokes `callback` once `nanoseconds` have elapsed on the
    /// monotonic clock (checked in flush_timeouts on each flush).
    pub fn timeout(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: TimeoutError!void,
        ) void,
        completion: *Completion,
        nanoseconds: u63,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .timeout,
            .{
                .expires = self.time.monotonic() + nanoseconds,
            },
            struct {
                fn do_operation(_: anytype) TimeoutError!void {
                    return; // timeouts don't have errors for now
                }
            },
        );
    }

    pub const WriteError = os.PWriteError;

    /// Asynchronously writes `buffer` to `fd` at `offset` (pwrite).
    pub fn write(
        self: *IO,
        comptime Context: type,
        context: Context,
        comptime callback: fn (
            context: Context,
            completion: *Completion,
            result: WriteError!usize,
        ) void,
        completion: *Completion,
        fd: os.fd_t,
        buffer: []const u8,
        offset: u64,
    ) void {
        self.submit(
            context,
            callback,
            completion,
            .write,
            .{
                .fd = fd,
                .buf = buffer.ptr,
                .len = @intCast(u32, buffer_limit(buffer.len)),
                .offset = offset,
            },
            struct {
                fn do_operation(op: anytype) WriteError!usize {
                    return os.pwrite(op.fd, op.buf[0..op.len], op.offset);
                }
            },
        );
    }

    pub const INVALID_SOCKET = -1;

    /// Creates a socket that can be used for async operations with the IO instance.
    pub fn open_socket(self: *IO, family: u32, sock_type: u32, protocol: u32) !os.socket_t {
        _ = self;

        const fd = try os.socket(family, sock_type | os.SOCK.NONBLOCK, protocol);
        errdefer os.closeSocket(fd);

        // darwin doesn't support os.MSG_NOSIGNAL, but instead a socket option to avoid SIGPIPE.
        try os.setsockopt(fd, os.SOL.SOCKET, os.SO.NOSIGPIPE, &mem.toBytes(@as(c_int, 1)));
        return fd;
    }

    /// Opens a directory with read only access.
    pub fn open_dir(dir_path: [:0]const u8) !os.fd_t {
        return os.openZ(dir_path, os.O.CLOEXEC | os.O.RDONLY, 0);
    }

    /// Opens or creates a journal file:
    /// - For reading and writing.
    /// - For Direct I/O (required on darwin).
    /// - Obtains an advisory exclusive lock to the file descriptor.
    /// - Allocates the file contiguously on disk if this is supported by the file system.
    /// - Ensures that the file data (and file inode in the parent directory) is durable on disk.
    ///   The caller is responsible for ensuring that the parent directory inode is durable.
    /// - Verifies that the file size matches the expected file size before returning.
    pub fn open_file(
        self: *IO,
        dir_fd: os.fd_t,
        relative_path: [:0]const u8,
        size: u64,
        must_create: bool,
    ) !os.fd_t {
        _ = self;

        assert(relative_path.len > 0);
        assert(size >= config.sector_size);
        assert(size % config.sector_size == 0);

        // TODO Use O_EXCL when opening as a block device to obtain a mandatory exclusive lock.
        // This is much stronger than an advisory exclusive lock, and is required on some platforms.

        // Opening with O_DSYNC is essential for both durability and correctness.
        // O_DSYNC enables us to omit fsync() calls in the data plane, since we sync to the disk on every write.
        var flags: u32 = os.O.CLOEXEC | os.O.RDWR | os.O.DSYNC;
        var mode: os.mode_t = 0;

        // TODO Document this and investigate whether this is in fact correct to set here.
        if (@hasDecl(os.O, "LARGEFILE")) flags |= os.O.LARGEFILE;

        if (must_create) {
            log.info("creating \"{s}\"...", .{relative_path});
            flags |= os.O.CREAT;
            flags |= os.O.EXCL;
            mode = 0o666;
        } else {
            log.info("opening \"{s}\"...", .{relative_path});
        }

        // This is critical as we rely on O_DSYNC for fsync() whenever we write to the file:
        assert((flags & os.O.DSYNC) > 0);

        // Be careful with openat(2): "If pathname is absolute, then dirfd is ignored." (man page)
        assert(!std.fs.path.isAbsolute(relative_path));
        const fd = try os.openatZ(dir_fd, relative_path, flags, mode);
        // TODO Return a proper error message when the path exists or does not exist (init/start).
        errdefer os.close(fd);

        // TODO Check that the file is actually a file.

        // On darwin assume that Direct I/O is always supported.
        // Use F_NOCACHE to disable the page cache as O_DIRECT doesn't exist.
        if (config.direct_io) {
            _ = try os.fcntl(fd, os.F.NOCACHE, 1);
        }

        // Obtain an advisory exclusive lock that works only if all processes actually use flock().
        // LOCK_NB means that we want to fail the lock without waiting if another process has it.
        os.flock(fd, os.LOCK.EX | os.LOCK.NB) catch |err| switch (err) {
            error.WouldBlock => @panic("another process holds the data file lock"),
            else => return err,
        };

        // Ask the file system to allocate contiguous sectors for the file (if possible):
        // If the file system does not support `fallocate()`, then this could mean more seeks or a
        // panic if we run out of disk space (ENOSPC).
        if (must_create) try fs_allocate(fd, size);

        // The best fsync strategy is always to fsync before reading because this prevents us from
        // making decisions on data that was never durably written by a previously crashed process.
        // We therefore always fsync when we open the path, also to wait for any pending O_DSYNC.
        // Thanks to <NAME> from FoundationDB for diving into our source and pointing this out.
        try fs_sync(fd);

        // We fsync the parent directory to ensure that the file inode is durably written.
        // The caller is responsible for the parent directory inode stored under the grandparent.
        // We always do this when opening because we don't know if this was done before crashing.
        try fs_sync(dir_fd);

        const stat = try os.fstat(fd);
        if (stat.size != size) @panic("data file inode size was truncated or corrupted");

        return fd;
    }

    /// Darwin's fsync() syscall does not flush past the disk cache. We must use F_FULLFSYNC instead.
    /// https://twitter.com/TigerBeetleDB/status/1422491736224436225
    fn fs_sync(fd: os.fd_t) !void {
        _ = os.fcntl(fd, os.F.FULLFSYNC, 1) catch return os.fsync(fd);
    }

    /// Allocates a file contiguously using fallocate() if supported.
    /// Alternatively, writes to the last sector so that at least the file size is correct.
    fn fs_allocate(fd: os.fd_t, size: u64) !void {
        log.info("allocating {}...", .{std.fmt.fmtIntSizeBin(size)});

        // Darwin doesn't have fallocate() but we can simulate it using fcntl()s.
        //
        // https://stackoverflow.com/a/11497568
        // https://api.kde.org/frameworks/kcoreaddons/html/posix__fallocate__mac_8h_source.html
        // http://hg.mozilla.org/mozilla-central/file/3d846420a907/xpcom/glue/FileUtils.cpp#l61
        const F_ALLOCATECONTIG = 0x2; // Allocate contiguous space.
        const F_ALLOCATEALL = 0x4; // Allocate all or nothing.
        const F_PEOFPOSMODE = 3; // Use relative offset from the seek pos mode.
        const fstore_t = extern struct {
            fst_flags: c_uint,
            fst_posmode: c_int,
            fst_offset: os.off_t,
            fst_length: os.off_t,
            fst_bytesalloc: os.off_t,
        };

        var store = fstore_t{
            .fst_flags = F_ALLOCATECONTIG | F_ALLOCATEALL,
            .fst_posmode = F_PEOFPOSMODE,
            .fst_offset = 0,
            .fst_length = @intCast(os.off_t, size),
            .fst_bytesalloc = 0,
        };

        // Try to pre-allocate contiguous space and fall back to default non-contiguous.
        var res = os.system.fcntl(fd, os.F.PREALLOCATE, @ptrToInt(&store));
        if (os.errno(res) != .SUCCESS) {
            store.fst_flags = F_ALLOCATEALL;
            res = os.system.fcntl(fd, os.F.PREALLOCATE, @ptrToInt(&store));
        }

        switch (os.errno(res)) {
            .SUCCESS => {},
            .ACCES => unreachable, // F_SETLK or F_SETSIZE of F_WRITEBOOTSTRAP
            .BADF => return error.FileDescriptorInvalid,
            .DEADLK => unreachable, // F_SETLKW
            .INTR => unreachable, // F_SETLKW
            .INVAL => return error.ArgumentsInvalid, // for F_PREALLOCATE (offset invalid)
            .MFILE => unreachable, // F_DUPFD or F_DUPED
            .NOLCK => unreachable, // F_SETLK or F_SETLKW
            .OVERFLOW => return error.FileTooBig,
            .SRCH => unreachable, // F_SETOWN
            .OPNOTSUPP => return error.OperationNotSupported, // not reported but need same error union
            else => |errno| return os.unexpectedErrno(errno),
        }

        // Now actually perform the allocation.
        return os.ftruncate(fd, size) catch |err| switch (err) {
            error.AccessDenied => error.PermissionDenied,
            else => |e| e,
        };
    }
};
src/io/darwin.zig
const std = @import("../index.zig");
const debug = std.debug;
const math = std.math;
const mem = std.mem;
const Endian = @import("builtin").Endian;

/// SipHash with a 64-bit digest; c_rounds/d_rounds select the SipHash-c-d variant.
pub fn SipHash64(comptime c_rounds: usize, comptime d_rounds: usize) type {
    return SipHash(u64, c_rounds, d_rounds);
}

/// SipHash with a 128-bit digest; c_rounds/d_rounds select the SipHash-c-d variant.
pub fn SipHash128(comptime c_rounds: usize, comptime d_rounds: usize) type {
    return SipHash(u128, c_rounds, d_rounds);
}

/// Keyed SipHash state. Supports streaming use (init/update/final) and one-shot
/// hashing (hash). T is the digest width (u64 or u128); c_rounds and d_rounds are
/// the per-block compression and finalization round counts.
fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize) type {
    debug.assert(T == u64 or T == u128);
    debug.assert(c_rounds > 0 and d_rounds > 0);

    return struct {
        const Self = @This();
        const digest_size = 64;
        const block_size = 64;

        // Internal 4x64-bit state words.
        v0: u64,
        v1: u64,
        v2: u64,
        v3: u64,

        // streaming cache: holds a partial 8-byte block between update() calls.
        buf: [8]u8,
        buf_len: usize,
        msg_len: u8, // Total message length mod 256; folded into the final block.

        /// Initialize the state from a 16-byte key (read little-endian).
        pub fn init(key: []const u8) Self {
            debug.assert(key.len >= 16);

            const k0 = mem.readIntSliceLittle(u64, key[0..8]);
            const k1 = mem.readIntSliceLittle(u64, key[8..16]);

            var d = Self{
                .v0 = k0 ^ 0x736f6d6570736575,
                .v1 = k1 ^ 0x646f72616e646f6d,
                .v2 = k0 ^ 0x6c7967656e657261,
                .v3 = k1 ^ 0x7465646279746573,
                .buf = undefined,
                .buf_len = 0,
                .msg_len = 0,
            };

            if (T == u128) {
                d.v1 ^= 0xee;
            }

            return d;
        }

        /// Absorb message bytes. May be called any number of times before final().
        pub fn update(d: *Self, b: []const u8) void {
            var off: usize = 0;

            // Partial from previous. Flush as soon as the buffered bytes plus the new
            // input complete a full 8-byte block (>= 8, not > 8): with > 8 the buffer
            // could fill to exactly 8 bytes, and final() would then clobber the last
            // data byte with msg_len instead of compressing the block first.
            if (d.buf_len != 0 and d.buf_len + b.len >= 8) {
                off += 8 - d.buf_len;
                mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
                d.round(d.buf[0..]);
                d.buf_len = 0;
            }

            // Full middle blocks.
            while (off + 8 <= b.len) : (off += 8) {
                d.round(b[off .. off + 8]);
            }

            // Remainder for next pass (at most 7 bytes after the flush above).
            mem.copy(u8, d.buf[d.buf_len..], b[off..]);
            d.buf_len += @intCast(u8, b[off..].len);

            d.msg_len +%= @truncate(u8, b.len);
        }

        /// Finish the hash and return the digest. Consumes the buffered remainder;
        /// the state must not be reused afterwards without re-init().
        pub fn final(d: *Self) T {
            // Padding: zero-fill the tail and store the message length in the top byte.
            mem.set(u8, d.buf[d.buf_len..], 0);
            d.buf[7] = d.msg_len;
            d.round(d.buf[0..]);

            if (T == u128) {
                d.v2 ^= 0xee;
            } else {
                d.v2 ^= 0xff;
            }

            comptime var i: usize = 0;
            inline while (i < d_rounds) : (i += 1) {
                @inlineCall(sipRound, d);
            }

            const b1 = d.v0 ^ d.v1 ^ d.v2 ^ d.v3;
            if (T == u64) {
                return b1;
            }

            // Second finalization pass produces the high 64 bits of a u128 digest.
            d.v1 ^= 0xdd;

            comptime var j: usize = 0;
            inline while (j < d_rounds) : (j += 1) {
                @inlineCall(sipRound, d);
            }

            const b2 = d.v0 ^ d.v1 ^ d.v2 ^ d.v3;
            return (u128(b2) << 64) | b1;
        }

        // Compress one full 8-byte block into the state.
        fn round(d: *Self, b: []const u8) void {
            debug.assert(b.len == 8);

            const m = mem.readIntSliceLittle(u64, b[0..]);
            d.v3 ^= m;

            comptime var i: usize = 0;
            inline while (i < c_rounds) : (i += 1) {
                @inlineCall(sipRound, d);
            }

            d.v0 ^= m;
        }

        // One SipRound ARX permutation of the four state words.
        fn sipRound(d: *Self) void {
            d.v0 +%= d.v1;
            d.v1 = math.rotl(u64, d.v1, u64(13));
            d.v1 ^= d.v0;
            d.v0 = math.rotl(u64, d.v0, u64(32));
            d.v2 +%= d.v3;
            d.v3 = math.rotl(u64, d.v3, u64(16));
            d.v3 ^= d.v2;
            d.v0 +%= d.v3;
            d.v3 = math.rotl(u64, d.v3, u64(21));
            d.v3 ^= d.v0;
            d.v2 +%= d.v1;
            d.v1 = math.rotl(u64, d.v1, u64(17));
            d.v1 ^= d.v2;
            d.v2 = math.rotl(u64, d.v2, u64(32));
        }

        /// One-shot convenience wrapper: hash `input` under `key`.
        pub fn hash(key: []const u8, input: []const u8) T {
            var c = Self.init(key);
            c.update(input);
            return c.final();
        }
    };
}

// Test vectors from reference implementation.
// https://github.com/veorq/SipHash/blob/master/vectors.h
// The reference vectors all use the 16-byte key 00 01 02 ... 0f.
const test_key = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f";

test "siphash64-2-4 sanity" {
    const vectors = [][8]u8{
        "\x31\x0e\x0e\xdd\x47\xdb\x6f\x72", // ""
        "\xfd\x67\xdc\x93\xc5\x39\xf8\x74", // "\x00"
        "\x5a\x4f\xa9\xd9\x09\x80\x6c\x0d", // "\x00\x01" ...
etc "\x2d\x7e\xfb\xd7\x96\x66\x67\x85", "\xb7\x87\x71\x27\xe0\x94\x27\xcf", "\x8d\xa6\x99\xcd\x64\x55\x76\x18", "\xce\xe3\xfe\x58\x6e\x46\xc9\xcb", "\x37\xd1\x01\x8b\xf5\x00\x02\xab", "\x62\x24\x93\x9a\x79\xf5\xf5\x93", "\xb0\xe4\xa9\x0b\xdf\x82\x00\x9e", "\xf3\xb9\xdd\x94\xc5\xbb\x5d\x7a", "\xa7\xad\x6b\x22\x46\x2f\xb3\xf4", "\xfb\xe5\x0e\x86\xbc\x8f\x1e\x75", "\x90\x3d\x84\xc0\x27\x56\xea\x14", "\xee\xf2\x7a\x8e\x90\xca\x23\xf7", "\xe5\x45\xbe\x49\x61\xca\x29\xa1", "\xdb\x9b\xc2\x57\x7f\xcc\x2a\x3f", "\x94\x47\xbe\x2c\xf5\xe9\x9a\x69", "\x9c\xd3\x8d\x96\xf0\xb3\xc1\x4b", "\xbd\x61\x79\xa7\x1d\xc9\x6d\xbb", "\x98\xee\xa2\x1a\xf2\x5c\xd6\xbe", "\xc7\x67\x3b\x2e\xb0\xcb\xf2\xd0", "\x88\x3e\xa3\xe3\x95\x67\x53\x93", "\xc8\xce\x5c\xcd\x8c\x03\x0c\xa8", "\x94\xaf\x49\xf6\xc6\x50\xad\xb8", "\xea\xb8\x85\x8a\xde\x92\xe1\xbc", "\xf3\x15\xbb\x5b\xb8\x35\xd8\x17", "\xad\xcf\x6b\x07\x63\x61\x2e\x2f", "\xa5\xc9\x1d\xa7\xac\xaa\x4d\xde", "\x71\x65\x95\x87\x66\x50\xa2\xa6", "\x28\xef\x49\x5c\x53\xa3\x87\xad", "\x42\xc3\x41\xd8\xfa\x92\xd8\x32", "\xce\x7c\xf2\x72\x2f\x51\x27\x71", "\xe3\x78\x59\xf9\x46\x23\xf3\xa7", "\x38\x12\x05\xbb\x1a\xb0\xe0\x12", "\xae\x97\xa1\x0f\xd4\x34\xe0\x15", "\xb4\xa3\x15\x08\xbe\xff\x4d\x31", "\x81\x39\x62\x29\xf0\x90\x79\x02", "\x4d\x0c\xf4\x9e\xe5\xd4\xdc\xca", "\x5c\x73\x33\x6a\x76\xd8\xbf\x9a", "\xd0\xa7\x04\x53\x6b\xa9\x3e\x0e", "\x92\x59\x58\xfc\xd6\x42\x0c\xad", "\xa9\x15\xc2\x9b\xc8\x06\x73\x18", "\x95\x2b\x79\xf3\xbc\x0a\xa6\xd4", "\xf2\x1d\xf2\xe4\x1d\x45\x35\xf9", "\x87\x57\x75\x19\x04\x8f\x53\xa9", "\x10\xa5\x6c\xf5\xdf\xcd\x9a\xdb", "\xeb\x75\x09\x5c\xcd\x98\x6c\xd0", "\x51\xa9\xcb\x9e\xcb\xa3\x12\xe6", "\x96\xaf\xad\xfc\x2c\xe6\x66\xc7", "\x72\xfe\x52\x97\x5a\x43\x64\xee", "\x5a\x16\x45\xb2\x76\xd5\x92\xa1", "\xb2\x74\xcb\x8e\xbf\x87\x87\x0a", "\x6f\x9b\xb4\x20\x3d\xe7\xb3\x81", "\xea\xec\xb2\xa3\x0b\x22\xa8\x7f", "\x99\x24\xa4\x3c\xc1\x31\x57\x24", "\xbd\x83\x8d\x3a\xaf\xbf\x8d\xb7", "\x0b\x1a\x2a\x32\x65\xd5\x1a\xea", 
"\x13\x50\x79\xa3\x23\x1c\xe6\x60", "\x93\x2b\x28\x46\xe4\xd7\x06\x66", "\xe1\x91\x5f\x5c\xb1\xec\xa4\x6c", "\xf3\x25\x96\x5c\xa1\x6d\x62\x9f", "\x57\x5f\xf2\x8e\x60\x38\x1b\xe5", "\x72\x45\x06\xeb\x4c\x32\x8a\x95", }; const siphash = SipHash64(2, 4); var buffer: [64]u8 = undefined; for (vectors) |vector, i| { buffer[i] = @intCast(u8, i); const expected = mem.readIntLittle(u64, &vector); debug.assert(siphash.hash(test_key, buffer[0..i]) == expected); } } test "siphash128-2-4 sanity" { const vectors = [][16]u8{ "\xa3\x81\x7f\x04\xba\x25\xa8\xe6\x6d\xf6\x72\x14\xc7\x55\x02\x93", "\xda\x87\xc1\xd8\x6b\x99\xaf\x44\x34\x76\x59\x11\x9b\x22\xfc\x45", "\x81\x77\x22\x8d\xa4\xa4\x5d\xc7\xfc\xa3\x8b\xde\xf6\x0a\xff\xe4", "\x9c\x70\xb6\x0c\x52\x67\xa9\x4e\x5f\x33\xb6\xb0\x29\x85\xed\x51", "\xf8\x81\x64\xc1\x2d\x9c\x8f\xaf\x7d\x0f\x6e\x7c\x7b\xcd\x55\x79", "\x13\x68\x87\x59\x80\x77\x6f\x88\x54\x52\x7a\x07\x69\x0e\x96\x27", "\x14\xee\xca\x33\x8b\x20\x86\x13\x48\x5e\xa0\x30\x8f\xd7\xa1\x5e", "\xa1\xf1\xeb\xbe\xd8\xdb\xc1\x53\xc0\xb8\x4a\xa6\x1f\xf0\x82\x39", "\x3b\x62\xa9\xba\x62\x58\xf5\x61\x0f\x83\xe2\x64\xf3\x14\x97\xb4", "\x26\x44\x99\x06\x0a\xd9\xba\xab\xc4\x7f\x8b\x02\xbb\x6d\x71\xed", "\x00\x11\x0d\xc3\x78\x14\x69\x56\xc9\x54\x47\xd3\xf3\xd0\xfb\xba", "\x01\x51\xc5\x68\x38\x6b\x66\x77\xa2\xb4\xdc\x6f\x81\xe5\xdc\x18", "\xd6\x26\xb2\x66\x90\x5e\xf3\x58\x82\x63\x4d\xf6\x85\x32\xc1\x25", "\x98\x69\xe2\x47\xe9\xc0\x8b\x10\xd0\x29\x93\x4f\xc4\xb9\x52\xf7", "\x31\xfc\xef\xac\x66\xd7\xde\x9c\x7e\xc7\x48\x5f\xe4\x49\x49\x02", "\x54\x93\xe9\x99\x33\xb0\xa8\x11\x7e\x08\xec\x0f\x97\xcf\xc3\xd9", "\x6e\xe2\xa4\xca\x67\xb0\x54\xbb\xfd\x33\x15\xbf\x85\x23\x05\x77", "\x47\x3d\x06\xe8\x73\x8d\xb8\x98\x54\xc0\x66\xc4\x7a\xe4\x77\x40", "\xa4\x26\xe5\xe4\x23\xbf\x48\x85\x29\x4d\xa4\x81\xfe\xae\xf7\x23", "\x78\x01\x77\x31\xcf\x65\xfa\xb0\x74\xd5\x20\x89\x52\x51\x2e\xb1", "\x9e\x25\xfc\x83\x3f\x22\x90\x73\x3e\x93\x44\xa5\xe8\x38\x39\xeb", 
"\x56\x8e\x49\x5a\xbe\x52\x5a\x21\x8a\x22\x14\xcd\x3e\x07\x1d\x12", "\x4a\x29\xb5\x45\x52\xd1\x6b\x9a\x46\x9c\x10\x52\x8e\xff\x0a\xae", "\xc9\xd1\x84\xdd\xd5\xa9\xf5\xe0\xcf\x8c\xe2\x9a\x9a\xbf\x69\x1c", "\x2d\xb4\x79\xae\x78\xbd\x50\xd8\x88\x2a\x8a\x17\x8a\x61\x32\xad", "\x8e\xce\x5f\x04\x2d\x5e\x44\x7b\x50\x51\xb9\xea\xcb\x8d\x8f\x6f", "\x9c\x0b\x53\xb4\xb3\xc3\x07\xe8\x7e\xae\xe0\x86\x78\x14\x1f\x66", "\xab\xf2\x48\xaf\x69\xa6\xea\xe4\xbf\xd3\xeb\x2f\x12\x9e\xeb\x94", "\x06\x64\xda\x16\x68\x57\x4b\x88\xb9\x35\xf3\x02\x73\x58\xae\xf4", "\xaa\x4b\x9d\xc4\xbf\x33\x7d\xe9\x0c\xd4\xfd\x3c\x46\x7c\x6a\xb7", "\xea\x5c\x7f\x47\x1f\xaf\x6b\xde\x2b\x1a\xd7\xd4\x68\x6d\x22\x87", "\x29\x39\xb0\x18\x32\x23\xfa\xfc\x17\x23\xde\x4f\x52\xc4\x3d\x35", "\x7c\x39\x56\xca\x5e\xea\xfc\x3e\x36\x3e\x9d\x55\x65\x46\xeb\x68", "\x77\xc6\x07\x71\x46\xf0\x1c\x32\xb6\xb6\x9d\x5f\x4e\xa9\xff\xcf", "\x37\xa6\x98\x6c\xb8\x84\x7e\xdf\x09\x25\xf0\xf1\x30\x9b\x54\xde", "\xa7\x05\xf0\xe6\x9d\xa9\xa8\xf9\x07\x24\x1a\x2e\x92\x3c\x8c\xc8", "\x3d\xc4\x7d\x1f\x29\xc4\x48\x46\x1e\x9e\x76\xed\x90\x4f\x67\x11", "\x0d\x62\xbf\x01\xe6\xfc\x0e\x1a\x0d\x3c\x47\x51\xc5\xd3\x69\x2b", "\x8c\x03\x46\x8b\xca\x7c\x66\x9e\xe4\xfd\x5e\x08\x4b\xbe\xe7\xb5", "\x52\x8a\x5b\xb9\x3b\xaf\x2c\x9c\x44\x73\xcc\xe5\xd0\xd2\x2b\xd9", "\xdf\x6a\x30\x1e\x95\xc9\x5d\xad\x97\xae\x0c\xc8\xc6\x91\x3b\xd8", "\x80\x11\x89\x90\x2c\x85\x7f\x39\xe7\x35\x91\x28\x5e\x70\xb6\xdb", "\xe6\x17\x34\x6a\xc9\xc2\x31\xbb\x36\x50\xae\x34\xcc\xca\x0c\x5b", "\x27\xd9\x34\x37\xef\xb7\x21\xaa\x40\x18\x21\xdc\xec\x5a\xdf\x89", "\x89\x23\x7d\x9d\xed\x9c\x5e\x78\xd8\xb1\xc9\xb1\x66\xcc\x73\x42", "\x4a\x6d\x80\x91\xbf\x5e\x7d\x65\x11\x89\xfa\x94\xa2\x50\xb1\x4c", "\x0e\x33\xf9\x60\x55\xe7\xae\x89\x3f\xfc\x0e\x3d\xcf\x49\x29\x02", "\xe6\x1c\x43\x2b\x72\x0b\x19\xd1\x8e\xc8\xd8\x4b\xdc\x63\x15\x1b", "\xf7\xe5\xae\xf5\x49\xf7\x82\xcf\x37\x90\x55\xa6\x08\x26\x9b\x16", "\x43\x8d\x03\x0f\xd0\xb7\xa5\x4f\xa8\x37\xf2\xad\x20\x1a\x64\x03", 
"\xa5\x90\xd3\xee\x4f\xbf\x04\xe3\x24\x7e\x0d\x27\xf2\x86\x42\x3f", "\x5f\xe2\xc1\xa1\x72\xfe\x93\xc4\xb1\x5c\xd3\x7c\xae\xf9\xf5\x38", "\x2c\x97\x32\x5c\xbd\x06\xb3\x6e\xb2\x13\x3d\xd0\x8b\x3a\x01\x7c", "\x92\xc8\x14\x22\x7a\x6b\xca\x94\x9f\xf0\x65\x9f\x00\x2a\xd3\x9e", "\xdc\xe8\x50\x11\x0b\xd8\x32\x8c\xfb\xd5\x08\x41\xd6\x91\x1d\x87", "\x67\xf1\x49\x84\xc7\xda\x79\x12\x48\xe3\x2b\xb5\x92\x25\x83\xda", "\x19\x38\xf2\xcf\x72\xd5\x4e\xe9\x7e\x94\x16\x6f\xa9\x1d\x2a\x36", "\x74\x48\x1e\x96\x46\xed\x49\xfe\x0f\x62\x24\x30\x16\x04\x69\x8e", "\x57\xfc\xa5\xde\x98\xa9\xd6\xd8\x00\x64\x38\xd0\x58\x3d\x8a\x1d", "\x9f\xec\xde\x1c\xef\xdc\x1c\xbe\xd4\x76\x36\x74\xd9\x57\x53\x59", "\xe3\x04\x0c\x00\xeb\x28\xf1\x53\x66\xca\x73\xcb\xd8\x72\xe7\x40", "\x76\x97\x00\x9a\x6a\x83\x1d\xfe\xcc\xa9\x1c\x59\x93\x67\x0f\x7a", "\x58\x53\x54\x23\x21\xf5\x67\xa0\x05\xd5\x47\xa4\xf0\x47\x59\xbd", "\x51\x50\xd1\x77\x2f\x50\x83\x4a\x50\x3e\x06\x9a\x97\x3f\xbd\x7c", }; const siphash = SipHash128(2, 4); var buffer: [64]u8 = undefined; for (vectors) |vector, i| { buffer[i] = @intCast(u8, i); const expected = mem.readIntLittle(u128, &vector); debug.assert(siphash.hash(test_key, buffer[0..i]) == expected); } }
std/hash/siphash.zig
const std = @import("../../std.zig");
const net = @import("net.zig");

const os = std.os;
const fmt = std.fmt;
const mem = std.mem;
const time = std.time;

/// A generic socket abstraction.
const Socket = @This();

/// A socket-address pair.
pub const Connection = struct {
    socket: Socket,
    address: Socket.Address,

    /// Enclose a socket and address into a socket-address pair.
    pub fn from(socket: Socket, address: Socket.Address) Socket.Connection {
        return .{ .socket = socket, .address = address };
    }
};

/// A generic socket address abstraction. It is safe to directly access and modify
/// the fields of a `Socket.Address`.
pub const Address = union(enum) {
    ipv4: net.IPv4.Address,
    ipv6: net.IPv6.Address,

    /// Instantiate a new address with a IPv4 host and port.
    pub fn initIPv4(host: net.IPv4, port: u16) Socket.Address {
        return .{ .ipv4 = .{ .host = host, .port = port } };
    }

    /// Instantiate a new address with a IPv6 host and port.
    pub fn initIPv6(host: net.IPv6, port: u16) Socket.Address {
        return .{ .ipv6 = .{ .host = host, .port = port } };
    }

    /// Parses a `sockaddr` into a generic socket address.
    /// Asserts (via `unreachable`) that the family is AF_INET or AF_INET6;
    /// callers must not pass any other address family.
    pub fn fromNative(address: *align(4) const os.sockaddr) Socket.Address {
        switch (address.family) {
            os.AF_INET => {
                const info = @ptrCast(*const os.sockaddr_in, address);
                // Port is stored big-endian (network order) in the native struct.
                const host = net.IPv4{ .octets = @bitCast([4]u8, info.addr) };
                const port = mem.bigToNative(u16, info.port);
                return Socket.Address.initIPv4(host, port);
            },
            os.AF_INET6 => {
                const info = @ptrCast(*const os.sockaddr_in6, address);
                const host = net.IPv6{ .octets = info.addr, .scope_id = info.scope_id };
                const port = mem.bigToNative(u16, info.port);
                return Socket.Address.initIPv6(host, port);
            },
            else => unreachable,
        }
    }

    /// Encodes a generic socket address into an extern union that may be reliably
    /// casted into a `sockaddr` which may be passed into socket syscalls.
    pub fn toNative(self: Socket.Address) extern union {
        ipv4: os.sockaddr_in,
        ipv6: os.sockaddr_in6,
    } {
        return switch (self) {
            .ipv4 => |address| .{
                .ipv4 = .{
                    .addr = @bitCast(u32, address.host.octets),
                    // Convert the port back to network (big-endian) order.
                    .port = mem.nativeToBig(u16, address.port),
                },
            },
            .ipv6 => |address| .{
                .ipv6 = .{
                    .addr = address.host.octets,
                    .port = mem.nativeToBig(u16, address.port),
                    .scope_id = address.host.scope_id,
                    .flowinfo = 0,
                },
            },
        };
    }

    /// Returns the number of bytes that make up the `sockaddr` equivalent to the address.
    pub fn getNativeSize(self: Socket.Address) u32 {
        return switch (self) {
            .ipv4 => @sizeOf(os.sockaddr_in),
            .ipv6 => @sizeOf(os.sockaddr_in6),
        };
    }

    /// Implements the `std.fmt.format` API.
    /// Renders as "host:port"; `layout` and `opts` are accepted but ignored.
    pub fn format(
        self: Socket.Address,
        comptime layout: []const u8,
        opts: fmt.FormatOptions,
        writer: anytype,
    ) !void {
        switch (self) {
            .ipv4 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
            .ipv6 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
        }
    }
};

/// The underlying handle of a socket.
fd: os.socket_t,

/// Open a new socket.
pub fn init(domain: u32, socket_type: u32, protocol: u32) !Socket {
    return Socket{ .fd = try os.socket(domain, socket_type, protocol) };
}

/// Enclose a socket abstraction over an existing socket file descriptor.
pub fn from(fd: os.socket_t) Socket {
    return Socket{ .fd = fd };
}

/// Closes the socket.
pub fn deinit(self: Socket) void {
    os.closeSocket(self.fd);
}

/// Shutdown either the read side, write side, or all side of the socket.
pub fn shutdown(self: Socket, how: os.ShutdownHow) !void {
    return os.shutdown(self.fd, how);
}

/// Binds the socket to an address.
pub fn bind(self: Socket, address: Socket.Address) !void {
    return os.bind(self.fd, @ptrCast(*const os.sockaddr, &address.toNative()), address.getNativeSize());
}

/// Start listening for incoming connections on the socket.
pub fn listen(self: Socket, max_backlog_size: u31) !void {
    return os.listen(self.fd, max_backlog_size);
}

/// Have the socket attempt to connect to an address.
pub fn connect(self: Socket, address: Socket.Address) !void {
    return os.connect(self.fd, @ptrCast(*const os.sockaddr, &address.toNative()), address.getNativeSize());
}

/// Accept a pending incoming connection queued to the kernel backlog
/// of the socket.
pub fn accept(self: Socket, flags: u32) !Socket.Connection {
    // FIXME(review): `os.sockaddr` is smaller than `os.sockaddr_in6`, yet
    // `fromNative` reads sockaddr_in6 fields for AF_INET6 peers — that would read
    // past this stack buffer. Presumably this should be a `sockaddr_storage`;
    // confirm against the platform headers before relying on IPv6 accepts.
    var address: os.sockaddr = undefined;
    var address_len: u32 = @sizeOf(os.sockaddr);

    const socket = Socket{ .fd = try os.accept(self.fd, &address, &address_len, flags) };
    const socket_address = Socket.Address.fromNative(@alignCast(4, &address));

    return Socket.Connection.from(socket, socket_address);
}

/// Read data from the socket into the buffer provided. It returns the
/// number of bytes read into the buffer provided.
pub fn read(self: Socket, buf: []u8) !usize {
    return os.read(self.fd, buf);
}

/// Read data from the socket into the buffer provided with a set of flags
/// specified. It returns the number of bytes read into the buffer provided.
pub fn recv(self: Socket, buf: []u8, flags: u32) !usize {
    return os.recv(self.fd, buf, flags);
}

/// Write a buffer of data provided to the socket. It returns the number
/// of bytes that are written to the socket.
pub fn write(self: Socket, buf: []const u8) !usize {
    return os.write(self.fd, buf);
}

/// Writes multiple I/O vectors to the socket. It returns the number
/// of bytes that are written to the socket.
pub fn writev(self: Socket, buffers: []const os.iovec_const) !usize {
    return os.writev(self.fd, buffers);
}

/// Write a buffer of data provided to the socket with a set of flags specified.
/// It returns the number of bytes that are written to the socket.
pub fn send(self: Socket, buf: []const u8, flags: u32) !usize {
    return os.send(self.fd, buf, flags);
}

/// Writes multiple I/O vectors with a prepended message header to the socket
/// with a set of flags specified. It returns the number of bytes that are
/// written to the socket.
pub fn sendmsg(self: Socket, msg: os.msghdr_const, flags: u32) !usize {
    return os.sendmsg(self.fd, msg, flags);
}

/// Query the address that the socket is locally bound to.
pub fn getLocalAddress(self: Socket) !Socket.Address {
    // FIXME(review): same undersized `os.sockaddr` buffer concern as in accept()
    // for AF_INET6 sockets — see the note there.
    var address: os.sockaddr = undefined;
    var address_len: u32 = @sizeOf(os.sockaddr);
    try os.getsockname(self.fd, &address, &address_len);
    return Socket.Address.fromNative(@alignCast(4, &address));
}

/// Query and return the latest cached error on the socket.
pub fn getError(self: Socket) !void {
    return os.getsockoptError(self.fd);
}

/// Query the read buffer size of the socket.
pub fn getReadBufferSize(self: Socket) !u32 {
    var value: u32 = undefined;
    var value_len: u32 = @sizeOf(u32);

    // Raw syscall: errno is decoded by hand so we can surface precise errors.
    const rc = os.system.getsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVBUF, mem.asBytes(&value), &value_len);
    return switch (os.errno(rc)) {
        0 => value,
        os.EBADF => error.BadFileDescriptor,
        os.EFAULT => error.InvalidAddressSpace,
        os.EINVAL => error.InvalidSocketOption,
        os.ENOPROTOOPT => error.UnknownSocketOption,
        os.ENOTSOCK => error.NotASocket,
        else => |err| os.unexpectedErrno(err),
    };
}

/// Query the write buffer size of the socket.
pub fn getWriteBufferSize(self: Socket) !u32 {
    // Raw syscall so that errno can be decoded by hand into precise errors.
    var size: u32 = undefined;
    var size_len: u32 = @sizeOf(u32);

    const rc = os.system.getsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDBUF, mem.asBytes(&size), &size_len);
    switch (os.errno(rc)) {
        0 => return size,
        os.EBADF => return error.BadFileDescriptor,
        os.EFAULT => return error.InvalidAddressSpace,
        os.EINVAL => return error.InvalidSocketOption,
        os.ENOPROTOOPT => return error.UnknownSocketOption,
        os.ENOTSOCK => return error.NotASocket,
        else => |err| return os.unexpectedErrno(err),
    }
}

/// Allow multiple sockets on the same host to listen on the same address. It returns `error.UnsupportedSocketOption` if
/// the host does not support sockets listening the same address.
pub fn setReuseAddress(self: Socket, enabled: bool) !void {
    if (comptime @hasDecl(os, "SO_REUSEADDR")) {
        const value: usize = @boolToInt(enabled);
        return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_REUSEADDR, mem.asBytes(&value));
    }
    return error.UnsupportedSocketOption;
}

/// Allow multiple sockets on the same host to listen on the same port. It returns `error.UnsupportedSocketOption` if
/// the host does not supports sockets listening on the same port.
pub fn setReusePort(self: Socket, enabled: bool) !void {
    if (comptime @hasDecl(os, "SO_REUSEPORT")) {
        const value: usize = @boolToInt(enabled);
        return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_REUSEPORT, mem.asBytes(&value));
    }
    return error.UnsupportedSocketOption;
}

/// Set the write buffer size of the socket.
pub fn setWriteBufferSize(self: Socket, size: u32) !void {
    return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDBUF, mem.asBytes(&size));
}

/// Set the read buffer size of the socket.
pub fn setReadBufferSize(self: Socket, size: u32) !void {
    return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVBUF, mem.asBytes(&size));
}

/// Set a timeout on the socket that is to occur if no messages are successfully written
/// to its bound destination after a specified number of milliseconds. A subsequent write
/// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded.
pub fn setWriteTimeout(self: Socket, milliseconds: usize) !void {
    // Split the millisecond count into the seconds/microseconds pair the kernel expects.
    const whole_seconds = milliseconds / time.ms_per_s;
    const remainder_us = (milliseconds % time.ms_per_s) * time.us_per_ms;
    const duration = os.timeval{
        .tv_sec = @intCast(i32, whole_seconds),
        .tv_usec = @intCast(i32, remainder_us),
    };
    return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDTIMEO, mem.asBytes(&duration));
}

/// Set a timeout on the socket that is to occur if no messages are successfully read
/// from its bound destination after a specified number of milliseconds. A subsequent
/// read from the socket will thereafter return `error.WouldBlock` should the timeout be
/// exceeded.
pub fn setReadTimeout(self: Socket, milliseconds: usize) !void {
    // Split the millisecond count into the seconds/microseconds pair the kernel expects.
    const whole_seconds = milliseconds / time.ms_per_s;
    const remainder_us = (milliseconds % time.ms_per_s) * time.us_per_ms;
    const duration = os.timeval{
        .tv_sec = @intCast(i32, whole_seconds),
        .tv_usec = @intCast(i32, remainder_us),
    };
    return os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVTIMEO, mem.asBytes(&duration));
}
lib/std/x/os/Socket.zig
const std = @import("std.zig");
const tokenizer = @import("zig/tokenizer.zig");
pub const Token = tokenizer.Token;
pub const Tokenizer = tokenizer.Tokenizer;
pub const parse = @import("zig/parse.zig").parse;
pub const parseStringLiteral = @import("zig/string_literal.zig").parse;
pub const render = @import("zig/render.zig").render;
pub const ast = @import("zig/ast.zig");
pub const system = @import("zig/system.zig");
pub const CrossTarget = @import("zig/cross_target.zig").CrossTarget;

/// 128-bit identifier for the contents of a source file.
pub const SrcHash = [16]u8;

/// If the source is small enough, it is used directly as the hash.
/// If it is long, blake3 hash is computed.
pub fn hashSrc(src: []const u8) SrcHash {
    var out: SrcHash = undefined;
    if (src.len <= @typeInfo(SrcHash).Array.len) {
        // Short sources are embedded verbatim and zero-padded to 16 bytes.
        // NOTE(review): a short source with trailing NUL bytes would collide with its
        // unpadded prefix — presumably NUL never appears in valid source; confirm.
        std.mem.copy(u8, &out, src);
        std.mem.set(u8, out[src.len..], 0);
    } else {
        std.crypto.hash.Blake3.hash(src, &out, .{});
    }
    return out;
}

/// Returns the 0-based line and column of `byte_offset` within `source`,
/// counting '\n' as the sole line terminator.
pub fn findLineColumn(source: []const u8, byte_offset: usize) struct { line: usize, column: usize } {
    var line: usize = 0;
    var column: usize = 0;
    for (source[0..byte_offset]) |byte| {
        switch (byte) {
            '\n' => {
                line += 1;
                column = 0;
            },
            else => {
                column += 1;
            },
        }
    }
    return .{ .line = line, .column = column };
}

/// Returns the signed number of newlines between two byte offsets of `source`;
/// the result is negative when `end` precedes `start`.
pub fn lineDelta(source: []const u8, start: usize, end: usize) isize {
    var line: isize = 0;
    if (end >= start) {
        for (source[start..end]) |byte| switch (byte) {
            '\n' => line += 1,
            else => continue,
        };
    } else {
        for (source[end..start]) |byte| switch (byte) {
            '\n' => line -= 1,
            else => continue,
        };
    }
    return line;
}

/// Options controlling how a compiler output binary is named.
pub const BinNameOptions = struct {
    root_name: []const u8,
    target: std.Target,
    output_mode: std.builtin.OutputMode,
    /// Defaults to static linking when null.
    link_mode: ?std.builtin.LinkMode = null,
    /// Defaults to the target's native object format when null.
    object_format: ?std.Target.ObjectFormat = null,
    /// When set, dynamic library names embed major.minor.patch.
    version: ?std.builtin.Version = null,
};

/// Returns the standard file system basename of a binary generated by the Zig compiler.
/// The returned name is allocated with `allocator`; the caller owns the slice.
pub fn binNameAlloc(allocator: *std.mem.Allocator, options: BinNameOptions) error{OutOfMemory}![]u8 {
    const root_name = options.root_name;
    const target = options.target;
    switch (options.object_format orelse target.getObjectFormat()) {
        .coff, .pe => switch (options.output_mode) {
            .Exe => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.exeFileExt() }),
            .Lib => {
                const suffix = switch (options.link_mode orelse .Static) {
                    .Static => ".lib",
                    .Dynamic => ".dll",
                };
                return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, suffix });
            },
            .Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.oFileExt() }),
        },
        .elf => switch (options.output_mode) {
            .Exe => return allocator.dupe(u8, root_name),
            .Lib => {
                switch (options.link_mode orelse .Static) {
                    .Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
                        target.libPrefix(), root_name,
                    }),
                    .Dynamic => {
                        if (options.version) |ver| {
                            return std.fmt.allocPrint(allocator, "{s}{s}.so.{d}.{d}.{d}", .{
                                target.libPrefix(), root_name, ver.major, ver.minor, ver.patch,
                            });
                        } else {
                            return std.fmt.allocPrint(allocator, "{s}{s}.so", .{
                                target.libPrefix(), root_name,
                            });
                        }
                    },
                }
            },
            .Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.oFileExt() }),
        },
        .macho => switch (options.output_mode) {
            .Exe => return allocator.dupe(u8, root_name),
            .Lib => {
                // Note: every arm of this switch returns, mirroring the .elf branch.
                // A stray trailing allocPrint referencing an undeclared `suffix`
                // (leftover from the .coff branch) was removed here: it was
                // unreachable and did not compile.
                switch (options.link_mode orelse .Static) {
                    .Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
                        target.libPrefix(), root_name,
                    }),
                    .Dynamic => {
                        if (options.version) |ver| {
                            return std.fmt.allocPrint(allocator, "{s}{s}.{d}.{d}.{d}.dylib", .{
                                target.libPrefix(), root_name, ver.major, ver.minor, ver.patch,
                            });
                        } else {
                            return std.fmt.allocPrint(allocator, "{s}{s}.dylib", .{
                                target.libPrefix(), root_name,
                            });
                        }
                    },
                }
            },
            .Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.oFileExt() }),
        },
        .wasm => switch (options.output_mode) {
            .Exe => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.exeFileExt() }),
            .Obj => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, target.oFileExt() }),
            .Lib => return std.fmt.allocPrint(allocator, "{s}.wasm", .{root_name}),
        },
        .c => return std.fmt.allocPrint(allocator, "{s}.c", .{root_name}),
        .hex => return std.fmt.allocPrint(allocator, "{s}.ihex", .{root_name}),
        .raw => return std.fmt.allocPrint(allocator, "{s}.bin", .{root_name}),
    }
}

/// Only validates escape sequence characters.
/// Slice must be valid utf8 starting and ending with "'" and exactly one codepoint in between.
pub fn parseCharLiteral(
    slice: []const u8,
    bad_index: *usize, // populated if error.InvalidCharacter is returned
) error{InvalidCharacter}!u32 {
    std.debug.assert(slice.len >= 3 and slice[0] == '\'' and slice[slice.len - 1] == '\'');

    if (slice[1] == '\\') {
        switch (slice[2]) {
            'n' => return '\n',
            'r' => return '\r',
            '\\' => return '\\',
            't' => return '\t',
            '\'' => return '\'',
            '"' => return '"',
            'x' => {
                // Exactly two hex digits: '\xNN'.
                if (slice.len != 6) {
                    bad_index.* = slice.len - 2;
                    return error.InvalidCharacter;
                }
                var value: u32 = 0;
                for (slice[3..5]) |c, i| {
                    switch (c) {
                        '0'...'9' => {
                            value *= 16;
                            value += c - '0';
                        },
                        'a'...'f' => {
                            value *= 16;
                            value += c - 'a' + 10;
                        },
                        'A'...'F' => {
                            value *= 16;
                            value += c - 'A' + 10;
                        },
                        else => {
                            bad_index.* = 3 + i;
                            return error.InvalidCharacter;
                        },
                    }
                }
                return value;
            },
            'u' => {
                // One or more hex digits in braces: '\u{N...}', capped at U+10FFFF.
                if (slice.len < "'\\u{0}'".len or slice[3] != '{' or slice[slice.len - 2] != '}') {
                    bad_index.* = 2;
                    return error.InvalidCharacter;
                }
                var value: u32 = 0;
                for (slice[4 .. slice.len - 2]) |c, i| {
                    switch (c) {
                        '0'...'9' => {
                            value *= 16;
                            value += c - '0';
                        },
                        'a'...'f' => {
                            value *= 16;
                            value += c - 'a' + 10;
                        },
                        'A'...'F' => {
                            value *= 16;
                            value += c - 'A' + 10;
                        },
                        else => {
                            bad_index.* = 4 + i;
                            return error.InvalidCharacter;
                        },
                    }
                    // Reject codepoints beyond the Unicode range as soon as they appear.
                    if (value > 0x10ffff) {
                        bad_index.* = 4 + i;
                        return error.InvalidCharacter;
                    }
                }
                return value;
            },
            else => {
                bad_index.* = 2;
                return error.InvalidCharacter;
            },
        }
    }

    // Not an escape: decode the single UTF-8 codepoint between the quotes.
    return std.unicode.utf8Decode(slice[1 .. slice.len - 1]) catch unreachable;
}

test "parseCharLiteral" {
    var bad_index: usize = undefined;
    std.testing.expectEqual(try parseCharLiteral("'a'", &bad_index), 'a');
    std.testing.expectEqual(try parseCharLiteral("'ä'", &bad_index), 'ä');
    std.testing.expectEqual(try parseCharLiteral("'\\x00'", &bad_index), 0);
    std.testing.expectEqual(try parseCharLiteral("'\\x4f'", &bad_index), 0x4f);
    std.testing.expectEqual(try parseCharLiteral("'\\x4F'", &bad_index), 0x4f);
    std.testing.expectEqual(try parseCharLiteral("'ぁ'", &bad_index), 0x3041);
    std.testing.expectEqual(try parseCharLiteral("'\\u{0}'", &bad_index), 0);
    std.testing.expectEqual(try parseCharLiteral("'\\u{3041}'", &bad_index), 0x3041);
    std.testing.expectEqual(try parseCharLiteral("'\\u{7f}'", &bad_index), 0x7f);
    std.testing.expectEqual(try parseCharLiteral("'\\u{7FFF}'", &bad_index), 0x7FFF);

    std.testing.expectError(error.InvalidCharacter, parseCharLiteral("'\\x0'", &bad_index));
    std.testing.expectError(error.InvalidCharacter, parseCharLiteral("'\\x000'", &bad_index));
    std.testing.expectError(error.InvalidCharacter, parseCharLiteral("'\\y'", &bad_index));
    std.testing.expectError(error.InvalidCharacter, parseCharLiteral("'\\u'", &bad_index));
    std.testing.expectError(error.InvalidCharacter, parseCharLiteral("'\\uFFFF'", &bad_index));
    std.testing.expectError(error.InvalidCharacter, parseCharLiteral("'\\u{}'", &bad_index));
    std.testing.expectError(error.InvalidCharacter, parseCharLiteral("'\\u{FFFFFF}'", &bad_index));
    std.testing.expectError(error.InvalidCharacter, parseCharLiteral("'\\u{FFFF'", &bad_index));
    std.testing.expectError(error.InvalidCharacter, parseCharLiteral("'\\u{FFFF}x'", &bad_index));
}

test "" {
    @import("std").testing.refAllDecls(@This());
}
lib/std/zig.zig
// Registers "compare output" end-to-end cases: each case pairs an embedded
// Zig program (the `\\` raw-string source) with the exact stdout it must
// produce; addC links libc, addCase allows extra source files / CLI args.
// NOTE(review): the original line structure of the embedded `\\` multiline
// string literals was collapsed in this copy; the code lines below are left
// byte-identical, with review comments inserted only between them.
const builtin = @import("builtin"); const std = @import("std"); const os = std.os; const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompareOutputContext) void { cases.addC("hello world with libc", \\const c = @cImport({ \\ // See https://github.com/ziglang/zig/issues/515 \\ @cDefine("_NO_CRT_STDIO_INLINE", "1"); \\ @cInclude("stdio.h"); \\}); \\pub export fn main(argc: c_int, argv: [*][*]u8) c_int { \\ _ = c.puts("Hello, world!"); \\ return 0; \\} , "Hello, world!" ++ std.cstr.line_sep); cases.addCase(x: { var tc = cases.create("multiple files with private function", \\usingnamespace @import("std").io; \\usingnamespace @import("foo.zig"); \\ \\pub fn main() void { \\ privateFunction(); \\ const stdout = &getStdOut().outStream().stream; \\ stdout.print("OK 2\n", .{}) catch unreachable; \\} \\ \\fn privateFunction() void { \\ printText(); \\} , "OK 1\nOK 2\n"); tc.addSourceFile("foo.zig", \\usingnamespace @import("std").io; \\ \\// purposefully conflicting function with main.zig \\// but it's private so it should be OK \\fn privateFunction() void { \\ const stdout = &getStdOut().outStream().stream; \\ stdout.print("OK 1\n", .{}) catch unreachable; \\} \\ \\pub fn printText() void { \\ privateFunction(); \\} ); break :x tc; }); cases.addCase(x: { var tc = cases.create("import segregation", \\usingnamespace @import("foo.zig"); \\usingnamespace @import("bar.zig"); \\ \\pub fn main() void { \\ foo_function(); \\ bar_function(); \\} , "OK\nOK\n"); tc.addSourceFile("foo.zig", \\usingnamespace @import("std").io; \\pub fn foo_function() void { \\ const stdout = &getStdOut().outStream().stream; \\ stdout.print("OK\n", .{}) catch unreachable; \\} ); tc.addSourceFile("bar.zig", \\usingnamespace @import("other.zig"); \\usingnamespace @import("std").io; \\ \\pub fn bar_function() void { \\ if (foo_function()) { \\ const stdout = &getStdOut().outStream().stream; \\ stdout.print("OK\n", .{}) catch unreachable; \\ } \\} ); tc.addSourceFile("other.zig", \\pub fn 
// other.zig supplies a conflicting foo_function; then two files that
// usingnamespace-import each other, and the libc-free hello world.
foo_function() bool { \\ // this one conflicts with the one from foo \\ return true; \\} ); break :x tc; }); cases.addCase(x: { var tc = cases.create("two files usingnamespace import each other", \\usingnamespace @import("a.zig"); \\ \\pub fn main() void { \\ ok(); \\} , "OK\n"); tc.addSourceFile("a.zig", \\usingnamespace @import("b.zig"); \\const io = @import("std").io; \\ \\pub const a_text = "OK\n"; \\ \\pub fn ok() void { \\ const stdout = &io.getStdOut().outStream().stream; \\ stdout.print(b_text, .{}) catch unreachable; \\} ); tc.addSourceFile("b.zig", \\usingnamespace @import("a.zig"); \\ \\pub const b_text = a_text; ); break :x tc; }); cases.add("hello world without libc", \\const io = @import("std").io; \\ \\pub fn main() void { \\ const stdout = &io.getStdOut().outStream().stream; \\ stdout.print("Hello, world!\n{d:4} {x:3} {c}\n", .{@as(u32, 12), @as(u16, 0x12), @as(u8, 'a')}) catch unreachable; \\} , "Hello, world!\n 12 12 a\n"); cases.addC("number literals", \\const builtin = @import("builtin"); \\const is_windows = builtin.os == builtin.Os.windows; \\const c = @cImport({ \\ if (is_windows) { \\ // See https://github.com/ziglang/zig/issues/515 \\ @cDefine("_NO_CRT_STDIO_INLINE", "1"); \\ @cInclude("io.h"); \\ @cInclude("fcntl.h"); \\ } \\ @cInclude("stdio.h"); \\}); \\ \\pub export fn main(argc: c_int, argv: [*][*]u8) c_int { \\ if (is_windows) { \\ // we want actual \n, not \r\n \\ _ = c._setmode(1, c._O_BINARY); \\ } \\ _ = c.printf("0: %llu\n", \\ @as(u64, 0)); \\ _ = c.printf("320402575052271: %llu\n", \\ @as(u64, 320402575052271)); \\ _ = c.printf("0x01236789abcdef: %llu\n", \\ @as(u64, 0x01236789abcdef)); \\ _ = c.printf("0xffffffffffffffff: %llu\n", \\ @as(u64, 0xffffffffffffffff)); \\ _ = c.printf("0x000000ffffffffffffffff: %llu\n", \\ @as(u64, 0x000000ffffffffffffffff)); \\ _ = c.printf("0o1777777777777777777777: %llu\n", \\ @as(u64, 0o1777777777777777777777)); \\ _ = c.printf("0o0000001777777777777777777777: %llu\n", \\ @as(u64, 
// "number literals" continues: binary integer literals, then decimal float
// literals printed with C's %a hex-float format.
0o0000001777777777777777777777)); \\ _ = c.printf("0b1111111111111111111111111111111111111111111111111111111111111111: %llu\n", \\ @as(u64, 0b1111111111111111111111111111111111111111111111111111111111111111)); \\ _ = c.printf("0b0000001111111111111111111111111111111111111111111111111111111111111111: %llu\n", \\ @as(u64, 0b0000001111111111111111111111111111111111111111111111111111111111111111)); \\ \\ _ = c.printf("\n"); \\ \\ _ = c.printf("0.0: %.013a\n", \\ @as(f64, 0.0)); \\ _ = c.printf("0e0: %.013a\n", \\ @as(f64, 0e0)); \\ _ = c.printf("0.0e0: %.013a\n", \\ @as(f64, 0.0e0)); \\ _ = c.printf("000000000000000000000000000000000000000000000000000000000.0e0: %.013a\n", \\ @as(f64, 000000000000000000000000000000000000000000000000000000000.0e0)); \\ _ = c.printf("0.000000000000000000000000000000000000000000000000000000000e0: %.013a\n", \\ @as(f64, 0.000000000000000000000000000000000000000000000000000000000e0)); \\ _ = c.printf("0.0e000000000000000000000000000000000000000000000000000000000: %.013a\n", \\ @as(f64, 0.0e000000000000000000000000000000000000000000000000000000000)); \\ _ = c.printf("1.0: %.013a\n", \\ @as(f64, 1.0)); \\ _ = c.printf("10.0: %.013a\n", \\ @as(f64, 10.0)); \\ _ = c.printf("10.5: %.013a\n", \\ @as(f64, 10.5)); \\ _ = c.printf("10.5e5: %.013a\n", \\ @as(f64, 10.5e5)); \\ _ = c.printf("10.5e+5: %.013a\n", \\ @as(f64, 10.5e+5)); \\ _ = c.printf("50.0e-2: %.013a\n", \\ @as(f64, 50.0e-2)); \\ _ = c.printf("50e-2: %.013a\n", \\ @as(f64, 50e-2)); \\ \\ _ = c.printf("\n"); \\ \\ _ = c.printf("0x1.0: %.013a\n", \\ @as(f64, 0x1.0)); \\ _ = c.printf("0x10.0: %.013a\n", \\ @as(f64, 0x10.0)); \\ _ = c.printf("0x100.0: %.013a\n", \\ @as(f64, 0x100.0)); \\ _ = c.printf("0x103.0: %.013a\n", \\ @as(f64, 0x103.0)); \\ _ = c.printf("0x103.7: %.013a\n", \\ @as(f64, 0x103.7)); \\ _ = c.printf("0x103.70: %.013a\n", \\ @as(f64, 0x103.70)); \\ _ = c.printf("0x103.70p4: %.013a\n", \\ @as(f64, 0x103.70p4)); \\ _ = c.printf("0x103.70p5: %.013a\n", \\ @as(f64, 
// Hex-float literals with p exponents, followed by the full expected stdout
// for "number literals", then the order-independent declarations case.
0x103.70p5)); \\ _ = c.printf("0x103.70p+5: %.013a\n", \\ @as(f64, 0x103.70p+5)); \\ _ = c.printf("0x103.70p-5: %.013a\n", \\ @as(f64, 0x103.70p-5)); \\ \\ return 0; \\} , \\0: 0 \\320402575052271: 320402575052271 \\0x01236789abcdef: 320402575052271 \\0xffffffffffffffff: 18446744073709551615 \\0x000000ffffffffffffffff: 18446744073709551615 \\0o1777777777777777777777: 18446744073709551615 \\0o0000001777777777777777777777: 18446744073709551615 \\0b1111111111111111111111111111111111111111111111111111111111111111: 18446744073709551615 \\0b0000001111111111111111111111111111111111111111111111111111111111111111: 18446744073709551615 \\ \\0.0: 0x0.0000000000000p+0 \\0e0: 0x0.0000000000000p+0 \\0.0e0: 0x0.0000000000000p+0 \\000000000000000000000000000000000000000000000000000000000.0e0: 0x0.0000000000000p+0 \\0.000000000000000000000000000000000000000000000000000000000e0: 0x0.0000000000000p+0 \\0.0e000000000000000000000000000000000000000000000000000000000: 0x0.0000000000000p+0 \\1.0: 0x1.0000000000000p+0 \\10.0: 0x1.4000000000000p+3 \\10.5: 0x1.5000000000000p+3 \\10.5e5: 0x1.0059000000000p+20 \\10.5e+5: 0x1.0059000000000p+20 \\50.0e-2: 0x1.0000000000000p-1 \\50e-2: 0x1.0000000000000p-1 \\ \\0x1.0: 0x1.0000000000000p+0 \\0x10.0: 0x1.0000000000000p+4 \\0x100.0: 0x1.0000000000000p+8 \\0x103.0: 0x1.0300000000000p+8 \\0x103.7: 0x1.0370000000000p+8 \\0x103.70: 0x1.0370000000000p+8 \\0x103.70p4: 0x1.0370000000000p+12 \\0x103.70p5: 0x1.0370000000000p+13 \\0x103.70p+5: 0x1.0370000000000p+13 \\0x103.70p-5: 0x1.0370000000000p+3 \\ ); cases.add("order-independent declarations", \\const io = @import("std").io; \\const z = io.stdin_fileno; \\const x : @TypeOf(y) = 1234; \\const y : u16 = 5678; \\pub fn main() void { \\ var x_local : i32 = print_ok(x); \\} \\fn print_ok(val: @TypeOf(x)) @TypeOf(foo) { \\ const stdout = &io.getStdOut().outStream().stream; \\ stdout.print("OK\n", .{}) catch unreachable; \\ return 0; \\} \\const foo : i32 = 0; , "OK\n"); cases.addC("expose function pointer to 
// C interop cases: qsort with a Zig comparator, then float/int casts, then
// same-named methods on two structs.
C land", \\const c = @cImport(@cInclude("stdlib.h")); \\ \\export fn compare_fn(a: ?*const c_void, b: ?*const c_void) c_int { \\ const a_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), a)); \\ const b_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), b)); \\ if (a_int.* < b_int.*) { \\ return -1; \\ } else if (a_int.* > b_int.*) { \\ return 1; \\ } else { \\ return 0; \\ } \\} \\ \\pub export fn main() c_int { \\ var array = [_]u32{ 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 }; \\ \\ c.qsort(@ptrCast(?*c_void, array[0..].ptr), @intCast(c_ulong, array.len), @sizeOf(i32), compare_fn); \\ \\ for (array) |item, i| { \\ if (item != i) { \\ c.abort(); \\ } \\ } \\ \\ return 0; \\} , ""); cases.addC("casting between float and integer types", \\const builtin = @import("builtin"); \\const is_windows = builtin.os == builtin.Os.windows; \\const c = @cImport({ \\ if (is_windows) { \\ // See https://github.com/ziglang/zig/issues/515 \\ @cDefine("_NO_CRT_STDIO_INLINE", "1"); \\ @cInclude("io.h"); \\ @cInclude("fcntl.h"); \\ } \\ @cInclude("stdio.h"); \\}); \\ \\pub export fn main(argc: c_int, argv: [*][*]u8) c_int { \\ if (is_windows) { \\ // we want actual \n, not \r\n \\ _ = c._setmode(1, c._O_BINARY); \\ } \\ const small: f32 = 3.25; \\ const x: f64 = small; \\ const y = @floatToInt(i32, x); \\ const z = @intToFloat(f64, y); \\ _ = c.printf("%.2f\n%d\n%.2f\n%.2f\n", x, y, z, @as(f64, -0.4)); \\ return 0; \\} , "3.25\n3\n3.00\n-0.40\n"); cases.add("same named methods in incomplete struct", \\const io = @import("std").io; \\ \\const Foo = struct { \\ field1: Bar, \\ \\ fn method(a: *const Foo) bool { return true; } \\}; \\ \\const Bar = struct { \\ field2: i32, \\ \\ fn method(b: *const Bar) bool { return true; } \\}; \\ \\pub fn main() void { \\ const bar = Bar {.field2 = 13,}; \\ const foo = Foo {.field1 = bar,}; \\ const stdout = &io.getStdOut().outStream().stream; \\ if (!foo.method()) { \\ stdout.print("BAD\n", .{}) catch unreachable; \\ } \\ if (!bar.method()) { \\ 
// defer/errdefer semantics cases: defers run LIFO on scope exit; errdefer
// runs only on the error path.
stdout.print("BAD\n", .{}) catch unreachable; \\ } \\ stdout.print("OK\n", .{}) catch unreachable; \\} , "OK\n"); cases.add("defer with only fallthrough", \\const io = @import("std").io; \\pub fn main() void { \\ const stdout = &io.getStdOut().outStream().stream; \\ stdout.print("before\n", .{}) catch unreachable; \\ defer stdout.print("defer1\n", .{}) catch unreachable; \\ defer stdout.print("defer2\n", .{}) catch unreachable; \\ defer stdout.print("defer3\n", .{}) catch unreachable; \\ stdout.print("after\n", .{}) catch unreachable; \\} , "before\nafter\ndefer3\ndefer2\ndefer1\n"); cases.add("defer with return", \\const io = @import("std").io; \\const os = @import("std").os; \\pub fn main() void { \\ const stdout = &io.getStdOut().outStream().stream; \\ stdout.print("before\n", .{}) catch unreachable; \\ defer stdout.print("defer1\n", .{}) catch unreachable; \\ defer stdout.print("defer2\n", .{}) catch unreachable; \\ var args_it = @import("std").process.args(); \\ if (args_it.skip() and !args_it.skip()) return; \\ defer stdout.print("defer3\n", .{}) catch unreachable; \\ stdout.print("after\n", .{}) catch unreachable; \\} , "before\ndefer2\ndefer1\n"); cases.add("errdefer and it fails", \\const io = @import("std").io; \\pub fn main() void { \\ do_test() catch return; \\} \\fn do_test() !void { \\ const stdout = &io.getStdOut().outStream().stream; \\ stdout.print("before\n", .{}) catch unreachable; \\ defer stdout.print("defer1\n", .{}) catch unreachable; \\ errdefer stdout.print("deferErr\n", .{}) catch unreachable; \\ try its_gonna_fail(); \\ defer stdout.print("defer3\n", .{}) catch unreachable; \\ stdout.print("after\n", .{}) catch unreachable; \\} \\fn its_gonna_fail() !void { \\ return error.IToldYouItWouldFail; \\} , "before\ndeferErr\ndefer1\n"); cases.add("errdefer and it passes", \\const io = @import("std").io; \\pub fn main() void { \\ do_test() catch return; \\} \\fn do_test() !void { \\ const stdout = &io.getStdOut().outStream().stream; \\ 
// errdefer-passes case, @embedFile, and command-line-argument parsing with
// quoted/escaped args supplied via setCommandLineArgs.
stdout.print("before\n", .{}) catch unreachable; \\ defer stdout.print("defer1\n", .{}) catch unreachable; \\ errdefer stdout.print("deferErr\n", .{}) catch unreachable; \\ try its_gonna_pass(); \\ defer stdout.print("defer3\n", .{}) catch unreachable; \\ stdout.print("after\n", .{}) catch unreachable; \\} \\fn its_gonna_pass() anyerror!void { } , "before\nafter\ndefer3\ndefer1\n"); cases.addCase(x: { var tc = cases.create("@embedFile", \\const foo_txt = @embedFile("foo.txt"); \\const io = @import("std").io; \\ \\pub fn main() void { \\ const stdout = &io.getStdOut().outStream().stream; \\ stdout.print(foo_txt, .{}) catch unreachable; \\} , "1234\nabcd\n"); tc.addSourceFile("foo.txt", "1234\nabcd\n"); break :x tc; }); cases.addCase(x: { var tc = cases.create("parsing args", \\const std = @import("std"); \\const io = std.io; \\const os = std.os; \\const allocator = std.debug.global_allocator; \\ \\pub fn main() !void { \\ var args_it = std.process.args(); \\ var stdout_file = io.getStdOut(); \\ var stdout_adapter = stdout_file.outStream(); \\ const stdout = &stdout_adapter.stream; \\ var index: usize = 0; \\ _ = args_it.skip(); \\ while (args_it.next(allocator)) |arg_or_err| : (index += 1) { \\ const arg = try arg_or_err; \\ try stdout.print("{}: {}\n", .{index, arg}); \\ } \\} , \\0: first arg \\1: 'a' 'b' \ \\2: bare \\3: ba""re \\4: " \\5: last arg \\ ); tc.setCommandLineArgs(&[_][]const u8{ "first arg", "'a' 'b' \\", "bare", "ba\"\"re", "\"", "last arg", }); break :x tc; }); cases.addCase(x: { var tc = cases.create("parsing args new API", \\const std = @import("std"); \\const io = std.io; \\const os = std.os; \\const allocator = std.debug.global_allocator; \\ \\pub fn main() !void { \\ var args_it = std.process.args(); \\ var stdout_file = io.getStdOut(); \\ var stdout_adapter = stdout_file.outStream(); \\ const stdout = &stdout_adapter.stream; \\ var index: usize = 0; \\ _ = args_it.skip(); \\ while (args_it.next(allocator)) |arg_or_err| : (index += 1) { \\ 
// Duplicate of the args-parsing case using the "new API"; same program body
// and same expected output, then the end of addCases.
const arg = try arg_or_err; \\ try stdout.print("{}: {}\n", .{index, arg}); \\ } \\} , \\0: first arg \\1: 'a' 'b' \ \\2: bare \\3: ba""re \\4: " \\5: last arg \\ ); tc.setCommandLineArgs(&[_][]const u8{ "first arg", "'a' 'b' \\", "bare", "ba\"\"re", "\"", "last arg", }); break :x tc; }); }
test/compare_output.zig
//! Top-level build script: wires up cross-platform and Windows-only demo
//! executables plus the library test steps.
//! NOTE(review): this copy had its line structure collapsed; formatting is
//! restored here with all tokens unchanged.
const builtin = @import("builtin");
const std = @import("std");

/// Entry point invoked by `zig build`. Collects feature options, registers
/// every demo via installDemo, and aggregates library tests under "test".
pub fn build(b: *std.build.Builder) void {
    var options = Options{
        .build_mode = b.standardReleaseOptions(),
        .target = b.standardTargetOptions(.{}),
    };
    options.enable_tracy = b.option(bool, "enable-tracy", "Enable Tracy profiler") orelse false;
    options.dawn_from_source = b.option(bool, "dawn-from-source", "Build Dawn (WebGPU) from source") orelse false;
    if (options.dawn_from_source) {
        // Dawn is vendored as a git submodule; make sure it is checked out.
        ensureSubmodules(b.allocator) catch |err| @panic(@errorName(err));
    }

    //
    // Cross-platform demos
    //
    installDemo(b, network_test.build(b, options), "network_test");
    installDemo(b, triangle_wgpu.build(b, options), "triangle_wgpu");
    installDemo(b, procedural_mesh_wgpu.build(b, options), "procedural_mesh_wgpu");
    installDemo(b, textured_quad_wgpu.build(b, options), "textured_quad_wgpu");

    //
    // Windows-only demos
    //
    if (options.target.isWindows()) {
        // D3D12/DirectX debugging knobs only make sense on Windows targets.
        options.enable_pix = b.option(bool, "enable-pix", "Enable PIX GPU events and markers") orelse false;
        options.enable_dx_debug = b.option(
            bool,
            "enable-dx-debug",
            "Enable debug layer for D3D12, D2D1, DirectML and DXGI",
        ) orelse false;
        options.enable_dx_gpu_debug = b.option(
            bool,
            "enable-dx-gpu-debug",
            "Enable GPU-based validation for D3D12",
        ) orelse false;
        installDemo(b, audio_experiments.build(b, options), "audio_experiments");
        installDemo(b, audio_playback_test.build(b, options), "audio_playback_test");
        installDemo(b, bindless.build(b, options), "bindless");
        installDemo(b, bullet_physics_test.build(b, options), "bullet_physics_test");
        installDemo(b, directml_convolution_test.build(b, options), "directml_convolution_test");
        installDemo(b, mesh_shader_test.build(b, options), "mesh_shader_test");
        installDemo(b, physically_based_rendering.build(b, options), "physically_based_rendering");
        installDemo(b, rasterization.build(b, options), "rasterization");
        installDemo(b, simple3d.build(b, options), "simple3d");
        installDemo(b, simple_raytracer.build(b, options), "simple_raytracer");
        installDemo(b, textured_quad.build(b, options), "textured_quad");
        installDemo(b, vector_graphics_test.build(b, options), "vector_graphics_test");
        installDemo(b, triangle.build(b, options), "triangle");
        installDemo(b, minimal.build(b, options), "minimal");
        installDemo(b, procedural_mesh.build(b, options), "procedural_mesh");
        // Register intro0..intro6; the loop must be comptime because the
        // demo name is built with comptimePrint.
        comptime var intro_index: u32 = 0;
        inline while (intro_index < 7) : (intro_index += 1) {
            const name = "intro" ++ comptime std.fmt.comptimePrint("{}", .{intro_index});
            installDemo(b, intro.build(b, options, intro_index), name);
        }
    }

    //
    // Tests
    //
    const zbullet_tests = @import("libs/zbullet/build.zig").buildTests(b, options.build_mode, options.target);
    const zmesh_tests = @import("libs/zmesh/build.zig").buildTests(b, options.build_mode, options.target);
    const zmath_tests = @import("libs/zmath/build.zig").buildTests(b, options.build_mode, options.target);
    const znoise_tests = @import("libs/znoise/build.zig").buildTests(b, options.build_mode, options.target);
    const zenet_tests = @import("libs/zenet/build.zig").buildTests(b, options.build_mode, options.target);
    const test_step = b.step("test", "Run all tests");
    test_step.dependOn(&zbullet_tests.step);
    test_step.dependOn(&zmesh_tests.step);
    test_step.dependOn(&zmath_tests.step);
    test_step.dependOn(&znoise_tests.step);
    test_step.dependOn(&zenet_tests.step);
}

// Per-sample build scripts; each exposes build(b, options) returning an exe step.
const audio_experiments = @import("samples/audio_experiments/build.zig");
const audio_playback_test = @import("samples/audio_playback_test/build.zig");
const bindless = @import("samples/bindless/build.zig");
const bullet_physics_test = @import("samples/bullet_physics_test/build.zig");
const directml_convolution_test = @import("samples/directml_convolution_test/build.zig");
const mesh_shader_test = @import("samples/mesh_shader_test/build.zig");
const physically_based_rendering = @import("samples/physically_based_rendering/build.zig");
const rasterization = @import("samples/rasterization/build.zig");
const simple3d = @import("samples/simple3d/build.zig");
const simple_raytracer = @import("samples/simple_raytracer/build.zig");
const textured_quad = @import("samples/textured_quad/build.zig");
const triangle = @import("samples/triangle/build.zig");
const vector_graphics_test = @import("samples/vector_graphics_test/build.zig");
const intro = @import("samples/intro/build.zig");
const minimal = @import("samples/minimal/build.zig");
const procedural_mesh = @import("samples/procedural_mesh/build.zig");
const network_test = @import("samples/network_test/build.zig");
const triangle_wgpu = @import("samples/triangle_wgpu/build.zig");
const procedural_mesh_wgpu = @import("samples/procedural_mesh_wgpu/build.zig");
const textured_quad_wgpu = @import("samples/textured_quad_wgpu/build.zig");

/// Options shared by every sample's build(); debug/profiling flags default off.
pub const Options = struct {
    build_mode: std.builtin.Mode,
    target: std.zig.CrossTarget,

    enable_dx_debug: bool = false,
    enable_dx_gpu_debug: bool = false,
    enable_tracy: bool = false,
    enable_pix: bool = false,
    dawn_from_source: bool = false,
};

/// Registers install ("<name>") and run ("<name>-run") steps for one demo,
/// and hooks the install into the default install step.
fn installDemo(b: *std.build.Builder, exe: *std.build.LibExeObjStep, comptime name: []const u8) void {
    // Human-readable description: underscores in the demo name become spaces.
    // NOTE(review): desc_name is a fixed 256-byte comptime buffer and the
    // whole buffer is concatenated below — verify the unused tail is benign
    // at the targeted Zig version.
    comptime var desc_name: [256]u8 = undefined;
    comptime _ = std.mem.replace(u8, name, "_", " ", desc_name[0..]);
    const install = b.step(name, "Build '" ++ desc_name ++ "' demo");
    install.dependOn(&b.addInstallArtifact(exe).step);
    const run_step = b.step(name ++ "-run", "Run '" ++ desc_name ++ "' demo");
    const run_cmd = exe.run();
    run_cmd.step.dependOn(install);
    run_step.dependOn(&run_cmd.step);
    b.getInstallStep().dependOn(install);
}

/// Runs `git submodule update --init --recursive` in the repo root unless
/// the NO_ENSURE_SUBMODULES env var is set to "true".
fn ensureSubmodules(allocator: std.mem.Allocator) !void {
    if (std.process.getEnvVarOwned(allocator, "NO_ENSURE_SUBMODULES")) |no_ensure_submodules| {
        if (std.mem.eql(u8, no_ensure_submodules, "true")) return;
    } else |_| {}
    var child = std.ChildProcess.init(&.{ "git", "submodule", "update", "--init", "--recursive" }, allocator);
    child.cwd = thisDir();
    child.stderr = std.io.getStdErr();
    child.stdout = std.io.getStdOut();
    _ = try child.spawnAndWait();
}

/// Directory containing this build.zig, or "." if it has no dirname.
fn thisDir() []const u8 {
    return std.fs.path.dirname(@src().file) orelse ".";
}
build.zig
//! Application: owns the list of GUI windows and bridges to the host
//! platform through user-supplied `SystemCallbacks` function pointers.
//! NOTE(review): this copy had its line structure collapsed; formatting is
//! restored here with all tokens unchanged.
const std = @import("std");
const nvg = @import("nanovg");
const gui = @import("gui.zig");
usingnamespace @import("event.zig");
const Point = @import("geometry.zig").Point;

const Application = @This();

/// Platform hooks supplied by the embedding host. The "essential" entries
/// must be provided; the "optional" ones default to null and the
/// corresponding features are silently no-ops when absent.
pub const SystemCallbacks = struct {
    // essential
    createWindow: fn ([:0]const u8, u32, u32, gui.Window.CreateOptions, *gui.Window) anyerror!u32,
    destroyWindow: fn (u32) void,
    setWindowTitle: fn (u32, [:0]const u8) void,
    // optional
    startTimer: ?fn (*gui.Timer, u32) u32 = null,
    cancelTimer: ?fn (u32) void = null,
    showCursor: ?fn (bool) void = null,
    getClipboardText: ?fn (*std.mem.Allocator) anyerror!?[]const u8 = null,
    setClipboardText: ?fn (*std.mem.Allocator, []const u8) anyerror!void = null,
};

// TODO: get rid of globals (Timer might need a reference to Application)
// Cached copies of the optional timer callbacks so the static startTimer/
// cancelTimer functions below can reach them without an Application pointer.
var startTimerFn: ?fn (*gui.Timer, u32) u32 = null;
var cancelTimerFn: ?fn (u32) void = null;

allocator: *std.mem.Allocator,
system_callbacks: SystemCallbacks,
windows: std.ArrayList(*gui.Window),
//main_window: ?*gui.Window = null,

const Self = @This();

/// Allocates and initializes the Application and caches the optional timer
/// callbacks into the module-level globals. Caller frees via deinit().
pub fn init(allocator: *std.mem.Allocator, system_callbacks: SystemCallbacks) !*Self {
    var self = try allocator.create(Application);
    self.* = Self{
        .allocator = allocator,
        .system_callbacks = system_callbacks,
        .windows = std.ArrayList(*gui.Window).init(allocator),
    };
    startTimerFn = system_callbacks.startTimer;
    cancelTimerFn = system_callbacks.cancelTimer;
    return self;
}

/// Frees all remaining window allocations, the window list, and self.
/// NOTE(review): this destroys window memory directly without calling
/// window.deinit() (unlike requestWindowClose) — confirm that is intended
/// for shutdown.
pub fn deinit(self: *Self) void {
    for (self.windows.items) |window| {
        self.allocator.destroy(window);
    }
    self.windows.deinit();
    self.allocator.destroy(self);
}

/// Creates a gui.Window, asks the host to create the backing system window
/// (width/height are truncated to integers via @floatToInt), and tracks it.
pub fn createWindow(self: *Self, title: [:0]const u8, width: f32, height: f32, options: gui.Window.CreateOptions) !*gui.Window {
    var window = try gui.Window.init(self.allocator, self);
    errdefer self.allocator.destroy(window);
    const system_window_id = try self.system_callbacks.createWindow(
        title,
        @floatToInt(u32, width),
        @floatToInt(u32, height),
        options,
        window,
    );
    window.id = system_window_id;
    window.width = width;
    window.height = height;
    try self.windows.append(window);
    return window;
}

/// Forwards a title change to the host for the given system window id.
pub fn setWindowTitle(self: *Self, window_id: u32, title: [:0]const u8) void {
    self.system_callbacks.setWindowTitle(window_id, title);
}

/// Attempts to close a window. The request is ignored while a modal blocks
/// the window, and the window's onCloseRequestFn may veto it by returning
/// false. On success the system window is destroyed and the gui.Window is
/// untracked and deinitialized.
pub fn requestWindowClose(self: *Self, window: *gui.Window) void {
    if (window.isBlockedByModal()) return;
    if (window.onCloseRequestFn) |onCloseRequest| {
        if (!onCloseRequest(window)) return; // request denied
    }
    self.system_callbacks.destroyWindow(window.id);
    if (std.mem.indexOfScalar(*gui.Window, self.windows.items, window)) |i| {
        _ = self.windows.swapRemove(i);
        window.setMainWidget(null); // also removes reference to this window in main_widget
        window.deinit();
    }
}

/// Shows or hides the cursor; no-op if the host did not provide the hook.
pub fn showCursor(self: Self, show: bool) void {
    if (self.system_callbacks.showCursor) |showCursorFn| {
        showCursorFn(show);
    }
}

/// Writes text to the system clipboard; no-op if the hook is absent.
pub fn setClipboardText(self: Self, allocator: *std.mem.Allocator, text: []const u8) !void {
    if (self.system_callbacks.setClipboardText) |setClipboardTextFn| {
        try setClipboardTextFn(allocator, text);
    }
}

/// Reads the system clipboard; returns null when the hook is absent or the
/// clipboard has no text. Ownership of any returned slice follows the
/// host callback's contract.
pub fn getClipboardText(self: Self, allocator: *std.mem.Allocator) !?[]const u8 {
    if (self.system_callbacks.getClipboardText) |getClipboardTextFn| {
        return try getClipboardTextFn(allocator);
    }
    return null;
}

/// Starts a host timer via the cached global callback; returns 0 when the
/// host supplied no startTimer hook.
pub fn startTimer(timer: *gui.Timer, interval: u32) u32 {
    if (startTimerFn) |systemStartTimer| {
        return systemStartTimer(timer, interval);
    }
    return 0;
}

/// Cancels a host timer by id; no-op when no cancelTimer hook was supplied.
pub fn cancelTimer(id: u32) void {
    if (cancelTimerFn) |systemCancelTimer| {
        systemCancelTimer(id);
    }
}

/// Dispatches an event to every tracked window in insertion order.
pub fn broadcastEvent(self: *Self, event: *gui.Event) void {
    for (self.windows.items) |window| {
        window.handleEvent(event);
    }
}
src/gui/Application.zig
const std = @import("std");
const testing = std.testing;
const expect = testing.expect;
const expectEqual = testing.expectEqual;
const expectApproxEqRel = testing.expectApproxEqRel;

/// A 4-component single-precision vector with plain value semantics.
/// All operations return a new vector; nothing is mutated in place.
pub const Vector4 = packed struct {
    x: f32,
    y: f32,
    z: f32,
    w: f32,

    /// Build a vector from its four components.
    pub fn init(x: f32, y: f32, z: f32, w: f32) Vector4 {
        return Vector4{ .x = x, .y = y, .z = z, .w = w };
    }

    /// Splat one scalar into all four lanes.
    pub fn set(value: f32) Vector4 {
        return init(value, value, value, value);
    }

    /// Scale `v` to unit length.
    /// NOTE(review): a zero-length input divides by zero — verify callers
    /// never pass the zero vector.
    pub fn normalize(v: Vector4) Vector4 {
        const inv_len = 1.0 / length(v);
        return multiplyScalar(v, inv_len);
    }

    /// Euclidean length (magnitude) of `v`.
    pub fn length(v: Vector4) f32 {
        const len_sq = dot(v, v);
        return std.math.sqrt(len_sq);
    }

    /// Squared length; avoids the sqrt when only comparisons are needed.
    pub fn squaredLength(v: Vector4) f32 {
        return dot(v, v);
    }

    /// 4-component dot product.
    pub fn dot(a: Vector4, b: Vector4) f32 {
        return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;
    }

    /// Component-wise sum.
    pub fn add(a: Vector4, b: Vector4) Vector4 {
        return .{ .x = a.x + b.x, .y = a.y + b.y, .z = a.z + b.z, .w = a.w + b.w };
    }

    /// Component-wise difference.
    pub fn subtract(a: Vector4, b: Vector4) Vector4 {
        return .{ .x = a.x - b.x, .y = a.y - b.y, .z = a.z - b.z, .w = a.w - b.w };
    }

    /// Component-wise (Hadamard) product.
    pub fn multiply(a: Vector4, b: Vector4) Vector4 {
        return .{ .x = a.x * b.x, .y = a.y * b.y, .z = a.z * b.z, .w = a.w * b.w };
    }

    /// Component-wise quotient.
    pub fn divide(a: Vector4, b: Vector4) Vector4 {
        return .{ .x = a.x / b.x, .y = a.y / b.y, .z = a.z / b.z, .w = a.w / b.w };
    }

    /// Add a scalar to every lane.
    pub fn addScalar(a: Vector4, b: f32) Vector4 {
        return .{ .x = a.x + b, .y = a.y + b, .z = a.z + b, .w = a.w + b };
    }

    /// Subtract a scalar from every lane.
    pub fn subtractScalar(a: Vector4, b: f32) Vector4 {
        return .{ .x = a.x - b, .y = a.y - b, .z = a.z - b, .w = a.w - b };
    }

    /// Multiply every lane by a scalar.
    pub fn multiplyScalar(a: Vector4, b: f32) Vector4 {
        return .{ .x = a.x * b, .y = a.y * b, .z = a.z * b, .w = a.w * b };
    }

    /// Divide every lane by a scalar.
    pub fn divideScalar(a: Vector4, b: f32) Vector4 {
        return .{ .x = a.x / b, .y = a.y / b, .z = a.z / b, .w = a.w / b };
    }
};

test "Vector4" {
    const lhs = Vector4.init(1, 2, 3, 4);
    const rhs = Vector4.init(5, 6, 7, 8);

    // Constructors.
    const filled = Vector4.init(1, 2, 3, 4);
    try expectEqual(@as(f32, 1.0), filled.x);
    try expectEqual(@as(f32, 2.0), filled.y);
    try expectEqual(@as(f32, 3.0), filled.z);
    try expectEqual(@as(f32, 4.0), filled.w);

    const splat = Vector4.set(4);
    try expectEqual(@as(f32, 4.0), splat.x);
    try expectEqual(@as(f32, 4.0), splat.y);
    try expectEqual(@as(f32, 4.0), splat.z);
    try expectEqual(@as(f32, 4.0), splat.w);

    // Component-wise and scalar arithmetic.
    try expectEqual(Vector4.init(6, 8, 10, 12), lhs.add(rhs));
    try expectEqual(Vector4.init(15, 16, 17, 18), lhs.addScalar(14));
    try expectEqual(Vector4.init(-4, -4, -4, -4), lhs.subtract(rhs));
    try expectEqual(Vector4.init(5, 6, 7, 8), lhs.subtractScalar(-4));
    try expectEqual(Vector4.init(5, 12, 21, 32), lhs.multiply(rhs));
    try expectEqual(Vector4.init(-4, -8, -12, -16), lhs.multiplyScalar(-4));

    const quot = lhs.divide(rhs);
    try expectApproxEqRel(@as(f32, 1.0 / 5.0), quot.x, 0.0001);
    try expectApproxEqRel(@as(f32, 2.0 / 6.0), quot.y, 0.0001);
    try expectApproxEqRel(@as(f32, 3.0 / 7.0), quot.z, 0.0001);
    try expectApproxEqRel(@as(f32, 4.0 / 8.0), quot.w, 0.0001);

    const halved = lhs.divideScalar(2);
    try expectApproxEqRel(@as(f32, 1.0 / 2.0), halved.x, 0.0001);
    try expectApproxEqRel(@as(f32, 2.0 / 2.0), halved.y, 0.0001);
    try expectApproxEqRel(@as(f32, 3.0 / 2.0), halved.z, 0.0001);
    try expectApproxEqRel(@as(f32, 4.0 / 2.0), halved.w, 0.0001);

    // Reductions.
    try expectApproxEqRel(@as(f32, 70.0), Vector4.dot(lhs, rhs), 0.0001);
    try expectApproxEqRel(@as(f32, 30.0), Vector4.squaredLength(lhs), 0.0001);
    try expectApproxEqRel(@as(f32, 5.477), Vector4.length(lhs), 0.0001);

    const unit = Vector4.normalize(lhs);
    try expectApproxEqRel(@as(f32, 1.0 / 5.477), unit.x, 0.0001);
    try expectApproxEqRel(@as(f32, 2.0 / 5.477), unit.y, 0.0001);
    try expectApproxEqRel(@as(f32, 3.0 / 5.477), unit.z, 0.0001);
    try expectApproxEqRel(@as(f32, 4.0 / 5.477), unit.w, 0.0001);
}
src/vector4.zig
const std = @import("std"); const mem = std.mem; const math = std.math; const testing = std.testing; const Allocator = std.mem.Allocator; const Stream = std.net.Stream; const assert = std.debug.assert; pub const Bytes = std.ArrayList(u8); pub const native_endian = std.builtin.target.cpu.arch.endian(); pub inline fn isCtrlChar(ch: u8) bool { return (ch < @as(u8, 40) and ch != '\t') or ch == @as(u8, 177); } test "is-control-char" { try testing.expect(isCtrlChar('A') == false); try testing.expect(isCtrlChar('\t') == false); try testing.expect(isCtrlChar('\r') == true); } const token_map = [_]u1{ // 0, 1, 2, 3, 4, 5, 6, 7 ,8, 9,10,11,12,13,14,15 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; pub inline fn isTokenChar(ch: u8) bool { return token_map[ch] == 1; } pub const IOStream = struct { pub const invalid_stream = Stream{.handle=0}; pub const Error = Stream.WriteError; pub const ReadError = Stream.ReadError; const Self = @This(); allocator: ?*Allocator = null, in_buffer: []u8 = undefined, out_buffer: []u8 = undefined, _in_start_index: usize = 0, _in_end_index: usize = 0, _in_count: usize = 0, _out_count: usize = 0, _out_index: usize = 0, closed: bool = false, owns_in_buffer: bool = true, unbuffered: bool = false, in_stream: Stream, out_stream: Stream, // 
------------------------------------------------------------------------ // Constructors // ------------------------------------------------------------------------ pub fn init(stream: Stream) IOStream { return IOStream{ .in_stream = stream, .out_stream = stream, .in_buffer = &[_]u8{}, .out_buffer = &[_]u8{}, }; } pub fn initCapacity(allocator: *Allocator, stream: ?Stream, in_capacity: usize, out_capacity: usize) !IOStream { return IOStream{ .allocator = allocator, .in_stream = if (stream) |s| s else invalid_stream, .out_stream = if (stream) |s| s else invalid_stream, .in_buffer = try allocator.alloc(u8, in_capacity), .out_buffer = try allocator.alloc(u8, out_capacity), .owns_in_buffer = in_capacity == 0, ._in_start_index = in_capacity, ._in_end_index = in_capacity, }; } // Used to read only from a fixed buffer // the buffer must exist for the lifetime of the stream (or until swapped) pub fn fromBuffer(in_buffer: []u8) IOStream { return IOStream{ .in_stream = invalid_stream, .out_stream = invalid_stream, .in_buffer = in_buffer, .owns_in_buffer = false, ._in_start_index = 0, ._in_end_index = in_buffer.len, }; } // ------------------------------------------------------------------------ // Testing utilities // ------------------------------------------------------------------------ pub fn initTest(allocator: *Allocator, in_buffer: []const u8) !IOStream { return IOStream{ .allocator = allocator, .in_stream = invalid_stream, .out_stream = invalid_stream, .in_buffer = try mem.dupe(allocator, u8, in_buffer), .owns_in_buffer = in_buffer.len > 0, ._in_start_index = 0, ._in_end_index = in_buffer.len, }; } // Load into the in buffer for testing purposes pub fn load(self: *Self, allocator: *Allocator, in_buffer: []const u8) !void { self.in_buffer = try mem.dupe(allocator, u8, in_buffer); self._in_start_index = 0; self._in_end_index = in_buffer.len; } // ------------------------------------------------------------------------ // Custom Stream API // 
------------------------------------------------------------------------ pub fn reset(self: *Self) void { self._in_start_index = 0; self._in_count = 0; self._out_count = 0; self.closed = false; self.unbuffered = false; } // Reset the the initial state without reallocating pub fn reinit(self: *Self, stream: Stream) void { self.close(); // Close old files self.in_stream = stream; self.out_stream = stream; self._in_start_index = self.in_buffer.len; self._in_end_index = self.in_buffer.len; self._in_count = 0; self._out_index = 0; self._out_count = 0; self.closed = false; self.unbuffered = false; } // Swap the current buffer with a new buffer copying any unread bytes // into the new buffer pub fn swapBuffer(self: *Self, buffer: []u8) void { //const left = self.amountBuffered(); // Reset counter self._in_count = 0; // No swap needed if (buffer.ptr == self.in_buffer.ptr) return; // So we know not to free the in buf at deinit self.owns_in_buffer = false; self.unbuffered = false; // Copy what is left const remaining = self.readBuffered(); if (remaining.len > 0) { std.mem.copy(u8, buffer, remaining); self.in_buffer = buffer; // Set it right away self._in_start_index = 0; self._in_end_index = remaining.len; } else { self.in_buffer = buffer; // Set it right away self._in_start_index = buffer.len; self._in_end_index = buffer.len; } } // Switch between buffered and unbuffered reads pub fn readUnbuffered(self: *Self, unbuffered: bool) void { self.unbuffered = unbuffered; } // TODO: Inline is broken pub fn shiftAndFillBuffer(self: *Self, start: usize) !usize { self.unbuffered = true; defer self.unbuffered = false; // Move buffer to beginning const end = self.readCount(); const remaining = self.in_buffer[start..end]; std.mem.copyBackwards(u8, self.in_buffer, remaining); // Try to read more if (remaining.len >= self.in_buffer.len) { return error.EndOfBuffer; } const n = try self.reader().read(self.in_buffer[remaining.len..]); self._in_start_index = 0; self._in_end_index = 
remaining.len + n; return n; } // ------------------------------------------------------------------------ // Reader // ------------------------------------------------------------------------ pub const Reader = std.io.Reader(*IOStream, Stream.ReadError, IOStream.readFn); pub fn reader(self: *Self) Reader { return Reader{.context=self}; } // Return the amount of bytes waiting in the input buffer pub inline fn amountBuffered(self: *Self) usize { return self._in_end_index-self._in_start_index; } pub inline fn isEmpty(self: *Self) bool { return self._in_end_index == self._in_start_index; } pub inline fn readCount(self: *Self) usize { //return self._in_count + self._in_start_index; return self._in_start_index; } pub inline fn consumeBuffered(self: *Self, size: usize) usize { const n = math.min(size, self.amountBuffered()); self._in_start_index += n; return n; } pub inline fn skipBytes(self: *Self, n: usize) void { self._in_start_index += n; } pub inline fn readBuffered(self: *Self) []u8 { return self.in_buffer[self._in_start_index..self._in_end_index]; } // Read any generic type from a stream as long as it is // a multiple of 8 bytes. 
This does a an endianness conversion if needed pub fn readType(self: *Self, comptime T: type, comptime endian: std.builtin.Endian) !T { const n = @sizeOf(T); const I = switch (n) { 1 => u8, 2 => u16, 4 => u32, 8 => u64, 16 => u128, else => @compileError("Not implemented"), }; while (self.amountBuffered() < n) { try self.fillBuffer(); } const d = @bitCast(I, self.readBuffered()[0..n].*); const r = if (endian != native_endian) @byteSwap(I, d) else d; self.skipBytes(n); return @bitCast(T, r); } pub fn readFn(self: *Self, dest: []u8) !usize { //const self = @fieldParentPtr(BufferedReader, "stream", in_stream); if (self.unbuffered) return try self.in_stream.read(dest); // Hot path for one byte reads if (dest.len == 1 and self._in_end_index > self._in_start_index) { dest[0] = self.in_buffer[self._in_start_index]; self._in_start_index += 1; return 1; } var dest_index: usize = 0; while (true) { const dest_space = dest.len - dest_index; if (dest_space == 0) { return dest_index; } const amt_buffered = self.amountBuffered(); if (amt_buffered == 0) { assert(self._in_end_index <= self.in_buffer.len); // Make sure the last read actually gave us some data if (self._in_end_index == 0) { // reading from the unbuffered stream returned nothing // so we have nothing left to read. return dest_index; } // we can read more data from the unbuffered stream if (dest_space < self.in_buffer.len) { self._in_start_index = 0; self._in_end_index = try self.in_stream.read(self.in_buffer[0..]); //self._in_count += self._in_end_index; // Shortcut if (self._in_end_index >= dest_space) { mem.copy(u8, dest[dest_index..], self.in_buffer[0..dest_space]); self._in_start_index = dest_space; return dest.len; } } else { // asking for so much data that buffering is actually less efficient. 
// forward the request directly to the unbuffered stream const amt_read = try self.in_stream.read(dest[dest_index..]); //self._in_count += amt_read; return dest_index + amt_read; } } const copy_amount = math.min(dest_space, amt_buffered); const copy_end_index = self._in_start_index + copy_amount; mem.copy(u8, dest[dest_index..], self.in_buffer[self._in_start_index..copy_end_index]); self._in_start_index = copy_end_index; dest_index += copy_amount; } } // TODO: Inline is broken pub fn fillBuffer(self: *Self) !void { const n = try self.readFn(self.in_buffer); if (n == 0) return error.EndOfStream; self._in_start_index = 0; self._in_end_index = n; } /// Reads 1 byte from the stream or returns `error.EndOfStream`. pub fn readByte(self: *Self) !u8 { if (self._in_end_index == self._in_start_index) { // Do a direct read into the input buffer self._in_end_index = try self.readFn( self.in_buffer[0..self.in_buffer.len]); self._in_start_index = 0; if (self._in_end_index < 1) return error.EndOfStream; } const c = self.in_buffer[self._in_start_index]; self._in_start_index += 1; //self._in_count += 1; return c; } pub inline fn readByteSafe(self: *Self) !u8 { if (self._in_end_index == self._in_start_index) { return error.EndOfBuffer; } return self.readByteUnsafe(); } pub inline fn readByteUnsafe(self: *Self) u8 { const c = self.in_buffer[self._in_start_index]; self._in_start_index += 1; return c; } pub inline fn lastByte(self: *Self) u8 { return self.in_buffer[self._in_start_index]; } // Read up to limit bytes from the stream buffer until the expression // returns true or the limit is hit. The initial value is checked first. 
pub fn readUntilExpr( self: *Self, comptime expr: fn(ch: u8) bool, initial: u8, limit: usize) u8 { var found = false; var ch: u8 = initial; while (!found and self.readCount() + 8 < limit) { inline for ("01234567") |_| { if (expr(ch)) { found = true; break; } ch = self.readByteUnsafe(); } } if (!found) { while (self.readCount() < limit) { if (expr(ch)) { break; } ch = self.readByteUnsafe(); } } return ch; } // Read up to limit bytes from the stream buffer until the expression // returns true or the limit is hit. The initial value is checked first. // If the expression returns an error abort. pub fn readUntilExprValidate( self: *Self, comptime ErrorType: type, comptime expr: fn(ch: u8) ErrorType!bool, initial: u8, limit: usize) !u8 { var found = false; var ch: u8 = initial; while (!found and self.readCount() + 8 < limit) { inline for ("01234567") |_| { if (try expr(ch)) { found = true; break; } ch = self.readByteUnsafe(); } } if (!found) { while (self.readCount() < limit) { if (try expr(ch)) { break; } ch = self.readByteUnsafe(); } } return ch; } // ------------------------------------------------------------------------ // OutStream // ------------------------------------------------------------------------ pub const Writer = std.io.Writer(*IOStream, Stream.WriteError, IOStream.writeFn); pub fn writer(self: *Self) Writer { return Writer{.context=self}; } fn writeFn(self: *Self, bytes: []const u8) !usize { if (bytes.len == 1) { self.out_buffer[self._out_index] = bytes[0]; self._out_index += 1; if (self._out_index == self.out_buffer.len) { try self.flush(); } return @as(usize, 1); } else if (bytes.len >= self.out_buffer.len) { try self.flush(); return self.out_stream.write(bytes); } var src_index: usize = 0; while (src_index < bytes.len) { const dest_space_left = self.out_buffer.len - self._out_index; const copy_amt = math.min(dest_space_left, bytes.len - src_index); mem.copy(u8, self.out_buffer[self._out_index..], bytes[src_index .. 
src_index + copy_amt]); self._out_index += copy_amt; assert(self._out_index <= self.out_buffer.len); if (self._out_index == self.out_buffer.len) { try self.flush(); } src_index += copy_amt; } return src_index; } pub fn flush(self: *Self) !void { try self.out_stream.writer().writeAll(self.out_buffer[0..self._out_index]); self._out_index = 0; } // Flush 'size' bytes from the start of the buffer out the stream pub fn flushBuffered(self: *Self, size: usize) !void { self._out_index = std.math.min(size, self.out_buffer.len); try self.flush(); } // Read directly into the output buffer then flush it out pub fn writeFromReader(self: *Self, in_stream: anytype) !usize { var total_wrote: usize = 0; if (self._out_index != 0) { total_wrote += self._out_index; try self.flush(); } while (true) { self._out_index = try in_stream.read(self.out_buffer); if (self._out_index == 0) break; total_wrote += self._out_index; try self.flush(); } return total_wrote; } // ------------------------------------------------------------------------ // Cleanup // ------------------------------------------------------------------------ pub fn close(self: *Self) void { if (self.closed) return; self.closed = true; // TODO: Doesn't need closed? 
// const in_stream = &self.in_stream;
        // const out_stream = &self.out_stream ;
        // if (in_stream.handle != 0) in_stream.close();
        // std.debug.warn("Close in={} out={}\n", .{in_stream, out_stream});
        // if (in_stream.handle != out_stream.handle and out_stream.handle != 0) {
        //     out_stream.close();
        // }
    }

    /// Release the stream buffers. Closes the stream first if it was not
    /// already closed. The in buffer is only freed when still owned (it may
    /// have been handed in via fromBuffer or swapped out via swapBuffer).
    pub fn deinit(self: *Self) void {
        if (!self.closed) self.close();
        if (self.allocator) |allocator| {
            // If the buffer was swapped assume that it is no longer owned
            if (self.owns_in_buffer) {
                allocator.free(self.in_buffer);
            }
            allocator.free(self.out_buffer);
        }
    }
};

// The event based lock doesn't work without evented io
pub const Lock = if (std.io.is_async) std.event.Lock else std.Thread.Mutex;

/// A simple pool of heap-allocated objects of type T. Objects are created
/// with create(), handed back with release(), and reused via get().
/// All created objects are destroyed in deinit().
pub fn ObjectPool(comptime T: type) type {
    return struct {
        const Self = @This();
        pub const ObjectList = std.ArrayList(*T);

        allocator: *Allocator,
        // Stores all created objects
        objects: ObjectList,
        // Stores objects that have been released
        free_objects: ObjectList,
        // Lock to use if using threads
        lock: Lock = Lock{},

        pub fn init(allocator: *Allocator) Self {
            return Self{
                .allocator = allocator,
                .objects = ObjectList.init(allocator),
                .free_objects = ObjectList.init(allocator),
            };
        }

        /// Get an object previously released back into the pool, or null if
        /// none are free. Takes the element at index 0; note that swapRemove
        /// moves the last element into that slot, so after the first call the
        /// free list is no longer in strict release order.
        pub fn get(self: *Self) ?*T {
            if (self.free_objects.items.len == 0) return null;
            return self.free_objects.swapRemove(0);
        }

        /// Create an object and allocate space for it in the pool.
        /// Also reserves free-list capacity so a later release() cannot fail.
        pub fn create(self: *Self) !*T {
            const obj = try self.allocator.create(T);
            try self.objects.append(obj);
            try self.free_objects.ensureCapacity(self.objects.items.len);
            return obj;
        }

        // Return a object back to the pool, this assumes it was created
        // using create (which ensures capacity to return this quickly).
        pub fn release(self: *Self, object: *T) void {
            return self.free_objects.appendAssumeCapacity(object);
        }

        /// Destroy every object ever created by this pool and free both lists.
        pub fn deinit(self: *Self) void {
            while (self.objects.popOrNull()) |obj| {
                self.allocator.destroy(obj);
            }
            self.objects.deinit();
            self.free_objects.deinit();
        }
    };
}

test "object-pool" {
    const Point = struct {
        x: u8,
        y: u8,
    };
    var pool = ObjectPool(Point).init(std.testing.allocator);
    defer pool.deinit();

    // Pool is empty
    try testing.expect(pool.get() == null);

    // Create
    var test_point = Point{ .x = 10, .y = 3 };
    const pt = try pool.create();
    pt.* = test_point;

    // Pool is still empty
    try testing.expect(pool.get() == null);

    // Relase
    pool.release(pt);

    // Should get the same thing back
    try testing.expectEqual(pool.get().?.*, test_point);
}

// An unmanaged map of arrays
pub fn StringArrayMap(comptime T: type) type {
    return struct {
        const Self = @This();
        pub const Array = std.ArrayList(T);
        pub const Map = std.StringHashMap(*Array);
        allocator: *Allocator,
        storage: Map,

        pub fn init(allocator: *Allocator) Self {
            return Self{
                .allocator = allocator,
                .storage = Map.init(allocator),
            };
        }

        pub fn deinit(self: *Self) void {
            // Deinit each array
            var it = self.storage.iterator();
            while (it.next()) |entry| {
                const array = entry.value_ptr.*;
                array.deinit();
                self.allocator.destroy(array);
            }
            self.storage.deinit();
        }

        /// Free all stored arrays and empty the map while keeping its
        /// allocated capacity.
        /// FIX: the previous version looped with `it.pop()`, which does not
        /// exist on the hash map iterator (a compile error once this function
        /// is instantiated), and it also left the freed array pointers inside
        /// the map. Iterate with next() and then clear the entries.
        pub fn reset(self: *Self) void {
            // Deinit each array
            var it = self.storage.iterator();
            while (it.next()) |entry| {
                const array = entry.value_ptr.*;
                array.deinit();
                self.allocator.destroy(array);
            }
            // Drop the now-dangling entries but keep the capacity
            self.storage.clearRetainingCapacity();
        }

        /// Append arg to the array stored under name, creating the array on
        /// first use. The name slice is NOT copied; the caller must keep it
        /// alive for the lifetime of the map.
        pub fn append(self: *Self, name: []const u8, arg: T) !void {
            if (!self.storage.contains(name)) {
                const ptr = try self.allocator.create(Array);
                ptr.* = Array.init(self.allocator);
                _ = try self.storage.put(name, ptr);
            }
            var array = self.getArray(name).?;
            try array.append(arg);
        }

        // Return entire set
        pub fn getArray(self: *Self, name: []const u8) ?*Array {
            if (self.storage.getEntry(name)) |entry| {
                return entry.value_ptr.*;
            }
            return null;
        }

        // Return first field
        pub fn get(self: *Self, name: []const u8) ?T {
            if (self.getArray(name)) |array| {
                return if (array.items.len > 0) array.items[0] else null;
            }
            return null;
        }
    };
}

test "string-array-map" {
    const Map = StringArrayMap([]const u8);
    var map = Map.init(std.testing.allocator);
    defer map.deinit();
    try map.append("query", "a");
    try map.append("query", "b");
    try map.append("query", "c");
    const query = map.getArray("query").?;
    try testing.expect(query.items.len == 3);
    try testing.expect(mem.eql(u8, query.items[0], "a"));
}
packages/zhp/src/util.zig
const std = @import("std");
const assert = std.debug.assert;

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    var input_file = try std.fs.cwd().openFile("input/14.txt", .{});
    defer input_file.close();
    var buffered_reader = std.io.bufferedReader(input_file.reader());

    const count = try performPolymerization(allocator, buffered_reader.reader(), 40);
    std.debug.print("quantity of most common element - quantity of least common element: {}\n", .{count});
}

/// A pair-insertion rule: when `condition` (two letters) appears, `insert`
/// is placed between them.
const Rule = struct { condition: [2]u8, insert: u8 };

// Elements are uppercase ASCII letters, mapped to 0..25.
const num_letters: usize = 'Z' - 'A' + 1;

fn asciiNumber(x: u8) u8 {
    return x - 'A';
}

/// Advent of Code 2021 day 14: run `steps` rounds of pair insertion on the
/// polymer template read from `reader` (template line, blank line, then
/// "AB -> C" rules) and return the difference between the counts of the most
/// and least common elements.
///
/// The polymer is never materialized; only the count of each letter pair is
/// tracked (the string doubles every step, so this is the only tractable
/// representation for 40 steps). All temporary allocations go through an
/// arena freed on return.
fn performPolymerization(gpa: std.mem.Allocator, reader: anytype, steps: u64) !u64 {
    var arena = std.heap.ArenaAllocator.init(gpa);
    defer arena.deinit();
    const allocator = arena.allocator();

    var buf: [4096]u8 = undefined;
    var rules = std.ArrayList(Rule).init(allocator);

    const template = try allocator.dupe(u8, (try reader.readUntilDelimiterOrEof(&buf, '\n')) orelse return error.UnexpectedEOF);
    if (template.len < 2) return error.TemplateTooSmall;
    // The template must be followed by exactly one blank line.
    if (((try reader.readUntilDelimiterOrEof(&buf, '\n')) orelse return error.UnexpectedEOF).len > 0) return error.WrongFormat;

    // Parse the rule lines, each exactly "AB -> C" (7 bytes).
    while (try reader.readUntilDelimiterOrEof(&buf, '\n')) |line| {
        if (line.len != 7) return error.WrongFormat;
        if (!std.mem.eql(u8, " -> ", line[2..6])) return error.WrongFormat;
        try rules.append(.{
            .condition = line[0..2].*,
            .insert = line[6],
        });
    }

    // pairs is basically a 2D matrix indexed via ['A']['B'] for a pair AB:
    // pairs[a + b * num_letters] counts occurrences of the pair.
    var pairs = try allocator.alloc(u64, num_letters * num_letters);
    std.mem.set(u64, pairs, 0);

    // Track the final pair separately so the very last letter of the polymer
    // can be counted exactly once when building the histogram.
    var last_pair: [2]u8 = template[template.len - 2 ..][0..2].*;

    // populate pairs with initial template
    {
        var i: u64 = 0;
        while (i + 1 < template.len) : (i += 1) {
            pairs[asciiNumber(template[i]) + asciiNumber(template[i + 1]) * num_letters] += 1;
        }
    }

    // do steps: all rules apply simultaneously, so each step reads from
    // `pairs` and accumulates deltas into `tmp_pairs`.
    {
        var tmp_pairs = try allocator.alloc(u64, num_letters * num_letters);
        var tmp_last_pair: [2]u8 = undefined;
        var i: u64 = 0;
        while (i < steps) : (i += 1) {
            std.mem.copy(u64, tmp_pairs, pairs);
            std.mem.copy(u8, &tmp_last_pair, &last_pair);
            for (rules.items) |rule| {
                const left = asciiNumber(rule.condition[0]);
                const right = asciiNumber(rule.condition[1]);
                const middle = asciiNumber(rule.insert);
                const num_matching_pairs = pairs[left + right * num_letters];
                // Every pair LR becomes LM and MR.
                tmp_pairs[left + right * num_letters] -= num_matching_pairs;
                tmp_pairs[left + middle * num_letters] += num_matching_pairs;
                tmp_pairs[middle + right * num_letters] += num_matching_pairs;
                // If the polymer ended in LR it now ends in MR.
                if (std.mem.eql(u8, &rule.condition, &last_pair)) {
                    tmp_last_pair[0] = rule.insert;
                }
            }
            std.mem.copy(u64, pairs, tmp_pairs);
            std.mem.copy(u8, &last_pair, &tmp_last_pair);
        }
    }

    var histogram = [_]u64{0} ** num_letters;
    // calculate histogram: each pair contributes its FIRST letter; the last
    // letter of the whole polymer is added once at the end.
    {
        var i: u64 = 0;
        while (i < num_letters) : (i += 1) {
            var j: u64 = 0;
            while (j < num_letters) : (j += 1) {
                histogram[i] += pairs[i + j * num_letters];
            }
        }
        histogram[asciiNumber(last_pair[1])] += 1;
    }

    const max = std.mem.max(u64, &histogram);
    const min = blk: {
        var min: ?u64 = null;
        for (histogram) |x| {
            if (x > 0) {
                if (min) |current_min| {
                    min = std.math.min(current_min, x);
                } else {
                    min = x;
                }
            }
        }
        // template.len >= 2, so at least one letter occurs; unwrap is safe.
        break :blk min.?;
    };
    return max - min;
}

test "example 1" {
    const text =
        \\NNCB
        \\
        \\CH -> B
        \\HH -> N
        \\CB -> H
        \\NH -> C
        \\HB -> C
        \\HC -> B
        \\HN -> C
        \\NN -> C
        \\BH -> H
        \\NC -> B
        \\NB -> B
        \\BN -> B
        \\BB -> N
        \\BC -> B
        \\CC -> N
        \\CN -> C
    ;
    var fbs = std.io.fixedBufferStream(text);
    const count = try performPolymerization(std.testing.allocator, fbs.reader(), 40);
    try std.testing.expectEqual(@as(u64, 2188189693529), count);
}
src/14_2.zig
//! Unicode bidirectional paired-bracket data (the Bidi_Paired_Bracket and
//! Bidi_Paired_Bracket_Type character properties), one record per bracket
//! codepoint, ordered by codepoint.
//! NOTE(review): this table appears to be generated from Unicode
//! BidiBrackets.txt — prefer regenerating over hand-editing entries.

/// One paired-bracket record.
pub const BracketPairing = struct {
    /// The bracket character itself.
    codepoint: u21,
    /// The codepoint of the matching bracket (the other half of the pair).
    pair: u21,
    /// The Bidi_Paired_Bracket_Type of `codepoint`.
    type: Type,

    /// o = Open bracket, c = Close bracket, n = None.
    pub const Type = enum {
        o,
        c,
        n,
    };
};

/// All bracket pairings, sorted ascending by `codepoint`.
pub const data = [_]BracketPairing{
    .{ .codepoint = 0x0028, .pair = 0x0029, .type = .o }, // LEFT PARENTHESIS
    .{ .codepoint = 0x0029, .pair = 0x0028, .type = .c }, // RIGHT PARENTHESIS
    .{ .codepoint = 0x005B, .pair = 0x005D, .type = .o }, // LEFT SQUARE BRACKET
    .{ .codepoint = 0x005D, .pair = 0x005B, .type = .c }, // RIGHT SQUARE BRACKET
    .{ .codepoint = 0x007B, .pair = 0x007D, .type = .o }, // LEFT CURLY BRACKET
    .{ .codepoint = 0x007D, .pair = 0x007B, .type = .c }, // RIGHT CURLY BRACKET
    .{ .codepoint = 0x0F3A, .pair = 0x0F3B, .type = .o }, // TIBETAN MARK GUG RTAGS GYON
    .{ .codepoint = 0x0F3B, .pair = 0x0F3A, .type = .c }, // TIBETAN MARK GUG RTAGS GYAS
    .{ .codepoint = 0x0F3C, .pair = 0x0F3D, .type = .o }, // TIBETAN MARK ANG KHANG GYON
    .{ .codepoint = 0x0F3D, .pair = 0x0F3C, .type = .c }, // TIBETAN MARK ANG KHANG GYAS
    .{ .codepoint = 0x169B, .pair = 0x169C, .type = .o }, // OGHAM FEATHER MARK
    .{ .codepoint = 0x169C, .pair = 0x169B, .type = .c }, // OGHAM REVERSED FEATHER MARK
    .{ .codepoint = 0x2045, .pair = 0x2046, .type = .o }, // LEFT SQUARE BRACKET WITH QUILL
    .{ .codepoint = 0x2046, .pair = 0x2045, .type = .c }, // RIGHT SQUARE BRACKET WITH QUILL
    .{ .codepoint = 0x207D, .pair = 0x207E, .type = .o }, // SUPERSCRIPT LEFT PARENTHESIS
    .{ .codepoint = 0x207E, .pair = 0x207D, .type = .c }, // SUPERSCRIPT RIGHT PARENTHESIS
    .{ .codepoint = 0x208D, .pair = 0x208E, .type = .o }, // SUBSCRIPT LEFT PARENTHESIS
    .{ .codepoint = 0x208E, .pair = 0x208D, .type = .c }, // SUBSCRIPT RIGHT PARENTHESIS
    .{ .codepoint = 0x2308, .pair = 0x2309, .type = .o }, // LEFT CEILING
    .{ .codepoint = 0x2309, .pair = 0x2308, .type = .c }, // RIGHT CEILING
    .{ .codepoint = 0x230A, .pair = 0x230B, .type = .o }, // LEFT FLOOR
    .{ .codepoint = 0x230B, .pair = 0x230A, .type = .c }, // RIGHT FLOOR
    .{ .codepoint = 0x2329, .pair = 0x232A, .type = .o }, // LEFT-POINTING ANGLE BRACKET
    .{ .codepoint = 0x232A, .pair = 0x2329, .type = .c }, // RIGHT-POINTING ANGLE BRACKET
    .{ .codepoint = 0x2768, .pair = 0x2769, .type = .o }, // MEDIUM LEFT PARENTHESIS ORNAMENT
    .{ .codepoint = 0x2769, .pair = 0x2768, .type = .c }, // MEDIUM RIGHT PARENTHESIS ORNAMENT
    .{ .codepoint = 0x276A, .pair = 0x276B, .type = .o }, // MEDIUM FLATTENED LEFT PARENTHESIS ORNAMENT
    .{ .codepoint = 0x276B, .pair = 0x276A, .type = .c }, // MEDIUM FLATTENED RIGHT PARENTHESIS ORNAMENT
    .{ .codepoint = 0x276C, .pair = 0x276D, .type = .o }, // MEDIUM LEFT-POINTING ANGLE BRACKET ORNAMENT
    .{ .codepoint = 0x276D, .pair = 0x276C, .type = .c }, // MEDIUM RIGHT-POINTING ANGLE BRACKET ORNAMENT
    .{ .codepoint = 0x276E, .pair = 0x276F, .type = .o }, // HEAVY LEFT-POINTING ANGLE QUOTATION MARK ORNAMENT
    .{ .codepoint = 0x276F, .pair = 0x276E, .type = .c }, // HEAVY RIGHT-POINTING ANGLE QUOTATION MARK ORNAMENT
    .{ .codepoint = 0x2770, .pair = 0x2771, .type = .o }, // HEAVY LEFT-POINTING ANGLE BRACKET ORNAMENT
    .{ .codepoint = 0x2771, .pair = 0x2770, .type = .c }, // HEAVY RIGHT-POINTING ANGLE BRACKET ORNAMENT
    .{ .codepoint = 0x2772, .pair = 0x2773, .type = .o }, // LIGHT LEFT TORTOISE SHELL BRACKET ORNAMENT
    .{ .codepoint = 0x2773, .pair = 0x2772, .type = .c }, // LIGHT RIGHT TORTOISE SHELL BRACKET ORNAMENT
    .{ .codepoint = 0x2774, .pair = 0x2775, .type = .o }, // MEDIUM LEFT CURLY BRACKET ORNAMENT
    .{ .codepoint = 0x2775, .pair = 0x2774, .type = .c }, // MEDIUM RIGHT CURLY BRACKET ORNAMENT
    .{ .codepoint = 0x27C5, .pair = 0x27C6, .type = .o }, // LEFT S-SHAPED BAG DELIMITER
    .{ .codepoint = 0x27C6, .pair = 0x27C5, .type = .c }, // RIGHT S-SHAPED BAG DELIMITER
    .{ .codepoint = 0x27E6, .pair = 0x27E7, .type = .o }, // MATHEMATICAL LEFT WHITE SQUARE BRACKET
    .{ .codepoint = 0x27E7, .pair = 0x27E6, .type = .c }, // MATHEMATICAL RIGHT WHITE SQUARE BRACKET
    .{ .codepoint = 0x27E8, .pair = 0x27E9, .type = .o }, // MATHEMATICAL LEFT ANGLE BRACKET
    .{ .codepoint = 0x27E9, .pair = 0x27E8, .type = .c }, // MATHEMATICAL RIGHT ANGLE BRACKET
    .{ .codepoint = 0x27EA, .pair = 0x27EB, .type = .o }, // MATHEMATICAL LEFT DOUBLE ANGLE BRACKET
    .{ .codepoint = 0x27EB, .pair = 0x27EA, .type = .c }, // MATHEMATICAL RIGHT DOUBLE ANGLE BRACKET
    .{ .codepoint = 0x27EC, .pair = 0x27ED, .type = .o }, // MATHEMATICAL LEFT WHITE TORTOISE SHELL BRACKET
    .{ .codepoint = 0x27ED, .pair = 0x27EC, .type = .c }, // MATHEMATICAL RIGHT WHITE TORTOISE SHELL BRACKET
    .{ .codepoint = 0x27EE, .pair = 0x27EF, .type = .o }, // MATHEMATICAL LEFT FLATTENED PARENTHESIS
    .{ .codepoint = 0x27EF, .pair = 0x27EE, .type = .c }, // MATHEMATICAL RIGHT FLATTENED PARENTHESIS
    .{ .codepoint = 0x2983, .pair = 0x2984, .type = .o }, // LEFT WHITE CURLY BRACKET
    .{ .codepoint = 0x2984, .pair = 0x2983, .type = .c }, // RIGHT WHITE CURLY BRACKET
    .{ .codepoint = 0x2985, .pair = 0x2986, .type = .o }, // LEFT WHITE PARENTHESIS
    .{ .codepoint = 0x2986, .pair = 0x2985, .type = .c }, // RIGHT WHITE PARENTHESIS
    .{ .codepoint = 0x2987, .pair = 0x2988, .type = .o }, // Z NOTATION LEFT IMAGE BRACKET
    .{ .codepoint = 0x2988, .pair = 0x2987, .type = .c }, // Z NOTATION RIGHT IMAGE BRACKET
    .{ .codepoint = 0x2989, .pair = 0x298A, .type = .o }, // Z NOTATION LEFT BINDING BRACKET
    .{ .codepoint = 0x298A, .pair = 0x2989, .type = .c }, // Z NOTATION RIGHT BINDING BRACKET
    .{ .codepoint = 0x298B, .pair = 0x298C, .type = .o }, // LEFT SQUARE BRACKET WITH UNDERBAR
    .{ .codepoint = 0x298C, .pair = 0x298B, .type = .c }, // RIGHT SQUARE BRACKET WITH UNDERBAR
    .{ .codepoint = 0x298D, .pair = 0x2990, .type = .o }, // LEFT SQUARE BRACKET WITH TICK IN TOP CORNER
    .{ .codepoint = 0x298E, .pair = 0x298F, .type = .c }, // RIGHT SQUARE BRACKET WITH TICK IN BOTTOM CORNER
    .{ .codepoint = 0x298F, .pair = 0x298E, .type = .o }, // LEFT SQUARE BRACKET WITH TICK IN BOTTOM CORNER
    .{ .codepoint = 0x2990, .pair = 0x298D, .type = .c }, // RIGHT SQUARE BRACKET WITH TICK IN TOP CORNER
    .{ .codepoint = 0x2991, .pair = 0x2992, .type = .o }, // LEFT ANGLE BRACKET WITH DOT
    .{ .codepoint = 0x2992, .pair = 0x2991, .type = .c }, // RIGHT ANGLE BRACKET WITH DOT
    .{ .codepoint = 0x2993, .pair = 0x2994, .type = .o }, // LEFT ARC LESS-THAN BRACKET
    .{ .codepoint = 0x2994, .pair = 0x2993, .type = .c }, // RIGHT ARC GREATER-THAN BRACKET
    .{ .codepoint = 0x2995, .pair = 0x2996, .type = .o }, // DOUBLE LEFT ARC GREATER-THAN BRACKET
    .{ .codepoint = 0x2996, .pair = 0x2995, .type = .c }, // DOUBLE RIGHT ARC LESS-THAN BRACKET
    .{ .codepoint = 0x2997, .pair = 0x2998, .type = .o }, // LEFT BLACK TORTOISE SHELL BRACKET
    .{ .codepoint = 0x2998, .pair = 0x2997, .type = .c }, // RIGHT BLACK TORTOISE SHELL BRACKET
    .{ .codepoint = 0x29D8, .pair = 0x29D9, .type = .o }, // LEFT WIGGLY FENCE
    .{ .codepoint = 0x29D9, .pair = 0x29D8, .type = .c }, // RIGHT WIGGLY FENCE
    .{ .codepoint = 0x29DA, .pair = 0x29DB, .type = .o }, // LEFT DOUBLE WIGGLY FENCE
    .{ .codepoint = 0x29DB, .pair = 0x29DA, .type = .c }, // RIGHT DOUBLE WIGGLY FENCE
    .{ .codepoint = 0x29FC, .pair = 0x29FD, .type = .o }, // LEFT-POINTING CURVED ANGLE BRACKET
    .{ .codepoint = 0x29FD, .pair = 0x29FC, .type = .c }, // RIGHT-POINTING CURVED ANGLE BRACKET
    .{ .codepoint = 0x2E22, .pair = 0x2E23, .type = .o }, // TOP LEFT HALF BRACKET
    .{ .codepoint = 0x2E23, .pair = 0x2E22, .type = .c }, // TOP RIGHT HALF BRACKET
    .{ .codepoint = 0x2E24, .pair = 0x2E25, .type = .o }, // BOTTOM LEFT HALF BRACKET
    .{ .codepoint = 0x2E25, .pair = 0x2E24, .type = .c }, // BOTTOM RIGHT HALF BRACKET
    .{ .codepoint = 0x2E26, .pair = 0x2E27, .type = .o }, // LEFT SIDEWAYS U BRACKET
    .{ .codepoint = 0x2E27, .pair = 0x2E26, .type = .c }, // RIGHT SIDEWAYS U BRACKET
    .{ .codepoint = 0x2E28, .pair = 0x2E29, .type = .o }, // LEFT DOUBLE PARENTHESIS
    .{ .codepoint = 0x2E29, .pair = 0x2E28, .type = .c }, // RIGHT DOUBLE PARENTHESIS
    .{ .codepoint = 0x3008, .pair = 0x3009, .type = .o }, // LEFT ANGLE BRACKET
    .{ .codepoint = 0x3009, .pair = 0x3008, .type = .c }, // RIGHT ANGLE BRACKET
    .{ .codepoint = 0x300A, .pair = 0x300B, .type = .o }, // LEFT DOUBLE ANGLE BRACKET
    .{ .codepoint = 0x300B, .pair = 0x300A, .type = .c }, // RIGHT DOUBLE ANGLE BRACKET
    .{ .codepoint = 0x300C, .pair = 0x300D, .type = .o }, // LEFT CORNER BRACKET
    .{ .codepoint = 0x300D, .pair = 0x300C, .type = .c }, // RIGHT CORNER BRACKET
    .{ .codepoint = 0x300E, .pair = 0x300F, .type = .o }, // LEFT WHITE CORNER BRACKET
    .{ .codepoint = 0x300F, .pair = 0x300E, .type = .c }, // RIGHT WHITE CORNER BRACKET
    .{ .codepoint = 0x3010, .pair = 0x3011, .type = .o }, // LEFT BLACK LENTICULAR BRACKET
    .{ .codepoint = 0x3011, .pair = 0x3010, .type = .c }, // RIGHT BLACK LENTICULAR BRACKET
    .{ .codepoint = 0x3014, .pair = 0x3015, .type = .o }, // LEFT TORTOISE SHELL BRACKET
    .{ .codepoint = 0x3015, .pair = 0x3014, .type = .c }, // RIGHT TORTOISE SHELL BRACKET
    .{ .codepoint = 0x3016, .pair = 0x3017, .type = .o }, // LEFT WHITE LENTICULAR BRACKET
    .{ .codepoint = 0x3017, .pair = 0x3016, .type = .c }, // RIGHT WHITE LENTICULAR BRACKET
    .{ .codepoint = 0x3018, .pair = 0x3019, .type = .o }, // LEFT WHITE TORTOISE SHELL BRACKET
    .{ .codepoint = 0x3019, .pair = 0x3018, .type = .c }, // RIGHT WHITE TORTOISE SHELL BRACKET
    .{ .codepoint = 0x301A, .pair = 0x301B, .type = .o }, // LEFT WHITE SQUARE BRACKET
    .{ .codepoint = 0x301B, .pair = 0x301A, .type = .c }, // RIGHT WHITE SQUARE BRACKET
    .{ .codepoint = 0xFE59, .pair = 0xFE5A, .type = .o }, // SMALL LEFT PARENTHESIS
    .{ .codepoint = 0xFE5A, .pair = 0xFE59, .type = .c }, // SMALL RIGHT PARENTHESIS
    .{ .codepoint = 0xFE5B, .pair = 0xFE5C, .type = .o }, // SMALL LEFT CURLY BRACKET
    .{ .codepoint = 0xFE5C, .pair = 0xFE5B, .type = .c }, // SMALL RIGHT CURLY BRACKET
    .{ .codepoint = 0xFE5D, .pair = 0xFE5E, .type = .o }, // SMALL LEFT TORTOISE SHELL BRACKET
    .{ .codepoint = 0xFE5E, .pair = 0xFE5D, .type = .c }, // SMALL RIGHT TORTOISE SHELL BRACKET
    .{ .codepoint = 0xFF08, .pair = 0xFF09, .type = .o }, // FULLWIDTH LEFT PARENTHESIS
    .{ .codepoint = 0xFF09, .pair = 0xFF08, .type = .c }, // FULLWIDTH RIGHT PARENTHESIS
    .{ .codepoint = 0xFF3B, .pair = 0xFF3D, .type = .o }, // FULLWIDTH LEFT SQUARE BRACKET
    .{ .codepoint = 0xFF3D, .pair = 0xFF3B, .type = .c }, // FULLWIDTH RIGHT SQUARE BRACKET
    .{ .codepoint = 0xFF5B, .pair = 0xFF5D, .type = .o }, // FULLWIDTH LEFT CURLY BRACKET
    .{ .codepoint = 0xFF5D, .pair = 0xFF5B, .type = .c }, // FULLWIDTH RIGHT CURLY BRACKET
    .{ .codepoint = 0xFF5F, .pair = 0xFF60, .type = .o }, // FULLWIDTH LEFT WHITE PARENTHESIS
    .{ .codepoint = 0xFF60, .pair = 0xFF5F, .type = .c }, // FULLWIDTH RIGHT WHITE PARENTHESIS
    .{ .codepoint = 0xFF62, .pair = 0xFF63, .type = .o }, // HALFWIDTH LEFT CORNER BRACKET
    .{ .codepoint = 0xFF63, .pair = 0xFF62, .type = .c }, // HALFWIDTH RIGHT CORNER BRACKET
};
src/bidi_brackets.zig
const std = @import("std");
const termelot = @import("termelot");
const Position = termelot.Position;
const Cell = termelot.Cell;
usingnamespace termelot.style;

/// Route std.log output through termelot so log lines do not corrupt the
/// alternate-screen buffer.
pub fn log(
    comptime level: std.log.Level,
    comptime scope: @TypeOf(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    termelot.log(level, scope, format, args);
}

/// Spinning-donut demo (the classic donut.c) rendered with termelot.
/// A torus (tube radius R1, center distance R2) is rotated by angles A and B
/// each frame, projected with viewer distance K2 and scale K1, and shaded by
/// a simple directional-light luminance term. Runs until interrupted.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const config = termelot.Config{
        .raw_mode = false,
        .alternate_screen = true,
        .initial_buffer_size = null,
    };
    var term: termelot.Termelot = undefined;
    term = try term.init(&gpa.allocator, config);
    defer term.deinit();

    try term.setCursorVisibility(false);
    defer term.setCursorVisibility(true) catch {};
    try term.setTitle("Termelot Donut");

    const screen_width: f32 = 60.0;
    const screen_height: f32 = 20.0;
    const theta_spacing: f32 = 0.07;
    const phi_spacing: f32 = 0.02;
    const R1: f32 = 1.0;
    const R2: f32 = 2.0;
    const K2: f32 = 5.0;
    // Projection scale chosen so the torus fills ~3/4 of the screen height.
    const K1: f32 = screen_height * K2 * 3.0 / (8.0 * (R1 + R2));

    var A: f32 = 0.0;
    var B: f32 = 0.0;
    while (true) {
        term.clearScreen();
        A += 0.02;
        B += 0.01;
        const sin_A = std.math.sin(A);
        const cos_A = std.math.cos(A);
        const sin_B = std.math.sin(B);
        const cos_B = std.math.cos(B);

        var theta: f32 = 0.0;
        while (theta < 2.0 * std.math.pi) : (theta += theta_spacing) {
            const sin_theta = std.math.sin(theta);
            const cos_theta = std.math.cos(theta);

            var phi: f32 = 0.0;
            while (phi < 2 * std.math.pi) : (phi += phi_spacing) {
                const sin_phi = std.math.sin(phi);
                const cos_phi = std.math.cos(phi);

                // Point on the tube's cross-section circle.
                const circle_x = R2 + R1 * cos_theta;
                const circle_y = R1 * sin_theta;

                // Rotate and project; x is doubled to compensate for the
                // roughly 2:1 aspect ratio of terminal cells.
                const x = 2.0 * circle_x * (cos_B * cos_phi + sin_A * sin_B * sin_phi) - circle_y * cos_A * sin_B;
                const y = circle_x * (sin_B * cos_phi - sin_A * cos_B * sin_phi) + circle_y * cos_A * cos_B;
                const z = K2 + cos_A * circle_x * sin_phi + circle_y * sin_A;
                const ooz = 1.0 / z; // one-over-z, for perspective scaling

                // xp/yp are already u16; no further cast is needed below.
                const xp: u16 = @floatToInt(
                    u16,
                    @floor(screen_width / 2 + K1 * ooz * x),
                );
                const yp: u16 = @floatToInt(
                    u16,
                    @floor(screen_height / 2 - K1 * ooz * y),
                );

                // Luminance: surface orientation vs. the light direction.
                // Positive L means the surface faces the light.
                const L = cos_phi * cos_theta * sin_B - cos_A * cos_theta * sin_phi - sin_A * sin_theta + cos_B * (cos_A * sin_theta - cos_theta * sin_A * sin_phi);
                if (L > 0.0) {
                    const lum: u8 = @floatToInt(u8, @floor(L * 8.0));
                    // FIX: the previous if/else chain left `c` undefined
                    // whenever lum exceeded 11; the `else` arm now clamps to
                    // the brightest shade instead of reading undefined memory.
                    const c: Color = switch (lum) {
                        0 => Color{ .Bit24 = ColorBit24.RGB(7, 72, 146) },
                        1 => Color{ .Bit24 = ColorBit24.RGB(22, 87, 161) },
                        2 => Color{ .Bit24 = ColorBit24.RGB(34, 101, 178) },
                        3 => Color{ .Bit24 = ColorBit24.RGB(46, 111, 185) },
                        4 => Color{ .Bit24 = ColorBit24.RGB(58, 120, 192) },
                        5 => Color{ .Bit24 = ColorBit24.RGB(72, 131, 199) },
                        6 => Color{ .Bit24 = ColorBit24.RGB(88, 147, 215) },
                        7 => Color{ .Bit24 = ColorBit24.RGB(99, 154, 218) },
                        8 => Color{ .Bit24 = ColorBit24.RGB(113, 167, 229) },
                        9 => Color{ .Bit24 = ColorBit24.RGB(124, 176, 235) },
                        10 => Color{ .Bit24 = ColorBit24.RGB(140, 187, 240) },
                        else => Color{ .Bit24 = ColorBit24.RGB(160, 202, 249) },
                    };
                    term.setCell(
                        Position{ .row = yp, .col = xp },
                        Cell{
                            .rune = 'X',
                            .style = Style{
                                .fg_color = c,
                                .bg_color = Color.Default,
                                .decorations = Decorations{
                                    .bold = false,
                                    .italic = false,
                                    .underline = false,
                                    .blinking = false,
                                },
                            },
                        },
                    );
                }
            }
        }
        try term.drawScreen();
    }
}
examples/donut.zig
//! macOS-only experiment: spawn a child process with posix_spawnp, then use
//! Mach VM APIs to read a word from its address space and overwrite it.
//! NOTE(review): task_for_pid normally requires root or a debugger
//! entitlement - confirm the intended runtime environment.
const std = @import("std");
const log = std.log;
const mem = std.mem;
const os = std.os;
const c = std.c;

// Mach typedefs mirrored from the C headers (not all exposed by std.c).
const integer_t = c_int;
const task_flavor_t = c.natural_t;
const task_info_t = *integer_t;
const task_name_t = c.mach_port_name_t;
const vm_address_t = c.vm_offset_t;
const vm_size_t = c.mach_vm_size_t;
const vm_machine_attribute_t = usize;
const vm_machine_attribute_val_t = isize;

// vm_machine_attribute() selectors and values (mach/vm_attributes.h).
/// Cachability
const MATTR_CACHE = 1;
/// Migrability
const MATTR_MIGRATE = 2;
/// Replicability
const MATTR_REPLICATE = 4;
/// (Generic) turn attribute off
const MATTR_VAL_OFF = 0;
/// (Generic) turn attribute on
const MATTR_VAL_ON = 1;
/// (Generic) return current value
const MATTR_VAL_GET = 2;
/// Flush from all caches
const MATTR_VAL_CACHE_FLUSH = 6;
/// Flush from data caches
const MATTR_VAL_DCACHE_FLUSH = 7;
/// Flush from instruction caches
const MATTR_VAL_ICACHE_FLUSH = 8;
/// Sync I+D caches
const MATTR_VAL_CACHE_SYNC = 9;
/// Get page info (stats)
const MATTR_VAL_GET_INFO = 10;

/// task_info() flavor that fills a task_vm_info_data_t.
const TASK_VM_INFO = 22;
/// Struct size in natural_t units, as the task_info() count protocol expects.
const TASK_VM_INFO_COUNT: c.mach_msg_type_number_t = @sizeOf(task_vm_info_data_t) / @sizeOf(c.natural_t);

/// Mirror of the kernel's task_vm_info structure (rev5 layout); field order
/// and widths must match the C definition exactly.
const task_vm_info = extern struct {
    // virtual memory size (bytes)
    virtual_size: c.mach_vm_size_t,
    // number of memory regions
    region_count: integer_t,
    page_size: integer_t,
    // resident memory size (bytes)
    resident_size: c.mach_vm_size_t,
    // peak resident size (bytes)
    resident_size_peak: c.mach_vm_size_t,
    device: c.mach_vm_size_t,
    device_peak: c.mach_vm_size_t,
    internal: c.mach_vm_size_t,
    internal_peak: c.mach_vm_size_t,
    external: c.mach_vm_size_t,
    external_peak: c.mach_vm_size_t,
    reusable: c.mach_vm_size_t,
    reusable_peak: c.mach_vm_size_t,
    purgeable_volatile_pmap: c.mach_vm_size_t,
    purgeable_volatile_resident: c.mach_vm_size_t,
    purgeable_volatile_virtual: c.mach_vm_size_t,
    compressed: c.mach_vm_size_t,
    compressed_peak: c.mach_vm_size_t,
    compressed_lifetime: c.mach_vm_size_t,
    // added for rev1
    phys_footprint: c.mach_vm_size_t,
    // added for rev2
    min_address: c.mach_vm_address_t,
    max_address: c.mach_vm_address_t,
    // added for rev3
    ledger_phys_footprint_peak: i64,
    ledger_purgeable_nonvolatile: i64,
    ledger_purgeable_novolatile_compressed: i64,
    ledger_purgeable_volatile: i64,
    ledger_purgeable_volatile_compressed: i64,
    ledger_tag_network_nonvolatile: i64,
    ledger_tag_network_nonvolatile_compressed: i64,
    ledger_tag_network_volatile: i64,
    ledger_tag_network_volatile_compressed: i64,
    ledger_tag_media_footprint: i64,
    ledger_tag_media_footprint_compressed: i64,
    ledger_tag_media_nofootprint: i64,
    ledger_tag_media_nofootprint_compressed: i64,
    ledger_tag_graphics_footprint: i64,
    ledger_tag_graphics_footprint_compressed: i64,
    ledger_tag_graphics_nofootprint: i64,
    ledger_tag_graphics_nofootprint_compressed: i64,
    ledger_tag_neural_footprint: i64,
    ledger_tag_neural_footprint_compressed: i64,
    ledger_tag_neural_nofootprint: i64,
    ledger_tag_neural_nofootprint_compressed: i64,
    // added for rev4
    limit_bytes_remaining: u64,
    // added for rev5
    decompressions: integer_t,
};
const task_vm_info_data_t = task_vm_info;

// Mach calls linked from libSystem.
extern "c" fn task_info(
    target_task: task_name_t,
    flavor: task_flavor_t,
    task_info_out: task_info_t,
    task_info_outCnt: *c.mach_msg_type_number_t,
) c.kern_return_t;
extern "c" fn _host_page_size(task: c.mach_port_t, size: *vm_size_t) c.kern_return_t;
extern "c" fn vm_deallocate(target_task: c.vm_map_t, address: vm_address_t, size: vm_size_t) c.kern_return_t;
extern "c" fn vm_machine_attribute(
    target_task: c.vm_map_t,
    address: vm_address_t,
    size: vm_size_t,
    attribute: vm_machine_attribute_t,
    value: *vm_machine_attribute_val_t,
) c.kern_return_t;

const errno = c.getErrno;

var gpa_alloc = std.heap.GeneralPurposeAllocator(.{}){};
const gpa = gpa_alloc.allocator();

/// Usage: prog <exe_path> [--aslr-off]
/// Spawns <exe_path>, then, on operator confirmation, reads and patches a
/// hard-coded address in the child and waits for it to exit.
pub fn main() anyerror!void {
    const args = try std.process.argsAlloc(gpa);
    defer std.process.argsFree(gpa, args);
    // NOTE(review): args[1] will panic when no argument is supplied - confirm
    // that is acceptable for this experiment.
    const exe_path = args[1];
    const aslr_off = if (args.len == 3 and mem.eql(u8, args[2], "--aslr-off")) true else false;
    log.info("Init...", .{});
    var attr: c.posix_spawnattr_t = undefined;
    var res = c.posix_spawnattr_init(&attr);
    defer c.posix_spawnattr_destroy(&attr);
    log.info("attr = {*}", .{attr});
    switch (errno(res)) {
        .SUCCESS => {},
        .NOMEM => return error.NoMemory,
        .INVAL => return error.InvalidValue,
        else => unreachable,
    }
    log.info("Setting flags...", .{});
    var flags = c.POSIX_SPAWN_SETSIGDEF | c.POSIX_SPAWN_SETSIGMASK;
    if (aslr_off) {
        // Private flag: disable ASLR so the hard-coded addresses below hold.
        flags |= c._POSIX_SPAWN_DISABLE_ASLR;
    }
    res = c.posix_spawnattr_setflags(&attr, @intCast(c_short, flags));
    switch (errno(res)) {
        .SUCCESS => {},
        .INVAL => return error.InvalidValue,
        else => unreachable,
    }
    // const path: [:0]const u8 = "./hello";
    // NOTE(review): POSIX expects argv/env to be null-terminated pointer
    // arrays; empty slices passed via .ptr may not satisfy that - verify.
    const argv: [][*:0]const u8 = &.{};
    const env: [][*:0]const u8 = &.{};
    var pid: os.pid_t = -1;
    res = c.posix_spawnp(&pid, exe_path, null, &attr, argv.ptr, env.ptr);
    switch (errno(res)) {
        .SUCCESS => {},
        else => return error.SpawnpFailed,
    }
    log.info("pid = {d}", .{pid});
    const stderr = std.io.getStdErr().writer();
    const stdin = std.io.getStdIn().reader();
    var repl_buf: [4096]u8 = undefined;
    while (true) {
        try stderr.print("\nOverwrite address 0x100052030, thus stopping the process?", .{});
        if (stdin.readUntilDelimiterOrEof(&repl_buf, '\n') catch |err| {
            try stderr.print("\nunable to parse command: {s}\n", .{@errorName(err)});
            continue;
        }) |_| {
            // Any line of input confirms; obtain the child's task port.
            var port: c.mach_port_name_t = undefined;
            var kern_res = c.task_for_pid(c.mach_task_self(), pid, &port);
            if (kern_res != 0) {
                return error.TaskForPidFailed;
            }
            log.info("kern_res = {}, port = {}", .{ kern_res, port });
            const page_size = try pageSize(port);
            log.info("page_size = {}", .{page_size});
            // Hard-coded addresses only valid for one specific binary with
            // ASLR disabled - presumably ./hello above; TODO confirm.
            const address: c.mach_vm_address_t = 0x100052030;
            var buf: [8]u8 = undefined;
            const rr = try vmRead(port, address, &buf, buf.len);
            log.info("{x}, {x}", .{ std.fmt.fmtSliceHexLower(rr), mem.readIntLittle(u64, &buf) });
            const swap_addr: u64 = 0x100001082;
            var tbuf: [8]u8 = undefined;
            mem.writeIntLittle(u64, &tbuf, swap_addr);
            const nwritten = try vmWrite(port, address, &tbuf, tbuf.len, .x86_64);
            log.info("nwritten = {d}", .{nwritten});
            const pid_res = os.waitpid(pid, 0);
            log.info("pid_res = {}", .{pid_res});
            break;
        }
    }
}

/// Write `count` bytes from `buf` into `task`'s address space at `address`,
/// split so no single mach_vm_write crosses a page boundary. On aarch64 also
/// flushes the target's caches so patched code becomes visible.
/// Returns the number of bytes written.
fn vmWrite(task: c.mach_port_name_t, address: u64, buf: []const u8, count: usize, arch: std.Target.Cpu.Arch) !usize {
    var total_written: usize = 0;
    var curr_addr = address;
    const page_size = try pageSize(task);
    var out_buf = buf[0..];
    while (total_written < count) {
        const curr_size = maxBytesLeftInPage(page_size, curr_addr, count - total_written);
        // mach_vm_write takes the source as an address-sized integer.
        var kern_res = c.mach_vm_write(
            task,
            curr_addr,
            @ptrToInt(out_buf.ptr),
            @intCast(c.mach_msg_type_number_t, curr_size),
        );
        if (kern_res != 0) {
            log.err("mach_vm_write failed with error: {d}", .{kern_res});
            return error.MachVmWriteFailed;
        }
        switch (arch) {
            .aarch64 => {
                var mattr_value: vm_machine_attribute_val_t = MATTR_VAL_CACHE_FLUSH;
                kern_res = vm_machine_attribute(task, curr_addr, curr_size, MATTR_CACHE, &mattr_value);
                if (kern_res != 0) {
                    log.err("vm_machine_attribute failed with error: {d}", .{kern_res});
                    return error.VmMachineAttributeFailed;
                }
            },
            .x86_64 => {},
            else => unreachable,
        }
        out_buf = out_buf[curr_size..];
        total_written += curr_size;
        curr_addr += curr_size;
    }
    return total_written;
}

/// Read `count` bytes from `task`'s address space at `address` into `buf`,
/// page by page, returning the filled prefix of `buf`. mach_vm_read maps a
/// kernel-provided buffer into our task; it is copied out and released with
/// vm_deallocate on every iteration.
fn vmRead(task: c.mach_port_name_t, address: u64, buf: []u8, count: usize) ![]u8 {
    var total_read: usize = 0;
    var curr_addr = address;
    const page_size = try pageSize(task);
    var out_buf = buf[0..];
    while (total_read < count) {
        const curr_size = maxBytesLeftInPage(page_size, curr_addr, count - total_read);
        var curr_bytes_read: c.mach_msg_type_number_t = 0;
        var vm_memory: c.vm_offset_t = undefined;
        var kern_res = c.mach_vm_read(task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);
        if (kern_res != 0) {
            log.err("mach_vm_read failed with error: {d}", .{kern_res});
            return error.MachVmReadFailed;
        }
        @memcpy(out_buf[0..].ptr, @intToPtr([*]const u8, vm_memory), curr_bytes_read);
        // Release the out-of-line buffer the kernel mapped into our task;
        // failure is logged but not fatal to the read.
        kern_res = vm_deallocate(c.mach_task_self(), vm_memory, curr_bytes_read);
        if (kern_res != 0) {
            log.err("vm_deallocate failed with error: {d}", .{kern_res});
        }
        out_buf = out_buf[curr_bytes_read..];
        curr_addr += curr_bytes_read;
        total_read += curr_bytes_read;
    }
    return buf[0..total_read];
}

/// Clamp `count` so that [address, address + result) stays inside the page
/// containing `address`. Returns `count` unchanged when page_size is 0.
fn maxBytesLeftInPage(page_size: usize, address: u64, count: usize) usize {
    var left = count;
    if (page_size > 0) {
        const page_offset = address % page_size;
        const bytes_left_in_page = page_size - page_offset;
        if (count > bytes_left_in_page) {
            left = bytes_left_in_page;
        }
    }
    return left;
}

/// Page size for `task`, preferring task_info(TASK_VM_INFO); falls back to
/// _host_page_size when the port is null or the query fails.
fn pageSize(task: c.mach_port_name_t) !usize {
    if (task != 0) {
        var info_count = TASK_VM_INFO_COUNT;
        var vm_info: task_vm_info_data_t = undefined;
        const kern_res = task_info(task, TASK_VM_INFO, @ptrCast(task_info_t, &vm_info), &info_count);
        if (kern_res != 0) {
            log.err("task_info failed with error: {d}", .{kern_res});
        } else {
            log.info("page_size = {x}", .{vm_info.page_size});
            return @intCast(usize, vm_info.page_size);
        }
    }
    var page_size: vm_size_t = undefined;
    const kern_res = _host_page_size(c.mach_host_self(), &page_size);
    if (kern_res != 0) {
        log.err("_host_page_size failed with error: {d}", .{kern_res});
    }
    return page_size;
}
src/main.zig
//! Test-suite Deserializer that replays a fixed slice of Tokens to drive
//! getty deserialization, panicking on any mismatch between the token stream
//! and what deserialization requests.
const std = @import("std");
const getty = @import("getty");

const assert = std.debug.assert;
const expectEqual = std.testing.expectEqual;
const expectEqualSlices = std.testing.expectEqualSlices;
const Token = @import("common/token.zig").Token;

/// Token-replay deserializer. `tokens` is consumed front-to-back; the caller
/// retains ownership of the token slice itself.
pub const Deserializer = struct {
    allocator: std.mem.Allocator,
    tokens: []const Token,

    const Self = @This();
    const impl = @"impl Deserializer";

    pub fn init(allocator: std.mem.Allocator, tokens: []const Token) Self {
        return .{
            .allocator = allocator,
            .tokens = tokens,
        };
    }

    /// Number of tokens not yet consumed.
    pub fn remaining(self: Self) usize {
        return self.tokens.len;
    }

    /// Pop the next token, or null when the stream is exhausted.
    pub fn nextTokenOpt(self: *Self) ?Token {
        switch (self.remaining()) {
            0 => return null,
            else => |len| {
                const first = self.tokens[0];
                self.tokens = if (len == 1) &[_]Token{} else self.tokens[1..];
                return first;
            },
        }
    }

    /// Pop the next token; panics (failing the test) when exhausted.
    pub fn nextToken(self: *Self) Token {
        switch (self.remaining()) {
            0 => std.debug.panic("ran out of tokens to deserialize", .{}),
            else => |len| {
                const first = self.tokens[0];
                self.tokens = if (len == 1) &[_]Token{} else self.tokens[1..];
                return first;
            },
        }
    }

    /// Look at the next token without consuming it.
    fn peekTokenOpt(self: Self) ?Token {
        return if (self.tokens.len > 0) self.tokens[0] else null;
    }

    /// Like peekTokenOpt but panics when exhausted.
    fn peekToken(self: Self) Token {
        if (self.peekTokenOpt()) |token| {
            return token;
        } else {
            std.debug.panic("ran out of tokens to deserialize", .{});
        }
    }

    // NOTE(review): deserializeMap is passed twice - once in the map slot and
    // once where a struct deserializer appears to be expected. Presumably
    // struct deserialization is routed through the map path; confirm against
    // getty.Deserializer's parameter order.
    pub usingnamespace getty.Deserializer(
        *Self,
        impl.deserializer.Error,
        impl.deserializer.deserializeBool,
        impl.deserializer.deserializeEnum,
        impl.deserializer.deserializeFloat,
        impl.deserializer.deserializeInt,
        impl.deserializer.deserializeMap,
        impl.deserializer.deserializeOptional,
        impl.deserializer.deserializeSequence,
        impl.deserializer.deserializeString,
        impl.deserializer.deserializeMap,
        impl.deserializer.deserializeVoid,
    );
};

/// Implementation namespace wired into getty.Deserializer above. Each
/// deserialize* function consumes token(s) and forwards the payload to the
/// visitor, panicking on an unexpected token kind.
const @"impl Deserializer" = struct {
    pub const deserializer = struct {
        pub const Error = getty.de.Error || error{TestExpectedEqual};

        pub fn deserializeBool(self: *Deserializer, visitor: anytype) Error!@TypeOf(visitor).Value {
            switch (self.nextToken()) {
                .Bool => |v| return try visitor.visitBool(Error, v),
                else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
            }
        }

        // NOTE(review): unimplemented stub - it discards self and never
        // returns a value; it can only compile while it is never
        // instantiated for a non-void Value. Confirm whether enum support
        // is intentionally missing.
        pub fn deserializeEnum(self: *Deserializer, visitor: anytype) Error!@TypeOf(visitor).Value {
            _ = self;
        }

        pub fn deserializeFloat(self: *Deserializer, visitor: anytype) Error!@TypeOf(visitor).Value {
            return switch (self.nextToken()) {
                .F16 => |v| try visitor.visitFloat(Error, v),
                .F32 => |v| try visitor.visitFloat(Error, v),
                .F64 => |v| try visitor.visitFloat(Error, v),
                .F128 => |v| try visitor.visitFloat(Error, v),
                else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
            };
        }

        pub fn deserializeInt(self: *Deserializer, visitor: anytype) Error!@TypeOf(visitor).Value {
            return switch (self.nextToken()) {
                .I8 => |v| try visitor.visitInt(Error, v),
                .I16 => |v| try visitor.visitInt(Error, v),
                .I32 => |v| try visitor.visitInt(Error, v),
                .I64 => |v| try visitor.visitInt(Error, v),
                .I128 => |v| try visitor.visitInt(Error, v),
                .U8 => |v| try visitor.visitInt(Error, v),
                .U16 => |v| try visitor.visitInt(Error, v),
                .U32 => |v| try visitor.visitInt(Error, v),
                .U64 => |v| try visitor.visitInt(Error, v),
                .U128 => |v| try visitor.visitInt(Error, v),
                else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
            };
        }

        /// Both Map and Struct tokens open a map; they differ only in the
        /// end-marker token expected afterwards.
        pub fn deserializeMap(self: *Deserializer, visitor: anytype) Error!@TypeOf(visitor).Value {
            switch (self.nextToken()) {
                .Map => |v| return try visitMap(self, v.len, .MapEnd, visitor),
                .Struct => |v| return try visitMap(self, v.len, .StructEnd, visitor),
                else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
            }
        }

        /// Peeks so the Some token is consumed here but the wrapped value's
        /// token is left for the recursive visit.
        pub fn deserializeOptional(self: *Deserializer, visitor: anytype) Error!@TypeOf(visitor).Value {
            switch (self.peekToken()) {
                .Null => {
                    _ = self.nextToken();
                    return try visitor.visitNull(Error);
                },
                .Some => {
                    _ = self.nextToken();
                    return try visitor.visitSome(Error);
                },
                else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
            }
        }

        pub fn deserializeSequence(self: *Deserializer, visitor: anytype) Error!@TypeOf(visitor).Value {
            return switch (self.nextToken()) {
                .Seq => |v| try visitSequence(self, v.len, .SeqEnd, visitor),
                .Tuple => |v| try visitSequence(self, v.len, .TupleEnd, visitor),
                else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
            };
        }

        /// The string payload is duplicated so the visitor owns its memory.
        pub fn deserializeString(self: *Deserializer, visitor: anytype) Error!@TypeOf(visitor).Value {
            switch (self.nextToken()) {
                .String => |v| return try visitor.visitString(Error, try self.allocator.dupe(u8, v)),
                else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
            }
        }

        pub fn deserializeVoid(self: *Deserializer, visitor: anytype) Error!@TypeOf(visitor).Value {
            switch (self.nextToken()) {
                .Void => return try visitor.visitVoid(Error),
                else => |v| std.debug.panic("deserialization did not expect this token: {s}", .{@tagName(v)}),
            }
        }

        /// Drive a MapAccess over the entries, then assert the matching end
        /// token was left in the stream.
        fn visitMap(self: *Deserializer, len: ?usize, end: Token, visitor: anytype) Error!@TypeOf(visitor).Value {
            var m = MapAccess{ .de = self, .len = len, .end = end };
            var value = visitor.visitMap(m.mapAccess());
            try assertNextToken(self, end);
            return value;
        }

        /// Drive a SeqAccess over the elements, then assert the matching end
        /// token was left in the stream.
        fn visitSequence(self: *Deserializer, len: ?usize, end: Token, visitor: anytype) Error!@TypeOf(visitor).Value {
            var s = SeqAccess{ .de = self, .len = len, .end = end };
            var value = visitor.visitSequence(s.sequenceAccess());
            try assertNextToken(self, end);
            return value;
        }

        /// Consume one token and require it to equal `expected` (an end
        /// marker). Panics when the stream is out of tokens or mismatched.
        fn assertNextToken(self: *Deserializer, expected: Token) !void {
            if (self.nextTokenOpt()) |token| {
                const token_tag = std.meta.activeTag(token);
                const expected_tag = std.meta.activeTag(expected);
                if (token_tag == expected_tag) {
                    switch (token) {
                        .MapEnd => try expectEqual(@field(token, "MapEnd"), @field(expected, "MapEnd")),
                        .SeqEnd => try expectEqual(@field(token, "SeqEnd"), @field(expected, "SeqEnd")),
                        .StructEnd => try expectEqual(@field(token, "StructEnd"), @field(expected, "StructEnd")),
                        .TupleEnd => try expectEqual(@field(token, "TupleEnd"), @field(expected, "TupleEnd")),
                        else => |v| std.debug.panic("unexpected token: {s}", .{@tagName(v)}),
                    }
                } else {
                    // NOTE(review): @panic does not format - the {} below are
                    // printed literally. Use std.debug.panic to interpolate.
                    @panic("expected Token::{} but deserialization wants Token::{}");
                }
            } else {
                @panic("end of tokens but deserialization wants Token::{}");
            }
        }
    };
};

/// Sequence access adapter: yields elements until the end token is peeked.
const SeqAccess = struct {
    de: *Deserializer,
    len: ?usize,
    end: Token,

    const Self = @This();
    const impl = @"impl SeqAccess";

    pub usingnamespace getty.de.SequenceAccess(
        *Self,
        impl.sequenceAccess.Error,
        impl.sequenceAccess.nextElementSeed,
    );
};

const @"impl SeqAccess" = struct {
    pub const sequenceAccess = struct {
        pub const Error = @"impl Deserializer".deserializer.Error;

        pub fn nextElementSeed(self: *SeqAccess, seed: anytype) Error!?@TypeOf(seed).Value {
            // Stop (without consuming) at the end marker.
            if (self.de.peekTokenOpt()) |token| {
                if (std.meta.eql(token, self.end)) return null;
            }
            // Count down remaining elements, saturating at zero.
            self.len.? -= @as(usize, if (self.len.? > 0) 1 else 0);
            return try seed.deserialize(self.de.allocator, self.de.deserializer());
        }
    };
};

/// Map access adapter: yields key/value pairs until the end token is peeked.
const MapAccess = struct {
    de: *Deserializer,
    len: ?usize,
    end: Token,

    const Self = @This();
    const impl = @"impl MapAccess";

    pub usingnamespace getty.de.MapAccess(
        *Self,
        impl.mapAccess.Error,
        impl.mapAccess.nextKeySeed,
        impl.mapAccess.nextValueSeed,
    );
};

const @"impl MapAccess" = struct {
    pub const mapAccess = struct {
        pub const Error = @"impl Deserializer".deserializer.Error;

        pub fn nextKeySeed(self: *MapAccess, seed: anytype) Error!?@TypeOf(seed).Value {
            // Stop (without consuming) at the end marker.
            if (self.de.peekTokenOpt()) |token| {
                if (std.meta.eql(token, self.end)) return null;
            }
            // Count down remaining entries, saturating at zero.
            self.len.? -= @as(usize, if (self.len.? > 0) 1 else 0);
            return try seed.deserialize(self.de.allocator, self.de.deserializer());
        }

        pub fn nextValueSeed(self: *MapAccess, seed: anytype) Error!@TypeOf(seed).Value {
            return try seed.deserialize(self.de.allocator, self.de.deserializer());
        }
    };
};
src/tests/de/deserializer.zig
//! Generic HTTP/1.1 connection over a pluggable socket type, with optional
//! TLS via iguanaTLS, plus its mock-socket test suite.
const Allocator = std.mem.Allocator;
const h11 = @import("h11");
const Method = @import("http").Method;
const TcpSocket = @import("socket.zig").TcpSocket;
const Request = @import("request.zig").Request;
const Response = @import("response.zig").Response;
const std = @import("std");
const StreamingResponse = @import("response.zig").StreamingResponse;
const tls = @import("iguanaTLS");
const Uri = @import("http").Uri;

pub const TcpConnection = Connection(TcpSocket);

pub const Protocol = enum { Http, Https };

/// HTTP connection parameterized over the socket implementation so tests can
/// substitute a mock. Instances are heap-allocated by connect() and must be
/// released with deinit().
pub fn Connection(comptime SocketType: type) type {
    return struct {
        const Self = @This();
        const H11Client = h11.Client(Reader, Writer);
        const TlsContext = tls.Client(SocketType.Reader, SocketType.Writer, tls.ciphersuites.all, true);
        const Reader = std.io.Reader(*Self, ReaderError, read);
        const ReaderError = TlsContext.Reader.Error;
        const Writer = std.io.Writer(*Self, WriterError, write);
        const WriterError = SocketType.Writer.Error;

        allocator: *Allocator,
        protocol: Protocol,
        socket: SocketType,
        state: H11Client,
        // Only initialized when protocol == .Https.
        tls_context: TlsContext = undefined,

        /// Open a connection to `uri`; performs a TLS handshake when the
        /// scheme is "https". Caller owns the returned pointer (deinit()).
        pub fn connect(allocator: *Allocator, uri: Uri) !*Self {
            var connection = try allocator.create(Self);
            connection.allocator = allocator;
            connection.protocol = .Http;
            connection.socket = try SocketType.connect(allocator, uri);
            connection.state = H11Client.init(allocator, connection.reader(), connection.writer());
            if (std.mem.eql(u8, uri.scheme, "https")) {
                connection.protocol = .Https;
                // NOTE(review): certificate verification is disabled
                // (.cert_verifier = .none) - confirm this is intentional.
                connection.tls_context = try tls.client_connect(.{
                    .reader = connection.socket.reader(),
                    .writer = connection.socket.writer(),
                    .cert_verifier = .none,
                    .temp_allocator = allocator,
                    .ciphersuites = tls.ciphersuites.all,
                    .protocols = &[_][]const u8{"http/1.1"},
                }, uri.host.name);
            }
            return connection;
        }

        pub fn reader(self: *Self) Reader {
            return .{ .context = self };
        }

        pub fn writer(self: *Self) Writer {
            return .{ .context = self };
        }

        /// Close the TLS session (best effort) and the socket, then free self.
        pub fn deinit(self: *Self) void {
            self.state.deinit();
            if (self.protocol == .Https) {
                self.tls_context.close_notify() catch {};
            }
            self.socket.close();
            self.allocator.destroy(self);
        }

        /// Read plaintext bytes, decrypting transparently when on TLS.
        pub fn read(self: *Self, buffer: []u8) ReaderError!usize {
            return switch (self.protocol) {
                .Http => self.socket.read(buffer),
                .Https => self.tls_context.read(buffer),
            };
        }

        /// Write plaintext bytes, encrypting transparently when on TLS.
        pub fn write(self: *Self, buffer: []const u8) WriterError!usize {
            return switch (self.protocol) {
                .Http => self.socket.write(buffer),
                .Https => self.tls_context.write(buffer),
            };
        }

        /// Send a request and read the entire response body into memory.
        /// Caller owns the returned Response (deinit()).
        pub fn request(self: *Self, method: Method, uri: Uri, options: anytype) !Response {
            var _request = try Request.init(self.allocator, method, uri, options);
            defer _request.deinit();
            try self.sendRequest(_request);
            var response = try self.readResponse();
            errdefer response.deinit();
            var body = try self.readResponseBody();
            return Response{
                .allocator = self.allocator,
                .buffer = response.raw_bytes,
                .status = response.statusCode,
                .version = response.version,
                .headers = response.headers,
                .body = body,
            };
        }

        /// Send a request and return a response whose body is read on demand
        /// through the returned StreamingResponse.
        pub fn stream(self: *Self, method: Method, uri: Uri, options: anytype) !StreamingResponse(Self) {
            var _request = try Request.init(self.allocator, method, uri, options);
            defer _request.deinit();
            try self.sendRequest(_request);
            var response = try self.readResponse();
            return StreamingResponse(Self){
                .allocator = self.allocator,
                .buffer = response.raw_bytes,
                .connection = self,
                .status = response.statusCode,
                .version = response.version,
                .headers = response.headers,
            };
        }

        /// Emit the request head, then the body when one is present.
        fn sendRequest(self: *Self, _request: Request) !void {
            var request_event = try h11.Request.init(_request.method, _request.path, _request.version, _request.headers);
            try self.state.send(h11.Event{ .Request = request_event });
            switch (_request.body) {
                .Empty => return,
                .ContentLength => |body| {
                    try self.state.send(.{ .Data = h11.Data{ .bytes = body.content } });
                },
            }
        }

        /// Read the status line and headers; assumes the next h11 event is a
        /// Response (panics otherwise via the union access).
        fn readResponse(self: *Self) !h11.Response {
            var event = try self.state.nextEvent(.{});
            return event.Response;
        }

        /// Accumulate Data events until EndOfMessage; caller owns the slice.
        fn readResponseBody(self: *Self) ![]const u8 {
            var body = std.ArrayList(u8).init(self.allocator);
            errdefer body.deinit();
            while (true) {
                var buffer: [4096]u8 = undefined;
                var event = try self.state.nextEvent(.{ .buffer = &buffer });
                switch (event) {
                    .Data => |data| try body.appendSlice(data.bytes),
                    .EndOfMessage => return body.toOwnedSlice(),
                    else => unreachable,
                }
            }
        }

        /// Expose the underlying h11 event pump (used by StreamingResponse).
        pub fn nextEvent(self: *Self, options: anytype) !h11.Event {
            return self.state.nextEvent(options);
        }
    };
}

// --- Tests: run entirely against a mock socket; no network involved. ---
const ConnectionMock = Connection(SocketMock);
const expect = std.testing.expect;
const expectEqualStrings = std.testing.expectEqualStrings;
const Headers = @import("http").Headers;
const SocketMock = @import("socket.zig").SocketMock;

test "Get" {
    const uri = try Uri.parse("http://httpbin.org/get", false);
    var connection = try ConnectionMock.connect(std.testing.allocator, uri);
    defer connection.deinit();
    try connection.socket.target.has_received("HTTP/1.1 200 OK\r\nContent-Length: 14\r\nServer: gunicorn/19.9.0\r\n\r\n" ++ "Gotta Go Fast!");

    var response = try connection.request(.Get, uri, .{});
    defer response.deinit();

    try expect(response.status == .Ok);
    try expect(response.version == .Http11);

    var headers = response.headers.items();
    try expectEqualStrings(headers[0].name.raw(), "Content-Length");
    try expectEqualStrings(headers[1].name.raw(), "Server");
    try expect(response.body.len == 14);
}

test "Get with headers" {
    const uri = try Uri.parse("http://httpbin.org/get", false);
    var connection = try ConnectionMock.connect(std.testing.allocator, uri);
    defer connection.deinit();
    try connection.socket.target.has_received("HTTP/1.1 200 OK\r\nContent-Length: 14\r\nServer: gunicorn/19.9.0\r\n\r\n" ++ "Gotta Go Fast!");

    var headers = Headers.init(std.testing.allocator);
    defer headers.deinit();
    try headers.append("Gotta-go", "Fast!");

    var response = try connection.request(.Get, uri, .{ .headers = headers.items() });
    defer response.deinit();

    try expect(connection.socket.target.has_sent("GET /get HTTP/1.1\r\nHost: httpbin.org\r\nGotta-go: Fast!\r\n\r\n"));
}

test "Get with compile-time headers" {
    const uri = try Uri.parse("http://httpbin.org/get", false);
    var connection = try ConnectionMock.connect(std.testing.allocator, uri);
    defer connection.deinit();
    try connection.socket.target.has_received("HTTP/1.1 200 OK\r\nContent-Length: 14\r\nServer: gunicorn/19.9.0\r\n\r\n" ++ "Gotta Go Fast!");

    // Headers supplied as an anonymous tuple of pairs instead of a Headers.
    var headers = .{.{ "Gotta-go", "Fast!" }};

    var response = try connection.request(.Get, uri, .{ .headers = headers });
    defer response.deinit();

    try expect(connection.socket.target.has_sent("GET /get HTTP/1.1\r\nHost: httpbin.org\r\nGotta-go: Fast!\r\n\r\n"));
}

test "Post binary data" {
    const uri = try Uri.parse("http://httpbin.org/post", false);
    var connection = try ConnectionMock.connect(std.testing.allocator, uri);
    defer connection.deinit();
    try connection.socket.target.has_received("HTTP/1.1 200 OK\r\nContent-Length: 14\r\nServer: gunicorn/19.9.0\r\n\r\n" ++ "Gotta Go Fast!");

    var response = try connection.request(.Post, uri, .{ .content = "Gotta go fast!" });
    defer response.deinit();

    try expect(connection.socket.target.has_sent("POST /post HTTP/1.1\r\nHost: httpbin.org\r\nContent-Length: 14\r\n\r\nGotta go fast!"));
}

test "Head request has no message body" {
    const uri = try Uri.parse("http://httpbin.org/head", false);
    var connection = try ConnectionMock.connect(std.testing.allocator, uri);
    defer connection.deinit();
    try connection.socket.target.has_received("HTTP/1.1 200 OK\r\nContent-Length: 14\r\nServer: gunicorn/19.9.0\r\n\r\n");

    var response = try connection.request(.Head, uri, .{});
    defer response.deinit();

    try expect(response.body.len == 0);
}

test "IP address and a port should be set in HOST headers" {
    const uri = try Uri.parse("http://127.0.0.1:8080/", false);
    var connection = try ConnectionMock.connect(std.testing.allocator, uri);
    defer connection.deinit();
    try connection.socket.target.has_received("HTTP/1.1 200 OK\r\n\r\n");

    var response = try connection.request(.Get, uri, .{});
    defer response.deinit();

    try expect(connection.socket.target.has_sent("GET / HTTP/1.1\r\nHost: 127.0.0.1:8080\r\n\r\n"));
}

test "Request a URI without path defaults to /" {
    const uri = try Uri.parse("http://httpbin.org", false);
    var connection = try ConnectionMock.connect(std.testing.allocator, uri);
    defer connection.deinit();
    try connection.socket.target.has_received("HTTP/1.1 200 OK\r\n\r\n");

    var response = try connection.request(.Get, uri, .{});
    defer response.deinit();

    try expect(connection.socket.target.has_sent("GET / HTTP/1.1\r\nHost: httpbin.org\r\n\r\n"));
}

test "Get a response in multiple socket read" {
    const uri = try Uri.parse("http://httpbin.org", false);
    var connection = try ConnectionMock.connect(std.heap.page_allocator, uri);
    defer connection.deinit();
    // Head and body are delivered by two separate socket reads.
    try connection.socket.target.has_received("HTTP/1.1 200 OK\r\nContent-Length: 14\r\n\r\n");
    try connection.socket.target.has_received("Gotta go fast!");

    var response = try connection.request(.Get, uri, .{});
    defer response.deinit();

    try expect(response.status == .Ok);
    try expect(response.version == .Http11);

    var headers = response.headers.items();
    try expectEqualStrings(headers[0].name.raw(), "Content-Length");
    try expectEqualStrings(headers[0].value, "14");
    try expect(response.body.len == 14);
}

test "Get a streaming response" {
    const uri = try Uri.parse("http://httpbin.org", false);
    // NOTE(review): no `defer connection.deinit()` here; page_allocator hides
    // the leak from the leak checker - confirm whether this is deliberate.
    var connection = try ConnectionMock.connect(std.heap.page_allocator, uri);
    try connection.socket.target.has_received("HTTP/1.1 200 OK\r\nContent-Length: 12288\r\n\r\n");
    var body = "a" ** 12288;
    try connection.socket.target.has_received(body);

    var response = try connection.stream(.Get, uri, .{});
    defer response.deinit();

    try expect(response.status == .Ok);
    try expect(response.version == .Http11);

    var headers = response.headers.items();
    try expectEqualStrings(headers[0].name.raw(), "Content-Length");
    try expectEqualStrings(headers[0].value, "12288");

    // Drain the streaming body in 4 KiB chunks and compare with the fixture.
    var result = std.ArrayList(u8).init(std.testing.allocator);
    defer result.deinit();
    while (true) {
        var buffer: [4096]u8 = undefined;
        var bytesRead = try response.read(&buffer);
        if (bytesRead == 0) {
            break;
        }
        try result.appendSlice(buffer[0..bytesRead]);
    }
    try expectEqualStrings(result.items, body);
}

test "Get a chunk encoded response" {
    const uri = try Uri.parse("http://httpbin.org/get", false);
    var connection = try ConnectionMock.connect(std.testing.allocator, uri);
    defer connection.deinit();
    try connection.socket.target.has_received("HTTP/1.1 200 OK\r\n" ++ "Transfer-Encoding: chunked\r\n\r\n" ++ "7\r\nMozilla\r\n" ++ "9\r\nDeveloper\r\n" ++ "7\r\nNetwork\r\n" ++ "0\r\n\r\n");

    var response = try connection.request(.Get, uri, .{});
    defer response.deinit();

    try expect(response.status == .Ok);
    try expect(response.version == .Http11);

    var headers = response.headers.items();
    try expectEqualStrings(headers[0].name.raw(), "Transfer-Encoding");
    try expectEqualStrings(headers[0].value, "chunked");
    try expectEqualStrings(response.body, "MozillaDeveloperNetwork");
}
src/connection.zig
const std = @import("std");

const c = @import("c.zig");
const shaderc = @import("shaderc.zig");
const png = @import("png.zig");

const AsyncShaderc = @import("async_shaderc.zig").AsyncShaderc;
const Blit = @import("blit.zig").Blit;
const Preview = @import("preview.zig").Preview;
const Scene = @import("scene.zig").Scene;
const Options = @import("options.zig").Options;
const Optimized = @import("optimized.zig").Optimized;
const Viewport = @import("viewport.zig").Viewport;

// Number of frame-time samples averaged by the samples-per-frame autoscaler.
const FRAME_TIME_COUNT: u32 = 8;

/// Progressive path-tracing renderer. Accumulates samples across frames into
/// `image_buf`, starts with a generic `Preview` pipeline, and hot-swaps in a
/// scene-specific `Optimized` pipeline once `AsyncShaderc` finishes compiling.
pub const Renderer = struct {
    const Self = @This();

    // False until init() completes; update_size() uses this to know whether
    // there are existing GPU resources to destroy/rebind.
    initialized: bool = false,
    alloc: *std.mem.Allocator,
    device: c.WGPUDeviceId,
    queue: c.WGPUQueueId,
    scene: Scene,

    // This is a buffer which we use for ray storage. It's equivalent to
    // a texture, but can be read and written in the same shader.
    image_buf: c.WGPUBufferId,
    image_buf_size: u32,

    // Non-null while a scene-specific shader is compiling in the background.
    compiler: ?AsyncShaderc,
    preview: Preview,
    // Non-null once the scene-specific pipeline is ready.
    optimized: ?Optimized,
    blit: Blit,

    uniforms: c.rayUniforms,
    uniform_buf: c.WGPUBufferId,

    start_time_ms: i64,
    last_time_ms: i64,
    frame_times_ms: [FRAME_TIME_COUNT]u32,
    frame_time_index: u32,

    // We render continuously, but reset stats after the optimized renderer
    // is built to get a fair performance metric
    opt_time_ms: i64,
    opt_offset_samples: u32,

    /// Build the renderer: allocates the shared uniform buffer, sizes the
    /// image buffer via update_size(), then constructs the blit and preview
    /// pipelines that bind to it.
    pub fn init(
        alloc: *std.mem.Allocator,
        scene: Scene,
        options: Options,
        device: c.WGPUDeviceId,
    ) !Self {
        ////////////////////////////////////////////////////////////////////////
        // Uniform buffers (shared by both raytracing and blitter)
        const uniform_buf = c.wgpu_device_create_buffer(
            device,
            &(c.WGPUBufferDescriptor){
                .label = "blit uniforms",
                .size = @sizeOf(c.rayUniforms),
                .usage = c.WGPUBufferUsage_UNIFORM | c.WGPUBufferUsage_COPY_DST,
                .mapped_at_creation = false,
            },
        );

        var out = Self{
            .alloc = alloc,
            .device = device,
            .queue = c.wgpu_device_get_default_queue(device),
            .compiler = null,
            .preview = undefined, // Built after update_size()
            .optimized = null,
            .blit = undefined, // Built after update_size()
            .scene = scene,
            // Populated in update_size()
            .image_buf = undefined,
            .image_buf_size = undefined,
            .uniforms = .{
                // Populated in update_size()
                .width_px = undefined,
                .height_px = undefined,
                .offset_x = 0,
                .offset_y = 0,
                .samples = 0,
                .samples_per_frame = 1,
                .spectral = if (options.spectral) 1 else 0,
                ._padding = undefined,
                .camera = scene.camera,
            },
            .uniform_buf = uniform_buf,
            .start_time_ms = 0,
            .last_time_ms = 0,
            .frame_times_ms = [_]u32{0} ** FRAME_TIME_COUNT,
            .frame_time_index = 0,
            .opt_time_ms = 0,
            .opt_offset_samples = 0,
        };
        out.update_size(options.width, options.height);

        out.blit = try Blit.init(
            alloc,
            device,
            uniform_buf,
            out.image_buf,
            out.image_buf_size,
        );
        out.preview = try Preview.init(
            alloc,
            scene,
            device,
            uniform_buf,
            out.image_buf,
            out.image_buf_size,
        );
        out.initialized = true;
        return out;
    }

    /// Kick off background compilation of a scene-specific shader.
    /// Returns false (and does nothing) if a compile is already in flight.
    pub fn build_opt(self: *Self, scene: Scene) !bool {
        // The compiler is already running, so don't do anything yet
        if (self.compiler != null) {
            return false;
        }
        self.compiler = AsyncShaderc.init(scene, self.device);
        try (self.compiler orelse unreachable).start();
        return true;
    }

    /// Snapshot the current render settings (size / spectral flag).
    pub fn get_options(self: *const Self) Options {
        return .{
            .total_samples = null,
            .width = self.uniforms.width_px,
            .height = self.uniforms.height_px,
            .spectral = self.uniforms.spectral != 0,
        };
    }

    // Push the CPU-side uniforms struct to the GPU uniform buffer.
    fn update_uniforms(self: *const Self) void {
        c.wgpu_queue_write_buffer(
            self.queue,
            self.uniform_buf,
            0,
            @ptrCast([*c]const u8, &self.uniforms),
            @sizeOf(c.rayUniforms),
        );
    }

    // Draw the camera-parameter widgets; returns true if any value changed
    // (the caller then resets sample accumulation).
    fn draw_camera_gui(self: *Self) bool {
        var changed = false;
        const width = c.igGetWindowWidth();
        c.igPushItemWidth(width * 0.5);
        changed = c.igDragFloat3("pos", @ptrCast([*c]f32, &self.uniforms.camera.pos), 0.05, -10, 10, "%.1f", 0) or changed;
        changed = c.igDragFloat3("target", @ptrCast([*c]f32, &self.uniforms.camera.target), 0.05, -10, 10, "%.1f", 0) or changed;
        changed = c.igDragFloat3("up", @ptrCast([*c]f32, &self.uniforms.camera.up), 0.1, -1, 1, "%.1f", 0) or changed;
        changed = c.igDragFloat("perspective", &self.uniforms.camera.perspective, 0.01, 0, 1, "%.2f", 0) or changed;
        changed = c.igDragFloat("defocus", &self.uniforms.camera.defocus, 0.0001, 0, 0.1, "%.4f", 0) or changed;
        changed = c.igDragFloat("focal length", &self.uniforms.camera.focal_distance, 0.01, 0, 10, "%.2f", 0) or changed;
        changed = c.igDragFloat("scale", &self.uniforms.camera.scale, 0.05, 0, 10, "%.1f", 0) or changed;
        // "Reset" restores the camera saved in the scene description.
        const w = width - c.igGetCursorPosX();
        c.igIndent(w * 0.25);
        if (c.igButton("Reset", .{ .x = w * 0.5, .y = 0 })) {
            self.uniforms.camera = self.scene.camera;
            changed = true;
        }
        c.igUnindent(w * 0.25);
        return changed;
    }

    /// Draw the left-hand editor panel (camera / shapes / materials).
    /// Reports the panel width back through `menu_width`. On any edit:
    /// re-uploads the scene, restarts sample accumulation, drops the
    /// optimized pipeline, and cancels any in-flight shader compile.
    pub fn draw_gui(self: *Self, menu_height: f32, menu_width: *f32) !bool {
        var changed = false;
        c.igPushStyleVarFloat(c.ImGuiStyleVar_WindowRounding, 0.0);
        c.igPushStyleVarFloat(c.ImGuiStyleVar_WindowBorderSize, 1.0);
        c.igSetNextWindowPos(.{ .x = 0, .y = menu_height }, c.ImGuiCond_Always, .{ .x = 0, .y = 0 });
        const window_size = c.igGetIO().*.DisplaySize;
        // Pin the panel to full height; let the user resize width up to half
        // the display.
        c.igSetNextWindowSizeConstraints(.{
            .x = 0,
            .y = window_size.y - menu_height,
        }, .{
            .x = window_size.x / 2,
            .y = window_size.y - menu_height,
        }, null, null);
        const flags = c.ImGuiWindowFlags_NoTitleBar | c.ImGuiWindowFlags_NoMove | c.ImGuiWindowFlags_NoCollapse;
        if (c.igBegin("rayray", null, flags)) {
            if (c.igCollapsingHeaderBoolPtr("Camera", null, 0)) {
                changed = self.draw_camera_gui() or changed;
            }
            if (c.igCollapsingHeaderBoolPtr("Shapes", null, 0)) {
                changed = (try self.scene.draw_shapes_gui()) or changed;
            }
            if (c.igCollapsingHeaderBoolPtr("Materials", null, 0)) {
                changed = (try self.scene.draw_materials_gui()) or changed;
            }
            menu_width.* = c.igGetWindowWidth();
        }
        c.igEnd();
        c.igPopStyleVar(2);
        if (changed) {
            try self.preview.upload_scene(self.scene);
            // Any edit invalidates accumulated samples and the autoscaler.
            self.uniforms.samples = 0;
            self.uniforms.samples_per_frame = 1;
            self.frame_time_index = 0;
            if (self.optimized) |*opt| {
                opt.deinit();
                self.optimized = null;
            }
            // This will tell us to ignore the compiler result, because
            // the shader has changed while it was running.
            if (self.compiler) |*comp| {
                comp.cancelled = true;
            }
        }
        return changed;
    }

    /// Render one frame: adopt a freshly-compiled optimized shader if ready,
    /// resize to the viewport, dispatch the compute pass (preview or
    /// optimized), blit to screen, then update the frame-time stats that
    /// drive the samples-per-frame autoscaler.
    pub fn draw(
        self: *Self,
        viewport: Viewport,
        next_texture: c.WGPUOption_TextureViewId,
        cmd_encoder: c.WGPUCommandEncoderId,
    ) !void {
        // Check whether the compiler for a scene-specific shader has finished
        if (self.compiler) |*comp| {
            if (comp.check()) |comp_shader| {
                defer c.wgpu_shader_module_destroy(comp_shader);
                if (self.optimized) |*opt| {
                    opt.deinit();
                    self.optimized = null;
                }
                // A cancelled compile (scene changed mid-build) is discarded.
                if (!comp.cancelled) {
                    self.optimized = try Optimized.init(
                        self.alloc,
                        comp_shader,
                        self.device,
                        self.uniform_buf,
                        self.image_buf,
                        self.image_buf_size,
                    );
                    // Reset the perf baseline so stats() reflects only the
                    // optimized renderer.
                    self.opt_time_ms = std.time.milliTimestamp();
                    self.opt_offset_samples = self.uniforms.samples;
                }
                comp.deinit();
                self.compiler = null;
            }
        }

        const width = @floatToInt(u32, viewport.width);
        const height = @floatToInt(u32, viewport.height);
        if (width != self.uniforms.width_px or height != self.uniforms.height_px) {
            self.update_size(width, height);
        }
        self.uniforms.offset_x = @floatToInt(u32, viewport.x);
        self.uniforms.offset_y = @floatToInt(u32, viewport.y);
        self.update_uniforms();

        // Cast another set of rays, one per pixel
        const first = self.uniforms.samples == 0;
        const n = self.uniforms.width_px * self.uniforms.height_px;
        // Round up to a whole number of compute workgroups.
        const nt = (n + c.COMPUTE_SIZE - 1) / c.COMPUTE_SIZE;
        if (self.optimized) |*opt| {
            try opt.render(first, nt, cmd_encoder);
        } else {
            try self.preview.render(first, nt, cmd_encoder);
        }
        self.blit.draw(viewport, next_texture, cmd_encoder);

        const now_ms = std.time.milliTimestamp();
        // Record the start time at the first frame, to skip startup time
        if (self.uniforms.samples == 0) {
            self.start_time_ms = now_ms;
        } else if (now_ms >= self.last_time_ms) {
            const dt = @intCast(u32, now_ms - self.last_time_ms);
            // Work around an issue where frames get submitted too fast,
            // which can causes the autoscaler to overestimate its capabilities
            if (dt > 15) {
                self.frame_times_ms[self.frame_time_index] = dt;
                self.frame_time_index = (self.frame_time_index + 1) % FRAME_TIME_COUNT;
            }
        }
        self.last_time_ms = now_ms;
        self.uniforms.samples += self.uniforms.samples_per_frame;

        // Skew samples per frame based on average frame time
        if (self.frame_time_index == FRAME_TIME_COUNT - 1) {
            var t: u32 = 0;
            for (self.frame_times_ms) |f| {
                t += f;
            }
            t = @divFloor(t, FRAME_TIME_COUNT);
            // When running normally, we expect a 16ms frame time. We target
            // a frame time of 20 ms, with 2 ms of hysteresis on either side,
            // biased to count up quickly (since we reset to 1 when the scene
            // changes)
            const delta: i32 = switch (t) {
                0...16 => 4,
                17 => 2,
                18...22 => 0,
                else => -1,
            };
            if (delta != 0) {
                const next = @intCast(i32, self.uniforms.samples_per_frame) + delta;
                if (next >= 1) {
                    self.uniforms.samples_per_frame = @intCast(u32, next);
                } else {
                    self.uniforms.samples_per_frame = 1;
                }
                self.frame_time_index = 0;
            }
        }
    }

    // Scale `v` down into [1, 1000) and return the matching SI prefix
    // character (' ', 'K', 'M', or 'G'). Mutates `v` in place.
    fn prefix(v: *f64) u8 {
        if (v.* > 1_000_000_000) {
            v.* /= 1_000_000_000;
            return 'G';
        } else if (v.* > 1_000_000) {
            v.* /= 1_000_000;
            return 'M';
        } else if (v.* > 1_000) {
            v.* /= 1_000;
            return 'K';
        } else {
            return ' ';
        }
    }

    /// Format a one-line performance summary. Caller owns the returned slice.
    pub fn stats(self: *const Self, alloc: *std.mem.Allocator) ![]u8 {
        var samples = self.uniforms.samples;
        if (self.optimized != null) {
            samples -= self.opt_offset_samples;
        }
        var start_time_ms = if (self.optimized == null) self.start_time_ms else self.opt_time_ms;
        // NOTE(review): `ray_count` uses `self.uniforms.samples`, not the
        // `samples` adjusted above, so the optimized-renderer sample offset is
        // computed but never applied to the rate — verify whether this is
        // intentional.
        var ray_count = @intToFloat(f64, self.uniforms.width_px) *
            @intToFloat(f64, self.uniforms.height_px) *
            @intToFloat(f64, self.uniforms.samples);
        const dt_sec = @intToFloat(f64, std.time.milliTimestamp() - start_time_ms) / 1000.0;
        var rays_per_sec = ray_count / dt_sec;
        var rays_per_sec_prefix = prefix(&rays_per_sec);
        return try std.fmt.allocPrintZ(
            alloc,
            "{}x | {d:.2} {c}sps | {} spp | {} x {}",
            .{
                self.uniforms.samples_per_frame,
                rays_per_sec,
                rays_per_sec_prefix,
                self.uniforms.samples,
                self.uniforms.width_px,
                self.uniforms.height_px,
            },
        );
    }

    /// Release all GPU resources owned by the renderer.
    pub fn deinit(self: *Self) void {
        self.blit.deinit();
        self.preview.deinit();
        if (self.optimized) |*opt| {
            opt.deinit();
        }
        self.scene.deinit();
        c.wgpu_buffer_destroy(self.uniform_buf, true);
        c.wgpu_buffer_destroy(self.image_buf, true);
    }

    /// Recreate the ray-accumulation buffer at the new size, reset sampling
    /// state, and (after init) rebind the pipelines to the new buffer.
    pub fn update_size(self: *Self, width: u32, height: u32) void {
        if (self.initialized) {
            c.wgpu_buffer_destroy(self.image_buf, true);
        }
        // 4 x f32 (RGBA accumulator) per pixel.
        self.image_buf_size = width * height * 4 * @sizeOf(f32);
        self.image_buf = c.wgpu_device_create_buffer(
            self.device,
            &(c.WGPUBufferDescriptor){
                .label = "image buf",
                .size = self.image_buf_size,
                .usage = c.WGPUBufferUsage_STORAGE | c.WGPUBufferUsage_COPY_SRC,
                .mapped_at_creation = false,
            },
        );

        self.uniforms.width_px = width;
        self.uniforms.height_px = height;
        self.uniforms.samples = 0;
        self.uniforms.samples_per_frame = 1;
        self.frame_time_index = 0;
        self.start_time_ms = std.time.milliTimestamp();

        if (self.initialized) {
            self.blit.bind(self.uniform_buf, self.image_buf, self.image_buf_size);
            self.preview.bind(self.image_buf, self.image_buf_size);
            if (self.optimized) |*opt| {
                opt.rebuild_bind_group(self.uniform_buf, self.image_buf, self.image_buf_size);
                self.opt_time_ms = self.start_time_ms;
                self.opt_offset_samples = 0;
            }
        }
    }

    /// Copy the accumulation buffer into a CPU-mappable staging buffer,
    /// synchronously map it (wgpu_device_poll blocks), and write it out as a
    /// PNG normalized by the sample count.
    pub fn save_png(self: *const Self) !void {
        const tmp_buf = c.wgpu_device_create_buffer(
            self.device,
            &(c.WGPUBufferDescriptor){
                .label = "tmp png buffer",
                .size = self.image_buf_size,
                .usage = c.WGPUBufferUsage_COPY_DST | c.WGPUBufferUsage_MAP_READ,
                .mapped_at_creation = false,
            },
        );
        defer c.wgpu_buffer_destroy(tmp_buf, true);

        const cmd_encoder = c.wgpu_device_create_command_encoder(
            self.device,
            &(c.WGPUCommandEncoderDescriptor){ .label = "png save encoder" },
        );
        c.wgpu_command_encoder_copy_buffer_to_buffer(
            cmd_encoder,
            self.image_buf,
            0,
            tmp_buf,
            0,
            self.image_buf_size,
        );
        const cmd_buf = c.wgpu_command_encoder_finish(cmd_encoder, null);
        c.wgpu_queue_submit(self.queue, &cmd_buf, 1);

        c.wgpu_buffer_map_read_async(
            tmp_buf,
            0,
            self.image_buf_size,
            read_buffer_map_cb,
            null,
        );
        // Blocking poll: guarantees the map callback has fired before we read.
        c.wgpu_device_poll(self.device, true);
        const ptr = c.wgpu_buffer_get_mapped_range(
            tmp_buf,
            0,
            self.image_buf_size,
        );
        const data = @ptrCast([*]const f32, @alignCast(4, ptr));
        try png.save_png(
            self.alloc,
            data,
            @intToFloat(f32, self.uniforms.samples),
            self.uniforms.width_px,
            self.uniforms.height_px,
        );
        c.wgpu_buffer_unmap(tmp_buf);
    }
};

// Map-completion callback; completion is observed via the blocking
// wgpu_device_poll above, so nothing to do here.
export fn read_buffer_map_cb(status: c.WGPUBufferMapAsyncStatus, userdata: [*c]u8) void {}
src/renderer.zig
const std = @import("std");
const stdx = @import("stdx");
const Backend = @import("build_options").GraphicsBackend;

const graphics = @import("graphics.zig");
const FontGroupId = graphics.FontGroupId;
const gpu = @import("backend/gpu/graphics.zig");
const testg = @import("backend/test/graphics.zig");

/// Measured bounding size of a piece of text, in logical pixels.
pub const TextMetrics = struct {
    width: f32,
    height: f32,

    pub fn init(width: f32, height: f32) @This() {
        return .{
            .width = width,
            .height = height,
        };
    }
};

// Result is stored with text description.
pub const TextMeasure = struct {
    text: []const u8,
    font_size: f32,
    font_gid: FontGroupId,
    // Filled in after measurement; starts at zero.
    res: TextMetrics = TextMetrics.init(0, 0),
};

/// Line-broken layout of a text run. Owns the `lines` list.
pub const TextLayout = struct {
    lines: std.ArrayList(TextLine),

    /// Max width in logical pixels of all the lines.
    width: f32,

    /// Height in logical pixels of all the lines.
    height: f32,

    pub fn init(alloc: std.mem.Allocator) @This() {
        return .{
            .lines = std.ArrayList(TextLine).init(alloc),
            .width = 0,
            .height = 0,
        };
    }

    pub fn deinit(self: @This()) void {
        self.lines.deinit();
    }
};

/// One laid-out line: byte range [start_idx, end_idx) into the source text.
pub const TextLine = struct {
    start_idx: u32,
    end_idx: u32,
    height: f32,
};

/// Used to traverse text one UTF-8 codepoint at a time.
pub const TextGlyphIterator = struct {
    // Backend-specific iterator implementation, selected at compile time.
    inner: switch (Backend) {
        .OpenGL, .Vulkan => gpu.TextGlyphIterator,
        .Test => testg.TextGlyphIterator,
        else => stdx.unsupported(),
    },

    /// The primary vertical metrics are available and won't change.
    primary_ascent: f32,
    primary_descent: f32,
    primary_height: f32,

    state: State,

    const Self = @This();

    /// Units are scaled to the effective user font size.
    pub const State = struct {
        /// The current codepoint.
        cp: u21,

        /// The current codepoint's start idx in the given UTF-8 buffer.
        start_idx: usize,

        /// The current codepoint's end idx in the given UTF-8 buffer. Not inclusive.
        end_idx: usize,

        /// The kern with the previous codepoint.
        kern: f32,

        /// How much this codepoint should advance the current x position.
        /// Note that this does not include the kern value with a previous codepoint.
        advance_width: f32,

        /// How much the glyph is above the baseline.
        ascent: f32,

        /// How much the glyph is below the baseline.
        descent: f32,

        /// Height would be ascent + descent.
        height: f32,

        /// y-offset needed in final glyph position in order to be aligned with the primary font.
        /// If the glyph is from the primary font, this should be zero.
        primary_offset_y: f32,
    };

    /// Advance to the next codepoint, updating `self.state`.
    /// Returns false when the end of the buffer is reached.
    pub inline fn nextCodepoint(self: *Self) bool {
        switch (Backend) {
            .OpenGL, .Vulkan => return gpu.TextGlyphIterator.nextCodepoint(&self.inner, &self.state, {}, null),
            .Test => return testg.TextGlyphIterator.nextCodepoint(&self.inner, &self.state),
            else => stdx.unsupported(),
        }
    }

    /// Reposition the iterator at byte index `i` of the buffer.
    pub inline fn setIndex(self: *Self, i: usize) void {
        switch (Backend) {
            .OpenGL, .Vulkan => return gpu.TextGlyphIterator.setIndex(&self.inner, i),
            .Test => return testg.TextGlyphIterator.setIndex(&self.inner, i),
            else => stdx.unsupported(),
        }
    }
};
graphics/src/text.zig
// Darwin (macOS) libc bindings and constants.
const std = @import("../std.zig");
const assert = std.debug.assert;
const builtin = @import("builtin");
const macho = std.macho;

usingnamespace @import("../os/bits.zig");

// errno on Darwin is accessed through the __error() function.
extern "c" fn __error() *c_int;
pub extern "c" fn _NSGetExecutablePath(buf: [*]u8, bufsize: *u32) c_int;
pub extern "c" fn _dyld_get_image_header(image_index: u32) ?*mach_header;

pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: [*]u8, buf_len: usize, basep: *i64) isize;

pub extern "c" fn mach_absolute_time() u64;
pub extern "c" fn mach_timebase_info(tinfo: ?*mach_timebase_info_data) void;

pub extern "c" fn kevent64(
    kq: c_int,
    changelist: [*]const kevent64_s,
    nchanges: c_int,
    eventlist: [*]kevent64_s,
    nevents: c_int,
    flags: c_uint,
    timeout: ?*const timespec,
) c_int;

// 64-bit targets use the 64-bit Mach-O header layout.
const mach_hdr = if (@sizeOf(usize) == 8) mach_header_64 else mach_header;

/// The value of the link editor defined symbol _MH_EXECUTE_SYM is the address
/// of the mach header in a Mach-O executable file type.  It does not appear in
/// any file type other than a MH_EXECUTE file type.  The type of the symbol is
/// absolute as the header is not part of any section.
/// This symbol is populated when linking the system's libc, which is guaranteed
/// on this operating system. However when building object files or libraries,
/// the system libc won't be linked until the final executable. So we
/// export a weak symbol here, to be overridden by the real one.
pub extern "c" var _mh_execute_header: mach_hdr = undefined;
comptime {
    if (std.Target.current.isDarwin()) {
        @export(_mh_execute_header, .{ .name = "_mh_execute_header", .linkage = .Weak });
    }
}

pub const mach_header_64 = macho.mach_header_64;
pub const mach_header = macho.mach_header;

pub const _errno = __error;

pub extern "c" fn @"close$NOCANCEL"(fd: fd_t) c_int;
pub extern "c" fn mach_host_self() mach_port_t;
pub extern "c" fn clock_get_time(clock_serv: clock_serv_t, cur_time: *mach_timespec_t) kern_return_t;
pub extern "c" fn host_get_clock_service(host: host_t, clock_id: clock_id_t, clock_serv: ?[*]clock_serv_t) kern_return_t;
pub extern "c" fn mach_port_deallocate(task: ipc_space_t, name: mach_port_name_t) kern_return_t;

/// Add signal `signo` to the signal set. Signal numbers are 1-based, so
/// signal N occupies bit N-1 of the mask.
pub fn sigaddset(set: *sigset_t, signo: u5) void {
    set.* |= @as(u32, 1) << (signo - 1);
}

pub extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;

/// get address to use bind()
pub const AI_PASSIVE = 0x00000001;

/// fill ai_canonname
pub const AI_CANONNAME = 0x00000002;

/// prevent host name resolution
pub const AI_NUMERICHOST = 0x00000004;

/// prevent service name resolution
pub const AI_NUMERICSERV = 0x00001000;

/// address family for hostname not supported
pub const EAI_ADDRFAMILY = 1;

/// temporary failure in name resolution
pub const EAI_AGAIN = 2;

/// invalid value for ai_flags
pub const EAI_BADFLAGS = 3;

/// non-recoverable failure in name resolution
pub const EAI_FAIL = 4;

/// ai_family not supported
pub const EAI_FAMILY = 5;

/// memory allocation failure
pub const EAI_MEMORY = 6;

/// no address associated with hostname
pub const EAI_NODATA = 7;

/// hostname nor servname provided, or not known
pub const EAI_NONAME = 8;

/// servname not supported for ai_socktype
pub const EAI_SERVICE = 9;

/// ai_socktype not supported
pub const EAI_SOCKTYPE = 10;

/// system error returned in errno
pub const EAI_SYSTEM = 11;

/// invalid value for hints
pub const EAI_BADHINTS = 12;

/// resolved protocol is unknown
pub const EAI_PROTOCOL = 13;

/// argument buffer overflow
pub const EAI_OVERFLOW = 14;

pub const EAI_MAX = 15;

// Sizes and signatures match Apple's pthread ABI for 64- and 32-bit targets.
pub const pthread_mutex_t = extern struct {
    __sig: c_long = 0x32AAABA7,
    __opaque: [__PTHREAD_MUTEX_SIZE__]u8 = [_]u8{0} ** __PTHREAD_MUTEX_SIZE__,
};
pub const pthread_cond_t = extern struct {
    __sig: c_long = 0x3CB0B1BB,
    __opaque: [__PTHREAD_COND_SIZE__]u8 = [_]u8{0} ** __PTHREAD_COND_SIZE__,
};
const __PTHREAD_MUTEX_SIZE__ = if (@sizeOf(usize) == 8) 56 else 40;
const __PTHREAD_COND_SIZE__ = if (@sizeOf(usize) == 8) 40 else 24;

pub const pthread_attr_t = extern struct {
    __sig: c_long,
    __opaque: [56]u8,
};
lib/std/c/darwin.zig
const std = @import("std");
const realmode = @import("realmode.zig");
const RegisterState = realmode.RegisterState;

// Global allocator
const Allocator = std.mem.Allocator;
const FixedBufferAllocator = std.heap.FixedBufferAllocator;

var high_memory_allocator: FixedBufferAllocator = undefined;
pub var GlobalAllocator: *Allocator = undefined;

/// Issue one BIOS INT 15h / AX=E820h call, filling `entry` with the next
/// memory-map descriptor. `regs.ebx` carries the continuation value between
/// calls (zero after the last entry). Panics if the BIOS reports an error
/// (carry flag set).
fn queryE820(regs: *RegisterState, entry: *AddressRangeDescriptor) void {
    regs.eax = 0xe820;
    regs.edi = @ptrToInt(entry);
    regs.ecx = @sizeOf(AddressRangeDescriptor);
    regs.edx = std.mem.readIntBig(u32, "SMAP");
    realmode.invoke(0x15, regs);

    // Check CF
    if (regs.efl & 1 != 0) {
        @panic("Error on BIOS E820h");
    }
}

/// Walk the E820 memory map looking for the free region starting at 1 MiB
/// ("high memory") and back GlobalAllocator with it.
/// Panics if no such region exists.
pub fn init() void {
    var regs = RegisterState{};
    while (true) {
        var entry = AddressRangeDescriptor{};
        queryE820(&regs, &entry);

        // is High Memory: free region (type 1) based at 1 MiB.
        if (entry.base == 0x0010_0000 and entry.ty == 1) {
            const base = @truncate(usize, entry.base);
            const size = @truncate(usize, entry.size);
            const high_memory = @intToPtr([*]u8, base)[0..size];
            high_memory_allocator = FixedBufferAllocator.init(high_memory);
            GlobalAllocator = &high_memory_allocator.allocator;
            return;
        }

        // Last entry?
        if (regs.ebx == 0) {
            break;
        }
    }
    @panic("High Memory region not found.");
}

/// Print every E820 memory-map entry to `writer`, one per line.
pub fn printMap(writer: anytype) !void {
    var regs = RegisterState{};
    while (true) {
        var entry = AddressRangeDescriptor{};
        queryE820(&regs, &entry);

        try writer.print("{}\n", .{entry});

        // Last entry?
        if (regs.ebx == 0) {
            break;
        }
    }
}

/// One BIOS E820 descriptor: base address, length, and region type
/// (1 = usable RAM, 2 = reserved).
const AddressRangeDescriptor = packed struct {
    base: u64 = undefined,
    size: u64 = undefined,
    ty: u32 = undefined,

    pub fn format(
        self: @This(),
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        const mem_type = switch (self.ty) {
            1 => "Free",
            2 => "Reserved",
            else => "Undefined",
        };
        try writer.print("0x{x:0>8} | {:10} | ({}) {s}", .{
            self.base,
            self.size,
            self.ty,
            mem_type,
        });
    }
};
src/mm.zig
// Advent of Code 2020, day 14: bitmask-driven memory writes.
const std = @import("std");
const input = @embedFile("data/input14");
usingnamespace @import("util.zig");

pub fn main() !void {
    var allocator_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer allocator_state.deinit();
    const allocator = &allocator_state.allocator;

    const part1 = getSum(allocator, input, .v1);
    const part2 = getSum(allocator, input, .v2);
    print("[Part1] Sum: {}", .{part1});
    print("[Part2] Sum: {}", .{part2});
}

// Decoder chip version: v1 masks the value, v2 masks (and fans out) the address.
const Version = enum { v1, v2 };

// Machine state: the current mask split into its 'X' bits and '1' bits,
// plus sparse memory.
const State = struct {
    const MemoryMap = std.AutoHashMap(u64, u64);

    maskX: u64, // bit set where the mask character is 'X'
    mask1: u64, // bit set where the mask character is '1'
    memory: MemoryMap,

    pub fn init(allocator: *std.mem.Allocator) State {
        return .{
            .maskX = 0,
            .mask1 = 0,
            .memory = MemoryMap.init(allocator),
        };
    }
};

// Parse a 36-character mask string (MSB first) into the X and 1 bitmasks.
fn setMask(state: *State, mask: []const u8) void {
    std.debug.assert(mask.len == 36);
    state.maskX = 0;
    state.mask1 = 0;
    for (mask) |c, i| {
        state.maskX |= @intCast(u64, @boolToInt(c == 'X')) << @intCast(u6, 35 - i);
        state.mask1 |= @intCast(u64, @boolToInt(c == '1')) << @intCast(u6, 35 - i);
    }
}

// v1: 'X' keeps the value bit, '1' forces 1, '0' (neither mask) forces 0.
inline fn applyMaskV1(state: *const State, value: u64) u64 {
    return (value & state.maskX) | state.mask1;
}

// Write `value`. v1 masks the value; v2 enumerates every address produced by
// the floating ('X') bits and writes the value to all of them.
fn writeMemory(state: *State, address: u64, value: u64, comptime version: Version) void {
    switch (version) {
        .v1 => state.memory.put(address, applyMaskV1(state, value)) catch @panic("memory failed"),
        .v2 => {
            var addr = address;
            addr &= ~state.maskX; // If the bitmask bit is 0, the corresponding memory address bit is unchanged.
            addr |= state.mask1; // If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.

            // 2^k address variations for k floating bits.
            const num_floating_bits = @popCount(u64, state.maskX);
            const num_variations = std.math.pow(u64, 2, num_floating_bits);
            var variation_index: u64 = 0;
            while (variation_index < num_variations) : (variation_index += 1) {
                // take the bits from variation index and place them in floating_addr as the X mask indicates
                var floating_addr = addr;
                var floating_bit_index: u6 = 0;
                var variation_bit_index: u6 = 0;
                while (variation_bit_index < 36) : (variation_bit_index += 1) {
                    // get current bit
                    const variation_bit = (variation_index & (@as(u64, 1) << variation_bit_index)) >> variation_bit_index;
                    // find where the bit needs to be placed
                    // NOTE(review): if maskX == 0 (a mask with no 'X'), this
                    // scan never finds a set bit and spins forever — puzzle
                    // inputs appear to always contain an 'X'; verify.
                    while ((state.maskX & (@as(u64, 1) << floating_bit_index)) == 0) : (floating_bit_index +%= 1) {}
                    // place the bit
                    floating_addr |= variation_bit << floating_bit_index;
                    floating_bit_index +%= 1;
                }
                state.memory.put(floating_addr, value) catch @panic("memory failed");
            }
        },
    }
}

// Run the program line by line, dispatching on the first 4 bytes
// ("mask" vs "mem[") compared as native-endian u32s.
fn execute(state: *State, inputStr: []const u8, comptime version: Version) void {
    var reader = lines(inputStr);
    while (reader.next()) |line| {
        switch (std.mem.readIntSliceNative(u32, line[0..4])) {
            std.mem.readIntSliceNative(u32, "mask") => {
                // "mask = XXXX..." — the mask string starts at byte 7.
                setMask(state, line[7..]);
            },
            std.mem.readIntSliceNative(u32, "mem[") => {
                // "mem[<addr>] = <value>"
                const addrEnd = std.mem.indexOf(u8, line[4..], "]").?;
                const addr = std.fmt.parseUnsigned(u64, line[4..(4 + addrEnd)], 10) catch @panic("invalid address");
                const value = std.fmt.parseUnsigned(u64, line[(4 + addrEnd + 4)..], 10) catch @panic("invalid value");
                writeMemory(state, addr, value, version);
            },
            else => @panic("invalid instruction"),
        }
    }
}

// Execute the whole program and sum every value left in memory.
fn getSum(allocator: *std.mem.Allocator, inputStr: []const u8, comptime version: Version) u64 {
    var s = State.init(allocator);
    execute(&s, inputStr, version);

    var sum: u64 = 0;
    var it = s.memory.iterator();
    while (it.next()) |p| {
        sum += @intCast(u64, p.value);
    }
    return sum;
}

const expectEqual = std.testing.expectEqual;

test "getSum" {
    var allocator_state = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer allocator_state.deinit();
    const allocator = &allocator_state.allocator;

    expectEqual(@as(u64, 165), getSum(allocator, testInput1, .v1));
    expectEqual(@as(u64, 208), getSum(allocator, testInput2, .v2));
}

const testInput1 =
    \\mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
    \\mem[8] = 11
    \\mem[7] = 101
    \\mem[8] = 0
;

const testInput2 =
    \\mask = 000000000000000000000000000000X1001X
    \\mem[42] = 100
    \\mask = 00000000000000000000000000000000X0XX
    \\mem[26] = 1
;
src/day14.zig
const c = @import("c.zig");
const nk = @import("../nuklear.zig");
const std = @import("std");

const builtin = std.builtin;
const debug = std.debug;
const heap = std.heap;
const math = std.math;
const mem = std.mem;
const meta = std.meta;
const testing = std.testing;

/// Same as `beginTitled` but instead of taking a `name` which the caller needs to ensure
/// is a unique string, this function generates a unique name for each unique type passed
/// to `id`.
pub fn begin(
    ctx: *nk.Context,
    comptime Id: type,
    flags: nk.PanelFlags,
) bool {
    const id = nk.typeId(Id);
    return beginTitled(ctx, mem.asBytes(&id), flags);
}

/// Start a named group. Returns true if the group is visible and should be
/// filled in; pair with `end` when it returns true.
pub fn beginTitled(
    ctx: *nk.Context,
    name: []const u8,
    flags: nk.PanelFlags,
) bool {
    return c.nk_group_begin_titled(
        ctx,
        nk.slice(name),
        nk.slice(flags.title orelse ""),
        flags.toNuklear(),
    ) != 0;
}

pub fn end(ctx: *nk.Context) void {
    c.nk_group_end(ctx);
}

/// Start a group whose scroll offset is tracked by the caller-owned `offset`.
/// FIX: the original referenced `flags` without declaring it (a compile
/// error); the `flags` parameter is added to match `scrolledBegin`.
pub fn scrolledOffsetBegin(
    ctx: *nk.Context,
    offset: *nk.ScrollOffset,
    comptime Id: type,
    flags: nk.PanelFlags,
) bool {
    const id = nk.typeId(Id);
    var c_x_offset: c.nk_uint = @intCast(c.nk_uint, offset.x);
    var c_y_offset: c.nk_uint = @intCast(c.nk_uint, offset.y);
    // Write back whatever nuklear did to the offsets, even on early return.
    defer {
        offset.x = @intCast(usize, c_x_offset);
        offset.y = @intCast(usize, c_y_offset);
    }
    return c.nk_group_scrolled_offset_begin(
        ctx,
        &c_x_offset,
        &c_y_offset,
        nk.slice(flags.title orelse mem.asBytes(&id)),
        flags.toNuklear(),
    ) != 0;
}

/// Start a group using a nuklear-owned `nk.Scroll` for offset storage.
pub fn scrolledBegin(
    ctx: *nk.Context,
    off: *nk.Scroll,
    comptime Id: type,
    flags: nk.PanelFlags,
) bool {
    const id = nk.typeId(Id);
    return c.nk_group_scrolled_begin(
        ctx,
        off,
        nk.slice(flags.title orelse mem.asBytes(&id)),
        flags.toNuklear(),
    ) != 0;
}

pub fn scrolledEnd(ctx: *nk.Context) void {
    c.nk_group_scrolled_end(ctx);
}

/// Read the scroll offset of the group identified by type `Id`.
pub fn getScroll(ctx: *nk.Context, comptime Id: type) nk.ScrollOffset {
    const id = nk.typeId(Id);
    return getScrollByName(ctx, mem.asBytes(&id));
}

pub fn getScrollByName(ctx: *nk.Context, name: []const u8) nk.ScrollOffset {
    var x_offset: c.nk_uint = undefined;
    var y_offset: c.nk_uint = undefined;
    c.nk_group_get_scroll(ctx, nk.slice(name), &x_offset, &y_offset);
    return .{
        .x = x_offset,
        .y = y_offset,
    };
}

/// Set the scroll offset of the group identified by type `Id`.
pub fn setScroll(ctx: *nk.Context, comptime Id: type, offset: nk.ScrollOffset) void {
    const id = nk.typeId(Id);
    return setScrollByName(ctx, mem.asBytes(&id), offset);
}

pub fn setScrollByName(
    ctx: *nk.Context,
    name: []const u8,
    offset: nk.ScrollOffset,
) void {
    c.nk_group_set_scroll(
        ctx,
        nk.slice(name),
        @intCast(c.nk_uint, offset.x),
        @intCast(c.nk_uint, offset.y),
    );
}

test {
    testing.refAllDecls(@This());
}

test "group" {
    var ctx = &try nk.testing.init();
    defer nk.free(ctx);

    if (nk.window.begin(ctx, opaque {}, nk.rect(10, 10, 10, 10), .{})) |win| {
        nk.layout.rowDynamic(ctx, 10, 1);
        if (nk.group.begin(ctx, opaque {}, .{})) {
            defer nk.group.end(ctx);
        }
    }
    nk.window.end(ctx);
}
src/group.zig
// Demo of Zig's (pre-0.11) evented async: spawn one CPU-bound worker per
// logical core and race them to find a u64 whose low 24 bits are zero.
const std = @import("std");

pub const io_mode = .evented;

pub fn main() !void {
    var cpu: u64 = try std.Thread.getCpuCount();
    std.debug.print("CPUS online... {d} \n", .{cpu});

    // Allocate room for an async frame for every
    // logical cpu core
    var promises = try std.heap.page_allocator.alloc(@Frame(worker), cpu);
    defer std.heap.page_allocator.free(promises);

    // Start a worker on every cpu
    var completion_token: bool = false;
    while (cpu > 0) : (cpu -= 1) {
        // Seed each worker with its (1-based) cpu index so PRNG streams differ.
        promises[cpu - 1] = async worker(cpu, &completion_token);
    }
    std.debug.print("Working...\n", .{});

    // Wait for a worker to find the solution. Losing workers return 0 and
    // are awaited too, so every frame completes before main exits.
    for (promises) |*future| {
        var result = await future;
        if (result != 0) {
            std.debug.print("The answer is {x}\n", .{result});
        }
    }
}

// Search random u64s until one has its low 24 bits clear, or until another
// worker signals completion. Returns the found value, or 0 if it lost.
fn worker(seed: u64, completion_token: *bool) u64 {
    // Inform the event loop we're cpu bound.
    // This effectively puts a worker on every logical core.
    std.event.Loop.startCpuBoundOperation();

    // Seed the random number generator so each worker
    // look at different numbers
    var prng = std.rand.DefaultPrng.init(seed);
    while (true) {
        var attempt = prng.random.int(u64);

        // We're looking for a number whose N lower bits
        // are zero. Feel free to change the constant to
        // make this take a longer or shorter amount of time.
        if (attempt & 0xffffff == 0) {
            // Tell other workers we're done
            @atomicStore(bool, completion_token, true, std.builtin.AtomicOrder.Release);
            std.debug.print("I found the answer!\n", .{});
            return attempt;
        }

        // Check if another worker has solved it, in which
        // case we stop working on the problem.
        if (@atomicLoad(bool, completion_token, std.builtin.AtomicOrder.Acquire)) {
            std.debug.print("Another worker won\n", .{});
            break;
        }
    }
    return 0;
}
src/main.zig
// Advent of Code 2019, day 23: 50 IntCode computers on a packet network
// with a NAT that restarts the network when it goes idle.
const std = @import("std");
const tools = @import("tools");

const with_trace = false;
const with_dissassemble = false;

const assert = std.debug.assert;
fn trace(comptime fmt: []const u8, args: anytype) void {
    if (with_trace) std.debug.print(fmt, args);
}

const Computer = tools.IntCode_Computer;

/// Boot 50 NICs from the comma-separated IntCode image in `input` and run the
/// network. Answer 1 is the Y of the first packet sent to address 255 (the
/// NAT); answer 2 is the first Y the NAT delivers twice in a row on idle.
pub fn run(input: []const u8, allocator: std.mem.Allocator) tools.RunError![2][]const u8 {
    const int_count = std.mem.count(u8, input, ",") + 1;
    const boot_image = try allocator.alloc(Computer.Data, int_count);
    defer allocator.free(boot_image);
    {
        var it = std.mem.split(u8, input, ",");
        var i: usize = 0;
        while (it.next()) |num| : (i += 1) {
            const trimmed = std.mem.trim(u8, num, " \n\r\t");
            boot_image[i] = try std.fmt.parseInt(Computer.Data, trimmed, 10);
        }
        if (with_dissassemble) Computer.disassemble(boot_image);
    }

    const Packet = [2]Computer.Data;
    // One network interface: the CPU plus partial-packet I/O state.
    const NIC = struct {
        cpu: Computer,
        in_queue: std.ArrayList(Packet),
        in_index: u32, // next element of the head packet to feed as input
        out_packet: Packet,
        out_dest: u32,
        out_index: u32, // 0 = expecting dest, 1..2 = expecting X/Y
    };

    // FIX: these optionals were initialized to `undefined` but are read
    // (`nat != null`, `if (prev_nat) |prev|`) before first assignment, which
    // is undefined behavior. They must start as null.
    var prev_nat: ?Packet = null;
    var nat: ?Packet = null;
    var cpus: [50]NIC = undefined;
    for (cpus) |*c, i| {
        c.cpu = Computer{
            .name = try std.fmt.allocPrint(allocator, "Computer n°{}", .{i}),
            .memory = try allocator.alloc(Computer.Data, 5000),
        };
        c.in_queue = std.ArrayList(Packet).init(allocator);
        c.in_index = 0;
        c.out_packet = undefined;
        c.out_index = 0;
        c.out_dest = undefined;

        c.cpu.boot(boot_image);
        trace("starting {s}\n", .{c.cpu.name});
        _ = async c.cpu.run();

        // The first input each NIC reads is its own network address.
        assert(c.cpu.io_mode == .input);
        c.cpu.io_port = @intCast(Computer.Data, i);
        trace("wrting input to {s} = {}\n", .{ c.cpu.name, c.cpu.io_port });

        trace("resuming {s}\n", .{c.cpu.name});
        resume c.cpu.io_runframe;
    }
    defer {
        for (cpus) |c| {
            c.in_queue.deinit();
            allocator.free(c.cpu.memory);
            allocator.free(c.cpu.name);
        }
    }

    var ans1: ?Computer.Data = null;
    var ans2: ?Computer.Data = null;
    while (true) {
        // The network is idle when no NIC consumed or produced anything
        // this round.
        var net_idle = true;
        for (cpus) |*c| {
            assert(!c.cpu.is_halted());
            if (c.cpu.io_mode == .input) {
                if (c.in_queue.items.len == 0) {
                    // Empty queue reads as -1 per the puzzle protocol.
                    c.cpu.io_port = -1;
                } else {
                    net_idle = false;
                    const p = c.in_queue.items[0];
                    c.cpu.io_port = p[c.in_index];
                    c.in_index += 1;
                    if (c.in_index >= p.len) {
                        c.in_index = 0;
                        _ = c.in_queue.orderedRemove(0);
                    }
                }
                trace("wrting input to {s} = {}\n", .{ c.cpu.name, c.cpu.io_port });
            }
            if (c.cpu.io_mode == .output) {
                net_idle = false;
                trace("{s} outputs {}\n", .{ c.cpu.name, c.cpu.io_port });
                // Outputs arrive as triples: dest, X, Y.
                if (c.out_index == 0) {
                    c.out_dest = @intCast(u32, c.cpu.io_port);
                } else {
                    c.out_packet[c.out_index - 1] = c.cpu.io_port;
                }
                c.out_index += 1;
                if (c.out_index >= c.out_packet.len + 1) {
                    c.out_index = 0;
                    if (c.out_dest >= cpus.len) {
                        // Address 255 (or anything out of range) goes to
                        // the NAT, which keeps only the latest packet.
                        if (ans1 == null) ans1 = c.out_packet[1];
                        nat = c.out_packet;
                    } else {
                        try cpus[c.out_dest].in_queue.append(c.out_packet);
                    }
                }
            }

            trace("resuming {s}\n", .{c.cpu.name});
            resume c.cpu.io_runframe;
        }

        if (net_idle and nat != null) {
            // On idle, the NAT forwards its stored packet to NIC 0; a Y value
            // delivered twice in a row is answer 2.
            if (prev_nat) |prev| {
                if (prev[1] == nat.?[1]) {
                    ans2 = nat.?[1];
                    break;
                }
            }
            try cpus[0].in_queue.append(nat.?);
            prev_nat = nat;
            nat = null;
        }
    }

    return [_][]const u8{
        try std.fmt.allocPrint(allocator, "{}", .{ans1}),
        try std.fmt.allocPrint(allocator, "{}", .{ans2}),
    };
}

pub const main = tools.defaultMain("2019/day23.txt", run);
2019/day23.zig
const std = @import("std");
const fs = std.fs;
const hm = std.hash_map;

/// AoC 2020 day 14, part 1 mask semantics:
/// 'X' keeps the original bit (and_mask bit set, or_mask bit clear),
/// '1' forces the bit on, '0' forces it off.
const Bitmask = struct {
    and_mask: u64 = 0xFFFFFFFFFFFFFFFF,
    or_mask: u64 = 0,

    const Self = @This();

    /// Parse a mask string; str.len is assumed <= 64 (inputs are 36 chars).
    pub fn fromString(str: []const u8) Self {
        const bits = str.len - 1; // shift of the most-significant character
        var am: u64 = 0;
        var om: u64 = 0;
        for (str) |c, i| {
            const shift = @intCast(u6, bits - i);
            if (c == 'X') {
                am |= @as(u64, 1) << shift;
            } else if (c == '1') {
                am |= @as(u64, 1) << shift;
                om |= @as(u64, 1) << shift;
            } else if (c == '0') {
                // bit stays clear in both masks: (v & am) | om forces it to 0
            } else unreachable;
        }
        // std.debug.print("Masks after parse: and = {}, or = {}\n", .{am, om});
        return Self{ .and_mask = am, .or_mask = om };
    }
};

/// Part 2 mask semantics: '1' forces the address bit on (mask),
/// 'X' marks a floating bit (floating) expanded to both 0 and 1,
/// '0' leaves the address bit untouched.
const Bitmask2 = struct {
    mask: u64 = 0,
    floating: u64 = 0,

    const Self = @This();

    pub fn fromString(str: []const u8) Self {
        const bits = str.len - 1;
        var f: u64 = 0;
        var m: u64 = 0;
        for (str) |c, i| {
            const shift = @intCast(u6, bits - i);
            if (c == 'X') {
                f |= @as(u64, 1) << shift;
            } else if (c == '1') {
                m |= @as(u64, 1) << shift;
            } else if (c == '0') {
                // untouched bits need no mask entry
            } else unreachable;
        }
        return Self{ .mask = m, .floating = f };
    }
};

const eql_fn = hm.getAutoEqlFn(u64);
const hash_fn = hm.getAutoHashFn(u64);

const mask_prefix = "mask";
const mem_prefix = "mem[";

/// Reads data/input_14_1.txt and prints both puzzle answers to stderr.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = &gpa.allocator;

    const input = try fs.cwd().readFileAlloc(allocator, "data/input_14_1.txt", std.math.maxInt(usize));

    { // Solution one: mask applies to the stored value.
        var lines = std.mem.tokenize(input, "\n");
        var memory = hm.HashMap(u64, u64, hash_fn, eql_fn, hm.DefaultMaxLoadPercentage).init(allocator);
        defer memory.deinit();
        var mask = Bitmask{};
        while (lines.next()) |raw_line| {
            const line = std.mem.trim(u8, raw_line, " \r\n");
            if (line.len == 0) break;
            var op_val_it = std.mem.tokenize(line, "=");
            const op = std.mem.trim(u8, op_val_it.next().?, " \r\n");
            const val = std.mem.trim(u8, op_val_it.next().?, " \r\n");
            std.debug.assert(op.len >= mask_prefix.len and op.len >= mem_prefix.len);
            if (std.mem.eql(u8, mask_prefix, op[0..mask_prefix.len])) {
                // std.debug.print("Found mask: \"{}\"\n", .{val});
                mask = Bitmask.fromString(val);
            } else if (std.mem.eql(u8, mem_prefix, op[0..mem_prefix.len])) {
                var address_it = std.mem.tokenize(op[mem_prefix.len..], "]");
                var address = try std.fmt.parseInt(u64, address_it.next().?, 10);
                var value = try std.fmt.parseInt(u64, val, 10);
                // std.debug.print("Found memory[{}] = {}\n", .{address, value});
                // std.debug.print("Value after mask: {}\n", .{(value & mask.and_mask) | mask.or_mask});
                try memory.put(address, (value & mask.and_mask) | mask.or_mask);
            } else unreachable;
        }
        var it = memory.iterator();
        var sum: u64 = 0;
        while (it.next()) |kv| {
            sum += kv.value;
        }
        std.debug.print("Day 14 - Solution 1: {}\n", .{sum});
    }

    { // Solution two: mask applies to the address, with floating bits.
        var lines = std.mem.tokenize(input, "\n");
        var memory = hm.HashMap(u64, u64, hash_fn, eql_fn, hm.DefaultMaxLoadPercentage).init(allocator);
        defer memory.deinit();
        var addresses = std.ArrayList(u64).init(allocator);
        defer addresses.deinit();
        var mask = Bitmask2{};
        while (lines.next()) |raw_line| {
            const line = std.mem.trim(u8, raw_line, " \r\n");
            if (line.len == 0) break;
            var op_val_it = std.mem.tokenize(line, "=");
            const op = std.mem.trim(u8, op_val_it.next().?, " \r\n");
            const val = std.mem.trim(u8, op_val_it.next().?, " \r\n");
            std.debug.assert(op.len >= mask_prefix.len and op.len >= mem_prefix.len);
            if (std.mem.eql(u8, mask_prefix, op[0..mask_prefix.len])) {
                // std.debug.print("Found mask: \"{}\"\n", .{val});
                mask = Bitmask2.fromString(val);
            } else if (std.mem.eql(u8, mem_prefix, op[0..mem_prefix.len])) {
                var address_it = std.mem.tokenize(op[mem_prefix.len..], "]");
                var address = try std.fmt.parseInt(u64, address_it.next().?, 10);
                var value = try std.fmt.parseInt(u64, val, 10);
                try addresses.resize(0);
                // Base address: forced bits set, floating bits cleared.
                try addresses.append((address | mask.mask) & ~mask.floating);
                if (mask.floating != 0) {
                    var i: usize = 0;
                    while (i < 36) : (i += 1) {
                        var m = @as(u64, 1) << @intCast(u6, i);
                        if ((mask.floating & m) != 0) {
                            // Double the address set: for every existing
                            // address, also store the one with this bit set.
                            const len = addresses.items.len;
                            // BUGFIX: the original iterated a captured
                            // `addresses.items` slice while appending to the
                            // same list; append may reallocate and leave that
                            // slice dangling (use-after-free). Index up to
                            // the pre-append length instead - re-reading
                            // `addresses.items` each iteration stays valid.
                            var j: usize = 0;
                            while (j < len) : (j += 1) {
                                try addresses.append(addresses.items[j] + m);
                            }
                            std.debug.assert(len * 2 == addresses.items.len);
                            std.debug.assert(addresses.items.len > 0);
                        }
                    }
                }
                for (addresses.items) |a| {
                    try memory.put(a, value);
                }
            } else unreachable;
        }
        var it = memory.iterator();
        var sum: u64 = 0;
        while (it.next()) |kv| {
            sum += kv.value;
        }
        std.debug.print("Day 14 - Solution 2: {}\n", .{sum});
    }
}
2020/src/day_14.zig